hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
8c9fb60fd0bc3e030f6b414e4ab4c1958dd7024c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** @file gpuinflate.cu
Derived from zlib's contrib/puff.c, original copyright notice below
*/
/*
Copyright (C) 2002-2013 Mark Adler, all rights reserved
version 2.3, 21 Jan 2013
This software is provided 'as-is', without any express or implied
warranty. In no event will the author be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Mark Adler madler@alumni.caltech.edu
*/
#include "gpuinflate.h"
#include "io_uncomp.h"
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace io {
constexpr int max_bits    = 15;   // maximum bits in a code
constexpr int max_l_codes = 286;  // maximum number of literal/length codes
constexpr int max_d_codes = 30;   // maximum number of distance codes
constexpr int fix_l_codes = 288;  // number of fixed literal/length codes
constexpr int log2_len_lut  = 10;  // log2 of the fast length-decoding LUT size (1024 entries)
constexpr int log2_dist_lut = 8;   // log2 of the fast distance-decoding LUT size (256 entries)
/**
 * @brief Intermediate arrays for building huffman tables
 *
 * Lives in the u.scratch member of inflate_state_s; only valid while tables
 * are being constructed (the union aliases it with the decode LUTs).
 */
struct scratch_arr {
  int16_t lengths[max_l_codes + max_d_codes];  ///< descriptor code lengths
  int16_t offs[max_bits + 1];  ///< offset in symbol table for each length (scratch)
};
/**
 * @brief Huffman LUTs for length and distance codes
 *
 * Indexed by the next log2_len_lut / log2_dist_lut input bits; entries are
 * packed as (symbol << 5) | code_length (see init_length_lut / init_distance_lut).
 */
struct lut_arr {
  int32_t lenlut[1 << log2_len_lut];    ///< LUT for length decoding
  int32_t distlut[1 << log2_dist_lut];  ///< LUT for fast distance decoding
};
/// 4 batches of 32 symbols
constexpr int log2_batch_count = 2;  // 1..5
constexpr int log2_batch_size  = 5;
constexpr int batch_count      = (1 << log2_batch_count);  // number of symbol-queue batches (4)
constexpr int batch_size       = (1 << log2_batch_size);   // symbols per batch (32)
/**
 * @brief Inter-warp communication queue
 *
 * The decoder (decode_symbols) publishes batches of decoded symbols through
 * symqueue and signals their size via batch_len; the output warp
 * (process_symbols) consumes them and resets batch_len to 0.
 */
struct xwarp_s {
  int32_t batch_len[batch_count];  ///< Length of each batch - <0:end, 0:not ready, >0:symbol count
  union {
    uint32_t symqueue[batch_count * batch_size];     ///< symbol queue, 32-bit view
    uint8_t symqueue8[batch_count * batch_size * 4]; ///< same storage, byte view
  } u;
};
#define ENABLE_PREFETCH 1
#if ENABLE_PREFETCH
constexpr int log2_prefetch_size = 9;  // Must be at least LOG2_BATCH_SIZE+3
constexpr int prefetch_size      = (1 << log2_prefetch_size);  // prefetch ring-buffer size in bytes
/// @brief Prefetcher state
struct prefetch_queue_s {
  const uint8_t* cur_p;  ///< Prefetch location
  int run;               ///< prefetcher will exit when run=0
  uint8_t pref_data[prefetch_size];  ///< circular buffer of prefetched input words
};
/// @brief Map an input pointer to its 32-bit slot in the circular prefetch buffer.
/// The mask (prefetch_size - 4) keeps the pointer's offset within the ring
/// rounded down to 4-byte alignment.
template <typename T>
inline __device__ volatile uint32_t* prefetch_addr32(volatile prefetch_queue_s& q, T* ptr)
{
  return reinterpret_cast<volatile uint32_t*>(&q.pref_data[(prefetch_size - 4) & (size_t)(ptr)]);
}
#endif // ENABLE_PREFETCH
/**
 * @brief Inflate decompressor state
 */
struct inflate_state_s {
  // output state
  uint8_t* out;      ///< output buffer
  uint8_t* outbase;  ///< start of output buffer
  uint8_t* outend;   ///< end of output buffer
  // Input state
  uint8_t* cur;     ///< input buffer
  uint8_t* end;     ///< end of input buffer
  uint2 bitbuf;     ///< bit buffer (64-bit)
  uint32_t bitpos;  ///< position in bit buffer
  int32_t err;              ///< Error status
  int btype;                ///< current block type
  int blast;                ///< last block
  uint32_t stored_blk_len;  ///< length of stored (uncompressed) block
  uint16_t first_slow_len;   ///< first code not in fast LUT
  uint16_t index_slow_len;   ///< lensym offset of the first code not in the fast LUT
  uint16_t first_slow_dist;  ///< first distance code not in the fast LUT
  uint16_t index_slow_dist;  ///< distsym offset of the first code not in the fast LUT
  volatile xwarp_s x;  ///< inter-warp symbol queue
#if ENABLE_PREFETCH
  volatile prefetch_queue_s pref;  ///< input prefetch ring buffer
#endif
  int16_t lencnt[max_bits + 1];   ///< number of literal/length codes of each code length
  int16_t lensym[fix_l_codes];    // Assumes fix_l_codes >= max_l_codes
  int16_t distcnt[max_bits + 1];  ///< number of distance codes of each code length
  int16_t distsym[max_d_codes];   ///< distance code symbols sorted by code length
  union {
    scratch_arr scratch;  ///< table-building scratch (valid only during init)
    lut_arr lut;          ///< fast decode LUTs (valid only during decode)
  } u;
};
/**
 * @brief Extract a bit field from a 32-bit word using the PTX `bfe.u32` instruction.
 *
 * @param source Word to extract from
 * @param bit_start Bit position of the field's least significant bit
 * @param num_bits Width of the field in bits
 * @return The zero-extended bit field
 */
inline __device__ unsigned int bfe(unsigned int source,
                                   unsigned int bit_start,
                                   unsigned int num_bits)
{
  unsigned int bits;
  asm("bfe.u32 %0, %1, %2, %3;" : "=r"(bits) : "r"(source), "r"(bit_start), "r"(num_bits));
  return bits;
}  // stray trailing ';' after the function body removed
/// @brief Return the next n bits of the input stream without consuming them (n must be < 32)
inline __device__ uint32_t showbits(inflate_state_s* s, uint32_t n)
{
  uint32_t next32 = __funnelshift_rc(s->bitbuf.x, s->bitbuf.y, s->bitpos);
  return (next32 & ((1 << n) - 1));
}
/// @brief Return the next 32 bits of the input stream without consuming them
inline __device__ uint32_t nextbits32(inflate_state_s* s)
{
  return __funnelshift_rc(s->bitbuf.x, s->bitbuf.y, s->bitpos);
}
/// @brief Consume n bits of the input stream, refilling the 64-bit bit buffer when
/// the low word is exhausted. Maintains the invariant that bitbuf.y holds the
/// 32 input bits following bitbuf.x, with s->cur tracking bitbuf.x's source word.
inline __device__ void skipbits(inflate_state_s* s, uint32_t n)
{
  uint32_t bitpos = s->bitpos + n;
  if (bitpos >= 32) {
    // Shift the high word down and load the next 32 bits (zero past end of input)
    uint8_t* cur = s->cur + 8;
    s->bitbuf.x  = s->bitbuf.y;
    s->bitbuf.y  = (cur < s->end) ? *reinterpret_cast<uint32_t*>(cur) : 0;
    s->cur       = cur - 4;
    bitpos &= 0x1f;
  }
  s->bitpos = bitpos;
}
// TODO: If we require 4-byte alignment of input bitstream & length (padded), reading bits would
// become quite a bit faster
/// @brief Read and consume the next n bits of the input stream (n must be < 32)
__device__ uint32_t getbits(inflate_state_s* s, uint32_t n)
{
  uint32_t v = showbits(s, n);
  skipbits(s, n);
  return v;
}
/**
* @brief Decode a code from the stream s using huffman table {symbols,counts}.
* Return the symbol or a negative value if there is an error.
* If all of the lengths are zero, i.e. an empty code, or if the code is
* incomplete and an invalid code is received, then -10 is returned after
* reading max_bits bits.
*
* Format notes:
*
* - The codes as stored in the compressed data are bit-reversed relative to
* a simple integer ordering of codes of the same lengths. Hence below the
* bits are pulled from the compressed data one at a time and used to
* build the code value reversed from what is in the stream in order to
* permit simple integer comparisons for decoding. A table-based decoding
* scheme (as used in zlib) does not need to do this reversal.
*
* - The first code for the shortest length is all zeros. Subsequent codes of
* the same length are simply integer increments of the previous code. When
* moving up a length, a zero bit is appended to the code. For a complete
* code, the last code of the longest length will be all ones.
*
* - Incomplete codes are handled by this decoder, since they are permitted
* in the deflate format. See the format notes for fixed() and dynamic().
*/
__device__ int decode(inflate_state_s* s, const int16_t* counts, const int16_t* symbols)
{
  unsigned int len;    // current number of bits in code
  unsigned int code;   // len bits being decoded
  unsigned int first;  // first code of length len
  unsigned int count;  // number of codes of length len
  // Bit-reverse the next 32 bits so canonical codes compare as plain integers
  uint32_t next32r = __brev(nextbits32(s));
  first = 0;
  for (len = 1; len <= max_bits; len++) {
    code  = (next32r >> (32 - len)) - first;
    count = counts[len];
    if (code < count)  // if length len, return symbol
    {
      skipbits(s, len);
      return symbols[code];
    }
    symbols += count;  // else update for next length
    first += count;
    first <<= 1;
  }
  return -10;  // ran out of codes
}
/**
* @brief Given the list of code lengths length[0..n-1] representing a canonical
* Huffman code for n symbols, construct the tables required to decode those
* codes. Those tables are the number of codes of each length, and the symbols
* sorted by length, retaining their original order within each length. The
* return value is zero for a complete code set, negative for an over-
* subscribed code set, and positive for an incomplete code set. The tables
* can be used if the return value is zero or positive, but they cannot be used
* if the return value is negative. If the return value is zero, it is not
* possible for decode() using that table to return an error--any stream of
* enough bits will resolve to a symbol. If the return value is positive, then
* it is possible for decode() using that table to return an error for received
* codes past the end of the incomplete lengths.
*
* Not used by decode(), but used for error checking, count[0] is the number
* of the n symbols not in the code. So n - count[0] is the number of
* codes. This is useful for checking for incomplete codes that have more than
* one symbol, which is an error in a dynamic block.
*
* Assumption: for all i in 0..n-1, 0 <= length[i] <= max_bits
* This is assured by the construction of the length arrays in dynamic() and
* fixed() and is not verified by construct().
*
* Format notes:
*
* - Permitted and expected examples of incomplete codes are one of the fixed
* codes and any code with a single symbol which in deflate is coded as one
* bit instead of zero bits. See the format notes for fixed() and dynamic().
*
* - Within a given code length, the symbols are kept in ascending order for
* the code bits definition.
*/
__device__ int construct(
  inflate_state_s* s, int16_t* counts, int16_t* symbols, const int16_t* length, int n)
{
  int symbol;  // current symbol when stepping through length[]
  int len;     // current length when stepping through counts[]
  int left;    // number of possible codes left of current length
  // offs lives in the scratch union (aliased with the decode LUTs)
  int16_t* offs = s->u.scratch.offs;
  // count number of codes of each length
  for (len = 0; len <= max_bits; len++)
    counts[len] = 0;
  for (symbol = 0; symbol < n; symbol++)
    (counts[length[symbol]])++;  // assumes lengths are within bounds
  if (counts[0] == n)  // no codes!
    return 0;          // complete, but decode() will fail
  // check for an over-subscribed or incomplete set of lengths
  left = 1;  // one possible code of zero length
  for (len = 1; len <= max_bits; len++) {
    left <<= 1;                 // one more bit, double codes left
    left -= counts[len];        // deduct count from possible codes
    if (left < 0) return left;  // over-subscribed--return negative
  }                             // left > 0 means incomplete
  // generate offsets into symbol table for each length for sorting
  offs[1] = 0;
  for (len = 1; len < max_bits; len++)
    offs[len + 1] = offs[len] + counts[len];
  // put symbols in table sorted by length, by symbol order within each length
  for (symbol = 0; symbol < n; symbol++)
    if (length[symbol] != 0) symbols[offs[length[symbol]]++] = symbol;
  // return zero for complete set, positive for incomplete set
  return left;
}
/// permutation of code length codes (RFC 1951 section 3.2.7 order)
// NOTE(review): the trailing 0xff entry appears unused by the loops below (index < 19)
static const __device__ __constant__ uint8_t g_code_order[19 + 1] = {
  16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15, 0xff};
/**
 * @brief Initializes a dynamic block (custom huffman tables).
 *
 * Reads the code-length descriptor from the bitstream and builds the
 * literal/length tables (s->lencnt/lensym) and distance tables
 * (s->distcnt/distsym).
 *
 * @return 0 on success, negative error code on a malformed descriptor
 */
__device__ int init_dynamic(inflate_state_s* s)
{
  int nlen, ndist, ncode; /* number of lengths in descriptor */
  int index;              /* index of lengths[] */
  int err;                /* construct() return value */
  int16_t* lengths = s->u.scratch.lengths;
  // get number of lengths in each table, check lengths
  nlen  = getbits(s, 5) + 257;
  ndist = getbits(s, 5) + 1;
  ncode = getbits(s, 4) + 4;
  if (nlen > max_l_codes || ndist > max_d_codes) {
    return -3;  // bad counts
  }
  // read code length code lengths (really), missing lengths are zero
  for (index = 0; index < ncode; index++)
    lengths[g_code_order[index]] = getbits(s, 3);
  for (; index < 19; index++)
    lengths[g_code_order[index]] = 0;
  // build huffman table for code lengths codes (use lencode temporarily)
  err = construct(s, s->lencnt, s->lensym, lengths, 19);
  if (err != 0)  // require complete code set here
    return -4;
  // read length/literal and distance code length tables
  index = 0;
  while (index < nlen + ndist) {
    int symbol = decode(s, s->lencnt, s->lensym);
    if (symbol < 0) return symbol;  // invalid symbol
    if (symbol < 16)                // length in 0..15
      lengths[index++] = symbol;
    else {           // repeat instruction
      int len = 0;   // last length to repeat, assume repeating zeros
      if (symbol == 16) {  // repeat last length 3..6 times
        if (index == 0) return -5;    // no last length!
        len    = lengths[index - 1];  // last length
        symbol = 3 + getbits(s, 2);
      } else if (symbol == 17)  // repeat zero 3..10 times
        symbol = 3 + getbits(s, 3);
      else  // == 18, repeat zero 11..138 times
        symbol = 11 + getbits(s, 7);
      if (index + symbol > nlen + ndist) return -6;  // too many lengths!
      while (symbol--)  // repeat last or zero symbol times
        lengths[index++] = len;
    }
  }
  // check for end-of-block code -- there better be one!
  if (lengths[256] == 0) return -9;
  // build huffman table for literal/length codes
  err = construct(s, s->lencnt, s->lensym, lengths, nlen);
  if (err && (err < 0 || nlen != s->lencnt[0] + s->lencnt[1]))
    return -7;  // incomplete code ok only for single length 1 code
  // build huffman table for distance codes
  err = construct(s, s->distcnt, s->distsym, &lengths[nlen], ndist);
  if (err && (err < 0 || ndist != s->distcnt[0] + s->distcnt[1]))
    return -8;  // incomplete code ok only for single length 1 code
  return 0;
}
/**
* @brief Initializes a fixed codes block.
*
* Format notes:
*
* - This block type can be useful for compressing small amounts of data for
* which the size of the code descriptions in a dynamic block exceeds the
* benefit of custom codes for that block. For fixed codes, no bits are
* spent on code descriptions. Instead the code lengths for literal/length
* codes and distance codes are fixed. The specific lengths for each symbol
* can be seen in the "for" loops below.
*
* - The literal/length code is complete, but has two symbols that are invalid
* and should result in an error if received. This cannot be implemented
* simply as an incomplete code since those two symbols are in the "middle"
* of the code. They are eight bits long and the longest literal/length\
* code is nine bits. Therefore the code must be constructed with those
* symbols, and the invalid symbols must be detected after decoding.
*
* - The fixed distance codes also have two invalid symbols that should result
* in an error if received. Since all of the distance codes are the same
* length, this can be implemented as an incomplete code. Then the invalid
* codes are detected while decoding.
*/
__device__ int init_fixed(inflate_state_s* s)
{
  int16_t* lengths = s->u.scratch.lengths;
  int symbol;
  // literal/length table (fixed code lengths per RFC 1951 section 3.2.6)
  for (symbol = 0; symbol < 144; symbol++)
    lengths[symbol] = 8;
  for (; symbol < 256; symbol++)
    lengths[symbol] = 9;
  for (; symbol < 280; symbol++)
    lengths[symbol] = 7;
  for (; symbol < fix_l_codes; symbol++)
    lengths[symbol] = 8;
  construct(s, s->lencnt, s->lensym, lengths, fix_l_codes);
  // distance table: all codes are 5 bits (deliberately incomplete set;
  // invalid codes are detected while decoding — see the notes above)
  for (symbol = 0; symbol < max_d_codes; symbol++)
    lengths[symbol] = 5;
  // build huffman table for distance codes
  construct(s, s->distcnt, s->distsym, lengths, max_d_codes);
  return 0;
}
/**
* @brief Decode literal/length and distance codes until an end-of-block code.
*
* Format notes:
*
* - Compressed data that is after the block type if fixed or after the code
* description if dynamic is a combination of literals and length/distance
* pairs terminated by and end-of-block code. Literals are simply Huffman
* coded bytes. A length/distance pair is a coded length followed by a
* coded distance to represent a string that occurs earlier in the
* uncompressed data that occurs again at the current location.
*
* - Literals, lengths, and the end-of-block code are combined into a single
* code of up to 286 symbols. They are 256 literals (0..255), 29 length
* symbols (257..285), and the end-of-block symbol (256).
*
* - There are 256 possible lengths (3..258), and so 29 symbols are not enough
* to represent all of those. Lengths 3..10 and 258 are in fact represented
* by just a length symbol. Lengths 11..257 are represented as a symbol and
* some number of extra bits that are added as an integer to the base length
* of the length symbol. The number of extra bits is determined by the base
* length symbol. These are in the static arrays below, lens[] for the base
* lengths and lext[] for the corresponding number of extra bits.
*
* - The reason that 258 gets its own symbol is that the longest length is used
* often in highly redundant files. Note that 258 can also be coded as the
* base value 227 plus the maximum extra value of 31. While a good deflate
* should never do this, it is not an error, and should be decoded properly.
*
* - If a length is decoded, including its extra bits if any, then it is
* followed a distance code. There are up to 30 distance symbols. Again
* there are many more possible distances (1..32768), so extra bits are added
* to a base value represented by the symbol. The distances 1..4 get their
* own symbol, but the rest require extra bits. The base distances and
* corresponding number of extra bits are below in the static arrays dist[]
* and dext[].
*
* - Literal bytes are simply written to the output. A length/distance pair is
* an instruction to copy previously uncompressed bytes to the output. The
* copy is from distance bytes back in the output stream, copying for length
* bytes.
*
* - Distances pointing before the beginning of the output data are not
* permitted.
*
* - Overlapped copies, where the length is greater than the distance, are
* allowed and common. For example, a distance of one and a length of 258
* simply copies the last byte 258 times. A distance of four and a length of
* twelve copies the last four bytes three times. A simple forward copy
* ignoring whether the length is greater than the distance or not implements
* this correctly. You should not use memcpy() since its behavior is not
* defined for overlapped arrays. You should not use memmove() or bcopy()
* since though their behavior -is- defined for overlapping arrays, it is
* defined to do the wrong thing in this case.
*/
/// Base values and extra-bit counts for length/distance codes (RFC 1951 section 3.2.5)
static const __device__ __constant__ uint16_t g_lens[29] = {  // Size base for length codes 257..285
  3,  4,  5,  6,  7,  8,  9,  10, 11,  13,  15,  17,  19,  23, 27,
  31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258};
static const __device__ __constant__ uint16_t
  g_lext[29] = {  // Extra bits for length codes 257..285
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0};
static const __device__ __constant__ uint16_t
  g_dists[30] = {  // Offset base for distance codes 0..29
    1,   2,   3,   4,   5,    7,    9,    13,   17,   25,   33,   49,   65,    97,    129,
    193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577};
static const __device__ __constant__ uint16_t g_dext[30] = {  // Extra bits for distance codes 0..29
  0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
/**
 * @brief Thread 0 only: decode bitstreams and output symbols into the symbol queue
 *
 * Publishes decoded symbols in batches through s->x.u.symqueue; each batch's
 * size goes to s->x.batch_len (0 = not ready, >0 = symbol count, -1 = end of
 * stream). Literals are queued as values < 256; length/distance pairs are
 * packed as ((distance << 16) | (256 + length)).
 */
__device__ void decode_symbols(inflate_state_s* s)
{
  // Local copies of the bitstream state, committed back at the end
  uint32_t bitpos = s->bitpos;
  uint2 bitbuf    = s->bitbuf;
  uint8_t* cur    = s->cur;
  uint8_t* end    = s->end;
  int32_t batch   = 0;
  int32_t sym, batch_len;
  do {
    volatile uint32_t* b = &s->x.u.symqueue[batch * batch_size];
    // Wait for the next batch entry to be empty
#if ENABLE_PREFETCH
    // Wait for prefetcher to fetch a worst-case of 48 bits per symbol
    while ((*(volatile int32_t*)&s->pref.cur_p - (int32_t)(size_t)cur < batch_size * 6) ||
           (s->x.batch_len[batch] != 0)) {}
#else
    while (s->x.batch_len[batch] != 0) {}
#endif
    batch_len = 0;
#if ENABLE_PREFETCH
    if (cur + (bitpos >> 3) >= end) {
      s->err = 1;
      break;
    }
#endif
    // Inner loop decoding symbols
    do {
      uint32_t next32 = __funnelshift_rc(bitbuf.x, bitbuf.y, bitpos);  // nextbits32(s);
      uint32_t len;
      sym = s->u.lut.lenlut[next32 & ((1 << log2_len_lut) - 1)];
      if ((uint32_t)sym < (uint32_t)(0x100 << 5)) {
        // We can lookup a second symbol if this was a short literal
        len = sym & 0x1f;
        sym >>= 5;
        b[batch_len++] = sym;
        next32 >>= len;
        bitpos += len;
        sym = s->u.lut.lenlut[next32 & ((1 << log2_len_lut) - 1)];
      }
      if (sym > 0)  // short symbol
      {
        // LUT entry packs base value, extra-bit mask and shift (see init_length_lut)
        len = sym & 0x1f;
        sym = ((sym >> 5) & 0x3ff) + ((next32 >> (sym >> 24)) & ((sym >> 16) & 0x1f));
      } else {
        // Slow length path: resume canonical decode past the LUT-covered lengths
        uint32_t next32r       = __brev(next32);
        const int16_t* symbols = &s->lensym[s->index_slow_len];
        unsigned int first     = s->first_slow_len;
        int lext;
#pragma unroll 1
        for (len = log2_len_lut + 1; len <= max_bits; len++) {
          unsigned int code  = (next32r >> (32 - len)) - first;
          unsigned int count = s->lencnt[len];
          if (code < count)  // if length len, return symbol
          {
            sym = symbols[code];
            break;
          }
          symbols += count;  // else update for next length
          first += count;
          first <<= 1;
        }
        if (len > max_bits) {
          s->err = -10;
          sym    = 256;
          len    = 0;
        }
        if (sym > 256) {
          // Length symbol: add base length and extra bits
          sym -= 257;
          lext = g_lext[sym];
          sym  = 256 + g_lens[sym] + bfe(next32, len, lext);
          len += lext;
        }
      }
      if (sym > 256) {
        int dist, dext;
        // skipbits(s, len) inlined - no limit check
        bitpos += len;
        if (bitpos >= 32) {
          bitbuf.x = bitbuf.y;
#if ENABLE_PREFETCH
          bitbuf.y = *prefetch_addr32(s->pref, cur + 8);
          cur += 4;
#else
          cur += 8;
          bitbuf.y = (cur < end) ? *(const uint32_t*)cur : 0;
          cur -= 4;
#endif
          bitpos &= 0x1f;
        }
        // get distance
        next32 = __funnelshift_rc(bitbuf.x, bitbuf.y, bitpos);  // nextbits32(s);
        dist   = s->u.lut.distlut[next32 & ((1 << log2_dist_lut) - 1)];
        if (dist > 0) {
          // Fast path: unpack base distance / extra bits from the LUT entry
          len  = dist & 0x1f;
          dext = bfe(dist, 20, 5);
          dist = bfe(dist, 5, 15);
          sym |= (dist + bfe(next32, len, dext)) << 16;
          len += dext;
        } else {
          // Slow distance path: canonical decode past the LUT-covered lengths
          uint32_t next32r       = __brev(next32);
          const int16_t* symbols = &s->distsym[s->index_slow_dist];
          unsigned int first     = s->first_slow_dist;
#pragma unroll 1
          for (len = log2_dist_lut + 1; len <= max_bits; len++) {
            unsigned int code  = (next32r >> (32 - len)) - first;
            unsigned int count = s->distcnt[len];
            if (code < count)  // if length len, return symbol
            {
              dist = symbols[code];
              break;
            }
            symbols += count;  // else update for next length
            first += count;
            first <<= 1;
          }
          if (len > max_bits) {
            s->err = -10;
            sym    = 256;
            len    = 0;
          } else {
            dext = g_dext[dist];
            sym |= (g_dists[dist] + bfe(next32, len, dext)) << 16;
            len += dext;
          }
        }
      }
      // skipbits(s, len) inlined with added error check for reading past the end of the input
      // buffer
      bitpos += len;
      if (bitpos >= 32) {
        bitbuf.x = bitbuf.y;
#if ENABLE_PREFETCH
        bitbuf.y = *prefetch_addr32(s->pref, cur + 8);
        cur += 4;
#else
        cur += 8;
        if (cur < end) {
          bitbuf.y = *(const uint32_t*)cur;
          cur -= 4;
        } else {
          bitbuf.y = 0;
          cur -= 4;
          if (cur > end) {
            s->err = 1;
            sym    = 256;
          }
        }
#endif
        bitpos &= 0x1f;
      }
      if (sym == 256) break;  // end-of-block
      b[batch_len++] = sym;
    } while (batch_len < batch_size - 1);
    // Publish the batch to the output warp
    s->x.batch_len[batch] = batch_len;
#if ENABLE_PREFETCH
    ((volatile inflate_state_s*)s)->cur = cur;
#endif
    if (batch_len != 0) batch = (batch + 1) & (batch_count - 1);
  } while (sym != 256);
  // Wait for the next slot to drain, then signal end-of-stream with -1
  while (s->x.batch_len[batch] != 0) {}
  s->x.batch_len[batch] = -1;
  s->bitbuf             = bitbuf;
  s->bitpos             = bitpos;
#if !ENABLE_PREFETCH
  s->cur = cur;
#endif
}
/**
 * @brief Build lookup tables for faster decode
 * LUT format is symbols*32+length (entries pack (symbol << 5) | code_length)
 */
__device__ void init_length_lut(inflate_state_s* s, int t)
{
  int32_t* lut = s->u.lut.lenlut;
  // Threads cooperatively fill the 2^log2_len_lut entries with a block stride
  for (uint32_t bits = t; bits < (1 << log2_len_lut); bits += blockDim.x) {
    const int16_t* cnt     = s->lencnt;
    const int16_t* symbols = s->lensym;
    int sym                = -10 << 5;  // default: invalid-code marker
    unsigned int first     = 0;
    unsigned int rbits     = __brev(bits) >> (32 - log2_len_lut);
    for (unsigned int len = 1; len <= log2_len_lut; len++) {
      unsigned int code  = (rbits >> (log2_len_lut - len)) - first;
      unsigned int count = cnt[len];
      if (code < count) {
        sym = symbols[code];
        if (sym > 256) {
          // Length symbol: pre-add the base length and store the extra-bit
          // mask and bit offset so the decoder finishes with one shift+mask
          int lext = g_lext[sym - 257];
          sym = (256 + g_lens[sym - 257]) | (((1 << lext) - 1) << (16 - 5)) | (len << (24 - 5));
          len += lext;
        }
        sym = (sym << 5) | len;
        break;
      }
      symbols += count;  // else update for next length
      first += count;
      first <<= 1;
    }
    lut[bits] = sym;
  }
  if (!t) {
    // Thread 0: record where the canonical decode resumes for codes longer
    // than log2_len_lut bits (see the slow path in decode_symbols)
    unsigned int first = 0;
    unsigned int index = 0;
    const int16_t* cnt = s->lencnt;
    for (unsigned int len = 1; len <= log2_len_lut; len++) {
      unsigned int count = cnt[len];
      index += count;
      first += count;
      first <<= 1;
    }
    s->first_slow_len = first;
    s->index_slow_len = index;
  }
}
/**
 * @brief Build lookup tables for faster decode of distance symbol
 * LUT format is symbols*32+length (entries pack (symbol << 5) | code_length)
 */
__device__ void init_distance_lut(inflate_state_s* s, int t)
{
  int32_t* lut = s->u.lut.distlut;
  // Threads cooperatively fill the 2^log2_dist_lut entries with a block stride
  for (uint32_t bits = t; bits < (1 << log2_dist_lut); bits += blockDim.x) {
    const int16_t* cnt     = s->distcnt;
    const int16_t* symbols = s->distsym;
    int sym                = 0;  // default: not in LUT, use the slow path
    unsigned int first     = 0;
    unsigned int rbits     = __brev(bits) >> (32 - log2_dist_lut);
    for (unsigned int len = 1; len <= log2_dist_lut; len++) {
      unsigned int code  = (rbits >> (log2_dist_lut - len)) - first;
      unsigned int count = cnt[len];
      if (code < count) {
        // Pack base distance, extra-bit count and code length into one entry
        int dist = symbols[code];
        int dext = g_dext[dist];
        sym      = g_dists[dist] | (dext << 15);
        sym      = (sym << 5) | len;
        break;
      }
      symbols += count;  // else update for next length
      first += count;
      first <<= 1;
    }
    lut[bits] = sym;
  }
  if (!t) {
    // Thread 0: record where the canonical decode resumes for codes longer
    // than log2_dist_lut bits (see the slow path in decode_symbols)
    unsigned int first = 0;
    unsigned int index = 0;
    const int16_t* cnt = s->distcnt;
    for (unsigned int len = 1; len <= log2_dist_lut; len++) {
      unsigned int count = cnt[len];
      index += count;
      first += count;
      first <<= 1;
    }
    s->first_slow_dist = first;
    s->index_slow_dist = index;
  }
}
/// @brief WARP1: process symbols and output uncompressed stream.
/// Consumes batches published by decode_symbols: values < 256 are literal
/// bytes, packed values encode (distance << 16) | (256 + length) copies.
__device__ void process_symbols(inflate_state_s* s, int t)
{
  uint8_t* out           = s->out;
  const uint8_t* outend  = s->outend;
  const uint8_t* outbase = s->outbase;
  int batch              = 0;
  do {
    volatile uint32_t* b = &s->x.u.symqueue[batch * batch_size];
    int batch_len, pos;
    int32_t symt;
    uint32_t lit_mask;
    if (t == 0) {
      // Lane 0 spins until the decoder publishes this batch (<0 terminates)
      while ((batch_len = s->x.batch_len[batch]) == 0) {}
    } else {
      batch_len = 0;
    }
    batch_len = shuffle(batch_len);
    if (batch_len < 0) { break; }
    // Each lane picks up one symbol; lit_mask flags length/distance symbols
    symt     = (t < batch_len) ? b[t] : 256;
    lit_mask = ballot(symt >= 256);
    pos      = min((__ffs(lit_mask) - 1) & 0xff, 32);
    if (t == 0) { s->x.batch_len[batch] = 0; }
    // Write the leading run of literals, one byte per lane
    if (t < pos && out + t < outend) { out[t] = symt; }
    out += pos;
    batch_len -= pos;
    while (batch_len > 0) {
      int dist, len, symbol;
      // Process a non-literal symbol
      symbol = shuffle(symt, pos);
      len    = max((symbol & 0xffff) - 256, 0);  // max should be unnecessary, but just in case
      dist   = symbol >> 16;
      // Warp-cooperative overlapped copy; i % dist handles length > distance
      for (int i = t; i < len; i += 32) {
        const uint8_t* src = out + ((i >= dist) ? (i % dist) : i) - dist;
        uint8_t b          = (src < outbase) ? 0 : *src;  // NOTE: shadows the queue pointer b
        if (out + i < outend) { out[i] = b; }
      }
      out += len;
      pos++;
      batch_len--;
      // Process subsequent literals, if any
      if (!((lit_mask >> pos) & 1)) {
        len    = min((__ffs(lit_mask >> pos) - 1) & 0xff, batch_len);
        symbol = shuffle(symt, (pos + t) & 0x1f);
        if (t < len && out + t < outend) { out[t] = symbol; }
        out += len;
        pos += len;
        batch_len -= len;
      }
    }
    batch = (batch + 1) & (batch_count - 1);
  } while (1);
  if (t == 0) { s->out = out; }
}
/**
* @brief Initializes a stored block.
*
* Format notes:
*
* - After the two-bit stored block type (00), the stored block length and
* stored bytes are byte-aligned for fast copying. Therefore any leftover
* bits in the byte that has the last bit of the type, as many as seven, are
* discarded. The value of the discarded bits are not defined and should not
* be checked against any expectation.
*
* - The second inverted copy of the stored block length does not have to be
* checked, but it's probably a good idea to do so anyway.
*
* - A stored block can have zero length. This is sometimes used to byte-align
* subsets of the compressed data for random access or partial recovery.
*/
/// @return 0 on success, 2 if the input is exhausted, -2 if the length check fails
__device__ int init_stored(inflate_state_s* s)
{
  uint32_t len, nlen;  // length of stored block
  // Byte align
  if (s->bitpos & 7) { skipbits(s, 8 - (s->bitpos & 7)); }
  if (s->cur + (s->bitpos >> 3) >= s->end) {
    return 2;  // Not enough input
  }
  // get length and check against its one's complement
  len  = getbits(s, 16);
  nlen = getbits(s, 16);
  if (len != (nlen ^ 0xffff)) {
    return -2;  // didn't match complement!
  }
  if (s->cur + (s->bitpos >> 3) + len > s->end) {
    return 2;  // Not enough input
  }
  s->stored_blk_len = len;
  // done with a valid stored block
  return 0;
}
/// Copy bytes from stored block to destination (block-wide; t = thread index)
__device__ void copy_stored(inflate_state_s* s, int t)
{
  int len         = s->stored_blk_len;
  uint8_t* cur    = s->cur + (s->bitpos >> 3);
  uint8_t* out    = s->out;
  uint8_t* outend = s->outend;
  uint8_t* cur4;
  int slow_bytes = min(len, (int)((16 - (size_t)out) & 0xf));
  int fast_bytes, bitpos;
  // Slow copy until output is 16B aligned
  if (slow_bytes) {
    for (int i = t; i < slow_bytes; i += blockDim.x) {
      if (out + i < outend) {
        out[i] = cur[i];  // Input range has already been validated in init_stored()
      }
    }
    cur += slow_bytes;
    out += slow_bytes;
    len -= slow_bytes;
  }
  fast_bytes = len;
  if (out < outend) { fast_bytes = (int)min((size_t)fast_bytes, (outend - out)); }
  fast_bytes &= ~0xf;
  // Round the input pointer down to 4-byte alignment; the funnel shifts
  // below compensate for the resulting bit offset
  bitpos = ((int)(3 & (size_t)cur)) << 3;
  cur4   = cur - (bitpos >> 3);
  if (out < outend) {
    // Fast copy 16 bytes at a time
    for (int i = t * 16; i < fast_bytes; i += blockDim.x * 16) {
      uint4 u;
      u.x = *reinterpret_cast<const uint32_t*>(cur4 + i + 0 * 4);
      u.y = *reinterpret_cast<const uint32_t*>(cur4 + i + 1 * 4);
      u.z = *reinterpret_cast<const uint32_t*>(cur4 + i + 2 * 4);
      u.w = *reinterpret_cast<const uint32_t*>(cur4 + i + 3 * 4);
      if (bitpos != 0) {
        uint32_t v = (bitpos != 0) ? *reinterpret_cast<const uint32_t*>(cur4 + i + 4 * 4) : 0;
        u.x        = __funnelshift_rc(u.x, u.y, bitpos);
        u.y        = __funnelshift_rc(u.y, u.z, bitpos);
        u.z        = __funnelshift_rc(u.z, u.w, bitpos);
        u.w        = __funnelshift_rc(u.w, v, bitpos);
      }
      *reinterpret_cast<uint4*>(out + i) = u;
    }
  }
  cur += fast_bytes;
  out += fast_bytes;
  len -= fast_bytes;
  // Slow copy for remaining bytes
  for (int i = t; i < len; i += blockDim.x) {
    if (out + i < outend) {
      out[i] = cur[i];  // Input range has already been validated in init_stored()
    }
  }
  out += len;
  __syncthreads();
  if (t == 0) {
    // Reset bitstream to end of block
    uint8_t* p            = cur + len;
    uint32_t prefix_bytes = (uint32_t)(((size_t)p) & 3);
    p -= prefix_bytes;
    s->cur      = p;
    s->bitbuf.x = (p < s->end) ? *reinterpret_cast<uint32_t*>(p) : 0;
    p += 4;
    s->bitbuf.y = (p < s->end) ? *reinterpret_cast<uint32_t*>(p) : 0;
    s->bitpos   = prefix_bytes * 8;
    s->out      = out;
  }
}
#if ENABLE_PREFETCH
/// @brief Initialize the prefetcher state (thread 0 only)
__device__ void init_prefetcher(inflate_state_s* s, int t)
{
  if (t == 0) {
    s->pref.cur_p = s->cur;  // start prefetching at the current input position
    s->pref.run   = 1;
  }
}
/// @brief Prefetcher warp: streams input words into the circular prefetch
/// buffer ahead of the decoder until s->pref.run is cleared
__device__ void prefetch_warp(volatile inflate_state_s* s, int t)
{
  const uint8_t* cur_p = s->pref.cur_p;
  const uint8_t* end   = s->end;
  while (shuffle((t == 0) ? s->pref.run : 0)) {
    // Only fetch while the ring would not overwrite data the decoder has not
    // consumed yet (distance computed on the low 32 bits of the pointers)
    int32_t cur_lo = (int32_t)(size_t)cur_p;
    int do_pref =
      shuffle((t == 0) ? (cur_lo - *(volatile int32_t*)&s->cur < prefetch_size - 32 * 4 - 4) : 0);
    if (do_pref) {
      // Each of the 32 lanes fetches one 32-bit word (zero past end of input)
      const uint8_t* p = cur_p + 4 * t;
      *prefetch_addr32(s->pref, p) = (p < end) ? *reinterpret_cast<const uint32_t*>(p) : 0;
      cur_p += 4 * 32;
      __threadfence_block();
      __syncwarp();
      if (!t) {
        s->pref.cur_p = cur_p;
        __threadfence_block();
      }
    }
  }
}
#endif // ENABLE_PREFETCH
/**
* @brief Parse GZIP header
* See https://tools.ietf.org/html/rfc1952
*/
__device__ int parse_gzip_header(const uint8_t* src, size_t src_size)
{
  int hdr_len = -1;
  // Minimum member size: 10-byte fixed header + 8-byte footer (CRC32 + ISIZE)
  if (src_size >= 18) {
    uint32_t sig = (src[0] << 16) | (src[1] << 8) | src[2];
    if (sig == 0x1f8b08)  // 24-bit GZIP inflate signature {0x1f, 0x8b, 0x08}
    {
      uint8_t flags = src[3];
      hdr_len       = 10;
      if (flags & GZIPHeaderFlag::fextra)  // Extra fields present
      {
        int xlen = src[hdr_len] | (src[hdr_len + 1] << 8);
        // Skip the 2-byte XLEN field itself plus the xlen payload bytes
        // (RFC 1952: XLEN does not include its own size); previously only
        // xlen was skipped, misaligning all subsequent optional fields
        hdr_len += 2 + xlen;
        if (hdr_len >= src_size) return -1;
      }
      if (flags & GZIPHeaderFlag::fname)  // Original file name present
      {
        // Skip zero-terminated string
        do {
          if (hdr_len >= src_size) return -1;
        } while (src[hdr_len++] != 0);
      }
      if (flags & GZIPHeaderFlag::fcomment)  // Comment present
      {
        // Skip zero-terminated string
        do {
          if (hdr_len >= src_size) return -1;
        } while (src[hdr_len++] != 0);
      }
      if (flags & GZIPHeaderFlag::fhcrc)  // Header CRC present
      {
        hdr_len += 2;
      }
      // Require at least one byte of deflate data before the 8-byte footer
      if (hdr_len + 8 >= src_size) hdr_len = -1;
    }
  }
  return hdr_len;
}
/**
* @brief INFLATE decompression kernel
*
* blockDim {block_size,1,1}
*
* @tparam block_size Thread block dimension for this call
* @param inputs Source and destination buffer information per block
* @param outputs Decompression status buffer per block
* @param parse_hdr If nonzero, indicates that the compressed bitstream includes a GZIP header
*/
template <int block_size>
__global__ void __launch_bounds__(block_size)
  inflate_kernel(gpu_inflate_input_s* inputs, gpu_inflate_status_s* outputs, int parse_hdr)
{
  // One decompression state per block, shared by all cooperating warps
  __shared__ __align__(16) inflate_state_s state_g;
  int t = threadIdx.x; // thread rank within the block
  int z = blockIdx.x;  // index of the compressed stream handled by this block
  inflate_state_s* state = &state_g;
  if (!t) {
    // Thread0: set up input/output pointers and the initial 64-bit bit buffer
    uint8_t* p = const_cast<uint8_t*>(static_cast<uint8_t const*>(inputs[z].srcDevice));
    size_t src_size = inputs[z].srcSize;
    uint32_t prefix_bytes;
    // Parse header if needed
    state->err = 0;
    if (parse_hdr) {
      int hdr_len = parse_gzip_header(p, src_size);
      src_size = (src_size >= 8) ? src_size - 8 : 0; // ignore footer (CRC32 + ISIZE)
      if (hdr_len >= 0) {
        p += hdr_len;
        src_size -= hdr_len;
      } else {
        state->err = hdr_len; // negative header length doubles as the error status
      }
    }
    // Initialize shared state
    state->out = const_cast<uint8_t*>(static_cast<uint8_t const*>(inputs[z].dstDevice));
    state->outbase = state->out;
    state->outend = state->out + inputs[z].dstSize;
    state->end = p + src_size;
    // Align the read pointer down to a 4-byte boundary; the skipped prefix is
    // compensated by starting bitpos past those bytes
    prefix_bytes = (uint32_t)(((size_t)p) & 3);
    p -= prefix_bytes;
    state->cur = p;
    state->bitbuf.x = (p < state->end) ? *reinterpret_cast<uint32_t*>(p) : 0;
    p += 4;
    state->bitbuf.y = (p < state->end) ? *reinterpret_cast<uint32_t*>(p) : 0;
    state->bitpos = prefix_bytes * 8;
  }
  __syncthreads();
  // Main loop decoding blocks
  while (!state->err) {
    if (!t) {
      // Thread0: read last flag, block type and custom huffman tables if any
      if (state->cur + (state->bitpos >> 3) >= state->end)
        state->err = 2; // ran out of input
      else {
        state->blast = getbits(state, 1);
        state->btype = getbits(state, 2);
        if (state->btype == 0)
          state->err = init_stored(state);
        else if (state->btype == 1)
          state->err = init_fixed(state);
        else if (state->btype == 2)
          state->err = init_dynamic(state);
        else
          state->err = -1; // Invalid block
      }
    }
    __syncthreads();
    if (!state->err && (state->btype == 1 || state->btype == 2)) {
      // Initializes lookup tables (block wide)
      init_length_lut(state, t);
      init_distance_lut(state, t);
#if ENABLE_PREFETCH
      // Initialize prefetcher
      init_prefetcher(state, t);
#endif
      if (t < batch_count) { state->x.batch_len[t] = 0; }
      __syncthreads();
      // decode data until end-of-block code
      // Warps specialize: WARP0 decodes symbols, WARP1 writes output, WARP2 prefetches
      if (t < 1 * 32) {
        // WARP0: decode variable-length symbols
        if (!t) {
          // Thread0: decode symbols (single threaded)
          decode_symbols(state);
#if ENABLE_PREFETCH
          state->pref.run = 0; // signal the prefetch warp to exit
#endif
        }
      } else if (t < 2 * 32) {
        // WARP1: perform LZ77 using length and distance codes from WARP0
        process_symbols(state, t & 0x1f);
      }
#if ENABLE_PREFETCH
      else if (t < 3 * 32) {
        // WARP2: Prefetcher: prefetch data for WARP0
        prefetch_warp(state, t & 0x1f);
      }
#endif
      // else WARP3: idle
    } else if (!state->err && state->btype == 0) {
      // Uncompressed block (block-wide memcpy)
      copy_stored(state, t);
    }
    if (state->blast) break;
    __syncthreads();
  }
  __syncthreads();
  // Output decompression status and length
  if (!t) {
    if (state->err == 0 && state->cur + ((state->bitpos + 7) >> 3) > state->end) {
      // Read past the end of the input buffer
      state->err = 2;
    } else if (state->err == 0 && state->out > state->outend) {
      // Output buffer too small
      state->err = 1;
    }
    outputs[z].bytes_written = state->out - state->outbase;
    outputs[z].status = state->err;
    outputs[z].reserved = (int)(state->end - state->cur); // Here mainly for debug purposes
  }
}
/**
* @brief Copy a group of buffers
*
* blockDim {1024,1,1}
*
* @param inputs Source and destination information per block
*/
__global__ void __launch_bounds__(1024) copy_uncompressed_kernel(gpu_inflate_input_s* inputs)
{
  // Shared slots used by thread0 to broadcast the copy parameters to the block
  __shared__ const uint8_t* volatile src_g;
  __shared__ uint8_t* volatile dst_g;
  __shared__ uint32_t volatile copy_len_g;
  uint32_t t = threadIdx.x;
  uint32_t z = blockIdx.x; // one buffer pair per block
  const uint8_t* src;
  uint8_t* dst;
  uint32_t len, src_align_bytes, src_align_bits, dst_align_bytes;
  if (!t) {
    src = static_cast<const uint8_t*>(inputs[z].srcDevice);
    dst = static_cast<uint8_t*>(inputs[z].dstDevice);
    // Copy length is clamped to the smaller of the two buffers
    len = min((uint32_t)inputs[z].srcSize, (uint32_t)inputs[z].dstSize);
    src_g = src;
    dst_g = dst;
    copy_len_g = len;
  }
  __syncthreads();
  src = src_g;
  dst = dst_g;
  len = copy_len_g;
  // Align output to 32-bit
  dst_align_bytes = 3 & -reinterpret_cast<intptr_t>(dst);
  if (dst_align_bytes != 0) {
    uint32_t align_len = min(dst_align_bytes, len);
    if (t < align_len) { dst[t] = src[t]; }
    src += align_len;
    dst += align_len;
    len -= align_len;
  }
  // Bulk copy as 32-bit words, funnel-shifting to compensate for any
  // residual source misalignment
  src_align_bytes = (uint32_t)(3 & reinterpret_cast<uintptr_t>(src));
  src_align_bits = src_align_bytes << 3;
  while (len >= 32) {
    const uint32_t* src32 = reinterpret_cast<const uint32_t*>(src - src_align_bytes);
    uint32_t copy_cnt = min(len >> 2, 1024); // at most one word per thread per pass
    if (t < copy_cnt) {
      uint32_t v = src32[t];
      // NOTE(review): with a misaligned source, the last active thread reads
      // src32[t + 1], up to 3 bytes beyond the bytes being copied — verify the
      // source allocation tolerates this small overread
      if (src_align_bits != 0) { v = __funnelshift_r(v, src32[t + 1], src_align_bits); }
      reinterpret_cast<uint32_t*>(dst)[t] = v;
    }
    src += copy_cnt * 4;
    dst += copy_cnt * 4;
    len -= copy_cnt * 4;
  }
  // Tail: copy the remaining <32 bytes one byte per thread
  if (t < len) { dst[t] = src[t]; }
}
/**
 * @brief Launch the INFLATE decompression kernel: one thread block per
 * compressed input chunk
 *
 * @param inputs Source/destination buffer descriptors, one per chunk
 * @param outputs Per-chunk decompression status, written by the kernel
 * @param count Number of chunks to decompress
 * @param parse_hdr Nonzero if each bitstream begins with a GZIP header
 * @param stream Stream on which to schedule the kernel
 */
hipError_t __host__ gpuinflate(gpu_inflate_input_s* inputs,
                               gpu_inflate_status_s* outputs,
                               int count,
                               int parse_hdr,
                               rmm::cuda_stream_view stream)
{
  constexpr int block_size = 128; // Threads per block
  if (count <= 0) { return hipSuccess; } // nothing to decompress
  dim3 const grid_dim(count);
  dim3 const block_dim(block_size);
  hipLaunchKernelGGL((inflate_kernel<block_size>),
                     grid_dim,
                     block_dim,
                     0,
                     stream.value(),
                     inputs,
                     outputs,
                     parse_hdr);
  return hipSuccess;
}
/**
 * @brief Launch the uncompressed-block copy kernel: one 1024-thread block per
 * buffer pair
 *
 * @param inputs Source/destination buffer descriptors, one per block
 * @param count Number of buffer pairs to copy
 * @param stream Stream on which to schedule the kernel
 */
hipError_t __host__ gpu_copy_uncompressed_blocks(gpu_inflate_input_s* inputs,
                                                 int count,
                                                 rmm::cuda_stream_view stream)
{
  if (count <= 0) { return hipSuccess; } // no buffers to copy
  dim3 const grid_dim(count);
  dim3 const block_dim(1024);
  hipLaunchKernelGGL((copy_uncompressed_kernel), grid_dim, block_dim, 0, stream.value(), inputs);
  return hipSuccess;
}
} // namespace io
} // namespace cudf
| 8c9fb60fd0bc3e030f6b414e4ab4c1958dd7024c.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** @file gpuinflate.cu
Derived from zlib's contrib/puff.c, original copyright notice below
*/
/*
Copyright (C) 2002-2013 Mark Adler, all rights reserved
version 2.3, 21 Jan 2013
This software is provided 'as-is', without any express or implied
warranty. In no event will the author be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Mark Adler madler@alumni.caltech.edu
*/
#include "gpuinflate.h"
#include "io_uncomp.h"
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace io {
constexpr int max_bits = 15; // maximum bits in a code
constexpr int max_l_codes = 286; // maximum number of literal/length codes
constexpr int max_d_codes = 30; // maximum number of distance codes
constexpr int fix_l_codes = 288; // number of fixed literal/length codes
constexpr int log2_len_lut = 10;
constexpr int log2_dist_lut = 8;
/**
* @brief Intermediate arrays for building huffman tables
*/
struct scratch_arr {
int16_t lengths[max_l_codes + max_d_codes]; ///< descriptor code lengths
int16_t offs[max_bits + 1]; ///< offset in symbol table for each length (scratch)
};
/**
* @brief Huffman LUTs for length and distance codes
*/
struct lut_arr {
int32_t lenlut[1 << log2_len_lut]; ///< LUT for length decoding
int32_t distlut[1 << log2_dist_lut]; ///< LUT for fast distance decoding
};
/// 4 batches of 32 symbols
constexpr int log2_batch_count = 2; // 1..5
constexpr int log2_batch_size = 5;
constexpr int batch_count = (1 << log2_batch_count);
constexpr int batch_size = (1 << log2_batch_size);
/**
* @brief Inter-warp communication queue
*/
struct xwarp_s {
  int32_t batch_len[batch_count]; //< Length of each batch - <0:end, 0:not ready, >0:symbol count
  union {
    // Symbols produced by decode_symbols: values < 256 are literal bytes;
    // otherwise bits 0..15 hold 256 + copy length and bits 16..31 the distance
    uint32_t symqueue[batch_count * batch_size];
    uint8_t symqueue8[batch_count * batch_size * 4]; //< byte view of the same queue
  } u;
};
#define ENABLE_PREFETCH 1
#if ENABLE_PREFETCH
constexpr int log2_prefetch_size = 9; // Must be at least LOG2_BATCH_SIZE+3
constexpr int prefetch_size = (1 << log2_prefetch_size);
/// @brief Prefetcher state
struct prefetch_queue_s {
const uint8_t* cur_p; ///< Prefetch location
int run; ///< prefetcher will exit when run=0
uint8_t pref_data[prefetch_size];
};
/// @brief Maps an input-stream address to its 32-bit slot in the prefetch ring
/// buffer. The mask (prefetch_size - 4) wraps the address modulo the buffer
/// size while clearing the low 2 bits, i.e. one word-aligned slot per address.
template <typename T>
inline __device__ volatile uint32_t* prefetch_addr32(volatile prefetch_queue_s& q, T* ptr)
{
  return reinterpret_cast<volatile uint32_t*>(&q.pref_data[(prefetch_size - 4) & (size_t)(ptr)]);
}
#endif // ENABLE_PREFETCH
/**
* @brief Inflate decompressor state
*/
struct inflate_state_s {
  // output state
  uint8_t* out; ///< output buffer
  uint8_t* outbase; ///< start of output buffer
  uint8_t* outend; ///< end of output buffer
  // Input state
  uint8_t* cur; ///< input buffer
  uint8_t* end; ///< end of input buffer
  uint2 bitbuf; ///< bit buffer (64-bit)
  uint32_t bitpos; ///< position in bit buffer
  int32_t err; ///< Error status
  int btype; ///< current block type
  int blast; ///< last block
  uint32_t stored_blk_len; ///< length of stored (uncompressed) block
  uint16_t first_slow_len; ///< first code not in fast LUT
  uint16_t index_slow_len; ///< offset into lensym of the first slow-path symbol
  uint16_t first_slow_dist; ///< first distance code not in the fast LUT
  uint16_t index_slow_dist; ///< offset into distsym of the first slow-path symbol
  volatile xwarp_s x; ///< decoder-to-output-warp symbol queue
#if ENABLE_PREFETCH
  volatile prefetch_queue_s pref; ///< input prefetch ring buffer state
#endif
  int16_t lencnt[max_bits + 1]; ///< count of literal/length codes per code length
  int16_t lensym[fix_l_codes]; // Assumes fix_l_codes >= max_l_codes
  int16_t distcnt[max_bits + 1]; ///< count of distance codes per code length
  int16_t distsym[max_d_codes]; ///< distance symbols sorted by code length
  union {
    scratch_arr scratch; ///< used while building tables
    lut_arr lut; ///< used while decoding (overlays scratch)
  } u;
};
/// @brief Extracts num_bits bits from source starting at bit_start
/// (PTX bfe.u32 bit-field extract instruction)
inline __device__ unsigned int bfe(unsigned int source,
                                   unsigned int bit_start,
                                   unsigned int num_bits)
{
  unsigned int bits;
  asm("bfe.u32 %0, %1, %2, %3;" : "=r"(bits) : "r"(source), "r"(bit_start), "r"(num_bits));
  return bits;
};
/// @brief Peeks at the next n bits (n < 32) of the stream without consuming them
inline __device__ uint32_t showbits(inflate_state_s* s, uint32_t n)
{
  // Funnel shift extracts a 32-bit window starting at the current bit position
  uint32_t next32 = __funnelshift_rc(s->bitbuf.x, s->bitbuf.y, s->bitpos);
  return (next32 & ((1 << n) - 1));
}
/// @brief Peeks at the next 32 bits of the stream without consuming them
inline __device__ uint32_t nextbits32(inflate_state_s* s)
{
  return __funnelshift_rc(s->bitbuf.x, s->bitbuf.y, s->bitpos);
}
/// @brief Consumes n bits, refilling the 64-bit bit buffer 4 bytes at a time
/// (assumes n small enough that at most one 32-bit refill is needed)
inline __device__ void skipbits(inflate_state_s* s, uint32_t n)
{
  uint32_t bitpos = s->bitpos + n;
  if (bitpos >= 32) {
    // Shift the high word down and load the next aligned word past it
    uint8_t* cur = s->cur + 8;
    s->bitbuf.x = s->bitbuf.y;
    s->bitbuf.y = (cur < s->end) ? *reinterpret_cast<uint32_t*>(cur) : 0; // zero-fill past end
    s->cur = cur - 4; // cur always tracks the word held in bitbuf.x
    bitpos &= 0x1f;
  }
  s->bitpos = bitpos;
}
// TODO: If we require 4-byte alignment of input bitstream & length (padded), reading bits would
// become quite a bit faster
/// @brief Reads and consumes the next n bits (n < 32) of the stream
__device__ uint32_t getbits(inflate_state_s* s, uint32_t n)
{
  uint32_t v = showbits(s, n);
  skipbits(s, n);
  return v;
}
/**
* @brief Decode a code from the stream s using huffman table {symbols,counts}.
* Return the symbol or a negative value if there is an error.
* If all of the lengths are zero, i.e. an empty code, or if the code is
* incomplete and an invalid code is received, then -10 is returned after
* reading max_bits bits.
*
* Format notes:
*
* - The codes as stored in the compressed data are bit-reversed relative to
* a simple integer ordering of codes of the same lengths. Hence below the
* bits are pulled from the compressed data one at a time and used to
* build the code value reversed from what is in the stream in order to
* permit simple integer comparisons for decoding. A table-based decoding
* scheme (as used in zlib) does not need to do this reversal.
*
* - The first code for the shortest length is all zeros. Subsequent codes of
* the same length are simply integer increments of the previous code. When
* moving up a length, a zero bit is appended to the code. For a complete
* code, the last code of the longest length will be all ones.
*
* - Incomplete codes are handled by this decoder, since they are permitted
* in the deflate format. See the format notes for fixed() and dynamic().
*/
__device__ int decode(inflate_state_s* s, const int16_t* counts, const int16_t* symbols)
{
  unsigned int len; // current number of bits in code
  unsigned int code; // len bits being decoded
  unsigned int first; // first code of length len
  unsigned int count; // number of codes of length len
  // Bit-reverse the lookahead so canonical codes compare as plain integers
  uint32_t next32r = __brev(nextbits32(s));
  first = 0;
  for (len = 1; len <= max_bits; len++) {
    code = (next32r >> (32 - len)) - first;
    count = counts[len];
    if (code < count) // if length len, return symbol
    {
      skipbits(s, len); // consume only the bits of the matched code
      return symbols[code];
    }
    symbols += count; // else update for next length
    first += count;
    first <<= 1;
  }
  return -10; // ran out of codes
}
/**
* @brief Given the list of code lengths length[0..n-1] representing a canonical
* Huffman code for n symbols, construct the tables required to decode those
* codes. Those tables are the number of codes of each length, and the symbols
* sorted by length, retaining their original order within each length. The
* return value is zero for a complete code set, negative for an over-
* subscribed code set, and positive for an incomplete code set. The tables
* can be used if the return value is zero or positive, but they cannot be used
* if the return value is negative. If the return value is zero, it is not
* possible for decode() using that table to return an error--any stream of
* enough bits will resolve to a symbol. If the return value is positive, then
* it is possible for decode() using that table to return an error for received
* codes past the end of the incomplete lengths.
*
* Not used by decode(), but used for error checking, count[0] is the number
* of the n symbols not in the code. So n - count[0] is the number of
* codes. This is useful for checking for incomplete codes that have more than
* one symbol, which is an error in a dynamic block.
*
* Assumption: for all i in 0..n-1, 0 <= length[i] <= max_bits
* This is assured by the construction of the length arrays in dynamic() and
* fixed() and is not verified by construct().
*
* Format notes:
*
* - Permitted and expected examples of incomplete codes are one of the fixed
* codes and any code with a single symbol which in deflate is coded as one
* bit instead of zero bits. See the format notes for fixed() and dynamic().
*
* - Within a given code length, the symbols are kept in ascending order for
* the code bits definition.
*/
__device__ int construct(
  inflate_state_s* s, int16_t* counts, int16_t* symbols, const int16_t* length, int n)
{
  int symbol; // current symbol when stepping through length[]
  int len; // current length when stepping through counts[]
  int left; // number of possible codes left of current length
  int16_t* offs = s->u.scratch.offs; // per-length write offsets (scratch space)
  // count number of codes of each length
  for (len = 0; len <= max_bits; len++)
    counts[len] = 0;
  for (symbol = 0; symbol < n; symbol++)
    (counts[length[symbol]])++; // assumes lengths are within bounds
  if (counts[0] == n) // no codes!
    return 0; // complete, but decode() will fail
  // check for an over-subscribed or incomplete set of lengths
  left = 1; // one possible code of zero length
  for (len = 1; len <= max_bits; len++) {
    left <<= 1; // one more bit, double codes left
    left -= counts[len]; // deduct count from possible codes
    if (left < 0) return left; // over-subscribed--return negative
  } // left > 0 means incomplete
  // generate offsets into symbol table for each length for sorting
  offs[1] = 0;
  for (len = 1; len < max_bits; len++)
    offs[len + 1] = offs[len] + counts[len];
  // put symbols in table sorted by length, by symbol order within each length
  for (symbol = 0; symbol < n; symbol++)
    if (length[symbol] != 0) symbols[offs[length[symbol]]++] = symbol;
  // return zero for complete set, positive for incomplete set
  return left;
}
/// permutation of code length codes
static const __device__ __constant__ uint8_t g_code_order[19 + 1] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15, 0xff};
/// Dynamic block (custom huffman tables)
// Returns 0 on success, or a negative puff-style error code on a malformed descriptor
__device__ int init_dynamic(inflate_state_s* s)
{
  int nlen, ndist, ncode; /* number of lengths in descriptor */
  int index; /* index of lengths[] */
  int err; /* construct() return value */
  int16_t* lengths = s->u.scratch.lengths;
  // get number of lengths in each table, check lengths
  nlen = getbits(s, 5) + 257;
  ndist = getbits(s, 5) + 1;
  ncode = getbits(s, 4) + 4;
  if (nlen > max_l_codes || ndist > max_d_codes) {
    return -3; // bad counts
  }
  // read code length code lengths (really), missing lengths are zero
  for (index = 0; index < ncode; index++)
    lengths[g_code_order[index]] = getbits(s, 3);
  for (; index < 19; index++)
    lengths[g_code_order[index]] = 0;
  // build huffman table for code lengths codes (use lencode temporarily)
  err = construct(s, s->lencnt, s->lensym, lengths, 19);
  if (err != 0) // require complete code set here
    return -4;
  // read length/literal and distance code length tables
  index = 0;
  while (index < nlen + ndist) {
    int symbol = decode(s, s->lencnt, s->lensym);
    if (symbol < 0) return symbol; // invalid symbol
    if (symbol < 16) // length in 0..15
      lengths[index++] = symbol;
    else { // repeat instruction
      int len = 0; // last length to repeat, assume repeating zeros
      if (symbol == 16) { // repeat last length 3..6 times
        if (index == 0) return -5; // no last length!
        len = lengths[index - 1]; // last length
        symbol = 3 + getbits(s, 2);
      } else if (symbol == 17) // repeat zero 3..10 times
        symbol = 3 + getbits(s, 3);
      else // == 18, repeat zero 11..138 times
        symbol = 11 + getbits(s, 7);
      if (index + symbol > nlen + ndist) return -6; // too many lengths!
      while (symbol--) // repeat last or zero symbol times
        lengths[index++] = len;
    }
  }
  // check for end-of-block code -- there better be one!
  if (lengths[256] == 0) return -9;
  // build huffman table for literal/length codes
  err = construct(s, s->lencnt, s->lensym, lengths, nlen);
  if (err && (err < 0 || nlen != s->lencnt[0] + s->lencnt[1]))
    return -7; // incomplete code ok only for single length 1 code
  // build huffman table for distance codes
  err = construct(s, s->distcnt, s->distsym, &lengths[nlen], ndist);
  if (err && (err < 0 || ndist != s->distcnt[0] + s->distcnt[1]))
    return -8; // incomplete code ok only for single length 1 code
  return 0;
}
/**
* @brief Initializes a fixed codes block.
*
* Format notes:
*
* - This block type can be useful for compressing small amounts of data for
* which the size of the code descriptions in a dynamic block exceeds the
* benefit of custom codes for that block. For fixed codes, no bits are
* spent on code descriptions. Instead the code lengths for literal/length
* codes and distance codes are fixed. The specific lengths for each symbol
* can be seen in the "for" loops below.
*
* - The literal/length code is complete, but has two symbols that are invalid
* and should result in an error if received. This cannot be implemented
* simply as an incomplete code since those two symbols are in the "middle"
* of the code. They are eight bits long and the longest literal/length\
* code is nine bits. Therefore the code must be constructed with those
* symbols, and the invalid symbols must be detected after decoding.
*
* - The fixed distance codes also have two invalid symbols that should result
* in an error if received. Since all of the distance codes are the same
* length, this can be implemented as an incomplete code. Then the invalid
* codes are detected while decoding.
*/
__device__ int init_fixed(inflate_state_s* s)
{
  int16_t* code_lengths = s->u.scratch.lengths;
  // Fixed literal/length code lengths (RFC 1951 sec 3.2.6):
  // symbols 0..143 -> 8 bits, 144..255 -> 9, 256..279 -> 7, 280..287 -> 8
  for (int sym = 0; sym < fix_l_codes; sym++) {
    int16_t nbits;
    if (sym < 144) {
      nbits = 8;
    } else if (sym < 256) {
      nbits = 9;
    } else if (sym < 280) {
      nbits = 7;
    } else {
      nbits = 8;
    }
    code_lengths[sym] = nbits;
  }
  construct(s, s->lencnt, s->lensym, code_lengths, fix_l_codes);
  // All fixed distance codes are 5 bits long
  for (int sym = 0; sym < max_d_codes; sym++) {
    code_lengths[sym] = 5;
  }
  // build huffman table for distance codes
  construct(s, s->distcnt, s->distsym, code_lengths, max_d_codes);
  return 0;
}
/**
* @brief Decode literal/length and distance codes until an end-of-block code.
*
* Format notes:
*
* - Compressed data that is after the block type if fixed or after the code
* description if dynamic is a combination of literals and length/distance
* pairs terminated by and end-of-block code. Literals are simply Huffman
* coded bytes. A length/distance pair is a coded length followed by a
* coded distance to represent a string that occurs earlier in the
* uncompressed data that occurs again at the current location.
*
* - Literals, lengths, and the end-of-block code are combined into a single
* code of up to 286 symbols. They are 256 literals (0..255), 29 length
* symbols (257..285), and the end-of-block symbol (256).
*
* - There are 256 possible lengths (3..258), and so 29 symbols are not enough
* to represent all of those. Lengths 3..10 and 258 are in fact represented
* by just a length symbol. Lengths 11..257 are represented as a symbol and
* some number of extra bits that are added as an integer to the base length
* of the length symbol. The number of extra bits is determined by the base
* length symbol. These are in the static arrays below, lens[] for the base
* lengths and lext[] for the corresponding number of extra bits.
*
* - The reason that 258 gets its own symbol is that the longest length is used
* often in highly redundant files. Note that 258 can also be coded as the
* base value 227 plus the maximum extra value of 31. While a good deflate
* should never do this, it is not an error, and should be decoded properly.
*
* - If a length is decoded, including its extra bits if any, then it is
* followed a distance code. There are up to 30 distance symbols. Again
* there are many more possible distances (1..32768), so extra bits are added
* to a base value represented by the symbol. The distances 1..4 get their
* own symbol, but the rest require extra bits. The base distances and
* corresponding number of extra bits are below in the static arrays dist[]
* and dext[].
*
* - Literal bytes are simply written to the output. A length/distance pair is
* an instruction to copy previously uncompressed bytes to the output. The
* copy is from distance bytes back in the output stream, copying for length
* bytes.
*
* - Distances pointing before the beginning of the output data are not
* permitted.
*
* - Overlapped copies, where the length is greater than the distance, are
* allowed and common. For example, a distance of one and a length of 258
* simply copies the last byte 258 times. A distance of four and a length of
* twelve copies the last four bytes three times. A simple forward copy
* ignoring whether the length is greater than the distance or not implements
* this correctly. You should not use memcpy() since its behavior is not
* defined for overlapped arrays. You should not use memmove() or bcopy()
* since though their behavior -is- defined for overlapping arrays, it is
* defined to do the wrong thing in this case.
*/
/// permutation of code length codes
static const __device__ __constant__ uint16_t g_lens[29] = { // Size base for length codes 257..285
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27,
31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258};
static const __device__ __constant__ uint16_t
g_lext[29] = { // Extra bits for length codes 257..285
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0};
static const __device__ __constant__ uint16_t
g_dists[30] = { // Offset base for distance codes 0..29
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129,
193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577};
static const __device__ __constant__ uint16_t g_dext[30] = { // Extra bits for distance codes 0..29
0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
/// @brief Thread 0 only: decode bitstreams and output symbols into the symbol queue
__device__ void decode_symbols(inflate_state_s* s)
{
  // Work on local copies of the bitstream state; flushed back to s on exit
  uint32_t bitpos = s->bitpos;
  uint2 bitbuf = s->bitbuf;
  uint8_t* cur = s->cur;
  uint8_t* end = s->end;
  int32_t batch = 0;
  int32_t sym, batch_len;
  do {
    volatile uint32_t* b = &s->x.u.symqueue[batch * batch_size];
    // Wait for the next batch entry to be empty
#if ENABLE_PREFETCH
    // Wait for prefetcher to fetch a worst-case of 48 bits per symbol
    while ((*(volatile int32_t*)&s->pref.cur_p - (int32_t)(size_t)cur < batch_size * 6) ||
           (s->x.batch_len[batch] != 0)) {}
#else
    while (s->x.batch_len[batch] != 0) {}
#endif
    batch_len = 0;
#if ENABLE_PREFETCH
    if (cur + (bitpos >> 3) >= end) {
      s->err = 1; // ran past the end of input
      break;
    }
#endif
    // Inner loop decoding symbols
    do {
      uint32_t next32 = __funnelshift_rc(bitbuf.x, bitbuf.y, bitpos); // nextbits32(s);
      uint32_t len;
      // Fast path: direct LUT lookup on the low log2_len_lut bits
      sym = s->u.lut.lenlut[next32 & ((1 << log2_len_lut) - 1)];
      if ((uint32_t)sym < (uint32_t)(0x100 << 5)) {
        // We can lookup a second symbol if this was a short literal
        len = sym & 0x1f;
        sym >>= 5;
        b[batch_len++] = sym;
        next32 >>= len;
        bitpos += len;
        sym = s->u.lut.lenlut[next32 & ((1 << log2_len_lut) - 1)];
      }
      if (sym > 0) // short symbol
      {
        // LUT entry packs code length, base value, extra-bit mask and offset
        len = sym & 0x1f;
        sym = ((sym >> 5) & 0x3ff) + ((next32 >> (sym >> 24)) & ((sym >> 16) & 0x1f));
      } else {
        // Slow length path
        uint32_t next32r = __brev(next32);
        const int16_t* symbols = &s->lensym[s->index_slow_len];
        unsigned int first = s->first_slow_len;
        int lext;
#pragma unroll 1
        for (len = log2_len_lut + 1; len <= max_bits; len++) {
          unsigned int code = (next32r >> (32 - len)) - first;
          unsigned int count = s->lencnt[len];
          if (code < count) // if length len, return symbol
          {
            sym = symbols[code];
            break;
          }
          symbols += count; // else update for next length
          first += count;
          first <<= 1;
        }
        if (len > max_bits) {
          s->err = -10; // invalid code: force end-of-block
          sym = 256;
          len = 0;
        }
        if (sym > 256) {
          // Length code: add the extra bits to the base length
          sym -= 257;
          lext = g_lext[sym];
          sym = 256 + g_lens[sym] + bfe(next32, len, lext);
          len += lext;
        }
      }
      if (sym > 256) {
        // Length/distance pair: decode the distance and pack it into bits 16..31
        int dist, dext;
        // skipbits(s, len) inlined - no limit check
        bitpos += len;
        if (bitpos >= 32) {
          bitbuf.x = bitbuf.y;
#if ENABLE_PREFETCH
          bitbuf.y = *prefetch_addr32(s->pref, cur + 8);
          cur += 4;
#else
          cur += 8;
          bitbuf.y = (cur < end) ? *(const uint32_t*)cur : 0;
          cur -= 4;
#endif
          bitpos &= 0x1f;
        }
        // get distance
        next32 = __funnelshift_rc(bitbuf.x, bitbuf.y, bitpos); // nextbits32(s);
        dist = s->u.lut.distlut[next32 & ((1 << log2_dist_lut) - 1)];
        if (dist > 0) {
          // Fast path: entry packs code length, base distance and extra-bit count
          len = dist & 0x1f;
          dext = bfe(dist, 20, 5);
          dist = bfe(dist, 5, 15);
          sym |= (dist + bfe(next32, len, dext)) << 16;
          len += dext;
        } else {
          // Slow distance path (codes longer than the LUT width)
          uint32_t next32r = __brev(next32);
          const int16_t* symbols = &s->distsym[s->index_slow_dist];
          unsigned int first = s->first_slow_dist;
#pragma unroll 1
          for (len = log2_dist_lut + 1; len <= max_bits; len++) {
            unsigned int code = (next32r >> (32 - len)) - first;
            unsigned int count = s->distcnt[len];
            if (code < count) // if length len, return symbol
            {
              dist = symbols[code];
              break;
            }
            symbols += count; // else update for next length
            first += count;
            first <<= 1;
          }
          if (len > max_bits) {
            s->err = -10; // invalid code: force end-of-block
            sym = 256;
            len = 0;
          } else {
            dext = g_dext[dist];
            sym |= (g_dists[dist] + bfe(next32, len, dext)) << 16;
            len += dext;
          }
        }
      }
      // skipbits(s, len) inlined with added error check for reading past the end of the input
      // buffer
      bitpos += len;
      if (bitpos >= 32) {
        bitbuf.x = bitbuf.y;
#if ENABLE_PREFETCH
        bitbuf.y = *prefetch_addr32(s->pref, cur + 8);
        cur += 4;
#else
        cur += 8;
        if (cur < end) {
          bitbuf.y = *(const uint32_t*)cur;
          cur -= 4;
        } else {
          bitbuf.y = 0;
          cur -= 4;
          if (cur > end) {
            s->err = 1;
            sym = 256;
          }
        }
#endif
        bitpos &= 0x1f;
      }
      if (sym == 256) break; // end-of-block
      b[batch_len++] = sym;
    } while (batch_len < batch_size - 1);
    // Publish the batch to the output warp and advance to the next queue slot
    s->x.batch_len[batch] = batch_len;
#if ENABLE_PREFETCH
    ((volatile inflate_state_s*)s)->cur = cur; // let the prefetcher see our progress
#endif
    if (batch_len != 0) batch = (batch + 1) & (batch_count - 1);
  } while (sym != 256);
  // Signal end-of-stream with a negative batch length
  while (s->x.batch_len[batch] != 0) {}
  s->x.batch_len[batch] = -1;
  s->bitbuf = bitbuf;
  s->bitpos = bitpos;
#if !ENABLE_PREFETCH
  s->cur = cur;
#endif
}
/**
* @brief Build lookup tables for faster decode
* LUT format is symbols*16+length
*/
__device__ void init_length_lut(inflate_state_s* s, int t)
{
  int32_t* lut = s->u.lut.lenlut;
  // Each thread fills a strided subset of the 2^log2_len_lut entries
  for (uint32_t bits = t; bits < (1 << log2_len_lut); bits += blockDim.x) {
    const int16_t* cnt = s->lencnt;
    const int16_t* symbols = s->lensym;
    int sym = -10 << 5; // default: invalid-code marker (non-positive entry)
    unsigned int first = 0;
    unsigned int rbits = __brev(bits) >> (32 - log2_len_lut);
    for (unsigned int len = 1; len <= log2_len_lut; len++) {
      unsigned int code = (rbits >> (log2_len_lut - len)) - first;
      unsigned int count = cnt[len];
      if (code < count) {
        sym = symbols[code];
        if (sym > 256) {
          // Length code: pack base length, extra-bit mask and bit offset so
          // the decoder can finish the length without another lookup
          int lext = g_lext[sym - 257];
          sym = (256 + g_lens[sym - 257]) | (((1 << lext) - 1) << (16 - 5)) | (len << (24 - 5));
          len += lext;
        }
        sym = (sym << 5) | len; // low 5 bits hold the total code length
        break;
      }
      symbols += count; // else update for next length
      first += count;
      first <<= 1;
    }
    lut[bits] = sym;
  }
  if (!t) {
    // Record where codes longer than the LUT width begin, for the slow path
    unsigned int first = 0;
    unsigned int index = 0;
    const int16_t* cnt = s->lencnt;
    for (unsigned int len = 1; len <= log2_len_lut; len++) {
      unsigned int count = cnt[len];
      index += count;
      first += count;
      first <<= 1;
    }
    s->first_slow_len = first;
    s->index_slow_len = index;
  }
}
/**
* @brief Build lookup tables for faster decode of distance symbol
* LUT format is symbols*16+length
*/
__device__ void init_distance_lut(inflate_state_s* s, int t)
{
  int32_t* lut = s->u.lut.distlut;
  // Each thread fills a strided subset of the 2^log2_dist_lut entries
  for (uint32_t bits = t; bits < (1 << log2_dist_lut); bits += blockDim.x) {
    const int16_t* cnt = s->distcnt;
    const int16_t* symbols = s->distsym;
    int sym = 0; // default: 0 means not in the fast LUT -> slow path
    unsigned int first = 0;
    unsigned int rbits = __brev(bits) >> (32 - log2_dist_lut);
    for (unsigned int len = 1; len <= log2_dist_lut; len++) {
      unsigned int code = (rbits >> (log2_dist_lut - len)) - first;
      unsigned int count = cnt[len];
      if (code < count) {
        // Pack base distance, extra-bit count and code length into one entry
        int dist = symbols[code];
        int dext = g_dext[dist];
        sym = g_dists[dist] | (dext << 15);
        sym = (sym << 5) | len; // low 5 bits hold the code length
        break;
      }
      symbols += count; // else update for next length
      first += count;
      first <<= 1;
    }
    lut[bits] = sym;
  }
  if (!t) {
    // Record where codes longer than the LUT width begin, for the slow path
    unsigned int first = 0;
    unsigned int index = 0;
    const int16_t* cnt = s->distcnt;
    for (unsigned int len = 1; len <= log2_dist_lut; len++) {
      unsigned int count = cnt[len];
      index += count;
      first += count;
      first <<= 1;
    }
    s->first_slow_dist = first;
    s->index_slow_dist = index;
  }
}
/// @brief WARP1: process symbols and output uncompressed stream
__device__ void process_symbols(inflate_state_s* s, int t)
{
  uint8_t* out = s->out;
  const uint8_t* outend = s->outend;
  const uint8_t* outbase = s->outbase;
  int batch = 0;
  do {
    volatile uint32_t* b = &s->x.u.symqueue[batch * batch_size];
    int batch_len, pos;
    int32_t symt;
    uint32_t lit_mask;
    // Lane 0 spin-waits for the decoder warp to publish the batch length
    if (t == 0) {
      while ((batch_len = s->x.batch_len[batch]) == 0) {}
    } else {
      batch_len = 0;
    }
    batch_len = shuffle(batch_len);
    if (batch_len < 0) { break; } // negative length: end-of-stream sentinel
    // Each lane grabs one queued symbol; 256 pads lanes past the batch end
    symt = (t < batch_len) ? b[t] : 256;
    lit_mask = ballot(symt >= 256); // bitmap of non-literal lanes
    pos = min((__ffs(lit_mask) - 1) & 0xff, 32); // length of leading literal run
    if (t == 0) { s->x.batch_len[batch] = 0; } // release the queue slot
    // Emit the leading literal run, one byte per lane
    if (t < pos && out + t < outend) { out[t] = symt; }
    out += pos;
    batch_len -= pos;
    while (batch_len > 0) {
      int dist, len, symbol;
      // Process a non-literal symbol
      symbol = shuffle(symt, pos);
      len = max((symbol & 0xffff) - 256, 0); // max should be unnecessary, but just in case
      dist = symbol >> 16;
      // Warp-parallel LZ77 copy; i % dist makes overlapped copies correct
      for (int i = t; i < len; i += 32) {
        const uint8_t* src = out + ((i >= dist) ? (i % dist) : i) - dist;
        uint8_t b = (src < outbase) ? 0 : *src; // NOTE: shadows the queue pointer 'b' above
        if (out + i < outend) { out[i] = b; }
      }
      out += len;
      pos++;
      batch_len--;
      // Process subsequent literals, if any
      if (!((lit_mask >> pos) & 1)) {
        len = min((__ffs(lit_mask >> pos) - 1) & 0xff, batch_len);
        symbol = shuffle(symt, (pos + t) & 0x1f);
        if (t < len && out + t < outend) { out[t] = symbol; }
        out += len;
        pos += len;
        batch_len -= len;
      }
    }
    batch = (batch + 1) & (batch_count - 1);
  } while (1);
  if (t == 0) { s->out = out; } // publish the final output position
}
/**
* @brief Initializes a stored block.
*
* Format notes:
*
* - After the two-bit stored block type (00), the stored block length and
* stored bytes are byte-aligned for fast copying. Therefore any leftover
* bits in the byte that has the last bit of the type, as many as seven, are
* discarded. The value of the discarded bits are not defined and should not
* be checked against any expectation.
*
* - The second inverted copy of the stored block length does not have to be
* checked, but it's probably a good idea to do so anyway.
*
* - A stored block can have zero length. This is sometimes used to byte-align
* subsets of the compressed data for random access or partial recovery.
*/
/**
 * @brief Validates the header of a stored (uncompressed) deflate block.
 *
 * Skips to the next byte boundary, reads LEN/NLEN and checks that NLEN is the
 * one's complement of LEN, and verifies the payload fits in the input buffer.
 *
 * @param s decompression state (stored_blk_len is set on success)
 * @return 0 on success, 2 if input is exhausted, -2 if LEN/NLEN disagree
 */
__device__ int init_stored(inflate_state_s* s)
{
  // Stored data is byte-aligned: discard any partial byte of bits.
  unsigned int partial = s->bitpos & 7;
  if (partial != 0) { skipbits(s, 8 - partial); }
  if (s->cur + (s->bitpos >> 3) >= s->end) {
    return 2;  // Not enough input for LEN/NLEN
  }
  // LEN is followed by NLEN, its one's complement.
  uint32_t blk_len  = getbits(s, 16);
  uint32_t blk_nlen = getbits(s, 16);
  if ((blk_len ^ 0xffff) != blk_nlen) {
    return -2;  // corrupt stored-block header
  }
  if (s->cur + (s->bitpos >> 3) + blk_len > s->end) {
    return 2;  // payload is truncated
  }
  s->stored_blk_len = blk_len;
  // Valid stored block
  return 0;
}
/// Copy bytes from stored block to destination (block-wide memcpy).
/// The input range was already validated by init_stored(); every thread in the
/// block participates, and thread 0 re-primes the bit buffer at the end.
__device__ void copy_stored(inflate_state_s* s, int t)
{
  int len = s->stored_blk_len;
  uint8_t* cur = s->cur + (s->bitpos >> 3);  // byte-aligned input position
  uint8_t* out = s->out;
  uint8_t* outend = s->outend;
  uint8_t* cur4;
  // Bytes needed to bring the output pointer to a 16B boundary
  int slow_bytes = min(len, (int)((16 - (size_t)out) & 0xf));
  int fast_bytes, bitpos;
  // Slow copy until output is 16B aligned
  if (slow_bytes) {
    for (int i = t; i < slow_bytes; i += blockDim.x) {
      if (out + i < outend) {
        out[i] = cur[i];  // Input range has already been validated in init_stored()
      }
    }
    cur += slow_bytes;
    out += slow_bytes;
    len -= slow_bytes;
  }
  fast_bytes = len;
  if (out < outend) { fast_bytes = (int)min((size_t)fast_bytes, (outend - out)); }
  fast_bytes &= ~0xf;  // whole 16B chunks only
  // Input may still be misaligned: express its 4B misalignment in bits
  bitpos = ((int)(3 & (size_t)cur)) << 3;
  cur4 = cur - (bitpos >> 3);  // 4B-aligned base for the 32-bit loads below
  if (out < outend) {
    // Fast copy 16 bytes at a time
    for (int i = t * 16; i < fast_bytes; i += blockDim.x * 16) {
      uint4 u;
      u.x = *reinterpret_cast<const uint32_t*>(cur4 + i + 0 * 4);
      u.y = *reinterpret_cast<const uint32_t*>(cur4 + i + 1 * 4);
      u.z = *reinterpret_cast<const uint32_t*>(cur4 + i + 2 * 4);
      u.w = *reinterpret_cast<const uint32_t*>(cur4 + i + 3 * 4);
      if (bitpos != 0) {
        // Funnel-shift adjacent words to realign the misaligned input
        uint32_t v = (bitpos != 0) ? *reinterpret_cast<const uint32_t*>(cur4 + i + 4 * 4) : 0;
        u.x = __funnelshift_rc(u.x, u.y, bitpos);
        u.y = __funnelshift_rc(u.y, u.z, bitpos);
        u.z = __funnelshift_rc(u.z, u.w, bitpos);
        u.w = __funnelshift_rc(u.w, v, bitpos);
      }
      *reinterpret_cast<uint4*>(out + i) = u;  // out is 16B-aligned here
    }
  }
  cur += fast_bytes;
  out += fast_bytes;
  len -= fast_bytes;
  // Slow copy for remaining bytes
  for (int i = t; i < len; i += blockDim.x) {
    if (out + i < outend) {
      out[i] = cur[i];  // Input range has already been validated in init_stored()
    }
  }
  out += len;
  __syncthreads();
  if (t == 0) {
    // Reset bitstream to end of block: refill the two bit-buffer words from
    // the 4B-aligned word containing the next input byte
    uint8_t* p = cur + len;
    uint32_t prefix_bytes = (uint32_t)(((size_t)p) & 3);
    p -= prefix_bytes;
    s->cur = p;
    s->bitbuf.x = (p < s->end) ? *reinterpret_cast<uint32_t*>(p) : 0;
    p += 4;
    s->bitbuf.y = (p < s->end) ? *reinterpret_cast<uint32_t*>(p) : 0;
    s->bitpos = prefix_bytes * 8;
    s->out = out;
  }
}
#if ENABLE_PREFETCH
/// Arms the shared prefetcher state before WARP2 starts running.
/// Only thread 0 writes; other threads return immediately.
__device__ void init_prefetcher(inflate_state_s* s, int t)
{
  if (t != 0) { return; }
  s->pref.cur_p = s->cur;  // start prefetching from the current read position
  s->pref.run   = 1;       // cleared later by WARP0 to stop the prefetch loop
}
/// @brief WARP2: keeps the shared prefetch ring buffer ahead of the bitstream
/// reader (WARP0). Loops until WARP0 clears s->pref.run; each pass copies
/// 32 x 4 bytes of input, one 32-bit word per lane.
__device__ void prefetch_warp(volatile inflate_state_s* s, int t)
{
  const uint8_t* cur_p = s->pref.cur_p;
  const uint8_t* end = s->end;
  while (shuffle((t == 0) ? s->pref.run : 0)) {
    int32_t cur_lo = (int32_t)(size_t)cur_p;
    // Only prefetch while we stay inside the look-ahead window relative to
    // the reader's position (s->cur); otherwise spin until it catches up
    int do_pref =
      shuffle((t == 0) ? (cur_lo - *(volatile int32_t*)&s->cur < prefetch_size - 32 * 4 - 4) : 0);
    if (do_pref) {
      const uint8_t* p = cur_p + 4 * t;  // each lane loads one word
      *prefetch_addr32(s->pref, p) = (p < end) ? *reinterpret_cast<const uint32_t*>(p) : 0;
      cur_p += 4 * 32;
      __threadfence_block();  // make the prefetched data visible first...
      __syncwarp();
      if (!t) {
        s->pref.cur_p = cur_p;  // ...then publish progress to the reader warp
        __threadfence_block();
      }
    }
  }
}
#endif // ENABLE_PREFETCH
/**
 * @brief Parse a GZIP member header and return its length in bytes
 * See https://tools.ietf.org/html/rfc1952
 *
 * @param src pointer to the start of the (possibly) gzip-wrapped stream
 * @param src_size number of valid bytes at @p src
 *
 * @return header length in bytes on success; -1 if the signature does not
 * match or the buffer cannot hold the header plus the mandatory 8-byte
 * footer (CRC32 + ISIZE)
 */
__device__ int parse_gzip_header(const uint8_t* src, size_t src_size)
{
  int hdr_len = -1;
  // Minimum valid member: 10-byte header + 8-byte footer
  if (src_size >= 18) {
    uint32_t sig = (src[0] << 16) | (src[1] << 8) | src[2];
    if (sig == 0x1f8b08)  // 24-bit GZIP inflate signature {0x1f, 0x8b, 0x08}
    {
      uint8_t flags = src[3];
      hdr_len       = 10;
      if (flags & GZIPHeaderFlag::fextra)  // Extra fields present
      {
        int xlen = src[hdr_len] | (src[hdr_len + 1] << 8);
        // RFC 1952: FEXTRA is a 2-byte XLEN field followed by XLEN payload
        // bytes; the previous code skipped only XLEN and was 2 bytes short
        hdr_len += 2 + xlen;
        if (hdr_len >= src_size) return -1;
      }
      if (flags & GZIPHeaderFlag::fname)  // Original file name present
      {
        // Skip zero-terminated string
        do {
          if (hdr_len >= src_size) return -1;
        } while (src[hdr_len++] != 0);
      }
      if (flags & GZIPHeaderFlag::fcomment)  // Comment present
      {
        // Skip zero-terminated string
        do {
          if (hdr_len >= src_size) return -1;
        } while (src[hdr_len++] != 0);
      }
      if (flags & GZIPHeaderFlag::fhcrc)  // Header CRC present
      {
        hdr_len += 2;
      }
      // Require room for the 8-byte footer after the header
      if (hdr_len + 8 >= src_size) hdr_len = -1;
    }
  }
  return hdr_len;
}
/**
* @brief INFLATE decompression kernel
*
* blockDim {block_size,1,1}
*
* @tparam block_size Thread block dimension for this call
* @param inputs Source and destination buffer information per block
* @param outputs Decompression status buffer per block
* @param parse_hdr If nonzero, indicates that the compressed bitstream includes a GZIP header
*/
template <int block_size>
__global__ void __launch_bounds__(block_size)
  inflate_kernel(gpu_inflate_input_s* inputs, gpu_inflate_status_s* outputs, int parse_hdr)
{
  __shared__ __align__(16) inflate_state_s state_g;
  int t = threadIdx.x;
  int z = blockIdx.x;  // one thread block per compressed buffer
  inflate_state_s* state = &state_g;
  if (!t) {
    uint8_t* p = const_cast<uint8_t*>(static_cast<uint8_t const*>(inputs[z].srcDevice));
    size_t src_size = inputs[z].srcSize;
    uint32_t prefix_bytes;
    // Parse header if needed
    state->err = 0;
    if (parse_hdr) {
      int hdr_len = parse_gzip_header(p, src_size);
      src_size = (src_size >= 8) ? src_size - 8 : 0;  // ignore footer
      if (hdr_len >= 0) {
        p += hdr_len;
        src_size -= hdr_len;
      } else {
        state->err = hdr_len;  // propagate the (negative) header error code
      }
    }
    // Initialize shared state
    state->out = const_cast<uint8_t*>(static_cast<uint8_t const*>(inputs[z].dstDevice));
    state->outbase = state->out;
    state->outend = state->out + inputs[z].dstSize;
    state->end = p + src_size;
    // Back the input pointer up to a 4B boundary and prime the bit buffer
    prefix_bytes = (uint32_t)(((size_t)p) & 3);
    p -= prefix_bytes;
    state->cur = p;
    state->bitbuf.x = (p < state->end) ? *reinterpret_cast<uint32_t*>(p) : 0;
    p += 4;
    state->bitbuf.y = (p < state->end) ? *reinterpret_cast<uint32_t*>(p) : 0;
    state->bitpos = prefix_bytes * 8;
  }
  __syncthreads();
  // Main loop decoding blocks
  while (!state->err) {
    if (!t) {
      // Thread0: read last flag, block type and custom huffman tables if any
      if (state->cur + (state->bitpos >> 3) >= state->end)
        state->err = 2;  // ran out of input
      else {
        state->blast = getbits(state, 1);
        state->btype = getbits(state, 2);
        if (state->btype == 0)
          state->err = init_stored(state);
        else if (state->btype == 1)
          state->err = init_fixed(state);
        else if (state->btype == 2)
          state->err = init_dynamic(state);
        else
          state->err = -1;  // Invalid block
      }
    }
    __syncthreads();
    if (state->err && (state->btype == 1 || state->btype == 2)) {
      // Initializes lookup tables (block wide)
      init_length_lut(state, t);
      init_distance_lut(state, t);
#if ENABLE_PREFETCH
      // Initialize prefetcher
      init_prefetcher(state, t);
#endif
      if (t < batch_count) { state->x.batch_len[t] = 0; }
      __syncthreads();
      // decode data until end-of-block code; the block's warps specialize:
      if (t < 1 * 32) {
        // WARP0: decode variable-length symbols
        if (!t) {
          // Thread0: decode symbols (single threaded)
          decode_symbols(state);
#if ENABLE_PREFETCH
          state->pref.run = 0;  // stop the prefetcher warp
#endif
        }
      } else if (t < 2 * 32) {
        // WARP1: perform LZ77 using length and distance codes from WARP0
        process_symbols(state, t & 0x1f);
      }
#if ENABLE_PREFETCH
      else if (t < 3 * 32) {
        // WARP2: Prefetcher: prefetch data for WARP0
        prefetch_warp(state, t & 0x1f);
      }
#endif
      // else WARP3: idle
    } else if (!state->err && state->btype == 0) {
      // Uncompressed block (block-wide memcpy)
      copy_stored(state, t);
    }
    if (state->blast) break;
    __syncthreads();
  }
  __syncthreads();
  // Output decompression status and length
  if (!t) {
    if (state->err == 0 && state->cur + ((state->bitpos + 7) >> 3) > state->end) {
      // Read past the end of the input buffer
      state->err = 2;
    } else if (state->err == 0 && state->out > state->outend) {
      // Output buffer too small
      state->err = 1;
    }
    outputs[z].bytes_written = state->out - state->outbase;
    outputs[z].status = state->err;
    outputs[z].reserved = (int)(state->end - state->cur);  // Here mainly for debug purposes
  }
}
/**
* @brief Copy a group of buffers
*
* blockDim {1024,1,1}
*
* @param inputs Source and destination information per block
*/
__global__ void __launch_bounds__(1024) copy_uncompressed_kernel(gpu_inflate_input_s* inputs)
{
  __shared__ const uint8_t* volatile src_g;
  __shared__ uint8_t* volatile dst_g;
  __shared__ uint32_t volatile copy_len_g;
  uint32_t t = threadIdx.x;
  uint32_t z = blockIdx.x;  // one thread block per buffer
  const uint8_t* src;
  uint8_t* dst;
  uint32_t len, src_align_bytes, src_align_bits, dst_align_bytes;
  // Thread0 reads the descriptor; values are broadcast through shared memory
  if (!t) {
    src = static_cast<const uint8_t*>(inputs[z].srcDevice);
    dst = static_cast<uint8_t*>(inputs[z].dstDevice);
    len = min((uint32_t)inputs[z].srcSize, (uint32_t)inputs[z].dstSize);
    src_g = src;
    dst_g = dst;
    copy_len_g = len;
  }
  __syncthreads();
  src = src_g;
  dst = dst_g;
  len = copy_len_g;
  // Align output to 32-bit
  dst_align_bytes = 3 & -reinterpret_cast<intptr_t>(dst);
  if (dst_align_bytes != 0) {
    uint32_t align_len = min(dst_align_bytes, len);
    if (t < align_len) { dst[t] = src[t]; }
    src += align_len;
    dst += align_len;
    len -= align_len;
  }
  // Input may still be misaligned; realign with funnel shifts while copying
  src_align_bytes = (uint32_t)(3 & reinterpret_cast<uintptr_t>(src));
  src_align_bits = src_align_bytes << 3;
  while (len >= 32) {
    const uint32_t* src32 = reinterpret_cast<const uint32_t*>(src - src_align_bytes);
    uint32_t copy_cnt = min(len >> 2, 1024);  // words this pass (<= blockDim.x)
    if (t < copy_cnt) {
      uint32_t v = src32[t];
      // NOTE(review): when the input is misaligned, src32[t + 1] can read up
      // to 4 bytes beyond the last copied word — confirm source buffers are
      // readable there.
      if (src_align_bits != 0) { v = __funnelshift_r(v, src32[t + 1], src_align_bits); }
      reinterpret_cast<uint32_t*>(dst)[t] = v;
    }
    src += copy_cnt * 4;
    dst += copy_cnt * 4;
    len -= copy_cnt * 4;
  }
  // Tail: remaining 0-31 bytes, one per thread
  if (t < len) { dst[t] = src[t]; }
}
/**
 * @brief Launches one inflate_kernel thread block per compressed buffer.
 *
 * @param inputs per-buffer source/destination descriptors (device memory)
 * @param outputs per-buffer decompression status (device memory)
 * @param count number of buffers; no-op when <= 0
 * @param parse_hdr nonzero if each stream is wrapped in a GZIP header
 * @param stream CUDA stream on which to launch
 */
cudaError_t __host__ gpuinflate(gpu_inflate_input_s* inputs,
                                gpu_inflate_status_s* outputs,
                                int count,
                                int parse_hdr,
                                rmm::cuda_stream_view stream)
{
  constexpr int block_size = 128;  // Threads per block (4 warps)
  if (count <= 0) { return cudaSuccess; }
  inflate_kernel<block_size>
    <<<count, block_size, 0, stream.value()>>>(inputs, outputs, parse_hdr);
  return cudaSuccess;
}
/**
 * @brief Launches one 1024-thread block per buffer to copy it verbatim.
 *
 * @param inputs per-buffer source/destination descriptors (device memory)
 * @param count number of buffers; no-op when <= 0
 * @param stream CUDA stream on which to launch
 */
cudaError_t __host__ gpu_copy_uncompressed_blocks(gpu_inflate_input_s* inputs,
                                                  int count,
                                                  rmm::cuda_stream_view stream)
{
  if (count <= 0) { return cudaSuccess; }
  copy_uncompressed_kernel<<<count, 1024, 0, stream.value()>>>(inputs);
  return cudaSuccess;
}
} // namespace io
} // namespace cudf
|
d1496c61e9962ccd0313f939a3bae5af3465ed83.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
#include "matchingAggregation.h"
#include "hipcub/hipcub.hpp"
#include "relaxation.cu"
// kernels block size
#define make_P_BLOCKSIZE 1024
#define aggregate_BLOCKSIZE 1024
// algorithm contast
#define FTCOARSE_INC 100
#define COARSERATIO_THRSLD 1.2
double TOTAL_MUL_TIME;
//#############################################################################
// Builds the per-level smoother data: diagonal (Jacobi), diagonal + absolute
// row sums (L1 smoother), or an aFSAI preconditioner. force_relax_type
// (when != -1) overrides amg_data->CRrelax_type, e.g. for the coarsest level.
void relaxPrepare(handles *h, int level, CSR *A, hierarchy *hrrch, buildData *amg_data, int force_relax_type=-1){
  int relax_type;
  if(force_relax_type != -1)
    relax_type = force_relax_type;
  else
    relax_type = amg_data->CRrelax_type;
  if(relax_type == 0){
    // jacobi: store diag(A), freeing any stale data for this level first
    if(hrrch->D_array[level] != NULL)
      Vector::free(hrrch->D_array[level]);
    hrrch->D_array[level] = CSRm::diag(A);
  }else if(relax_type == 1 || relax_type == 2){
    // Gauss Seidel (not implemented here)
    assert(false);
  }else if(relax_type == 4){
    // L1 smoother: needs both diag(A) and the absolute row sums of A
    if(hrrch->D_array[level] != NULL)
      Vector::free(hrrch->D_array[level]);
    hrrch->D_array[level] = CSRm::diag(A);
    if(hrrch->M_array[level] != NULL)
      Vector::free(hrrch->M_array[level]);
    hrrch->M_array[level] = CSRm::absoluteRowSum(A, NULL);
  }else if(relax_type == 5){
#if AFSAI == 1
    // afsai
    // precond(A, GPUDevId, nProc, parPrec.nscal, parPrec.nstep, parPrec.stepSize, parPrec.epsilon);
    //int ITERATION_NUM = 6;
    //printf("\n\n aFSAI_PRE_ITER: %d\n\n", aFSAI_PRE_ITER);
    PRECOND *pre = precond2(A, aFSAI_PRE_ITER);
    hrrch->pre_array[level] = pre;
#else
    printf("ERROR] RELAX_TYPE NOT SUPPORTED\n");
    exit(1);
#endif
  }
  // NOTE(review): any other relax_type value silently leaves the level untouched.
}
//##################################################################################################
#define applyOmega_BLOCKSIZE 1024
// Computes A := I - omega*A in place, one warp per matrix row: lanes stride
// across the row's nonzeros, and only diagonal entries receive the +1.
__global__ void _applyOmega(itype n, vtype *A_val, itype *A_col, itype *A_row, const vtype omega){
  stype tid = blockDim.x * blockIdx.x + threadIdx.x;
  int warp = tid / WARP_SIZE;  // this warp handles matrix row `warp`
  if(warp >= n)
    return;
  int lane = tid % WARP_SIZE;
  for(int j=A_row[warp]+lane; j<A_row[warp+1]; j+=WARP_SIZE){
    A_val[j] = -omega * A_val[j];
    if(A_col[j] == warp)  // diagonal entry
      A_val[j] += 1.;
  }
}
// Host wrapper for _applyOmega; A must already reside on the device.
// NOTE(review): the third gb1d argument presumably requests warp-per-row grid
// sizing — confirm against gb1d's definition.
void applyOmega(CSR *A, const vtype omega, hipStream_t stream=DEFAULT_STREAM){
  assert(A->on_the_device);
  gridblock gb = gb1d(A->n, applyOmega_BLOCKSIZE, true);
  hipLaunchKernelGGL(( _applyOmega), dim3(gb.g), dim3(gb.b), 0, stream, A->n, A->val, A->col, A->row, omega);
}
//##################################################################################################
// Builds the pairwise-aggregation prolongator entries from a matching M:
// matched pairs with sufficiently large combined weight share one coarse
// index; everything else becomes a singleton. `nuns` counts the coarse
// variables via atomicAdd, so coarse indices are assigned in a
// non-deterministic (but valid) order.
__global__
void _aggregate_symmetric(stype n, itype *A_row, vtype *P_val, itype *M, itype *markc, vtype *w, itype *nuns){
  itype i = blockDim.x * blockIdx.x + threadIdx.x;
  if(i >= n)
    return;
  itype v = i;
  itype u = M[i];  // matched partner of v, or -1 if unmatched
  // if it's a matched pair
  if(u != -1){
    vtype wv = w[v], wu = w[u];
    vtype normwagg = sqrt(wv * wv + wu * wu);
    if(normwagg > DBL_EPSILON){
      // good pair
      // if v is the pair master (the lower index claims the coarse id for both)
      if(v < u){
        int nuns_local = atomicAdd(nuns, 1);
        markc[v] = nuns_local;
        markc[u] = nuns_local;
        P_val[v] = wv / normwagg;
        P_val[u] = wu / normwagg;
      }
      // if v,u is a good pair, exit
      return;
    }
  }
  // only single vertex and no-good pairs reach this point
  if( fabs(w[i]) > DBL_EPSILON ){
    // good single: unit-magnitude entry preserving the sign of w
    int nuns_local = atomicAdd(nuns, 1);
    markc[v] = nuns_local;
    P_val[v] = w[v] / fabs(w[v]);
  }else{
    // bad single: keep the row but with a zero prolongator entry
    int nuns_local = atomicAdd(nuns, 1);
    markc[v] = nuns_local;
    P_val[v] = 0.0;
  }
}
//####################################################################################
// Fills the CSR row-pointer array of the pairwise prolongator P, which has
// exactly one nonzero per row, so row[v] = v. Note the `v > n` guard (not >=):
// a CSR row-pointer array has n+1 entries and the last one must be set to n.
__global__
void _make_P_row(itype n, itype* P_row){
  itype v = blockDim.x * blockIdx.x + threadIdx.x;
  if(v > n)
    return;
  P_row[v] = v;
}
//####################################################################################
// Computes one pairwise-aggregation prolongator P from a maximum-weight
// matching of A (suitor algorithm). P has n rows with one nonzero each; its
// column count P->m is counted on the device via `nuns`.
CSR* matchingPairAggregation(CSR *A, vector<vtype> *w){
  itype n = A->n;
  // Matching
  vector<itype> *M = Matching::suitor(A, w);
  // NOTE(review): M appears to be leaked — it is never freed after use.
  gridblock gb;
  // P: n rows, 1 nnz per row; the column count is patched below
  CSR *P = CSRm::init(n, 1, n, true, true, false);
  scalar<itype> *nuns = Scalar::init<itype>(0, true);
  gb = gb1d(n, aggregate_BLOCKSIZE);
  hipLaunchKernelGGL(( _aggregate_symmetric), dim3(gb.g), dim3(gb.b), 0, 0, n, A->row, P->val, M->val, P->col, w->val, nuns->val);
  int* nuns_local = Scalar::getvalueFromDevice(nuns);
  Scalar::free(nuns);
  gb = gb1d(n, make_P_BLOCKSIZE);
  hipLaunchKernelGGL(( _make_P_row), dim3(gb.g), dim3(gb.b), 0, 0, n, P->row);
  P->m = nuns_local[0];  // actual number of coarse variables
  free(nuns_local);
  return P;
}
// Performs up to `sweepnumber` rounds of pairwise matching aggregation on A,
// composing the per-sweep prolongators into *P (and its transpose *R) and
// returning the coarse-level matrix. *w is replaced by the restricted smooth
// vector. Galerkin triple products are timed into TOTAL_MUL_TIME.
CSR* matchingAggregation(handles *h, buildData *amg_data, CSR *A, vector<vtype> **w, CSR **P, CSR **R){
  // A_{i-1}
  CSR *Ai_ = A, *Ai = NULL;
  CSR *Ri_ = NULL;
  // w_{i-1}
  vector<vtype> *wi_ = *w, *wi = NULL;
  double size_coarse, size_precoarse;
  double coarse_ratio;
  for(int i=0; i<amg_data->sweepnumber; i++){
    CSR *Pi_ = matchingPairAggregation(Ai_, wi_); /* routine with the real work. It calls the suitor procedure */
    //CSR *Pi_ = aggregateCPU(Ai_, wi_);
    // transpose
    Ri_ = CSRm::T(h->cusparse_h0, Pi_);
    TIME::start();
#if GALERKIN_PRODUCT_TYPE == 0
    // Pi-1.T * Ai-1
    CSR *temp = CSRm::CSRCSR_product(h->cusparse_h0, Ri_, Ai_, false, false);
    // Ai = (RA)P
    Ai = CSRm::CSRCSR_product(h->cusparse_h0, temp, Pi_, false, false);
#elif GALERKIN_PRODUCT_TYPE == 1
    // Pi-1.T * Ai-1
    CSR *temp = CSRm::CSRCSR_product(h->cusparse_h0, Ai_, Pi_, false, false);
    // Ai = R(AP)
    Ai = CSRm::CSRCSR_product(h->cusparse_h0, Ri_, temp, false, false);
#endif
    TOTAL_MUL_TIME += TIME::stop();
    CSRm::free(temp);
    //wi = Pi-1.T * wi-1
    wi = CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, Ri_, wi_, NULL, false);
    size_precoarse = Ai_->n;
    size_coarse = Ai->n;
    coarse_ratio = size_precoarse / size_coarse;
    if(i == 0)
      *P = Pi_;
    else{
      // Compose the accumulated prolongator with this sweep's Pi_, then drop
      // the per-sweep intermediates (only needed for the product).
      TIME::start();
      if(i == 1){
        //TODO special nsparse
        *P = CSRm::CSRCSR_product(h->cusparse_h0, *P, Pi_, false, false);
      }else{
        *P = CSRm::CSRCSR_product(h->cusparse_h0, *P, Pi_, false, false);
      }
      TOTAL_MUL_TIME += TIME::stop();
      CSRm::free(Ri_);
      Ri_ = NULL;
      CSRm::free(Pi_);
      CSRm::free(Ai_);
    }
    Vector::free(wi_);
    // Slow coarsening: relax the coarse-size exit threshold for later levels
    if (coarse_ratio <= COARSERATIO_THRSLD)
      amg_data->ftcoarse = FTCOARSE_INC;
    // exit condiction
    if(size_coarse <= amg_data->ftcoarse * amg_data->maxcoarsesize)
      break;
    Ai_ = Ai;
    wi_ = wi;
  }
  *w = wi;
  // R is either the last sweep's transpose or recomputed from the composed P
  if(Ri_ == NULL)
    *R = CSRm::T(h->cusparse_h0, *P);
  else
    *R = Ri_;
  return Ai;
}
// Builds the full AMG hierarchy by repeated matching aggregation, optionally
// smoothing the prolongators (agg_interp_type == 1), and prepares the
// per-level relaxation data. Returns the finalized hierarchy.
hierarchy* adaptiveCoarsening(handles *h, buildData *amg_data){
  TOTAL_MUL_TIME = 0;
  CSR *A = amg_data->A;
  vector<vtype> *w = amg_data->w;
  vector<vtype> *w_temp = Vector::clone(w);
  CSR *P = NULL, *R = NULL;
  hierarchy *hrrch = AMG::Hierarchy::init(amg_data->maxlevels + 1);
  hrrch->A_array[0] = A;
  vtype normw = CSRm::vectorANorm(h->cusparse_h0, h->cublas_h, A, w_temp);
  vtype avcoarseratio = 0.;
  int level = 0;
  relaxPrepare(h, level, hrrch->A_array[level], hrrch, amg_data);
  matchingAggregationContext::initContext(A->n);
  amg_data->ftcoarse = 1;
  TIME::start();
  if(normw > DBL_EPSILON){
    // Coarsen until maxlevels or until the coarse size is small enough
    for(level=1; level < amg_data->maxlevels;){
      hrrch->A_array[level] = matchingAggregation(h, amg_data, hrrch->A_array[level-1], &w_temp, &P, &R);
      //CSRMatrixPrintMM(hrrch->A_array[level], "/home/pasquini/singularity.mtx");
      if(!amg_data->agg_interp_type){
        // #change STREAM
        relaxPrepare(h, level, hrrch->A_array[level], hrrch, amg_data);
      }
      hrrch->P_array[level-1] = P;
      hrrch->R_array[level-1] = R;
      vtype size_coarse = hrrch->A_array[level]->n;
      vtype coarse_ratio = hrrch->A_array[level-1]->n / size_coarse;
      avcoarseratio = avcoarseratio + coarse_ratio;
      level++;
      // exit condiction
      if(size_coarse <= amg_data->ftcoarse * amg_data->maxcoarsesize)
        break;
    }
  }else{
    std::cout << "Warning: no need to build multigrid since the matrix is well conditioned\n";
  }
  float aggregation_time = TIME::stop();
  // ##############################################################################################
  // Smoothed aggregation: replace each P_j with (I - omega D^-1 A) P_j and
  // rebuild the Galerkin operators accordingly.
  if(amg_data->agg_interp_type == 1){
    for(int j=0; j<level-1; j++){
      CSR *A = hrrch->A_array[j];
      vector<vtype> *D = hrrch->D_array[j];
      //vector<vtype> *D = CSRm::diag(A);
      assert(D != NULL);
      CSR *A_temp = CSRm::clone(A);
      CSRm::matrixVectorScaling(A_temp, D);
      vtype omega = 4.0 / ( 3.0 * CSRm::infinityNorm(A_temp) );
      applyOmega(A_temp, omega);
      CSR *P_temp = hrrch->P_array[j];
      hrrch->P_array[j] = CSRm::CSRCSR_product(h->cusparse_h0, A_temp, P_temp);
      CSRm::free(P_temp);
      CSRm::free(A_temp);
      // transpose
      hrrch->R_array[j] = CSRm::T(h->cusparse_h0, hrrch->P_array[j]);
      A_temp = CSRm::CSRCSR_product(h->cusparse_h0, hrrch->R_array[j], hrrch->A_array[j]);
      CSRm::free(hrrch->A_array[j+1]);
      hrrch->A_array[j+1] = CSRm::CSRCSR_product(h->cusparse_h0, A_temp, hrrch->P_array[j]);
      relaxPrepare(h, j+1, hrrch->A_array[j+1], hrrch, amg_data);
      CSRm::free(A_temp);
    }
  }
  // ##############################################################################################
  AMG::Hierarchy::finalize_level(hrrch, level);
  if(amg_data->coarse_solver == 9){
    assert(0);
  }else{
    // in order to apply, to the coarsest matrix, the correct relax-preprocessing
    if(amg_data->coarse_solver != amg_data->CRrelax_type ){
      relaxPrepare(h, level-1, hrrch->A_array[level-1], hrrch, amg_data, amg_data->coarse_solver);
    }
  }
  AMG::Hierarchy::finalize_cmplx(hrrch);
  AMG::Hierarchy::finalize_wcmplx(hrrch);
  // NOTE(review): divides by (level-1); the well-conditioned early exit leaves
  // level == 1, which would divide by zero — confirm that path is acceptable.
  hrrch->avg_cratio = avcoarseratio / (level-1);
  AMG::Hierarchy::printInfo(hrrch);
  //Eval::printMetaData("time;aggregation_time", aggregation_time, 1);
  Eval::printMetaData("agg;level_number", level-1, 0);
  Eval::printMetaData("agg;avg_coarse_ratio", hrrch->avg_cratio, 1);
  Eval::printMetaData("agg;OpCmplx", hrrch->op_cmplx, 1);
  //Eval::printMetaData("agg;OpCmplxW", hrrch->op_wcmplx, 1);
  //Eval::printMetaData("agg;coarsest_size", hrrch->A_array[level-1]->n, 0);
  Vector::free(w_temp);
  matchingAggregationContext::freeContext();
  std::cout << "TOTAL_MUL_TIME: " << TOTAL_MUL_TIME << "\n\n";
  return hrrch;
}
//#############################################################################
| d1496c61e9962ccd0313f939a3bae5af3465ed83.cu | #pragma once
#include "matchingAggregation.h"
#include "cub/cub.cuh"
#include "relaxation.cu"
// kernels block size
#define make_P_BLOCKSIZE 1024
#define aggregate_BLOCKSIZE 1024
// algorithm contast
#define FTCOARSE_INC 100
#define COARSERATIO_THRSLD 1.2
double TOTAL_MUL_TIME;
//#############################################################################
// Builds the per-level smoother data: diagonal (Jacobi), diagonal + absolute
// row sums (L1 smoother), or an aFSAI preconditioner. force_relax_type
// (when != -1) overrides amg_data->CRrelax_type, e.g. for the coarsest level.
void relaxPrepare(handles *h, int level, CSR *A, hierarchy *hrrch, buildData *amg_data, int force_relax_type=-1){
  const int relax_type = (force_relax_type != -1) ? force_relax_type
                                                  : amg_data->CRrelax_type;
  switch(relax_type){
    case 0:
      // Jacobi: (re)build the diagonal of A, dropping any stale level data.
      if(hrrch->D_array[level] != NULL)
        Vector::free(hrrch->D_array[level]);
      hrrch->D_array[level] = CSRm::diag(A);
      break;
    case 1:
    case 2:
      // Gauss-Seidel is not implemented here.
      assert(false);
      break;
    case 4:
      // L1 smoother: needs both diag(A) and the absolute row sums of A.
      if(hrrch->D_array[level] != NULL)
        Vector::free(hrrch->D_array[level]);
      hrrch->D_array[level] = CSRm::diag(A);
      if(hrrch->M_array[level] != NULL)
        Vector::free(hrrch->M_array[level]);
      hrrch->M_array[level] = CSRm::absoluteRowSum(A, NULL);
      break;
    case 5:
#if AFSAI == 1
      // Adaptive FSAI preconditioner for this level.
      {
        PRECOND *pre = precond2(A, aFSAI_PRE_ITER);
        hrrch->pre_array[level] = pre;
      }
#else
      printf("ERROR] RELAX_TYPE NOT SUPPORTED\n");
      exit(1);
#endif
      break;
    default:
      // Any other relax_type leaves the level untouched (matches prior behavior).
      break;
  }
}
//##################################################################################################
#define applyOmega_BLOCKSIZE 1024
// Computes A := I - omega*A in place. One warp handles one matrix row: lanes
// stride across the row's nonzeros, and only diagonal entries receive the +1.
__global__ void _applyOmega(itype n, vtype *A_val, itype *A_col, itype *A_row, const vtype omega){
  stype tid = blockDim.x * blockIdx.x + threadIdx.x;
  int row = tid / WARP_SIZE;
  int lane = tid % WARP_SIZE;
  if(row >= n)
    return;
  for(int j = A_row[row] + lane; j < A_row[row + 1]; j += WARP_SIZE){
    vtype scaled = -omega * A_val[j];
    A_val[j] = (A_col[j] == row) ? scaled + 1. : scaled;
  }
}
// Host wrapper for _applyOmega; A must already reside on the device.
// NOTE(review): the third gb1d argument presumably requests warp-per-row grid
// sizing — confirm against gb1d's definition.
void applyOmega(CSR *A, const vtype omega, cudaStream_t stream=DEFAULT_STREAM){
  assert(A->on_the_device);
  gridblock gb = gb1d(A->n, applyOmega_BLOCKSIZE, true);
  _applyOmega<<<gb.g, gb.b, 0, stream>>>(A->n, A->val, A->col, A->row, omega);
}
//##################################################################################################
// Builds the pairwise-aggregation prolongator entries from a matching M:
// matched pairs with sufficiently large combined weight share one coarse
// index; everything else becomes a singleton. `nuns` counts the coarse
// variables via atomicAdd, so coarse indices are assigned in a
// non-deterministic (but valid) order.
__global__
void _aggregate_symmetric(stype n, itype *A_row, vtype *P_val, itype *M, itype *markc, vtype *w, itype *nuns){
  itype i = blockDim.x * blockIdx.x + threadIdx.x;
  if(i >= n)
    return;
  itype v = i;
  itype u = M[i];  // matched partner of v, or -1 if unmatched
  // if it's a matched pair
  if(u != -1){
    vtype wv = w[v], wu = w[u];
    vtype normwagg = sqrt(wv * wv + wu * wu);
    if(normwagg > DBL_EPSILON){
      // good pair
      // if v is the pair master (the lower index claims the coarse id for both)
      if(v < u){
        int nuns_local = atomicAdd(nuns, 1);
        markc[v] = nuns_local;
        markc[u] = nuns_local;
        P_val[v] = wv / normwagg;
        P_val[u] = wu / normwagg;
      }
      // if v,u is a good pair, exit
      return;
    }
  }
  // only single vertex and no-good pairs reach this point
  if( fabs(w[i]) > DBL_EPSILON ){
    // good single: unit-magnitude entry preserving the sign of w
    int nuns_local = atomicAdd(nuns, 1);
    markc[v] = nuns_local;
    P_val[v] = w[v] / fabs(w[v]);
  }else{
    // bad single: keep the row but with a zero prolongator entry
    int nuns_local = atomicAdd(nuns, 1);
    markc[v] = nuns_local;
    P_val[v] = 0.0;
  }
}
//####################################################################################
// Fills the CSR row-pointer array of the pairwise prolongator P, which has
// exactly one nonzero per row, so row[v] = v. Note the `v > n` guard (not >=):
// a CSR row-pointer array has n+1 entries and the last one must be set to n.
__global__
void _make_P_row(itype n, itype* P_row){
  itype v = blockDim.x * blockIdx.x + threadIdx.x;
  if(v > n)
    return;
  P_row[v] = v;
}
//####################################################################################
// Computes one pairwise-aggregation prolongator P from a maximum-weight
// matching of A (suitor algorithm). P has n rows with one nonzero each; its
// column count P->m is counted on the device via `nuns`.
CSR* matchingPairAggregation(CSR *A, vector<vtype> *w){
  itype n = A->n;
  // Matching
  vector<itype> *M = Matching::suitor(A, w);
  // NOTE(review): M appears to be leaked — it is never freed after use.
  gridblock gb;
  // P: n rows, 1 nnz per row; the column count is patched below
  CSR *P = CSRm::init(n, 1, n, true, true, false);
  scalar<itype> *nuns = Scalar::init<itype>(0, true);
  gb = gb1d(n, aggregate_BLOCKSIZE);
  _aggregate_symmetric<<<gb.g, gb.b>>>(n, A->row, P->val, M->val, P->col, w->val, nuns->val);
  int* nuns_local = Scalar::getvalueFromDevice(nuns);
  Scalar::free(nuns);
  gb = gb1d(n, make_P_BLOCKSIZE);
  _make_P_row<<<gb.g, gb.b>>>(n, P->row);
  P->m = nuns_local[0];  // actual number of coarse variables
  free(nuns_local);
  return P;
}
// Performs up to `sweepnumber` rounds of pairwise matching aggregation on A,
// composing the per-sweep prolongators into *P (and its transpose *R) and
// returning the coarse-level matrix. *w is replaced by the restricted smooth
// vector. Galerkin triple products are timed into TOTAL_MUL_TIME.
CSR* matchingAggregation(handles *h, buildData *amg_data, CSR *A, vector<vtype> **w, CSR **P, CSR **R){
  // A_{i-1}
  CSR *Ai_ = A, *Ai = NULL;
  CSR *Ri_ = NULL;
  // w_{i-1}
  vector<vtype> *wi_ = *w, *wi = NULL;
  double size_coarse, size_precoarse;
  double coarse_ratio;
  for(int i=0; i<amg_data->sweepnumber; i++){
    CSR *Pi_ = matchingPairAggregation(Ai_, wi_); /* routine with the real work. It calls the suitor procedure */
    //CSR *Pi_ = aggregateCPU(Ai_, wi_);
    // transpose
    Ri_ = CSRm::T(h->cusparse_h0, Pi_);
    TIME::start();
#if GALERKIN_PRODUCT_TYPE == 0
    // Pi-1.T * Ai-1
    CSR *temp = CSRm::CSRCSR_product(h->cusparse_h0, Ri_, Ai_, false, false);
    // Ai = (RA)P
    Ai = CSRm::CSRCSR_product(h->cusparse_h0, temp, Pi_, false, false);
#elif GALERKIN_PRODUCT_TYPE == 1
    // Pi-1.T * Ai-1
    CSR *temp = CSRm::CSRCSR_product(h->cusparse_h0, Ai_, Pi_, false, false);
    // Ai = R(AP)
    Ai = CSRm::CSRCSR_product(h->cusparse_h0, Ri_, temp, false, false);
#endif
    TOTAL_MUL_TIME += TIME::stop();
    CSRm::free(temp);
    //wi = Pi-1.T * wi-1
    wi = CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, Ri_, wi_, NULL, false);
    size_precoarse = Ai_->n;
    size_coarse = Ai->n;
    coarse_ratio = size_precoarse / size_coarse;
    if(i == 0)
      *P = Pi_;
    else{
      // Compose the accumulated prolongator with this sweep's Pi_, then drop
      // the per-sweep intermediates (only needed for the product).
      TIME::start();
      if(i == 1){
        //TODO special nsparse
        *P = CSRm::CSRCSR_product(h->cusparse_h0, *P, Pi_, false, false);
      }else{
        *P = CSRm::CSRCSR_product(h->cusparse_h0, *P, Pi_, false, false);
      }
      TOTAL_MUL_TIME += TIME::stop();
      CSRm::free(Ri_);
      Ri_ = NULL;
      CSRm::free(Pi_);
      CSRm::free(Ai_);
    }
    Vector::free(wi_);
    // Slow coarsening: relax the coarse-size exit threshold for later levels
    if (coarse_ratio <= COARSERATIO_THRSLD)
      amg_data->ftcoarse = FTCOARSE_INC;
    // exit condiction
    if(size_coarse <= amg_data->ftcoarse * amg_data->maxcoarsesize)
      break;
    Ai_ = Ai;
    wi_ = wi;
  }
  *w = wi;
  // R is either the last sweep's transpose or recomputed from the composed P
  if(Ri_ == NULL)
    *R = CSRm::T(h->cusparse_h0, *P);
  else
    *R = Ri_;
  return Ai;
}
// Builds the full AMG hierarchy by repeated matching aggregation, optionally
// smoothing the prolongators (agg_interp_type == 1), and prepares the
// per-level relaxation data. Returns the finalized hierarchy.
hierarchy* adaptiveCoarsening(handles *h, buildData *amg_data){
  TOTAL_MUL_TIME = 0;
  CSR *A = amg_data->A;
  vector<vtype> *w = amg_data->w;
  vector<vtype> *w_temp = Vector::clone(w);
  CSR *P = NULL, *R = NULL;
  hierarchy *hrrch = AMG::Hierarchy::init(amg_data->maxlevels + 1);
  hrrch->A_array[0] = A;
  vtype normw = CSRm::vectorANorm(h->cusparse_h0, h->cublas_h, A, w_temp);
  vtype avcoarseratio = 0.;
  int level = 0;
  relaxPrepare(h, level, hrrch->A_array[level], hrrch, amg_data);
  matchingAggregationContext::initContext(A->n);
  amg_data->ftcoarse = 1;
  TIME::start();
  if(normw > DBL_EPSILON){
    // Coarsen until maxlevels or until the coarse size is small enough
    for(level=1; level < amg_data->maxlevels;){
      hrrch->A_array[level] = matchingAggregation(h, amg_data, hrrch->A_array[level-1], &w_temp, &P, &R);
      //CSRMatrixPrintMM(hrrch->A_array[level], "/home/pasquini/singularity.mtx");
      if(!amg_data->agg_interp_type){
        // #change STREAM
        relaxPrepare(h, level, hrrch->A_array[level], hrrch, amg_data);
      }
      hrrch->P_array[level-1] = P;
      hrrch->R_array[level-1] = R;
      vtype size_coarse = hrrch->A_array[level]->n;
      vtype coarse_ratio = hrrch->A_array[level-1]->n / size_coarse;
      avcoarseratio = avcoarseratio + coarse_ratio;
      level++;
      // exit condiction
      if(size_coarse <= amg_data->ftcoarse * amg_data->maxcoarsesize)
        break;
    }
  }else{
    std::cout << "Warning: no need to build multigrid since the matrix is well conditioned\n";
  }
  float aggregation_time = TIME::stop();
  // ##############################################################################################
  // Smoothed aggregation: replace each P_j with (I - omega D^-1 A) P_j and
  // rebuild the Galerkin operators accordingly.
  if(amg_data->agg_interp_type == 1){
    for(int j=0; j<level-1; j++){
      CSR *A = hrrch->A_array[j];
      vector<vtype> *D = hrrch->D_array[j];
      //vector<vtype> *D = CSRm::diag(A);
      assert(D != NULL);
      CSR *A_temp = CSRm::clone(A);
      CSRm::matrixVectorScaling(A_temp, D);
      vtype omega = 4.0 / ( 3.0 * CSRm::infinityNorm(A_temp) );
      applyOmega(A_temp, omega);
      CSR *P_temp = hrrch->P_array[j];
      hrrch->P_array[j] = CSRm::CSRCSR_product(h->cusparse_h0, A_temp, P_temp);
      CSRm::free(P_temp);
      CSRm::free(A_temp);
      // transpose
      hrrch->R_array[j] = CSRm::T(h->cusparse_h0, hrrch->P_array[j]);
      A_temp = CSRm::CSRCSR_product(h->cusparse_h0, hrrch->R_array[j], hrrch->A_array[j]);
      CSRm::free(hrrch->A_array[j+1]);
      hrrch->A_array[j+1] = CSRm::CSRCSR_product(h->cusparse_h0, A_temp, hrrch->P_array[j]);
      relaxPrepare(h, j+1, hrrch->A_array[j+1], hrrch, amg_data);
      CSRm::free(A_temp);
    }
  }
  // ##############################################################################################
  AMG::Hierarchy::finalize_level(hrrch, level);
  if(amg_data->coarse_solver == 9){
    assert(0);
  }else{
    // in order to apply, to the coarsest matrix, the correct relax-preprocessing
    if(amg_data->coarse_solver != amg_data->CRrelax_type ){
      relaxPrepare(h, level-1, hrrch->A_array[level-1], hrrch, amg_data, amg_data->coarse_solver);
    }
  }
  AMG::Hierarchy::finalize_cmplx(hrrch);
  AMG::Hierarchy::finalize_wcmplx(hrrch);
  // NOTE(review): divides by (level-1); the well-conditioned early exit leaves
  // level == 1, which would divide by zero — confirm that path is acceptable.
  hrrch->avg_cratio = avcoarseratio / (level-1);
  AMG::Hierarchy::printInfo(hrrch);
  //Eval::printMetaData("time;aggregation_time", aggregation_time, 1);
  Eval::printMetaData("agg;level_number", level-1, 0);
  Eval::printMetaData("agg;avg_coarse_ratio", hrrch->avg_cratio, 1);
  Eval::printMetaData("agg;OpCmplx", hrrch->op_cmplx, 1);
  //Eval::printMetaData("agg;OpCmplxW", hrrch->op_wcmplx, 1);
  //Eval::printMetaData("agg;coarsest_size", hrrch->A_array[level-1]->n, 0);
  Vector::free(w_temp);
  matchingAggregationContext::freeContext();
  std::cout << "TOTAL_MUL_TIME: " << TOTAL_MUL_TIME << "\n\n";
  return hrrch;
}
//#############################################################################
|
4f0f265870ebb4e71477eca9d71f25ba560e52a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor.h"
namespace paddle {
namespace operators {
// Axis-aligned box in center-size form (x, y = center; w, h = extents).
struct Box {
  float x, y, w, h;
};
// One decoded detection candidate.
struct Detection {
  Box bbox;
  int classes;     // number of entries in `prob`
  float* prob;     // per-class scores (at least `classes` entries)
  float* mask;     // NOTE(review): not referenced in this file chunk
  float objectness;
  int sort_class;  // class index used by the NMS comparator; negative = use objectness
  int max_prob_class_index;  // best-class index; NMS groups boxes by this
};
// Bookkeeping for boxes extracted from one YOLO head tensor.
struct TensorInfo {
  int bbox_count_host;  // record bbox numbers
  int bbox_count_max_alloc{50};
  float* bboxes_dev_ptr;
  float* bboxes_host_ptr;
  int* bbox_count_device_ptr;  // Box counter in gpu memory, used by atomicAdd
};
// qsort comparator for Detection: orders by ascending best-class index, then
// by descending score — the class probability when sort_class is set (>= 0),
// otherwise the objectness.
static int NMSComparator(const void* pa, const void* pb) {
  const Detection a = *reinterpret_cast<const Detection*>(pa);
  const Detection b = *reinterpret_cast<const Detection*>(pb);
  if (a.max_prob_class_index != b.max_prob_class_index) {
    return (a.max_prob_class_index > b.max_prob_class_index) ? 1 : -1;
  }
  const bool by_class = (b.sort_class >= 0);
  const float score_a = by_class ? a.prob[b.sort_class] : a.objectness;
  const float score_b = by_class ? b.prob[b.sort_class] : b.objectness;
  if (score_a < score_b) return 1;
  if (score_a > score_b) return -1;
  return 0;
}
// Length of the 1-D overlap of two centered spans [x1 ± w1/2] and
// [x2 ± w2/2]; the result is negative when the spans are disjoint.
static float Overlap(float x1, float w1, float x2, float w2) {
  const float half1 = w1 / 2;
  const float half2 = w2 / 2;
  const float lo1 = x1 - half1, lo2 = x2 - half2;
  const float hi1 = x1 + half1, hi2 = x2 + half2;
  const float left = (lo1 > lo2) ? lo1 : lo2;
  const float right = (hi1 < hi2) ? hi1 : hi2;
  return right - left;
}
// Area of the intersection of two boxes (0 if they are disjoint).
// NOTE(review): Overlap treats (x, y) as the box *center*, while the host
// parser in this file stores the top-left corner there — confirm intent.
static float BoxIntersection(Box a, Box b) {
  const float ow = Overlap(a.x, a.w, b.x, b.w);
  const float oh = Overlap(a.y, a.h, b.y, b.h);
  return (ow > 0 && oh > 0) ? ow * oh : 0;
}
// Area of the union: |a| + |b| - |a ∩ b|.
static float BoxUnion(Box a, Box b) {
  const float inter = BoxIntersection(a, b);
  return a.w * a.h + b.w * b.h - inter;
}
// Intersection-over-union similarity for a pair of boxes.
static float BoxIOU(Box a, Box b) {
  return BoxIntersection(a, b) / BoxUnion(a, b);
}
// Greedy per-class NMS on the host:
//   1) compacts the vector so zero-objectness entries move to the tail and
//      are excluded from `total`;
//   2) sorts survivors by class index / score (NMSComparator);
//   3) within each class, suppresses (objectness = 0, probs zeroed) any box
//      whose IOU with an earlier, higher-ranked box exceeds `thresh`.
// Suppressed entries stay in the vector; callers must skip objectness == 0.
static void PostNMS(std::vector<Detection>* det_bboxes,
                    float thresh,
                    int classes) {
  int total = det_bboxes->size();
  if (total <= 0) {
    return;
  }
  Detection* dets = det_bboxes->data();
  int i, j, k;
  k = total - 1;
  // Swap zero-objectness entries to the back; --i revisits the element
  // just swapped into slot i.
  for (i = 0; i <= k; ++i) {
    if (dets[i].objectness == 0) {
      Detection swap = dets[i];
      dets[i] = dets[k];
      dets[k] = swap;
      --k;
      --i;
    }
  }
  total = k + 1;
  qsort(dets, total, sizeof(Detection), NMSComparator);
  for (i = 0; i < total; ++i) {
    if (dets[i].objectness == 0) continue;
    Box a = dets[i].bbox;
    for (j = i + 1; j < total; ++j) {
      if (dets[j].objectness == 0) continue;
      // Entries are sorted by class, so a class change ends the candidates.
      if (dets[j].max_prob_class_index != dets[i].max_prob_class_index) break;
      Box b = dets[j].bbox;
      if (BoxIOU(a, b) > thresh) {
        dets[j].objectness = 0;
        for (k = 0; k < classes; ++k) {
          dets[j].prob[k] = 0;
        }
      }
    }
  }
}
// Counts, via atomicAdd on *bbox_count, how many (x, y, anchor) cells of one
// YOLO feature map have objectness >= prob_thresh.
// Launch: 3-D grid covering (grid_size, grid_size, anchors_num); out-of-range
// threads exit early. The guard uses grid_size for both x and y, so a square
// feature map is assumed.
// Input layout: channel c of anchor z at cell (x, y) lives at
// input[y * grid_size + x + grids_num * (z * (5 + class_num) + c)],
// with channel 4 holding objectness.
__global__ void YoloBoxNum(const float* input,
                           int* bbox_count,
                           const int grid_size,
                           const int class_num,
                           const int anchors_num,
                           float prob_thresh) {
  int x_id = blockIdx.x * blockDim.x + threadIdx.x;
  int y_id = blockIdx.y * blockDim.y + threadIdx.y;
  int z_id = blockIdx.z * blockDim.z + threadIdx.z;
  if ((x_id >= grid_size) || (y_id >= grid_size) || (z_id >= anchors_num)) {
    return;
  }
  const int grids_num = grid_size * grid_size;
  const int bbindex = y_id * grid_size + x_id;
  float objectness = input[bbindex + grids_num * (z_id * (5 + class_num) + 4)];
  if (objectness < prob_thresh) {
    return;
  }
  atomicAdd(bbox_count, 1);
}
// Decodes one YOLO feature map into boxes, writing tuples of length
// 5 + class_num — (objectness, x1, y1, x2, y2, class probs...) — into
// `output`. Each thread handles one (x, y, anchor) cell whose objectness
// passes prob_thresh and claims its output slot with atomicAdd on
// *bbox_index, so box order in `output` is nondeterministic.
// Coordinates are mapped from network space (netw x neth grid) back to the
// original picture size (im_shape / im_scale) and clipped to its borders.
// `biases` holds the anchor (w, h) pairs for this feature map.
__global__ void YoloTensorParseKernel(const float* input,
                                      const float* im_shape_data,
                                      const float* im_scale_data,
                                      float* output,
                                      int* bbox_index,
                                      const int grid_size,
                                      const int class_num,
                                      const int anchors_num,
                                      const int netw,
                                      const int neth,
                                      int* biases,
                                      float prob_thresh) {
  int x_id = blockIdx.x * blockDim.x + threadIdx.x;
  int y_id = blockIdx.y * blockDim.y + threadIdx.y;
  int z_id = blockIdx.z * blockDim.z + threadIdx.z;
  if ((x_id >= grid_size) || (y_id >= grid_size) || (z_id >= anchors_num)) {
    return;
  }
  // Original picture size, recovered by undoing the preprocessing scale.
  const float pic_h = im_shape_data[0] / im_scale_data[0];
  const float pic_w = im_shape_data[1] / im_scale_data[1];
  const int grids_num = grid_size * grid_size;
  const int bbindex = y_id * grid_size + x_id;
  float objectness = input[bbindex + grids_num * (z_id * (5 + class_num) + 4)];
  if (objectness < prob_thresh) {
    return;
  }
  // Reserve this thread's slot in the output tensor.
  int cur_bbox_index = atomicAdd(bbox_index, 1);
  int tensor_index = cur_bbox_index * (5 + class_num);
  // x
  float x = input[bbindex + grids_num * (z_id * (5 + class_num) + 0)];
  x = (x + static_cast<float>(x_id)) * static_cast<float>(pic_w) /
      static_cast<float>(grid_size);
  // y
  float y = input[bbindex + grids_num * (z_id * (5 + class_num) + 1)];
  y = (y + static_cast<float>(y_id)) * static_cast<float>(pic_h) /
      static_cast<float>(grid_size);
  // w: anchor width scaled by the prediction and picture/network ratio
  float w = input[bbindex + grids_num * (z_id * (5 + class_num) + 2)];
  w = w * biases[2 * z_id] * pic_w / netw;
  // h
  float h = input[bbindex + grids_num * (z_id * (5 + class_num) + 3)];
  h = h * biases[2 * z_id + 1] * pic_h / neth;
  // Convert center/size to corner pair and clip to picture borders.
  output[tensor_index] = objectness;
  output[tensor_index + 1] = x - w / 2;
  output[tensor_index + 2] = y - h / 2;
  output[tensor_index + 3] = x + w / 2;
  output[tensor_index + 4] = y + h / 2;
  output[tensor_index + 1] =
      output[tensor_index + 1] > 0 ? output[tensor_index + 1] : 0.f;
  output[tensor_index + 2] =
      output[tensor_index + 2] > 0 ? output[tensor_index + 2] : 0.f;
  output[tensor_index + 3] = output[tensor_index + 3] < pic_w - 1
                                 ? output[tensor_index + 3]
                                 : pic_w - 1;
  output[tensor_index + 4] = output[tensor_index + 4] < pic_h - 1
                                 ? output[tensor_index + 4]
                                 : pic_h - 1;
  // Probabilities of classes, weighted by objectness.
  for (int i = 0; i < class_num; ++i) {
    float prob =
        input[bbindex + grids_num * (z_id * (5 + class_num) + (5 + i))] *
        objectness;
    output[tensor_index + 5 + i] = prob;
  }
}
// Runs the decode pipeline for one (image, feature map) pair:
//   1. YoloBoxNum counts boxes above prob_thresh into *bbox_count_device_ptr.
//   2. If the count exceeds *bbox_count_max_alloc, the device box buffer is
//      re-allocated (hence the float** out-parameter) and the new capacity
//      and pointer are written back.
//   3. YoloTensorParseKernel fills the buffer; *bbox_index_device_ptr is the
//      shared atomic write cursor, reset to 0 beforehand.
// NOTE: in this hipified copy both branches of each #ifdef PADDLE_WITH_HIP
// contain the same HIP calls, so the guards are vacuous.
// NOTE(review): runtime API return codes are not checked anywhere here.
static void YoloTensorParseCuda(
    const float* input_data,  // [in] YOLO_BOX_HEAD layer output
    const float* image_shape_data,
    const float* image_scale_data,
    float** bboxes_tensor_ptr,  // [out] Bounding boxes output tensor
    int* bbox_count_max_alloc,  // [in/out] maximum bounding Box number
                                // allocated in dev
    int* bbox_count_host,  // [in/out] bounding boxes number recorded in host
    int* bbox_count_device_ptr,  // [in/out] bounding boxes number calculated
                                 // in
                                 // device side
    int* bbox_index_device_ptr,  // [in] bounding Box index for kernel threads
                                 // shared access
    int grid_size,
    int class_num,
    int anchors_num,
    int netw,
    int neth,
    int* biases_device,
    float prob_thresh) {
  // Ceil-division launch over (grid_size, grid_size, anchors_num); the
  // kernels assume a square grid.
  dim3 threads_per_block(16, 16, 4);
  dim3 number_of_blocks((grid_size / threads_per_block.x) + 1,
                        (grid_size / threads_per_block.y) + 1,
                        (anchors_num / threads_per_block.z) + 1);
  // Estimate how many boxes will be choosed
  int bbox_count = 0;
#ifdef PADDLE_WITH_HIP
  hipMemcpy(
      bbox_count_device_ptr, &bbox_count, sizeof(int), hipMemcpyHostToDevice);
#else
  hipMemcpy(
      bbox_count_device_ptr, &bbox_count, sizeof(int), hipMemcpyHostToDevice);
#endif
  hipLaunchKernelGGL(( YoloBoxNum), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, input_data,
                                                           bbox_count_device_ptr,
                                                           grid_size,
                                                           class_num,
                                                           anchors_num,
                                                           prob_thresh);
  // Synchronous copy also serves as the barrier after the counting kernel.
#ifdef PADDLE_WITH_HIP
  hipMemcpy(
      &bbox_count, bbox_count_device_ptr, sizeof(int), hipMemcpyDeviceToHost);
#else
  hipMemcpy(
      &bbox_count, bbox_count_device_ptr, sizeof(int), hipMemcpyDeviceToHost);
#endif
  // Record actual bbox number
  *bbox_count_host = bbox_count;
  // Obtain previous allocated bbox tensor in device side
  float* bbox_tensor = *bboxes_tensor_ptr;
  // Update previous maximum bbox number
  if (bbox_count > *bbox_count_max_alloc) {
#ifdef PADDLE_WITH_HIP
    hipFree(bbox_tensor);
    hipMalloc(&bbox_tensor, bbox_count * (5 + class_num) * sizeof(float));
#else
    hipFree(bbox_tensor);
    hipMalloc(&bbox_tensor, bbox_count * (5 + class_num) * sizeof(float));
#endif
    *bbox_count_max_alloc = bbox_count;
    *bboxes_tensor_ptr = bbox_tensor;
  }
  // Now generate bboxes
  int bbox_index = 0;
#ifdef PADDLE_WITH_HIP
  hipMemcpy(
      bbox_index_device_ptr, &bbox_index, sizeof(int), hipMemcpyHostToDevice);
#else
  hipMemcpy(
      bbox_index_device_ptr, &bbox_index, sizeof(int), hipMemcpyHostToDevice);
#endif
  hipLaunchKernelGGL(( YoloTensorParseKernel), dim3(number_of_blocks), dim3(threads_per_block), 0, 0,
      input_data,
      image_shape_data,
      image_scale_data,
      bbox_tensor,
      bbox_index_device_ptr,
      grid_size,
      class_num,
      anchors_num,
      netw,
      neth,
      biases_device,
      prob_thresh);
}
// Host-side kernel for the yolo_box_post op: decodes the three YOLO head
// outputs into candidate boxes on the GPU, copies them to the host, runs
// per-class NMS on the CPU, and emits one [class, score, x1, y1, x2, y2]
// row per surviving box ("Out") plus a per-image box count ("NmsRoisNum").
template <typename T, typename DeviceContext>
class YoloBoxPostKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    // prepare inputs: the three feature maps "Boxes0..2" with their dims
    std::vector<const float*> boxes_input(3);
    std::vector<std::vector<int32_t>> boxes_input_dims(3);
    for (int i = 0; i < 3; i++) {
      auto* boxes_tensor =
          context.Input<phi::DenseTensor>("Boxes" + std::to_string(i));
      boxes_input[i] = boxes_tensor->data<float>();
      auto dims = boxes_tensor->dims();
      for (int j = 0; j < dims.size(); j++) {
        boxes_input_dims[i].push_back(dims[j]);
      }
    }
    // Per-image (h, w) shape and scale pairs, consumed 2 floats at a time.
    const float* image_shape_data =
        context.Input<phi::DenseTensor>("ImageShape")->data<float>();
    const float* image_scale_data =
        context.Input<phi::DenseTensor>("ImageScale")->data<float>();
    // prepare outputs
    auto* boxes_scores_tensor = context.Output<phi::DenseTensor>("Out");
    auto* boxes_num_tensor = context.Output<phi::DenseTensor>("NmsRoisNum");
    // prepare anchors: concatenated into one device buffer; per-map
    // sub-pointers are computed below.
    std::vector<int32_t> anchors;
    auto anchors0 = context.Attr<std::vector<int>>("anchors0");
    auto anchors1 = context.Attr<std::vector<int>>("anchors1");
    auto anchors2 = context.Attr<std::vector<int>>("anchors2");
    anchors.insert(anchors.end(), anchors0.begin(), anchors0.end());
    anchors.insert(anchors.end(), anchors1.begin(), anchors1.end());
    anchors.insert(anchors.end(), anchors2.begin(), anchors2.end());
    int* device_anchors;
#ifdef PADDLE_WITH_HIP
    hipMalloc(reinterpret_cast<void**>(&device_anchors),
              anchors.size() * sizeof(int));
    hipMemcpy(device_anchors,
              anchors.data(),
              anchors.size() * sizeof(int),
              hipMemcpyHostToDevice);
#else
    hipMalloc(reinterpret_cast<void**>(&device_anchors),
              anchors.size() * sizeof(int));
    hipMemcpy(device_anchors,
              anchors.data(),
              anchors.size() * sizeof(int),
              hipMemcpyHostToDevice);
#endif
    int* device_anchors_ptr[3];
    device_anchors_ptr[0] = device_anchors;
    device_anchors_ptr[1] = device_anchors_ptr[0] + anchors0.size();
    device_anchors_ptr[2] = device_anchors_ptr[1] + anchors1.size();
    // Anchors are stored as (w, h) pairs, so count = size / 2.
    std::vector<int> anchors_num{static_cast<int>(anchors0.size()) / 2,
                                 static_cast<int>(anchors1.size()) / 2,
                                 static_cast<int>(anchors2.size()) / 2};
    // prepare other attrs
    int class_num = context.Attr<int>("class_num");
    float conf_thresh = context.Attr<float>("conf_thresh");
    std::vector<int> downsample_ratio{context.Attr<int>("downsample_ratio0"),
                                      context.Attr<int>("downsample_ratio1"),
                                      context.Attr<int>("downsample_ratio2")};
    // clip_bbox and scale_x_y is not used now!
    float nms_threshold = context.Attr<float>("nms_threshold");
    int batch = context.Input<phi::DenseTensor>("ImageShape")->dims()[0];
    // One TensorInfo per (image, feature map), with an initial device/host
    // capacity of bbox_count_max_alloc (50) boxes each.
    TensorInfo* ts_info = new TensorInfo[batch * boxes_input.size()];
    for (int i = 0; i < batch * static_cast<int>(boxes_input.size()); i++) {
#ifdef PADDLE_WITH_HIP
      hipMalloc(
          reinterpret_cast<void**>(&ts_info[i].bboxes_dev_ptr),
          ts_info[i].bbox_count_max_alloc * (5 + class_num) * sizeof(float));
#else
      hipMalloc(
          reinterpret_cast<void**>(&ts_info[i].bboxes_dev_ptr),
          ts_info[i].bbox_count_max_alloc * (5 + class_num) * sizeof(float));
#endif
      ts_info[i].bboxes_host_ptr = reinterpret_cast<float*>(malloc(
          ts_info[i].bbox_count_max_alloc * (5 + class_num) * sizeof(float)));
#ifdef PADDLE_WITH_HIP
      hipMalloc(reinterpret_cast<void**>(&ts_info[i].bbox_count_device_ptr),
                sizeof(int));
#else
      hipMalloc(reinterpret_cast<void**>(&ts_info[i].bbox_count_device_ptr),
                sizeof(int));
#endif
    }
    // Box index counter in gpu memory
    // *bbox_index_device_ptr used by atomicAdd
    int* bbox_index_device_ptr;
#ifdef PADDLE_WITH_HIP
    hipMalloc(reinterpret_cast<void**>(&bbox_index_device_ptr), sizeof(int));
#else
    hipMalloc(reinterpret_cast<void**>(&bbox_index_device_ptr), sizeof(int));
#endif
    int total_bbox = 0;
    for (int batch_id = 0; batch_id < batch; batch_id++) {
      for (int input_id = 0; input_id < boxes_input.size(); input_id++) {
        int c = boxes_input_dims[input_id][1];
        int h = boxes_input_dims[input_id][2];
        int w = boxes_input_dims[input_id][3];
        int ts_id = batch_id * boxes_input.size() + input_id;
        int bbox_count_max_alloc = ts_info[ts_id].bbox_count_max_alloc;
        // NOTE(review): grid_size is taken from h only, and netw/neth are
        // built from h/w respectively — this is only consistent for square
        // feature maps (h == w); confirm intent for non-square inputs.
        YoloTensorParseCuda(
            boxes_input[input_id] + batch_id * c * h * w,
            image_shape_data + batch_id * 2,
            image_scale_data + batch_id * 2,
            &(ts_info[ts_id].bboxes_dev_ptr),  // output in gpu,must use 2-level
                                               // pointer, because we may
                                               // re-malloc
            &bbox_count_max_alloc,             // bbox_count_alloc_ptr boxes we
                                               // pre-allocate
            &(ts_info[ts_id].bbox_count_host),  // record bbox numbers
            ts_info[ts_id].bbox_count_device_ptr,  // for atomicAdd
            bbox_index_device_ptr,                 // for atomicAdd
            h,
            class_num,
            anchors_num[input_id],
            downsample_ratio[input_id] * h,
            downsample_ratio[input_id] * w,
            device_anchors_ptr[input_id],
            conf_thresh);
        // batch info update: grow the host mirror if the device buffer grew
        if (bbox_count_max_alloc > ts_info[ts_id].bbox_count_max_alloc) {
          ts_info[ts_id].bbox_count_max_alloc = bbox_count_max_alloc;
          ts_info[ts_id].bboxes_host_ptr = reinterpret_cast<float*>(
              realloc(ts_info[ts_id].bboxes_host_ptr,
                      bbox_count_max_alloc * (5 + class_num) * sizeof(float)));
        }
        // we need copy bbox_count_host boxes to cpu memory
        // NOTE(review): async copy into pageable (malloc'd) host memory with
        // no explicit sync before the host reads bboxes_host_ptr below —
        // relies on the pageable-memory copy completing first; confirm.
#ifdef PADDLE_WITH_HIP
        hipMemcpyAsync(
            ts_info[ts_id].bboxes_host_ptr,
            ts_info[ts_id].bboxes_dev_ptr,
            ts_info[ts_id].bbox_count_host * (5 + class_num) * sizeof(float),
            hipMemcpyDeviceToHost);
#else
        hipMemcpyAsync(
            ts_info[ts_id].bboxes_host_ptr,
            ts_info[ts_id].bboxes_dev_ptr,
            ts_info[ts_id].bbox_count_host * (5 + class_num) * sizeof(float),
            hipMemcpyDeviceToHost);
#endif
        total_bbox += ts_info[ts_id].bbox_count_host;
      }
    }
    // Output is at least one row; when no box survives the single
    // zero-initialized row below is the placeholder.
    boxes_scores_tensor->Resize({total_bbox > 0 ? total_bbox : 1, 6});
    float* boxes_scores_data =
        boxes_scores_tensor->mutable_data<float>(platform::CPUPlace());
    // Zeroes only the first row (6 floats) — presumably just the
    // placeholder row for the total_bbox == 0 case; confirm.
    memset(boxes_scores_data, 0, sizeof(float) * 6);
    boxes_num_tensor->Resize({batch});
    int* boxes_num_data =
        boxes_num_tensor->mutable_data<int>(platform::CPUPlace());
    int boxes_scores_id = 0;
    // NMS: rebuild Detection records per image, suppress, then emit rows.
    for (int batch_id = 0; batch_id < batch; batch_id++) {
      std::vector<Detection> bbox_det_vec;
      for (int input_id = 0; input_id < boxes_input.size(); input_id++) {
        int ts_id = batch_id * boxes_input.size() + input_id;
        int bbox_count = ts_info[ts_id].bbox_count_host;
        if (bbox_count <= 0) {
          continue;
        }
        float* bbox_host_ptr = ts_info[ts_id].bboxes_host_ptr;
        for (int bbox_index = 0; bbox_index < bbox_count; ++bbox_index) {
          Detection bbox_det;
          memset(&bbox_det, 0, sizeof(Detection));
          // Device tuples are (objectness, x1, y1, x2, y2, probs...);
          // convert the corner pair to corner + extent for Box.
          bbox_det.objectness = bbox_host_ptr[bbox_index * (5 + class_num) + 0];
          bbox_det.bbox.x = bbox_host_ptr[bbox_index * (5 + class_num) + 1];
          bbox_det.bbox.y = bbox_host_ptr[bbox_index * (5 + class_num) + 2];
          bbox_det.bbox.w =
              bbox_host_ptr[bbox_index * (5 + class_num) + 3] - bbox_det.bbox.x;
          bbox_det.bbox.h =
              bbox_host_ptr[bbox_index * (5 + class_num) + 4] - bbox_det.bbox.y;
          bbox_det.classes = class_num;
          bbox_det.prob =
              reinterpret_cast<float*>(malloc(class_num * sizeof(float)));
          int max_prob_class_id = -1;
          float max_class_prob = 0.0;
          for (int class_id = 0; class_id < class_num; class_id++) {
            float prob =
                bbox_host_ptr[bbox_index * (5 + class_num) + 5 + class_id];
            bbox_det.prob[class_id] = prob;
            if (prob > max_class_prob) {
              max_class_prob = prob;
              max_prob_class_id = class_id;
            }
          }
          bbox_det.max_prob_class_index = max_prob_class_id;
          bbox_det.sort_class = max_prob_class_id;
          bbox_det_vec.push_back(bbox_det);
        }
      }
      PostNMS(&bbox_det_vec, nms_threshold, class_num);
      // Emit [class, objectness, x1, y1, x2, y2] per surviving detection.
      for (int i = 0; i < bbox_det_vec.size(); i++) {
        boxes_scores_data[boxes_scores_id++] =
            bbox_det_vec[i].max_prob_class_index;
        boxes_scores_data[boxes_scores_id++] = bbox_det_vec[i].objectness;
        boxes_scores_data[boxes_scores_id++] = bbox_det_vec[i].bbox.x;
        boxes_scores_data[boxes_scores_id++] = bbox_det_vec[i].bbox.y;
        boxes_scores_data[boxes_scores_id++] =
            bbox_det_vec[i].bbox.w + bbox_det_vec[i].bbox.x;
        boxes_scores_data[boxes_scores_id++] =
            bbox_det_vec[i].bbox.h + bbox_det_vec[i].bbox.y;
        free(bbox_det_vec[i].prob);
      }
      boxes_num_data[batch_id] = bbox_det_vec.size();
    }
    // Release all device/host scratch buffers.
#ifdef PADDLE_WITH_HIP
    hipFree(bbox_index_device_ptr);
#else
    hipFree(bbox_index_device_ptr);
#endif
    for (int i = 0; i < batch * boxes_input.size(); i++) {
#ifdef PADDLE_WITH_HIP
      hipFree(ts_info[i].bboxes_dev_ptr);
      hipFree(ts_info[i].bbox_count_device_ptr);
#else
      hipFree(ts_info[i].bboxes_dev_ptr);
      hipFree(ts_info[i].bbox_count_device_ptr);
#endif
      free(ts_info[i].bboxes_host_ptr);
    }
    delete[] ts_info;
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
PD_REGISTER_STRUCT_KERNEL(
yolo_box_post, GPU, ALL_LAYOUT, ops::YoloBoxPostKernel, float) {}
| 4f0f265870ebb4e71477eca9d71f25ba560e52a8.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor.h"
namespace paddle {
namespace operators {
// Axis-aligned bounding box; interpretation of (x, y) (corner vs. center)
// depends on the producer — host parsing in this file stores the top-left
// corner in (x, y) and the extents in (w, h).
struct Box {
  float x, y, w, h;
};
// One candidate detection assembled on the host from kernel output.
struct Detection {
  Box bbox;
  int classes;     // number of classes == length of `prob`
  float* prob;     // per-class scores, malloc'd by the host parser
  float* mask;     // unused in this file
  float objectness;
  int sort_class;  // class used by NMSComparator's score tie-break
  int max_prob_class_index;  // argmax over `prob`
};
// Per-(image, feature-map) bookkeeping for the device/host box buffers.
struct TensorInfo {
  int bbox_count_host;  // record bbox numbers
  int bbox_count_max_alloc{50};  // buffer capacity, in boxes
  float* bboxes_dev_ptr;
  float* bboxes_host_ptr;
  int* bbox_count_device_ptr;  // Box counter in gpu memory, used by atomicAdd
};
// qsort comparator for Detection: primary key is max_prob_class_index
// ascending; within a class, higher score sorts first (the score is
// prob[b.sort_class] when b.sort_class >= 0, otherwise objectness).
static int NMSComparator(const void* pa, const void* pb) {
  const Detection& a = *static_cast<const Detection*>(pa);
  const Detection& b = *static_cast<const Detection*>(pb);
  if (a.max_prob_class_index != b.max_prob_class_index) {
    return a.max_prob_class_index > b.max_prob_class_index ? 1 : -1;
  }
  const float diff = (b.sort_class >= 0)
                         ? a.prob[b.sort_class] - b.prob[b.sort_class]
                         : a.objectness - b.objectness;
  if (diff < 0) return 1;
  if (diff > 0) return -1;
  return 0;
}
// Length of the 1-D overlap between two intervals, where interval i is
// centered at xi with width wi. A negative result means the intervals
// are disjoint.
static float Overlap(float x1, float w1, float x2, float w2) {
  const float half1 = w1 / 2;
  const float half2 = w2 / 2;
  const float lo = (x1 - half1 > x2 - half2) ? (x1 - half1) : (x2 - half2);
  const float hi = (x1 + half1 < x2 + half2) ? (x1 + half1) : (x2 + half2);
  return hi - lo;
}
// Area of the intersection of two boxes (0 if they are disjoint).
// NOTE(review): Overlap treats (x, y) as the box *center*, while the host
// parser in this file stores the top-left corner there — confirm intent.
static float BoxIntersection(Box a, Box b) {
  const float ow = Overlap(a.x, a.w, b.x, b.w);
  const float oh = Overlap(a.y, a.h, b.y, b.h);
  return (ow > 0 && oh > 0) ? ow * oh : 0;
}
// Area of the union: |a| + |b| - |a ∩ b|.
static float BoxUnion(Box a, Box b) {
  const float inter = BoxIntersection(a, b);
  return a.w * a.h + b.w * b.h - inter;
}
// Intersection-over-union similarity for a pair of boxes.
static float BoxIOU(Box a, Box b) {
  return BoxIntersection(a, b) / BoxUnion(a, b);
}
// Greedy per-class NMS on the host:
//   1) compacts the vector so zero-objectness entries move to the tail and
//      are excluded from `total`;
//   2) sorts survivors by class index / score (NMSComparator);
//   3) within each class, suppresses (objectness = 0, probs zeroed) any box
//      whose IOU with an earlier, higher-ranked box exceeds `thresh`.
// Suppressed entries stay in the vector; callers must skip objectness == 0.
static void PostNMS(std::vector<Detection>* det_bboxes,
                    float thresh,
                    int classes) {
  int total = det_bboxes->size();
  if (total <= 0) {
    return;
  }
  Detection* dets = det_bboxes->data();
  int i, j, k;
  k = total - 1;
  // Swap zero-objectness entries to the back; --i revisits the element
  // just swapped into slot i.
  for (i = 0; i <= k; ++i) {
    if (dets[i].objectness == 0) {
      Detection swap = dets[i];
      dets[i] = dets[k];
      dets[k] = swap;
      --k;
      --i;
    }
  }
  total = k + 1;
  qsort(dets, total, sizeof(Detection), NMSComparator);
  for (i = 0; i < total; ++i) {
    if (dets[i].objectness == 0) continue;
    Box a = dets[i].bbox;
    for (j = i + 1; j < total; ++j) {
      if (dets[j].objectness == 0) continue;
      // Entries are sorted by class, so a class change ends the candidates.
      if (dets[j].max_prob_class_index != dets[i].max_prob_class_index) break;
      Box b = dets[j].bbox;
      if (BoxIOU(a, b) > thresh) {
        dets[j].objectness = 0;
        for (k = 0; k < classes; ++k) {
          dets[j].prob[k] = 0;
        }
      }
    }
  }
}
// Counts, via atomicAdd on *bbox_count, how many (x, y, anchor) cells of one
// YOLO feature map have objectness >= prob_thresh.
// Launch: 3-D grid covering (grid_size, grid_size, anchors_num); out-of-range
// threads exit early. The guard uses grid_size for both x and y, so a square
// feature map is assumed.
// Input layout: channel c of anchor z at cell (x, y) lives at
// input[y * grid_size + x + grids_num * (z * (5 + class_num) + c)],
// with channel 4 holding objectness.
__global__ void YoloBoxNum(const float* input,
                           int* bbox_count,
                           const int grid_size,
                           const int class_num,
                           const int anchors_num,
                           float prob_thresh) {
  int x_id = blockIdx.x * blockDim.x + threadIdx.x;
  int y_id = blockIdx.y * blockDim.y + threadIdx.y;
  int z_id = blockIdx.z * blockDim.z + threadIdx.z;
  if ((x_id >= grid_size) || (y_id >= grid_size) || (z_id >= anchors_num)) {
    return;
  }
  const int grids_num = grid_size * grid_size;
  const int bbindex = y_id * grid_size + x_id;
  float objectness = input[bbindex + grids_num * (z_id * (5 + class_num) + 4)];
  if (objectness < prob_thresh) {
    return;
  }
  atomicAdd(bbox_count, 1);
}
// Decodes one YOLO feature map into boxes, writing tuples of length
// 5 + class_num — (objectness, x1, y1, x2, y2, class probs...) — into
// `output`. Each thread handles one (x, y, anchor) cell whose objectness
// passes prob_thresh and claims its output slot with atomicAdd on
// *bbox_index, so box order in `output` is nondeterministic.
// Coordinates are mapped from network space (netw x neth grid) back to the
// original picture size (im_shape / im_scale) and clipped to its borders.
// `biases` holds the anchor (w, h) pairs for this feature map.
__global__ void YoloTensorParseKernel(const float* input,
                                      const float* im_shape_data,
                                      const float* im_scale_data,
                                      float* output,
                                      int* bbox_index,
                                      const int grid_size,
                                      const int class_num,
                                      const int anchors_num,
                                      const int netw,
                                      const int neth,
                                      int* biases,
                                      float prob_thresh) {
  int x_id = blockIdx.x * blockDim.x + threadIdx.x;
  int y_id = blockIdx.y * blockDim.y + threadIdx.y;
  int z_id = blockIdx.z * blockDim.z + threadIdx.z;
  if ((x_id >= grid_size) || (y_id >= grid_size) || (z_id >= anchors_num)) {
    return;
  }
  // Original picture size, recovered by undoing the preprocessing scale.
  const float pic_h = im_shape_data[0] / im_scale_data[0];
  const float pic_w = im_shape_data[1] / im_scale_data[1];
  const int grids_num = grid_size * grid_size;
  const int bbindex = y_id * grid_size + x_id;
  float objectness = input[bbindex + grids_num * (z_id * (5 + class_num) + 4)];
  if (objectness < prob_thresh) {
    return;
  }
  // Reserve this thread's slot in the output tensor.
  int cur_bbox_index = atomicAdd(bbox_index, 1);
  int tensor_index = cur_bbox_index * (5 + class_num);
  // x
  float x = input[bbindex + grids_num * (z_id * (5 + class_num) + 0)];
  x = (x + static_cast<float>(x_id)) * static_cast<float>(pic_w) /
      static_cast<float>(grid_size);
  // y
  float y = input[bbindex + grids_num * (z_id * (5 + class_num) + 1)];
  y = (y + static_cast<float>(y_id)) * static_cast<float>(pic_h) /
      static_cast<float>(grid_size);
  // w: anchor width scaled by the prediction and picture/network ratio
  float w = input[bbindex + grids_num * (z_id * (5 + class_num) + 2)];
  w = w * biases[2 * z_id] * pic_w / netw;
  // h
  float h = input[bbindex + grids_num * (z_id * (5 + class_num) + 3)];
  h = h * biases[2 * z_id + 1] * pic_h / neth;
  // Convert center/size to corner pair and clip to picture borders.
  output[tensor_index] = objectness;
  output[tensor_index + 1] = x - w / 2;
  output[tensor_index + 2] = y - h / 2;
  output[tensor_index + 3] = x + w / 2;
  output[tensor_index + 4] = y + h / 2;
  output[tensor_index + 1] =
      output[tensor_index + 1] > 0 ? output[tensor_index + 1] : 0.f;
  output[tensor_index + 2] =
      output[tensor_index + 2] > 0 ? output[tensor_index + 2] : 0.f;
  output[tensor_index + 3] = output[tensor_index + 3] < pic_w - 1
                                 ? output[tensor_index + 3]
                                 : pic_w - 1;
  output[tensor_index + 4] = output[tensor_index + 4] < pic_h - 1
                                 ? output[tensor_index + 4]
                                 : pic_h - 1;
  // Probabilities of classes, weighted by objectness.
  for (int i = 0; i < class_num; ++i) {
    float prob =
        input[bbindex + grids_num * (z_id * (5 + class_num) + (5 + i))] *
        objectness;
    output[tensor_index + 5 + i] = prob;
  }
}
// Runs the decode pipeline for one (image, feature map) pair:
//   1. YoloBoxNum counts boxes above prob_thresh into *bbox_count_device_ptr.
//   2. If the count exceeds *bbox_count_max_alloc, the device box buffer is
//      re-allocated (hence the float** out-parameter) and the new capacity
//      and pointer are written back.
//   3. YoloTensorParseKernel fills the buffer; *bbox_index_device_ptr is the
//      shared atomic write cursor, reset to 0 beforehand.
// The #ifdef PADDLE_WITH_HIP guards select the HIP vs CUDA runtime API.
// NOTE(review): runtime API return codes are not checked anywhere here.
static void YoloTensorParseCuda(
    const float* input_data,  // [in] YOLO_BOX_HEAD layer output
    const float* image_shape_data,
    const float* image_scale_data,
    float** bboxes_tensor_ptr,  // [out] Bounding boxes output tensor
    int* bbox_count_max_alloc,  // [in/out] maximum bounding Box number
                                // allocated in dev
    int* bbox_count_host,  // [in/out] bounding boxes number recorded in host
    int* bbox_count_device_ptr,  // [in/out] bounding boxes number calculated
                                 // in
                                 // device side
    int* bbox_index_device_ptr,  // [in] bounding Box index for kernel threads
                                 // shared access
    int grid_size,
    int class_num,
    int anchors_num,
    int netw,
    int neth,
    int* biases_device,
    float prob_thresh) {
  // Ceil-division launch over (grid_size, grid_size, anchors_num); the
  // kernels assume a square grid.
  dim3 threads_per_block(16, 16, 4);
  dim3 number_of_blocks((grid_size / threads_per_block.x) + 1,
                        (grid_size / threads_per_block.y) + 1,
                        (anchors_num / threads_per_block.z) + 1);
  // Estimate how many boxes will be choosed
  int bbox_count = 0;
#ifdef PADDLE_WITH_HIP
  hipMemcpy(
      bbox_count_device_ptr, &bbox_count, sizeof(int), hipMemcpyHostToDevice);
#else
  cudaMemcpy(
      bbox_count_device_ptr, &bbox_count, sizeof(int), cudaMemcpyHostToDevice);
#endif
  YoloBoxNum<<<number_of_blocks, threads_per_block, 0>>>(input_data,
                                                         bbox_count_device_ptr,
                                                         grid_size,
                                                         class_num,
                                                         anchors_num,
                                                         prob_thresh);
  // Synchronous copy also serves as the barrier after the counting kernel.
#ifdef PADDLE_WITH_HIP
  hipMemcpy(
      &bbox_count, bbox_count_device_ptr, sizeof(int), hipMemcpyDeviceToHost);
#else
  cudaMemcpy(
      &bbox_count, bbox_count_device_ptr, sizeof(int), cudaMemcpyDeviceToHost);
#endif
  // Record actual bbox number
  *bbox_count_host = bbox_count;
  // Obtain previous allocated bbox tensor in device side
  float* bbox_tensor = *bboxes_tensor_ptr;
  // Update previous maximum bbox number
  if (bbox_count > *bbox_count_max_alloc) {
#ifdef PADDLE_WITH_HIP
    hipFree(bbox_tensor);
    hipMalloc(&bbox_tensor, bbox_count * (5 + class_num) * sizeof(float));
#else
    cudaFree(bbox_tensor);
    cudaMalloc(&bbox_tensor, bbox_count * (5 + class_num) * sizeof(float));
#endif
    *bbox_count_max_alloc = bbox_count;
    *bboxes_tensor_ptr = bbox_tensor;
  }
  // Now generate bboxes
  int bbox_index = 0;
#ifdef PADDLE_WITH_HIP
  hipMemcpy(
      bbox_index_device_ptr, &bbox_index, sizeof(int), hipMemcpyHostToDevice);
#else
  cudaMemcpy(
      bbox_index_device_ptr, &bbox_index, sizeof(int), cudaMemcpyHostToDevice);
#endif
  YoloTensorParseKernel<<<number_of_blocks, threads_per_block, 0>>>(
      input_data,
      image_shape_data,
      image_scale_data,
      bbox_tensor,
      bbox_index_device_ptr,
      grid_size,
      class_num,
      anchors_num,
      netw,
      neth,
      biases_device,
      prob_thresh);
}
// Host-side kernel for the yolo_box_post op: decodes the three YOLO head
// outputs into candidate boxes on the GPU, copies them to the host, runs
// per-class NMS on the CPU, and emits one [class, score, x1, y1, x2, y2]
// row per surviving box ("Out") plus a per-image box count ("NmsRoisNum").
template <typename T, typename DeviceContext>
class YoloBoxPostKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    // prepare inputs: the three feature maps "Boxes0..2" with their dims
    std::vector<const float*> boxes_input(3);
    std::vector<std::vector<int32_t>> boxes_input_dims(3);
    for (int i = 0; i < 3; i++) {
      auto* boxes_tensor =
          context.Input<phi::DenseTensor>("Boxes" + std::to_string(i));
      boxes_input[i] = boxes_tensor->data<float>();
      auto dims = boxes_tensor->dims();
      for (int j = 0; j < dims.size(); j++) {
        boxes_input_dims[i].push_back(dims[j]);
      }
    }
    // Per-image (h, w) shape and scale pairs, consumed 2 floats at a time.
    const float* image_shape_data =
        context.Input<phi::DenseTensor>("ImageShape")->data<float>();
    const float* image_scale_data =
        context.Input<phi::DenseTensor>("ImageScale")->data<float>();
    // prepare outputs
    auto* boxes_scores_tensor = context.Output<phi::DenseTensor>("Out");
    auto* boxes_num_tensor = context.Output<phi::DenseTensor>("NmsRoisNum");
    // prepare anchors: concatenated into one device buffer; per-map
    // sub-pointers are computed below.
    std::vector<int32_t> anchors;
    auto anchors0 = context.Attr<std::vector<int>>("anchors0");
    auto anchors1 = context.Attr<std::vector<int>>("anchors1");
    auto anchors2 = context.Attr<std::vector<int>>("anchors2");
    anchors.insert(anchors.end(), anchors0.begin(), anchors0.end());
    anchors.insert(anchors.end(), anchors1.begin(), anchors1.end());
    anchors.insert(anchors.end(), anchors2.begin(), anchors2.end());
    int* device_anchors;
#ifdef PADDLE_WITH_HIP
    hipMalloc(reinterpret_cast<void**>(&device_anchors),
              anchors.size() * sizeof(int));
    hipMemcpy(device_anchors,
              anchors.data(),
              anchors.size() * sizeof(int),
              hipMemcpyHostToDevice);
#else
    cudaMalloc(reinterpret_cast<void**>(&device_anchors),
               anchors.size() * sizeof(int));
    cudaMemcpy(device_anchors,
               anchors.data(),
               anchors.size() * sizeof(int),
               cudaMemcpyHostToDevice);
#endif
    int* device_anchors_ptr[3];
    device_anchors_ptr[0] = device_anchors;
    device_anchors_ptr[1] = device_anchors_ptr[0] + anchors0.size();
    device_anchors_ptr[2] = device_anchors_ptr[1] + anchors1.size();
    // Anchors are stored as (w, h) pairs, so count = size / 2.
    std::vector<int> anchors_num{static_cast<int>(anchors0.size()) / 2,
                                 static_cast<int>(anchors1.size()) / 2,
                                 static_cast<int>(anchors2.size()) / 2};
    // prepare other attrs
    int class_num = context.Attr<int>("class_num");
    float conf_thresh = context.Attr<float>("conf_thresh");
    std::vector<int> downsample_ratio{context.Attr<int>("downsample_ratio0"),
                                      context.Attr<int>("downsample_ratio1"),
                                      context.Attr<int>("downsample_ratio2")};
    // clip_bbox and scale_x_y is not used now!
    float nms_threshold = context.Attr<float>("nms_threshold");
    int batch = context.Input<phi::DenseTensor>("ImageShape")->dims()[0];
    // One TensorInfo per (image, feature map), with an initial device/host
    // capacity of bbox_count_max_alloc (50) boxes each.
    TensorInfo* ts_info = new TensorInfo[batch * boxes_input.size()];
    for (int i = 0; i < batch * static_cast<int>(boxes_input.size()); i++) {
#ifdef PADDLE_WITH_HIP
      hipMalloc(
          reinterpret_cast<void**>(&ts_info[i].bboxes_dev_ptr),
          ts_info[i].bbox_count_max_alloc * (5 + class_num) * sizeof(float));
#else
      cudaMalloc(
          reinterpret_cast<void**>(&ts_info[i].bboxes_dev_ptr),
          ts_info[i].bbox_count_max_alloc * (5 + class_num) * sizeof(float));
#endif
      ts_info[i].bboxes_host_ptr = reinterpret_cast<float*>(malloc(
          ts_info[i].bbox_count_max_alloc * (5 + class_num) * sizeof(float)));
#ifdef PADDLE_WITH_HIP
      hipMalloc(reinterpret_cast<void**>(&ts_info[i].bbox_count_device_ptr),
                sizeof(int));
#else
      cudaMalloc(reinterpret_cast<void**>(&ts_info[i].bbox_count_device_ptr),
                 sizeof(int));
#endif
    }
    // Box index counter in gpu memory
    // *bbox_index_device_ptr used by atomicAdd
    int* bbox_index_device_ptr;
#ifdef PADDLE_WITH_HIP
    hipMalloc(reinterpret_cast<void**>(&bbox_index_device_ptr), sizeof(int));
#else
    cudaMalloc(reinterpret_cast<void**>(&bbox_index_device_ptr), sizeof(int));
#endif
    int total_bbox = 0;
    for (int batch_id = 0; batch_id < batch; batch_id++) {
      for (int input_id = 0; input_id < boxes_input.size(); input_id++) {
        int c = boxes_input_dims[input_id][1];
        int h = boxes_input_dims[input_id][2];
        int w = boxes_input_dims[input_id][3];
        int ts_id = batch_id * boxes_input.size() + input_id;
        int bbox_count_max_alloc = ts_info[ts_id].bbox_count_max_alloc;
        // NOTE(review): grid_size is taken from h only, and netw/neth are
        // built from h/w respectively — this is only consistent for square
        // feature maps (h == w); confirm intent for non-square inputs.
        YoloTensorParseCuda(
            boxes_input[input_id] + batch_id * c * h * w,
            image_shape_data + batch_id * 2,
            image_scale_data + batch_id * 2,
            &(ts_info[ts_id].bboxes_dev_ptr),  // output in gpu,must use 2-level
                                               // pointer, because we may
                                               // re-malloc
            &bbox_count_max_alloc,             // bbox_count_alloc_ptr boxes we
                                               // pre-allocate
            &(ts_info[ts_id].bbox_count_host),  // record bbox numbers
            ts_info[ts_id].bbox_count_device_ptr,  // for atomicAdd
            bbox_index_device_ptr,                 // for atomicAdd
            h,
            class_num,
            anchors_num[input_id],
            downsample_ratio[input_id] * h,
            downsample_ratio[input_id] * w,
            device_anchors_ptr[input_id],
            conf_thresh);
        // batch info update: grow the host mirror if the device buffer grew
        if (bbox_count_max_alloc > ts_info[ts_id].bbox_count_max_alloc) {
          ts_info[ts_id].bbox_count_max_alloc = bbox_count_max_alloc;
          ts_info[ts_id].bboxes_host_ptr = reinterpret_cast<float*>(
              realloc(ts_info[ts_id].bboxes_host_ptr,
                      bbox_count_max_alloc * (5 + class_num) * sizeof(float)));
        }
        // we need copy bbox_count_host boxes to cpu memory
        // NOTE(review): async copy into pageable (malloc'd) host memory with
        // no explicit sync before the host reads bboxes_host_ptr below —
        // relies on the pageable-memory copy completing first; confirm.
#ifdef PADDLE_WITH_HIP
        hipMemcpyAsync(
            ts_info[ts_id].bboxes_host_ptr,
            ts_info[ts_id].bboxes_dev_ptr,
            ts_info[ts_id].bbox_count_host * (5 + class_num) * sizeof(float),
            hipMemcpyDeviceToHost);
#else
        cudaMemcpyAsync(
            ts_info[ts_id].bboxes_host_ptr,
            ts_info[ts_id].bboxes_dev_ptr,
            ts_info[ts_id].bbox_count_host * (5 + class_num) * sizeof(float),
            cudaMemcpyDeviceToHost);
#endif
        total_bbox += ts_info[ts_id].bbox_count_host;
      }
    }
    // Output is at least one row; when no box survives the single
    // zero-initialized row below is the placeholder.
    boxes_scores_tensor->Resize({total_bbox > 0 ? total_bbox : 1, 6});
    float* boxes_scores_data =
        boxes_scores_tensor->mutable_data<float>(platform::CPUPlace());
    // Zeroes only the first row (6 floats) — presumably just the
    // placeholder row for the total_bbox == 0 case; confirm.
    memset(boxes_scores_data, 0, sizeof(float) * 6);
    boxes_num_tensor->Resize({batch});
    int* boxes_num_data =
        boxes_num_tensor->mutable_data<int>(platform::CPUPlace());
    int boxes_scores_id = 0;
    // NMS: rebuild Detection records per image, suppress, then emit rows.
    for (int batch_id = 0; batch_id < batch; batch_id++) {
      std::vector<Detection> bbox_det_vec;
      for (int input_id = 0; input_id < boxes_input.size(); input_id++) {
        int ts_id = batch_id * boxes_input.size() + input_id;
        int bbox_count = ts_info[ts_id].bbox_count_host;
        if (bbox_count <= 0) {
          continue;
        }
        float* bbox_host_ptr = ts_info[ts_id].bboxes_host_ptr;
        for (int bbox_index = 0; bbox_index < bbox_count; ++bbox_index) {
          Detection bbox_det;
          memset(&bbox_det, 0, sizeof(Detection));
          // Device tuples are (objectness, x1, y1, x2, y2, probs...);
          // convert the corner pair to corner + extent for Box.
          bbox_det.objectness = bbox_host_ptr[bbox_index * (5 + class_num) + 0];
          bbox_det.bbox.x = bbox_host_ptr[bbox_index * (5 + class_num) + 1];
          bbox_det.bbox.y = bbox_host_ptr[bbox_index * (5 + class_num) + 2];
          bbox_det.bbox.w =
              bbox_host_ptr[bbox_index * (5 + class_num) + 3] - bbox_det.bbox.x;
          bbox_det.bbox.h =
              bbox_host_ptr[bbox_index * (5 + class_num) + 4] - bbox_det.bbox.y;
          bbox_det.classes = class_num;
          bbox_det.prob =
              reinterpret_cast<float*>(malloc(class_num * sizeof(float)));
          int max_prob_class_id = -1;
          float max_class_prob = 0.0;
          for (int class_id = 0; class_id < class_num; class_id++) {
            float prob =
                bbox_host_ptr[bbox_index * (5 + class_num) + 5 + class_id];
            bbox_det.prob[class_id] = prob;
            if (prob > max_class_prob) {
              max_class_prob = prob;
              max_prob_class_id = class_id;
            }
          }
          bbox_det.max_prob_class_index = max_prob_class_id;
          bbox_det.sort_class = max_prob_class_id;
          bbox_det_vec.push_back(bbox_det);
        }
      }
      PostNMS(&bbox_det_vec, nms_threshold, class_num);
      // Emit [class, objectness, x1, y1, x2, y2] per surviving detection.
      for (int i = 0; i < bbox_det_vec.size(); i++) {
        boxes_scores_data[boxes_scores_id++] =
            bbox_det_vec[i].max_prob_class_index;
        boxes_scores_data[boxes_scores_id++] = bbox_det_vec[i].objectness;
        boxes_scores_data[boxes_scores_id++] = bbox_det_vec[i].bbox.x;
        boxes_scores_data[boxes_scores_id++] = bbox_det_vec[i].bbox.y;
        boxes_scores_data[boxes_scores_id++] =
            bbox_det_vec[i].bbox.w + bbox_det_vec[i].bbox.x;
        boxes_scores_data[boxes_scores_id++] =
            bbox_det_vec[i].bbox.h + bbox_det_vec[i].bbox.y;
        free(bbox_det_vec[i].prob);
      }
      boxes_num_data[batch_id] = bbox_det_vec.size();
    }
    // Release all device/host scratch buffers.
#ifdef PADDLE_WITH_HIP
    hipFree(bbox_index_device_ptr);
#else
    cudaFree(bbox_index_device_ptr);
#endif
    for (int i = 0; i < batch * boxes_input.size(); i++) {
#ifdef PADDLE_WITH_HIP
      hipFree(ts_info[i].bboxes_dev_ptr);
      hipFree(ts_info[i].bbox_count_device_ptr);
#else
      cudaFree(ts_info[i].bboxes_dev_ptr);
      cudaFree(ts_info[i].bbox_count_device_ptr);
#endif
      free(ts_info[i].bboxes_host_ptr);
    }
    delete[] ts_info;
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
PD_REGISTER_STRUCT_KERNEL(
yolo_box_post, GPU, ALL_LAYOUT, ops::YoloBoxPostKernel, float) {}
|
36452c257f57887ffee4b609bd84b231c9cd0a3a.hip | // !!! This is a file automatically generated by hipify!!!
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include "count.cuh"
#include <iostream>
// Driver: builds an n-element vector of 1s, runs count() (a run-length /
// reduce-by-key style primitive from count.cuh) on the GPU, and prints the
// last distinct value, its count, and the elapsed time in milliseconds.
int main(int argc, char *argv[]) {
    // Guard against missing/invalid argv[1] (previously dereferenced blindly).
    if (argc < 2) {
        std::cerr << "usage: " << argv[0] << " <n>" << std::endl;
        return 1;
    }
    long n = atol(argv[1]);
    if (n <= 0) {
        std::cerr << "n must be a positive integer" << std::endl;
        return 1;
    }
    hipEvent_t start;
    hipEvent_t stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    thrust::host_vector<int> h_vec(n);
    // Empty output vectors; count() is expected to fill them.
    thrust::host_vector<int> h_vals;
    thrust::host_vector<int> h_cnts;
    for (long i = 0; i < n; i++) {
        h_vec[i] = 1;
    }
    thrust::device_vector<int> d_vec(h_vec.size());
    thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
    // Empty device vectors
    thrust::device_vector<int> d_vals;
    thrust::device_vector<int> d_cnts;
    hipEventRecord(start);
    count(d_vec, d_vals, d_cnts);
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    float ms;
    hipEventElapsedTime(&ms, start, stop);
    // Empty host vectors resize automatically on assignment.
    h_vals = d_vals;
    h_cnts = d_cnts;
    // Guard .back() — undefined behavior on empty vectors in the original.
    if (h_vals.empty() || h_cnts.empty()) {
        std::cerr << "count() produced no output" << std::endl;
        return 1;
    }
    std::cout << h_vals.back() << std::endl;
    std::cout << h_cnts.back() << std::endl;
    std::cout << ms << std::endl;
    // Release timing events (previously leaked).
    hipEventDestroy(start);
    hipEventDestroy(stop);
    return 0;
}
| 36452c257f57887ffee4b609bd84b231c9cd0a3a.cu | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include "count.cuh"
#include <iostream>
// Driver: builds an n-element vector of 1s, runs count() (a run-length /
// reduce-by-key style primitive from count.cuh) on the GPU, and prints the
// last distinct value, its count, and the elapsed time in milliseconds.
int main(int argc, char *argv[]) {
    // Guard against missing/invalid argv[1] (previously dereferenced blindly).
    if (argc < 2) {
        std::cerr << "usage: " << argv[0] << " <n>" << std::endl;
        return 1;
    }
    long n = atol(argv[1]);
    if (n <= 0) {
        std::cerr << "n must be a positive integer" << std::endl;
        return 1;
    }
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    thrust::host_vector<int> h_vec(n);
    // Empty output vectors; count() is expected to fill them.
    thrust::host_vector<int> h_vals;
    thrust::host_vector<int> h_cnts;
    for (long i = 0; i < n; i++) {
        h_vec[i] = 1;
    }
    thrust::device_vector<int> d_vec(h_vec.size());
    thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
    // Empty device vectors
    thrust::device_vector<int> d_vals;
    thrust::device_vector<int> d_cnts;
    cudaEventRecord(start);
    count(d_vec, d_vals, d_cnts);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float ms;
    cudaEventElapsedTime(&ms, start, stop);
    // Empty host vectors resize automatically on assignment.
    h_vals = d_vals;
    h_cnts = d_cnts;
    // Guard .back() — undefined behavior on empty vectors in the original.
    if (h_vals.empty() || h_cnts.empty()) {
        std::cerr << "count() produced no output" << std::endl;
        return 1;
    }
    std::cout << h_vals.back() << std::endl;
    std::cout << h_cnts.back() << std::endl;
    std::cout << ms << std::endl;
    // Release timing events (previously leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
d9495fd6fd4c8b7df159c20abc3bcc91b30bc11e.hip | // !!! This is a file automatically generated by hipify!!!
#include "mycuda.h"
#include "mycuda_public.h"
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <thrust/device_ptr.h>
#include <thrust/copy.h>
namespace cuda {
/********** Reduce the matrix `m` over F_2 by the `i`th row **********/
// Eliminate pivot column j from every row below row i, over F_2.
// Launch layout: one block per row below i (block b handles row i+b+1);
// the block's threads stride across that row's nColumns entries.
// NOTE(review): the `&` mask assumes matrix entries are 0/1 — TODO confirm
// against Decompress() in EchelonCuda.
__global__ void EchelonKernel(int* m, int nRows, int nColumns, int i, int j) {
    extern __shared__ int sdata[];
    int bid = blockIdx.x;
    int i1 = i + bid + 1;   // row this block reduces
    int j1 = threadIdx.x;
    if (j1 == 0)
        sdata[0] = m[i1 * nColumns + j];   // this row's entry in pivot column j
    __syncthreads();   // all threads must see sdata[0] before reading it
    while (j1 < nColumns) {
        // row[i1] ^= row[i] iff the pivot-column entry was set (F_2 addition)
        m[i1 * nColumns + j1] ^= sdata[0] & m[i * nColumns + j1];
        j1 += blockDim.x;
    }
}
/* Wrapper for the launch of the kernel.
 * Uses one int of dynamic shared memory (the cached pivot-column entry). */
void Echelon(cuda::ArrayInt& dev_m, int nRows, int nColumns, int i, int j)
{
    int blocks = nRows - i - 1;   // one block per row below the pivot
    if (!blocks)
        return;   // pivot is the last row: nothing to eliminate
    int threads = nColumns > MAX_THREADS ? MAX_THREADS : nColumns;
    dim3 dimBlock(threads, 1, 1);
    dim3 dimGrid(blocks, 1, 1);
    hipLaunchKernelGGL(( EchelonKernel) , dim3(dimGrid), dim3(dimBlock), sizeof(int), 0, dev_m.data(), nRows, nColumns, i, j);
}
/********** replace the entries of the matrix `m` with indices for compression **********/
// Turn each nonzero entry of m into its own index and each zero into -1,
// so a subsequent thrust::copy_if(is_nonnegative) compresses the row back
// to sparse (index) form.
__global__ void ReplaceWithIndicesKernel(int* m, int size) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < size)   // tail guard: grid may overshoot size
        m[i] = m[i] ? i : -1;
}
/* Wrapper for the launch of the kernel. */
void ReplaceWithIndices(int* m, int size)
{
    int numBlocks;
    int numThreads;
    // Pick either a single partially-filled block or ceil(size/MAX_THREADS)
    // full blocks.
    if (size < MAX_THREADS) {
        numBlocks = 1;
        numThreads = size;
    }
    else {
        numBlocks = (size + MAX_THREADS - 1) / MAX_THREADS;
        numThreads = MAX_THREADS;
    }
    hipLaunchKernelGGL(( ReplaceWithIndicesKernel), dim3(numBlocks), dim3(numThreads), 0, 0, m, size);
#ifdef _DEBUG
    // Debug builds: surface launch/execution errors immediately.
    cuda::CheckLastError();
    cuda::DeviceSynchronize();
#endif
}
/* Predicate for thrust::copy_if: keeps entries that are valid indices
 * (ReplaceWithIndices marks empty positions with -1). */
struct is_nonnegative
{
    __host__ __device__ bool operator()(const int x) {
        return !(x < 0);
    }
};
/* Return the row echelon form
** the input and out matrices are both in the form of compressed sparse rows.
*/
// Compute the row echelon form of an F_2 matrix on the GPU.
// Input and output are both compressed sparse rows: each row lists the
// column indices of its nonzero entries, in increasing order.
array2d EchelonCuda(const array2d& matrix_csr)
{
    /* Create the sparse matrix dev_m in GPU. */
    int nRows = (int)matrix_csr.size();
    int nColumns = 0;   // widest column index seen + 1 (rows are sorted, so back() is the max)
    std::vector<cuda::ArrayInt> dev_rows(nRows);
    for (int i = 0; i < nRows; ++i) {
        dev_rows[i].init(matrix_csr[i]);
        if (!matrix_csr[i].empty() && nColumns < matrix_csr[i].back() + 1)
            nColumns = matrix_csr[i].back() + 1;
    }
    if (nRows == 0 || nColumns == 0)
        return array2d{};   // empty matrix: nothing to reduce
    int size_m = nRows * nColumns;
    cuda::ArrayInt dev_m(size_m);
    cuda::FillZero(dev_m.data(), size_m * sizeof(int));
    // Expand each sparse row into the dense device matrix.
    for (int i = 0; i < (int)matrix_csr.size(); ++i) {
        int size_rowi = (int)matrix_csr[i].size();
        cuda::Decompress(dev_rows[i].data(), dev_m.data() + i * nColumns, size_rowi); //
    }
    /* Reduce the rows */
    array2d result;
    cuda::ArrayInt tmp(nColumns);   // scratch for one compressed row
    for (int i = 0; i < nRows; ++i) {
        // Leading (leftmost nonzero) column of row i; INT_MAX means the row is zero.
        int index = cuda::MinIndex(dev_m.data() + i * nColumns, nColumns); //
        if (index != INT_MAX) {
            Echelon(dev_m, nRows, nColumns, i, index);
            // Number of nonzeros in row i = size of its compressed form.
            int size_rowi = cuda::Sum(dev_m.data() + i * nColumns, nColumns);
            result.emplace_back(size_rowi);
            // Re-compress row i: mark nonzeros with their indices, then
            // stream-compact the non-negative entries into tmp.
            ReplaceWithIndices(dev_m.data() + i * nColumns, nColumns); //
            thrust::device_ptr<int> thrust_m = thrust::device_pointer_cast(dev_m.data());
            thrust::copy_if(thrust_m + i * nColumns, thrust_m + (i + 1) * nColumns, tmp.data(), is_nonnegative()); //
            cuda::Memcpy(result.back(), tmp, result.back().size());
        }
    }
    return result;
}
} | d9495fd6fd4c8b7df159c20abc3bcc91b30bc11e.cu | #include "mycuda.h"
#include "mycuda_public.h"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <thrust/device_ptr.h>
#include <thrust/copy.h>
namespace cuda {
/********** Reduce the matrix `m` over F_2 by the `i`th row **********/
// Eliminate pivot column j from every row below row i, over F_2.
// Launch layout: one block per row below i (block b handles row i+b+1);
// the block's threads stride across that row's nColumns entries.
// NOTE(review): the `&` mask assumes matrix entries are 0/1 — TODO confirm
// against Decompress() in EchelonCuda.
__global__ void EchelonKernel(int* m, int nRows, int nColumns, int i, int j) {
    extern __shared__ int sdata[];
    int bid = blockIdx.x;
    int i1 = i + bid + 1;   // row this block reduces
    int j1 = threadIdx.x;
    if (j1 == 0)
        sdata[0] = m[i1 * nColumns + j];   // this row's entry in pivot column j
    __syncthreads();   // all threads must see sdata[0] before reading it
    while (j1 < nColumns) {
        // row[i1] ^= row[i] iff the pivot-column entry was set (F_2 addition)
        m[i1 * nColumns + j1] ^= sdata[0] & m[i * nColumns + j1];
        j1 += blockDim.x;
    }
}
/* Wrapper for the launch of the kernel.
 * Uses one int of dynamic shared memory (the cached pivot-column entry). */
void Echelon(cuda::ArrayInt& dev_m, int nRows, int nColumns, int i, int j)
{
    int blocks = nRows - i - 1;   // one block per row below the pivot
    if (!blocks)
        return;   // pivot is the last row: nothing to eliminate
    int threads = nColumns > MAX_THREADS ? MAX_THREADS : nColumns;
    dim3 dimBlock(threads, 1, 1);
    dim3 dimGrid(blocks, 1, 1);
    EchelonKernel <<<dimGrid, dimBlock, sizeof(int)>>> (dev_m.data(), nRows, nColumns, i, j);
}
/********** replace the entries of the matrix `m` with indices for compression **********/
// Turn each nonzero entry of m into its own index and each zero into -1,
// so a subsequent thrust::copy_if(is_nonnegative) compresses the row back
// to sparse (index) form.
__global__ void ReplaceWithIndicesKernel(int* m, int size) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < size)   // tail guard: grid may overshoot size
        m[i] = m[i] ? i : -1;
}
/* Wrapper for the launch of the kernel. */
void ReplaceWithIndices(int* m, int size)
{
    int numBlocks;
    int numThreads;
    // Pick either a single partially-filled block or ceil(size/MAX_THREADS)
    // full blocks.
    if (size < MAX_THREADS) {
        numBlocks = 1;
        numThreads = size;
    }
    else {
        numBlocks = (size + MAX_THREADS - 1) / MAX_THREADS;
        numThreads = MAX_THREADS;
    }
    ReplaceWithIndicesKernel<<<numBlocks, numThreads>>>(m, size);
#ifdef _DEBUG
    // Debug builds: surface launch/execution errors immediately.
    cuda::CheckLastError();
    cuda::DeviceSynchronize();
#endif
}
/* Predicate for thrust::copy_if: keeps entries that are valid indices
 * (ReplaceWithIndices marks empty positions with -1). */
struct is_nonnegative
{
    __host__ __device__ bool operator()(const int x) {
        return !(x < 0);
    }
};
/* Return the row echelon form
** the input and out matrices are both in the form of compressed sparse rows.
*/
// Compute the row echelon form of an F_2 matrix on the GPU.
// Input and output are both compressed sparse rows: each row lists the
// column indices of its nonzero entries, in increasing order.
array2d EchelonCuda(const array2d& matrix_csr)
{
    /* Create the sparse matrix dev_m in GPU. */
    int nRows = (int)matrix_csr.size();
    int nColumns = 0;   // widest column index seen + 1 (rows are sorted, so back() is the max)
    std::vector<cuda::ArrayInt> dev_rows(nRows);
    for (int i = 0; i < nRows; ++i) {
        dev_rows[i].init(matrix_csr[i]);
        if (!matrix_csr[i].empty() && nColumns < matrix_csr[i].back() + 1)
            nColumns = matrix_csr[i].back() + 1;
    }
    if (nRows == 0 || nColumns == 0)
        return array2d{};   // empty matrix: nothing to reduce
    int size_m = nRows * nColumns;
    cuda::ArrayInt dev_m(size_m);
    cuda::FillZero(dev_m.data(), size_m * sizeof(int));
    // Expand each sparse row into the dense device matrix.
    for (int i = 0; i < (int)matrix_csr.size(); ++i) {
        int size_rowi = (int)matrix_csr[i].size();
        cuda::Decompress(dev_rows[i].data(), dev_m.data() + i * nColumns, size_rowi); //
    }
    /* Reduce the rows */
    array2d result;
    cuda::ArrayInt tmp(nColumns);   // scratch for one compressed row
    for (int i = 0; i < nRows; ++i) {
        // Leading (leftmost nonzero) column of row i; INT_MAX means the row is zero.
        int index = cuda::MinIndex(dev_m.data() + i * nColumns, nColumns); //
        if (index != INT_MAX) {
            Echelon(dev_m, nRows, nColumns, i, index);
            // Number of nonzeros in row i = size of its compressed form.
            int size_rowi = cuda::Sum(dev_m.data() + i * nColumns, nColumns);
            result.emplace_back(size_rowi);
            // Re-compress row i: mark nonzeros with their indices, then
            // stream-compact the non-negative entries into tmp.
            ReplaceWithIndices(dev_m.data() + i * nColumns, nColumns); //
            thrust::device_ptr<int> thrust_m = thrust::device_pointer_cast(dev_m.data());
            thrust::copy_if(thrust_m + i * nColumns, thrust_m + (i + 1) * nColumns, tmp.data(), is_nonnegative()); //
            cuda::Memcpy(result.back(), tmp, result.back().size());
        }
    }
    return result;
}
} |
7a36b4432b9468e233c4e73eddf937024215f364.hip | // !!! This is a file automatically generated by hipify!!!
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit_unroll_width.cuinl"
// Explicit template instantiation: int8 implicit-GEMM conv-bias kernel,
// specialized for per-channel bias and an IDENTITY (no-op) activation epilogue.
template void megdnn::cuda::conv_bias_int8::
        do_conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit_unroll_width<
                PerChannelBiasVisitor,
                IConvEpilogue<Activation<
                        megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>>>(
                const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias,
                IConvEpilogue<Activation<
                        megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>>
                epilogue,
                const ConvParam& param, float alpha, float beta, hipStream_t stream);
| 7a36b4432b9468e233c4e73eddf937024215f364.cu | // generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit_unroll_width.cuinl"
// Explicit template instantiation: int8 implicit-GEMM conv-bias kernel,
// specialized for per-channel bias and an IDENTITY (no-op) activation epilogue.
template void megdnn::cuda::conv_bias_int8::
        do_conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit_unroll_width<
                PerChannelBiasVisitor,
                IConvEpilogue<Activation<
                        megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>>>(
                const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias,
                IConvEpilogue<Activation<
                        megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>>
                epilogue,
                const ConvParam& param, float alpha, float beta, cudaStream_t stream);
|
c9792926064456ef867e397b2cd09c40843b06c8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "computePdKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver: for each matrix size and each block
// configuration, launch computePdKernel 1000 times and print
// [elapsed_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
// NOTE(review): the timed loop does not synchronize before taking `end`,
// so it mostly measures launch overhead — kept as-is to preserve output.
int main(int argc, char **argv) {
    hipSetDevice(0);
    // Guard argv[1] (previously dereferenced blindly).
    if (argc < 2) {
        std::cerr << "usage: " << argv[0] << " <matrix_len>" << std::endl;
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    // matrices_ has exactly 7 rows; clamp to avoid reading past the table.
    if (matrix_len > 7) matrix_len = 7;
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // Allocate XSIZE*YSIZE doubles. The original omitted sizeof(double),
            // under-allocating both buffers by a factor of 8.
            double *particle_pd = NULL;
            hipMalloc(&particle_pd, XSIZE * YSIZE * sizeof(double));
            int particles_per_feature = 1;
            int n_features = 1;
            double *feature_pd = NULL;
            hipMalloc(&feature_pd, XSIZE * YSIZE * sizeof(double));
            // Round the problem size up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);  // no-op free: forces lazy context initialization
            hipLaunchKernelGGL(computePdKernel, gridBlock, threadBlock, 0, 0,
                               particle_pd, particles_per_feature, n_features, feature_pd);
            hipDeviceSynchronize();
            // Warm-up launches (not timed).
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(computePdKernel, gridBlock, threadBlock, 0, 0,
                                   particle_pd, particles_per_feature, n_features, feature_pd);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(computePdKernel, gridBlock, threadBlock, 0, 0,
                                   particle_pd, particles_per_feature, n_features, feature_pd);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Release per-configuration buffers (the original leaked them on
            // every iteration).
            hipFree(particle_pd);
            hipFree(feature_pd);
        }
    }
}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "computePdKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver: for each matrix size and each block
// configuration, launch computePdKernel 1000 times and print
// [elapsed_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
// NOTE(review): the timed loop does not synchronize before taking `end`,
// so it mostly measures launch overhead — kept as-is to preserve output.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    // Guard argv[1] (previously dereferenced blindly).
    if (argc < 2) {
        std::cerr << "usage: " << argv[0] << " <matrix_len>" << std::endl;
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    // matrices_ has exactly 7 rows; clamp to avoid reading past the table.
    if (matrix_len > 7) matrix_len = 7;
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // Allocate XSIZE*YSIZE doubles. The original omitted sizeof(double),
            // under-allocating both buffers by a factor of 8.
            double *particle_pd = NULL;
            cudaMalloc(&particle_pd, XSIZE * YSIZE * sizeof(double));
            int particles_per_feature = 1;
            int n_features = 1;
            double *feature_pd = NULL;
            cudaMalloc(&feature_pd, XSIZE * YSIZE * sizeof(double));
            // Round the problem size up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // no-op free: forces lazy context initialization
            computePdKernel<<<gridBlock, threadBlock>>>(particle_pd, particles_per_feature, n_features, feature_pd);
            cudaDeviceSynchronize();
            // Warm-up launches (not timed).
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                computePdKernel<<<gridBlock, threadBlock>>>(particle_pd, particles_per_feature, n_features, feature_pd);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                computePdKernel<<<gridBlock, threadBlock>>>(particle_pd, particles_per_feature, n_features, feature_pd);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Release per-configuration buffers (the original leaked them on
            // every iteration).
            cudaFree(particle_pd);
            cudaFree(feature_pd);
        }
    }
}
7531fd3a67911b765b548f3e6d58e68856e03442.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ORPooling
#include <vector>
#include "caffe/layers/orpooling_layer.hpp"
namespace caffe {
// Forward kernel: for each (batch b, channel j, spatial position l), take the
// max over the nOrientation copies of that channel and record the flat source
// offset of the argmax in indices_data (consumed by the backward pass).
// NOTE(review): *indice is written only when a value exceeds -999999; if every
// input value is <= -999999 the index stays uninitialized — TODO confirm
// inputs are bounded below.
template <typename Dtype>
__global__ void ORPoolingForward(
  const unsigned int nthreads,
  const Dtype* bottom_data,
  Dtype* top_data,
  unsigned int* indices_data,
  const unsigned int nOrientation,
  const unsigned int nBatch,
  const unsigned int nChannel,
  const unsigned int nEntry)
{
  CUDA_KERNEL_LOOP(n, nthreads) {
    // Decode flat output index n -> (batch i, channel j, spatial entry l).
    const unsigned int l = n % nEntry;
    const unsigned int j = (n / nEntry) % nChannel;
    const unsigned int i = n / nEntry / nChannel;
    unsigned int k;
    Dtype max_val = -999999;
    unsigned int* indice = indices_data + n;
    Dtype* target = top_data + n;
    // find maximum
    for (k = 0; k < nOrientation; ++k) {
      // Bottom layout: (batch, channel, orientation, entry), flattened.
      const unsigned int src_offset = i * (nChannel * nOrientation * nEntry)
        + j * (nOrientation * nEntry)
        + k * nEntry
        + l;
      const Dtype val = *(bottom_data + src_offset);
      if (val > max_val) {
        max_val = val;
        *indice = src_offset;   // remember argmax location for backward
      }
    }
    *target = max_val;
  }
}
// Host forward: one output element per (batch, channel, spatial position);
// bottom has nOrientation times as many channels as top.
template <typename Dtype>
void ORPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
  const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  unsigned int* indices_data = indices.mutable_gpu_data();
  const unsigned int nBatch = top[0]->shape(0);
  const unsigned int nChannel = top[0]->shape(1);
  const unsigned nEntry = top[0]->shape(2) * top[0]->shape(3);
  const unsigned int count = nBatch * nChannel * nEntry;
  // NOLINT_NEXT_LINE(whitespace/operators)
  ORPoolingForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, bottom_data, top_data, indices_data, nOrientation, nBatch, nChannel, nEntry);
  CUDA_POST_KERNEL_CHECK;
}
// Backward kernel: scatter each top gradient to the bottom position that won
// the forward max (bottom_diff was zeroed by the caller first).
template <typename Dtype>
__global__ void ORPoolingBackward(
  const unsigned int nthreads,
  Dtype* bottom_diff,
  const Dtype* top_diff,
  const unsigned int* indices_data)
{
  CUDA_KERNEL_LOOP(n, nthreads) {
    Dtype* target = bottom_diff + *(indices_data + n);
    *target = *(top_diff + n);
  }
}
// Host backward: zero bottom_diff, then route gradients via the stored
// argmax indices.
template <typename Dtype>
void ORPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
  const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const unsigned int* indices_data = indices.gpu_data();
    const unsigned int count = top[0]->shape(0) * top[0]->shape(1) *
      top[0]->shape(2) * top[0]->shape(3);
    // Non-argmax positions must receive zero gradient.
    caffe_gpu_memset(sizeof(Dtype) * (count * nOrientation), 0, bottom_diff);
    // NOLINT_NEXT_LINE(whitespace/operators)
    ORPoolingBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, bottom_diff, top_diff, indices_data);
    CUDA_POST_KERNEL_CHECK;
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(ORPoolingLayer);
} // namespace caffe | 7531fd3a67911b765b548f3e6d58e68856e03442.cu | // ORPooling
#include <vector>
#include "caffe/layers/orpooling_layer.hpp"
namespace caffe {
// Forward kernel: for each (batch b, channel j, spatial position l), take the
// max over the nOrientation copies of that channel and record the flat source
// offset of the argmax in indices_data (consumed by the backward pass).
// NOTE(review): *indice is written only when a value exceeds -999999; if every
// input value is <= -999999 the index stays uninitialized — TODO confirm
// inputs are bounded below.
template <typename Dtype>
__global__ void ORPoolingForward(
  const unsigned int nthreads,
  const Dtype* bottom_data,
  Dtype* top_data,
  unsigned int* indices_data,
  const unsigned int nOrientation,
  const unsigned int nBatch,
  const unsigned int nChannel,
  const unsigned int nEntry)
{
  CUDA_KERNEL_LOOP(n, nthreads) {
    // Decode flat output index n -> (batch i, channel j, spatial entry l).
    const unsigned int l = n % nEntry;
    const unsigned int j = (n / nEntry) % nChannel;
    const unsigned int i = n / nEntry / nChannel;
    unsigned int k;
    Dtype max_val = -999999;
    unsigned int* indice = indices_data + n;
    Dtype* target = top_data + n;
    // find maximum
    for (k = 0; k < nOrientation; ++k) {
      // Bottom layout: (batch, channel, orientation, entry), flattened.
      const unsigned int src_offset = i * (nChannel * nOrientation * nEntry)
        + j * (nOrientation * nEntry)
        + k * nEntry
        + l;
      const Dtype val = *(bottom_data + src_offset);
      if (val > max_val) {
        max_val = val;
        *indice = src_offset;   // remember argmax location for backward
      }
    }
    *target = max_val;
  }
}
// Host forward: one output element per (batch, channel, spatial position);
// bottom has nOrientation times as many channels as top.
template <typename Dtype>
void ORPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
  const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  unsigned int* indices_data = indices.mutable_gpu_data();
  const unsigned int nBatch = top[0]->shape(0);
  const unsigned int nChannel = top[0]->shape(1);
  const unsigned nEntry = top[0]->shape(2) * top[0]->shape(3);
  const unsigned int count = nBatch * nChannel * nEntry;
  // NOLINT_NEXT_LINE(whitespace/operators)
  ORPoolingForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, bottom_data, top_data, indices_data, nOrientation, nBatch, nChannel, nEntry);
  CUDA_POST_KERNEL_CHECK;
}
// Backward kernel: scatter each top gradient to the bottom position that won
// the forward max (bottom_diff was zeroed by the caller first).
template <typename Dtype>
__global__ void ORPoolingBackward(
  const unsigned int nthreads,
  Dtype* bottom_diff,
  const Dtype* top_diff,
  const unsigned int* indices_data)
{
  CUDA_KERNEL_LOOP(n, nthreads) {
    Dtype* target = bottom_diff + *(indices_data + n);
    *target = *(top_diff + n);
  }
}
// Host backward: zero bottom_diff, then route gradients via the stored
// argmax indices.
template <typename Dtype>
void ORPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
  const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const unsigned int* indices_data = indices.gpu_data();
    const unsigned int count = top[0]->shape(0) * top[0]->shape(1) *
      top[0]->shape(2) * top[0]->shape(3);
    // Non-argmax positions must receive zero gradient.
    caffe_gpu_memset(sizeof(Dtype) * (count * nOrientation), 0, bottom_diff);
    // NOLINT_NEXT_LINE(whitespace/operators)
    ORPoolingBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, bottom_diff, top_diff, indices_data);
    CUDA_POST_KERNEL_CHECK;
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(ORPoolingLayer);
} // namespace caffe |
c64ffca3f595b1442a6c363228591b6a3c4c572d.hip | // !!! This is a file automatically generated by hipify!!!
#include <ros/ros.h>
#include <hip/hip_runtime.h>
// Trivial device kernel: writes the constant 22 into *device_a.
__global__ void mykernel(int* device_a){
    *device_a = 22;
}
// ROS "hello world" node: round-trips one int through the GPU and logs it.
int main(int argc, char** argv){
    ros::init(argc, argv, "hello_world");
    ros::NodeHandle nh;
    int* host_a = (int*) malloc(sizeof(int));
    int* device_a; hipMalloc((void**) &device_a, sizeof(int));
    // NOTE(review): *host_a is uninitialized here; harmless only because the
    // kernel overwrites the device value before it is read back.
    hipMemcpy(device_a, host_a, sizeof(int), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( mykernel), dim3(1),dim3(1), 0, 0, device_a);
    hipMemcpy(host_a, device_a, sizeof(int), hipMemcpyDeviceToHost);
    ROS_INFO("Hello World! %d", *host_a);  // logs 22
    hipFree(device_a);
    free(host_a);
    return 0;
}
| c64ffca3f595b1442a6c363228591b6a3c4c572d.cu | #include <ros/ros.h>
#include <cuda.h>
// Trivial device kernel: writes the constant 22 into *device_a.
__global__ void mykernel(int* device_a){
    device_a[0] = 22;
}
// ROS "hello world" node: round-trips one int through the GPU and logs it.
int main(int argc, char** argv){
    ros::init(argc, argv, "hello_world");
    ros::NodeHandle nh;
    // One int on the host, one on the device.
    int* h_value = (int*) malloc(sizeof(int));
    int* d_value = NULL;
    cudaMalloc((void**) &d_value, sizeof(int));
    // Round-trip: host -> device, kernel writes 22, device -> host.
    cudaMemcpy(d_value, h_value, sizeof(int), cudaMemcpyHostToDevice);
    mykernel<<<1,1>>>(d_value);
    cudaMemcpy(h_value, d_value, sizeof(int), cudaMemcpyDeviceToHost);
    ROS_INFO("Hello World! %d", *h_value);
    cudaFree(d_value);
    free(h_value);
    return 0;
}
|
af9a2db7daac87c6823542446fd1b4f3ba8e994f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cutil_inline.h>
#include <cutil_math.h>
#include <cassert>
float4* X[2];
float4* X_last[2];
float4 * X_in, *X_out;
float4 * X_last_in, *X_last_out;
extern int readID, writeID;
__global__ void verlet( float4 * pos_vbo, float4 * g_pos_in, float4 * g_pos_old_in, float4 * g_pos_out, float4 * g_pos_old_out,
int2 texsize, float2 step, float damp, float mass, float dt, float2 inv_cloth_size);
// Allocate the double-buffered device arrays: two position buffers and two
// previous-position buffers of `size` float4 elements each.
void InitCUDA(const unsigned int size) {
    const unsigned int num_threads = size;
    const unsigned int mem_size = sizeof(float4) * num_threads;
    // allocate device memory for float4 version
    cutilSafeCall(hipMalloc((void**) &X[0], mem_size)); // positions
    cutilSafeCall(hipMalloc((void**) &X[1], mem_size)); // positions
    cutilSafeCall(hipMalloc((void**) &X_last[0], mem_size)); // old positions
    cutilSafeCall(hipMalloc((void**) &X_last[1], mem_size)); // old positions
}
// Free the double-buffered device arrays; pointers are nulled so a second
// call is a no-op.
void ShutdownCUDA()
{
    // cleanup memory
    if (X[0] != NULL)
    {
        cutilSafeCall(hipFree(X[0]));
        cutilSafeCall(hipFree(X[1]));
        X[0] = NULL;
        X[1] = NULL;
    }
    if (X_last[0] != NULL)
    {
        cutilSafeCall(hipFree(X_last[0]));
        cutilSafeCall(hipFree(X_last[1]));
        X_last[0] = NULL;
        X_last[1] = NULL;
    }
}
// Pick a launch configuration for n work items: at most blockSize threads
// per block, and enough blocks to cover all n items (ceiling division).
void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads)
{
    numThreads = (n < blockSize) ? n : blockSize;
    numBlocks = n / numThreads;
    if (n % numThreads != 0)
        ++numBlocks;
}
// Bind the read/write halves of the double buffers for this frame and, on the
// very first call only, upload the initial positions from the host.
// Ends by swapping readID/writeID for the next frame.
void UploadCUDA(float * positions, float * positions_old, const int size)
{
    static bool start = true;   // true only until the first upload completes
    assert(X[0] != NULL);
    assert(X_last[0] != NULL);
    const unsigned int num_threads = size;
    const unsigned int mem_size = sizeof(float4) * num_threads;
    X_in = X[readID];
    X_out = X[writeID];
    X_last_in = X_last[readID];
    X_last_out = X_last[writeID];
    if (start)
    {
        // First frame: seed the device buffers with the host-side state.
        cutilSafeCall(hipMemcpy(X_in, positions, mem_size, hipMemcpyHostToDevice));
        cutilSafeCall(hipMemcpy(X_last_in, positions_old, mem_size, hipMemcpyHostToDevice));
        cutilCheckMsg("Cuda memory copy host to device failed.");
        start=false;
    }
    // Ping-pong the buffers for the next invocation.
    int tmp=readID;
    readID = writeID;
    writeID=tmp;
}
// Launch the verlet integration kernel over all texsize.x*texsize.y particles
// and block until it finishes.
void VerletCUDA(float4 * pos_vbo, int2 texsize, float2 step, const float & damp, const float & mass, float dt, float2 inv_cloth_size)
{
    // setup execution parameters
    uint numThreads, numBlocks;
    uint numParticles = texsize.x*texsize.y;
    computeGridSize(numParticles, 256, numBlocks, numThreads);
    // execute the kernel
    hipLaunchKernelGGL(( verlet), dim3(numBlocks), dim3(numThreads) , 0, 0, pos_vbo, X_in, X_last_in, X_out, X_last_out, texsize, step, damp, mass, dt, inv_cloth_size);
    // stop the CPU until the kernel has been executed
    hipDeviceSynchronize();
    // check if kernel execution generated and error
    cutilCheckMsg("Cuda kernel execution failed.");
}
| af9a2db7daac87c6823542446fd1b4f3ba8e994f.cu | #include <cutil_inline.h>
#include <cutil_math.h>
#include <cassert>
float4* X[2];
float4* X_last[2];
float4 * X_in, *X_out;
float4 * X_last_in, *X_last_out;
extern int readID, writeID;
__global__ void verlet( float4 * pos_vbo, float4 * g_pos_in, float4 * g_pos_old_in, float4 * g_pos_out, float4 * g_pos_old_out,
int2 texsize, float2 step, float damp, float mass, float dt, float2 inv_cloth_size);
// Allocate the double-buffered device arrays: two position buffers and two
// previous-position buffers of `size` float4 elements each.
void InitCUDA(const unsigned int size) {
    const unsigned int num_threads = size;
    const unsigned int mem_size = sizeof(float4) * num_threads;
    // allocate device memory for float4 version
    cutilSafeCall(cudaMalloc((void**) &X[0], mem_size)); // positions
    cutilSafeCall(cudaMalloc((void**) &X[1], mem_size)); // positions
    cutilSafeCall(cudaMalloc((void**) &X_last[0], mem_size)); // old positions
    cutilSafeCall(cudaMalloc((void**) &X_last[1], mem_size)); // old positions
}
// Free the double-buffered device arrays; pointers are nulled so a second
// call is a no-op.
void ShutdownCUDA()
{
    // cleanup memory
    if (X[0] != NULL)
    {
        cutilSafeCall(cudaFree(X[0]));
        cutilSafeCall(cudaFree(X[1]));
        X[0] = NULL;
        X[1] = NULL;
    }
    if (X_last[0] != NULL)
    {
        cutilSafeCall(cudaFree(X_last[0]));
        cutilSafeCall(cudaFree(X_last[1]));
        X_last[0] = NULL;
        X_last[1] = NULL;
    }
}
// Pick a launch configuration for n work items: at most blockSize threads
// per block, and enough blocks to cover all n items (ceiling division).
void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads)
{
    numThreads = (n < blockSize) ? n : blockSize;
    numBlocks = n / numThreads;
    if (n % numThreads != 0)
        ++numBlocks;
}
// Bind the read/write halves of the double buffers for this frame and, on the
// very first call only, upload the initial positions from the host.
// Ends by swapping readID/writeID for the next frame.
void UploadCUDA(float * positions, float * positions_old, const int size)
{
    static bool start = true;   // true only until the first upload completes
    assert(X[0] != NULL);
    assert(X_last[0] != NULL);
    const unsigned int num_threads = size;
    const unsigned int mem_size = sizeof(float4) * num_threads;
    X_in = X[readID];
    X_out = X[writeID];
    X_last_in = X_last[readID];
    X_last_out = X_last[writeID];
    if (start)
    {
        // First frame: seed the device buffers with the host-side state.
        cutilSafeCall(cudaMemcpy(X_in, positions, mem_size, cudaMemcpyHostToDevice));
        cutilSafeCall(cudaMemcpy(X_last_in, positions_old, mem_size, cudaMemcpyHostToDevice));
        cutilCheckMsg("Cuda memory copy host to device failed.");
        start=false;
    }
    // Ping-pong the buffers for the next invocation.
    int tmp=readID;
    readID = writeID;
    writeID=tmp;
}
// Launch the verlet integration kernel over all texsize.x*texsize.y particles
// and block until it finishes.
void VerletCUDA(float4 * pos_vbo, int2 texsize, float2 step, const float & damp, const float & mass, float dt, float2 inv_cloth_size)
{
    // setup execution parameters
    uint numThreads, numBlocks;
    uint numParticles = texsize.x*texsize.y;
    computeGridSize(numParticles, 256, numBlocks, numThreads);
    // execute the kernel
    verlet<<< numBlocks, numThreads >>>(pos_vbo, X_in, X_last_in, X_out, X_last_out, texsize, step, damp, mass, dt, inv_cloth_size);
    // stop the CPU until the kernel has been executed.
    // cudaThreadSynchronize() is deprecated (and removed in CUDA 12);
    // cudaDeviceSynchronize() is the supported equivalent.
    cudaDeviceSynchronize();
    // check if kernel execution generated and error
    cutilCheckMsg("Cuda kernel execution failed.");
}
|
e9d18caa6e45473ccfb82fa24717d04fafbf6e26.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* Copyright (c) 2017 Microsoft
* Licensed under The Apache-2.0 License [see LICENSE for details]
* \file multi_proposal.cu
* \brief MultiProposal Operator
* \author Shaoqing Ren, Xizhou Zhu, Jian Guo
*/
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <ctime>
#include <iostream>
#include "../operator_common.h"
#include "../mshadow_op.h"
#include "./multi_proposal-inl.h"
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
#define FRCNN_CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
namespace multi_proposal {
// scores are (b, 2 * anchor, h, w)
// workspace_proposals are (b, h * w * anchor, 5)
// w defines "x" and h defines "y"
// count should be total anchors numbers, h * w * anchors
// Tile the per-position anchor boxes across the (h, w) feature grid and attach
// each anchor's foreground score. The first `num_anchors` rows of
// workspace_proposals hold the base anchors (read in-place); every output row
// gets the base anchor shifted by (w, h) * feature_stride.
template<typename Dtype>
__global__ void ProposalGridKernel(const int count,
                                   const int num_anchors,
                                   const int height,
                                   const int width,
                                   const int feature_stride,
                                   const Dtype* scores,
                                   Dtype* workspace_proposals) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x;
       index < count;
       index += blockDim.x * gridDim.x) {
    // Decode flat index -> (batch b, row h, col w, anchor a).
    int a = index % num_anchors;
    int w = (index / num_anchors) % width;
    int h = (index / num_anchors / width) % height;
    int b = index / num_anchors / width / height;
    // Shift the base anchor (x1, y1, x2, y2) to this grid cell.
    workspace_proposals[index * 5 + 0] = workspace_proposals[a * 5 + 0] + w * feature_stride;
    workspace_proposals[index * 5 + 1] = workspace_proposals[a * 5 + 1] + h * feature_stride;
    workspace_proposals[index * 5 + 2] = workspace_proposals[a * 5 + 2] + w * feature_stride;
    workspace_proposals[index * 5 + 3] = workspace_proposals[a * 5 + 3] + h * feature_stride;
    // Foreground score lives in the second half of the 2*num_anchors score channels.
    workspace_proposals[index * 5 + 4] =
      scores[((b * (2 * num_anchors) + a + num_anchors) * height + h) * width + w];
  }
}
// Apply the predicted bbox regression deltas (dx, dy, dw, dh) to each anchor,
// clip the result to the image, and invalidate (score = -1) boxes whose grid
// cell lies outside the real image extent. In-place: boxes and out_pred_boxes
// may alias.
template<typename Dtype>
__global__ void BBoxPredKernel(const int count,
                               const int num_anchors,
                               const int feat_height,
                               const int feat_width,
                               const int feature_stride,
                               const Dtype* im_infos,
                               const Dtype* boxes,
                               const Dtype* deltas,
                               Dtype* out_pred_boxes) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x;
       index < count;
       index += blockDim.x * gridDim.x) {
    // Decode flat index -> (batch b, row h, col w, anchor a).
    int a = index % num_anchors;
    int w = (index / num_anchors) % feat_width;
    int h = (index / num_anchors / feat_width) % feat_height;
    int b = index / num_anchors / feat_width / feat_height;
    // im_infos rows are (height, width, scale) per image.
    float im_height = im_infos[b * 3];
    float im_width = im_infos[b * 3 + 1];
    // Portion of the feature map covered by the actual (possibly padded) image.
    int real_height = static_cast<int>(im_height / feature_stride);
    int real_width = static_cast<int>(im_width / feature_stride);
    // Anchor box in center/size form.
    float width = boxes[index * 5 + 2] - boxes[index * 5 + 0] + 1.0f;
    float height = boxes[index * 5 + 3] - boxes[index * 5 + 1] + 1.0f;
    float ctr_x = boxes[index * 5 + 0] + 0.5f * (width - 1.0f);
    float ctr_y = boxes[index * 5 + 1] + 0.5f * (height - 1.0f);
    // Deltas are laid out as (batch*anchor, 4, feat_height, feat_width).
    int ba = (b * num_anchors + a);
    float dx = deltas[((ba * 4) * feat_height + h) * feat_width + w];
    float dy = deltas[((ba * 4 + 1) * feat_height + h) * feat_width + w];
    float dw = deltas[((ba * 4 + 2) * feat_height + h) * feat_width + w];
    float dh = deltas[((ba * 4 + 3) * feat_height + h) * feat_width + w];
    // Standard R-CNN box transform: shift center, scale size exponentially.
    float pred_ctr_x = dx * width + ctr_x;
    float pred_ctr_y = dy * height + ctr_y;
    float pred_w = exp(dw) * width;
    float pred_h = exp(dh) * height;
    float pred_x1 = pred_ctr_x - 0.5f * (pred_w - 1.0f);
    float pred_y1 = pred_ctr_y - 0.5f * (pred_h - 1.0f);
    float pred_x2 = pred_ctr_x + 0.5f * (pred_w - 1.0f);
    float pred_y2 = pred_ctr_y + 0.5f * (pred_h - 1.0f);
    // Clip to image bounds.
    pred_x1 = max(min(pred_x1, im_width - 1.0f), 0.0f);
    pred_y1 = max(min(pred_y1, im_height - 1.0f), 0.0f);
    pred_x2 = max(min(pred_x2, im_width - 1.0f), 0.0f);
    pred_y2 = max(min(pred_y2, im_height - 1.0f), 0.0f);
    out_pred_boxes[index * 5 + 0] = pred_x1;
    out_pred_boxes[index * 5 + 1] = pred_y1;
    out_pred_boxes[index * 5 + 2] = pred_x2;
    out_pred_boxes[index * 5 + 3] = pred_y2;
    // Anchors anchored in the padded region are marked invalid for the
    // downstream top-K / NMS stages.
    if (h >= real_height || w >= real_width) {
      out_pred_boxes[index * 5 + 4] = -1.0f;
    }
  }
}
// boxes are (b, h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (b, h * w * anchor, 5)
// count should be total anchors numbers, b * h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
//
// Variant of BBoxPredKernel used when param_.iou_loss is set: the four
// deltas are added directly to the box corners (no center/size/exp
// parameterisation), then clipped to the image. Cells beyond the per-image
// valid feature extent get score -1.
template<typename Dtype>
__global__ void IoUPredKernel(const int count,
                              const int num_anchors,
                              const int feat_height,
                              const int feat_width,
                              const int feature_stride,
                              const Dtype* im_infos,
                              const Dtype* boxes,
                              const Dtype* deltas,
                              Dtype* out_pred_boxes) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x;
       index < count;
       index += blockDim.x * gridDim.x) {
    // Flat index -> (batch b, cell h/w, anchor a); anchor varies fastest.
    int a = index % num_anchors;
    int w = (index / num_anchors) % feat_width;
    int h = (index / num_anchors / feat_width) % feat_height;
    int b = index / num_anchors / feat_width / feat_height;

    // im_infos row layout: [height, width, ...] per image.
    float im_height = im_infos[b * 3];
    float im_width = im_infos[b * 3 + 1];
    int real_height = static_cast<int>(im_height / feature_stride);
    int real_width = static_cast<int>(im_width / feature_stride);

    float x1 = boxes[index * 5 + 0];
    float y1 = boxes[index * 5 + 1];
    float x2 = boxes[index * 5 + 2];
    float y2 = boxes[index * 5 + 3];

    // Per-corner additive offsets, channel-major: 4 channels per
    // (batch, anchor) pair.
    int ba = (b * num_anchors + a);
    float dx1 = deltas[((ba * 4) * feat_height + h) * feat_width + w];
    float dy1 = deltas[((ba * 4 + 1) * feat_height + h) * feat_width + w];
    float dx2 = deltas[((ba * 4 + 2) * feat_height + h) * feat_width + w];
    float dy2 = deltas[((ba * 4 + 3) * feat_height + h) * feat_width + w];

    // Shift each corner and clip to [0, im_size - 1].
    float pred_x1 = max(min(x1 + dx1, im_width - 1.0f), 0.0f);
    float pred_y1 = max(min(y1 + dy1, im_height - 1.0f), 0.0f);
    float pred_x2 = max(min(x2 + dx2, im_width - 1.0f), 0.0f);
    float pred_y2 = max(min(y2 + dy2, im_height - 1.0f), 0.0f);

    out_pred_boxes[index * 5 + 0] = pred_x1;
    out_pred_boxes[index * 5 + 1] = pred_y1;
    out_pred_boxes[index * 5 + 2] = pred_x2;
    out_pred_boxes[index * 5 + 3] = pred_y2;

    // Invalidate cells outside this image's real feature extent.
    if (h >= real_height || w >= real_width) {
      out_pred_boxes[index * 5 + 4] = -1.0f;
    }
  }
}
// Suppress proposals smaller than the scaled minimum size.
// dets is (b, n, 5); a too-small box is inflated by min_size/2 on every side
// and its score is set to -1 so later stages ignore it.
// im_infos[b*3 + 2] is the per-image factor applied to original_min_size
// (presumably the image scale — third field of im_info; confirm at caller).
template<typename Dtype>
__global__ void FilterBoxKernel(const int count,
                                const int count_anchors,
                                const float original_min_size,
                                const Dtype* im_infos,
                                Dtype* dets) {
  const int stride = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += stride) {
    Dtype* box = dets + i * 5;
    const int batch = i / count_anchors;
    const float min_size = original_min_size * im_infos[batch * 3 + 2];
    const float box_w = box[2] - box[0] + 1.0f;
    const float box_h = box[3] - box[1] + 1.0f;
    if (box_w < min_size || box_h < min_size) {
      const float half = min_size / 2;
      box[0] -= half;
      box[1] -= half;
      box[2] += half;
      box[3] += half;
      box[4] = -1.0f;
    }
  }
}
// Extract the score column (dets[:, 4]) into a dense array and initialise
// the companion order array with identity indices 0..count-1, ready for a
// subsequent key/value sort.
template<typename Dtype>
__global__ void CopyScoreKernel(const int count,
                                const Dtype* dets,
                                Dtype* score,
                                int* order) {
  const int stride = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += stride) {
    order[i] = i;
    score[i] = dets[i * 5 + 4];
  }
}
// Gather the top `count` proposals by sorted rank:
// dets[i] = prev_dets[order[i]], copying all 5 values per row.
template<typename Dtype>
__global__ void ReorderProposalsKernel(const int count,
                                       const Dtype* prev_dets,
                                       const int* order,
                                       Dtype* dets) {
  const int stride = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += stride) {
    const Dtype* src = prev_dets + order[i] * 5;
    Dtype* dst = dets + i * 5;
    for (int j = 0; j < 5; ++j) {
      dst[j] = src[j];
    }
  }
}
// Intersection-over-union of two boxes laid out as (x1, y1, x2, y2, ...).
// Widths/heights use the same +1 pixel convention as the rest of this file.
__device__ inline float devIoU(float const * const a, float const * const b) {
  const float ix1 = max(a[0], b[0]);
  const float iy1 = max(a[1], b[1]);
  const float ix2 = min(a[2], b[2]);
  const float iy2 = min(a[3], b[3]);
  const float iw = max(ix2 - ix1 + 1, 0.f);
  const float ih = max(iy2 - iy1 + 1, 0.f);
  const float inter = iw * ih;
  const float area_a = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  const float area_b = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  return inter / (area_a + area_b - inter);
}
// Pairwise NMS overlap mask. Boxes are partitioned into 64-wide groups
// (threadsPerBlock == 64, the bit width of uint64_t). Grid block
// (col_start, row_start) compares every box of the row group against every
// box of the column group; bit i of dev_mask[box * col_blocks + col_start]
// is set when box (col_start*64 + i) overlaps `box` above the threshold.
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
                           const float *dev_boxes, uint64_t *dev_mask) {
  const int threadsPerBlock = sizeof(uint64_t) * 8;
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;

  // if (row_start > col_start) return;

  // The last group may be short; clamp to the number of remaining boxes.
  const int row_size =
        min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
  const int col_size =
        min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);

  // Stage the column group's boxes (5 floats each) in shared memory.
  __shared__ float block_boxes[threadsPerBlock * 5];
  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 5 + 0] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
    block_boxes[threadIdx.x * 5 + 1] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
    block_boxes[threadIdx.x * 5 + 2] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
    block_boxes[threadIdx.x * 5 + 3] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
    block_boxes[threadIdx.x * 5 + 4] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
  }
  __syncthreads();  // all column boxes staged before any thread reads them

  if (threadIdx.x < row_size) {
    const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
    const float *cur_box = dev_boxes + cur_box_idx * 5;
    int i = 0;
    uint64_t t = 0;
    int start = 0;
    // On the diagonal block, compare only against later boxes: self-overlap
    // and earlier pairs are covered by other (row, col) combinations.
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
    dev_mask[cur_box_idx * col_blocks + col_start] = t;
  }
}
// Host-side non-maximum suppression driver.
// boxes:   (n, 5) device tensor, rows (x1, y1, x2, y2, score); callers sort
//          by descending score before calling, so the greedy scan below
//          visits boxes best-first.
// keep:    host array receiving indices of surviving boxes.
// num_out: number of survivors written (<= rpn_post_nms_top_n).
void _nms(const mshadow::Tensor<gpu, 2>& boxes,
          const float nms_overlap_thresh,
          const int rpn_post_nms_top_n,
          int *keep,
          int *num_out) {
  const int threadsPerBlock = sizeof(uint64_t) * 8;  // 64 boxes per mask word
  const int boxes_num = boxes.size(0);
  const int boxes_dim = boxes.size(1);  // unused; rows are always 5 wide
  (void)boxes_dim;
  float* boxes_dev = boxes.dptr_;
  uint64_t* mask_dev = NULL;

  const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
  FRCNN_CUDA_CHECK(hipMalloc(&mask_dev,
                             boxes_num * col_blocks * sizeof(uint64_t)));

  dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
              DIVUP(boxes_num, threadsPerBlock));
  dim3 threads(threadsPerBlock);
  // BUG FIX: this launch previously invoked ProposalGridKernel with symbols
  // (dimGrid, dimBlock, count, num_anchors, scores, param_, ...) that do not
  // exist in this function. The overlap mask must be produced by nms_kernel.
  hipLaunchKernelGGL((nms_kernel), dim3(blocks), dim3(threads), 0, 0,
                     boxes_num,
                     nms_overlap_thresh,
                     boxes_dev,
                     mask_dev);
  FRCNN_CUDA_CHECK(hipPeekAtLastError());

  // Bring the pairwise overlap bitmask back to the host (hipMemcpy also
  // synchronizes with the kernel above).
  std::vector<uint64_t> mask_host(boxes_num * col_blocks);
  FRCNN_CUDA_CHECK(hipMemcpy(&mask_host[0],
                             mask_dev,
                             sizeof(uint64_t) * boxes_num * col_blocks,
                             hipMemcpyDeviceToHost));

  // Greedy scan in score order: a box survives iff no previously kept box
  // suppressed it; a survivor's mask row then suppresses later boxes.
  std::vector<uint64_t> remv(col_blocks);
  memset(&remv[0], 0, sizeof(uint64_t) * col_blocks);

  int num_to_keep = 0;
  for (int i = 0; i < boxes_num; i++) {
    int nblock = i / threadsPerBlock;
    int inblock = i % threadsPerBlock;
    if (!(remv[nblock] & (1ULL << inblock))) {
      keep[num_to_keep++] = i;
      if (num_to_keep >= rpn_post_nms_top_n) break;
      uint64_t *p = &mask_host[0] + i * col_blocks;
      // nms_kernel only fills bits for boxes after i, so start at i's block.
      for (int j = nblock; j < col_blocks; j++) {
        remv[j] |= p[j];
      }
    }
  }
  *num_out = num_to_keep;

  FRCNN_CUDA_CHECK(hipFree(mask_dev));
}
// Write the kept proposals of one image into the output tensors.
// Each out row is (image_index, x1, y1, x2, y2); score gets the box score.
// When NMS kept fewer than `count` boxes (index >= out_size), survivors are
// recycled round-robin so every output row is populated.
template<typename Dtype>
__global__ void PrepareOutput(const int count,
                              const Dtype* dets,
                              const int* keep,
                              const int out_size,
                              const int image_index,
                              Dtype* out,
                              Dtype* score) {
  const int stride = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += stride) {
    out[i * 5] = image_index;
    // Past the number of survivors, wrap around and reuse kept boxes.
    const int keep_i = (i < out_size) ? keep[i] : keep[i % out_size];
    const Dtype* src = dets + keep_i * 5;
    for (int j = 0; j < 4; ++j) {
      out[i * 5 + j + 1] = src[j];
    }
    score[i] = src[4];
  }
}
} // namespace multi_proposal
} // namespace cuda
} // namespace mshadow
namespace mxnet {
namespace op {
// GPU implementation of the MultiProposal operator (batched RPN proposals).
// Forward pipeline per batch: build anchor grid -> bbox regression ->
// filter tiny boxes -> per-image sort by score -> NMS -> emit top proposals.
template<typename xpu>
class MultiProposalGPUOp : public Operator{
 public:
  explicit MultiProposalGPUOp(MultiProposalParam param) {
    this->param_ = param;
  }

  virtual void Forward(const OpContext &ctx,
                       const std::vector<TBlob> &in_data,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &out_data,
                       const std::vector<TBlob> &aux_states) {
    using namespace mshadow;
    using namespace mshadow::expr;
    using namespace mshadow::cuda;
    using namespace mshadow::cuda::multi_proposal;
    CHECK_EQ(in_data.size(), 3);
    CHECK_EQ(out_data.size(), 2);
    CHECK_GT(req.size(), 1);
    CHECK_EQ(req[proposal::kOut], kWriteTo);
    /*CHECK_EQ(in_data[proposal::kClsProb].shape_[0], 1)
      << "Sorry, multiple images each device is not implemented.";*/

    Stream<xpu> *s = ctx.get_stream<xpu>();

    Tensor<xpu, 4> scores = in_data[proposal::kClsProb].get<xpu, 4, real_t>(s);
    Tensor<xpu, 4> bbox_deltas = in_data[proposal::kBBoxPred].get<xpu, 4, real_t>(s);
    Tensor<xpu, 2> im_info = in_data[proposal::kImInfo].get<xpu, 2, real_t>(s);
    Tensor<xpu, 2> out = out_data[proposal::kOut].get<xpu, 2, real_t>(s);
    Tensor<xpu, 2> out_score = out_data[proposal::kScore].get<xpu, 2, real_t>(s);

    int num_images = scores.size(0);
    int num_anchors = scores.size(1) / 2;  // score map holds 2 channels/anchor
    int height = scores.size(2);
    int width = scores.size(3);
    int count_anchors = num_anchors * height * width;  // anchors per image
    int count = num_images * count_anchors;            // anchors in the batch

    // rpn_pre_nms_top_n <= 0 means "keep all anchors".
    // FIX: use std::min (host code) instead of ::min, matching the rest of
    // the operator sources.
    int rpn_pre_nms_top_n = (param_.rpn_pre_nms_top_n > 0) ? param_.rpn_pre_nms_top_n
                                                           : count_anchors;
    rpn_pre_nms_top_n = std::min(rpn_pre_nms_top_n, count_anchors);
    int rpn_post_nms_top_n = std::min(param_.rpn_post_nms_top_n, rpn_pre_nms_top_n);

    // Generate the base anchors (one feature-stride-sized square scaled by
    // the configured ratios/scales) on the host.
    std::vector<float> base_anchor(4);
    base_anchor[0] = 0.0;
    base_anchor[1] = 0.0;
    base_anchor[2] = param_.feature_stride - 1.0;
    base_anchor[3] = param_.feature_stride - 1.0;
    CHECK_EQ(num_anchors, param_.ratios.ndim() * param_.scales.ndim());
    std::vector<float> anchors;
    utils::GenerateAnchors(base_anchor,
                           param_.ratios,
                           param_.scales,
                           &anchors);

    // Copy generated anchors to the head of the GPU workspace; the grid
    // kernel reads them back as base boxes.
    float* workspace_proposals_ptr = NULL;
    FRCNN_CUDA_CHECK(hipMalloc(&workspace_proposals_ptr,
                               sizeof(float) * num_images * count_anchors * 5));
    Tensor<xpu, 3> workspace_proposals(workspace_proposals_ptr,
                                       Shape3(num_images, count_anchors, 5));
    FRCNN_CUDA_CHECK(hipMemcpy(workspace_proposals.dptr_, &anchors[0],
                               sizeof(float) * anchors.size(), hipMemcpyHostToDevice));

    // Expand base anchors over the full (batch, h, w) mesh grid.
    dim3 dimGrid((count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock);
    dim3 dimBlock(kMaxThreadsPerBlock);
    CheckLaunchParam(dimGrid, dimBlock, "ProposalGrid");
    hipLaunchKernelGGL((ProposalGridKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
        count, num_anchors, height, width, param_.feature_stride,
        scores.dptr_, workspace_proposals.dptr_);
    FRCNN_CUDA_CHECK(hipPeekAtLastError());

    // Transform anchors and bbox_deltas into bboxes (in place).
    CheckLaunchParam(dimGrid, dimBlock, "BBoxPred");
    if (param_.iou_loss) {
      hipLaunchKernelGGL((IoUPredKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
          count, num_anchors, height, width, param_.feature_stride,
          im_info.dptr_, workspace_proposals.dptr_, bbox_deltas.dptr_,
          workspace_proposals.dptr_);
    } else {
      hipLaunchKernelGGL((BBoxPredKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
          count, num_anchors, height, width, param_.feature_stride,
          im_info.dptr_, workspace_proposals.dptr_, bbox_deltas.dptr_,
          workspace_proposals.dptr_);
    }
    FRCNN_CUDA_CHECK(hipPeekAtLastError());

    // Filter boxes smaller than rpn_min_size (score forced to -1).
    CheckLaunchParam(dimGrid, dimBlock, "FilterBox");
    hipLaunchKernelGGL((FilterBoxKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
        count, count_anchors, static_cast<float>(param_.rpn_min_size),
        im_info.dptr_, workspace_proposals.dptr_);
    FRCNN_CUDA_CHECK(hipPeekAtLastError());

    // The per-image stages below launch over count_anchors elements.
    dimGrid = dim3((count_anchors + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock);
    dimBlock = dim3(kMaxThreadsPerBlock);

    // Scratch buffers reused for every image in the batch.
    float* score_ptr = NULL;
    FRCNN_CUDA_CHECK(hipMalloc(&score_ptr, sizeof(float) * count_anchors));
    Tensor<xpu, 1> score(score_ptr, Shape1(count_anchors));
    int* order_ptr = NULL;
    FRCNN_CUDA_CHECK(hipMalloc(&order_ptr, sizeof(int) * count_anchors));
    Tensor<xpu, 1, int> order(order_ptr, Shape1(count_anchors));
    float* workspace_ordered_proposals_ptr = NULL;
    FRCNN_CUDA_CHECK(hipMalloc(&workspace_ordered_proposals_ptr,
                               sizeof(float) * rpn_pre_nms_top_n * 5));
    Tensor<xpu, 2> workspace_ordered_proposals(workspace_ordered_proposals_ptr,
                                               Shape2(rpn_pre_nms_top_n, 5));
    int* keep;
    FRCNN_CUDA_CHECK(hipMalloc(&keep, sizeof(int) * rpn_pre_nms_top_n));

    for (int b = 0; b < num_images; b++) {
      // Copy this image's scores into a dense array and init identity order.
      // FIX: the pointer expression was `workspace_proposals.dptr_b * ...`
      // (missing `+ `), which is not a valid expression; it must offset the
      // workspace by image b's slice.
      CheckLaunchParam(dimGrid, dimBlock, "CopyScore");
      hipLaunchKernelGGL((CopyScoreKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
          count_anchors, workspace_proposals.dptr_ + b * count_anchors * 5,
          score.dptr_, order.dptr_);
      FRCNN_CUDA_CHECK(hipPeekAtLastError());

      // argsort score descending, carrying the proposal indices along.
      thrust::stable_sort_by_key(thrust::device,
                                 score.dptr_,
                                 score.dptr_ + score.size(0),
                                 order.dptr_,
                                 thrust::greater<real_t>());
      FRCNN_CUDA_CHECK(hipPeekAtLastError());

      // Gather the rpn_pre_nms_top_n best proposals in sorted order.
      dimGrid.x = (rpn_pre_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
      CheckLaunchParam(dimGrid, dimBlock, "ReorderProposals");
      hipLaunchKernelGGL((ReorderProposalsKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
          rpn_pre_nms_top_n, workspace_proposals.dptr_ + b * count_anchors * 5,
          order.dptr_, workspace_ordered_proposals.dptr_);
      FRCNN_CUDA_CHECK(hipPeekAtLastError());

      // Non-maximum suppression on the host-driven mask.
      std::vector<int> _keep(workspace_ordered_proposals.size(0));
      int out_size = 0;
      _nms(workspace_ordered_proposals,
           param_.threshold,
           rpn_post_nms_top_n,
           &_keep[0],
           &out_size);

      // Copy the kept indices back to the device for the gather below.
      FRCNN_CUDA_CHECK(hipMemcpy(keep, &_keep[0], sizeof(int) * _keep.size(),
                                 hipMemcpyHostToDevice));

      // Emit exactly rpn_post_nms_top_n rows for image b (survivors are
      // recycled inside PrepareOutput when fewer were kept).
      dimGrid.x = (param_.rpn_post_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
      CheckLaunchParam(dimGrid, dimBlock, "PrepareOutput");
      hipLaunchKernelGGL((PrepareOutput), dim3(dimGrid), dim3(dimBlock), 0, 0,
          param_.rpn_post_nms_top_n, workspace_ordered_proposals.dptr_, keep,
          out_size, b,
          out.dptr_ + b * param_.rpn_post_nms_top_n * 5,
          out_score.dptr_ + b * param_.rpn_post_nms_top_n);
      FRCNN_CUDA_CHECK(hipPeekAtLastError());
    }

    // Free temporary device memory.
    FRCNN_CUDA_CHECK(hipFree(keep));
    FRCNN_CUDA_CHECK(hipFree(workspace_ordered_proposals_ptr));
    FRCNN_CUDA_CHECK(hipFree(workspace_proposals_ptr));
    FRCNN_CUDA_CHECK(hipFree(score_ptr));
    FRCNN_CUDA_CHECK(hipFree(order_ptr));
  }

  virtual void Backward(const OpContext &ctx,
                        const std::vector<TBlob> &out_grad,
                        const std::vector<TBlob> &in_data,
                        const std::vector<TBlob> &out_data,
                        const std::vector<OpReqType> &req,
                        const std::vector<TBlob> &in_grad,
                        const std::vector<TBlob> &aux_states) {
    using namespace mshadow;
    using namespace mshadow::expr;
    CHECK_EQ(in_grad.size(), 3);
    Stream<xpu> *s = ctx.get_stream<xpu>();
    Tensor<xpu, 4> gscores = in_grad[proposal::kClsProb].get<xpu, 4, real_t>(s);
    Tensor<xpu, 4> gbbox = in_grad[proposal::kBBoxPred].get<xpu, 4, real_t>(s);
    Tensor<xpu, 2> ginfo = in_grad[proposal::kImInfo].get<xpu, 2, real_t>(s);
    // Proposal generation is not differentiable: explicitly zero all input
    // gradients (the buffers can not be assumed to already be zero).
    Assign(gscores, req[proposal::kClsProb], 0);
    Assign(gbbox, req[proposal::kBBoxPred], 0);
    Assign(ginfo, req[proposal::kImInfo], 0);
  }

 private:
  MultiProposalParam param_;
};  // class MultiProposalGPUOp
// Factory specialisation: build the GPU implementation of MultiProposal.
template<>
Operator* CreateOp<gpu>(MultiProposalParam param) {
  return new MultiProposalGPUOp<gpu>(param);
}
} // namespace op
} // namespace mxnet
| e9d18caa6e45473ccfb82fa24717d04fafbf6e26.cu | #include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* Copyright (c) 2017 Microsoft
* Licensed under The Apache-2.0 License [see LICENSE for details]
* \file multi_proposal.cu
* \brief MultiProposal Operator
* \author Shaoqing Ren, Xizhou Zhu, Jian Guo
*/
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <ctime>
#include <iostream>
#include "../operator_common.h"
#include "../mshadow_op.h"
#include "./multi_proposal-inl.h"
// Ceiling division: number of size-n groups needed to cover m elements.
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))

// Evaluate a HIP runtime call and CHECK-fail with the error string on any
// status other than hipSuccess.
#define FRCNN_CUDA_CHECK(condition) \
  /* Code block avoids redefinition of hipError_t error */ \
  do { \
    hipError_t error = condition; \
    CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
  } while (0)
namespace mshadow {
namespace cuda {
namespace multi_proposal {
// scores are (b, 2 * anchor, h, w)
// workspace_proposals are (b, h * w * anchor, 5)
// w defines "x" and h defines "y"
// count should be total anchors numbers, h * w * anchors
//
// Precondition: the first num_anchors rows of workspace_proposals hold the
// base anchors; each output row shifts its base anchor by (w, h) *
// feature_stride and copies the score from channel a + num_anchors.
template<typename Dtype>
__global__ void ProposalGridKernel(const int count,
                                   const int num_anchors,
                                   const int height,
                                   const int width,
                                   const int feature_stride,
                                   const Dtype* scores,
                                   Dtype* workspace_proposals) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x;
       index < count;
       index += blockDim.x * gridDim.x) {
    // Flat index -> (batch b, cell h/w, anchor a); anchor varies fastest.
    int a = index % num_anchors;
    int w = (index / num_anchors) % width;
    int h = (index / num_anchors / width) % height;
    int b = index / num_anchors / width / height;
    workspace_proposals[index * 5 + 0] = workspace_proposals[a * 5 + 0] + w * feature_stride;
    workspace_proposals[index * 5 + 1] = workspace_proposals[a * 5 + 1] + h * feature_stride;
    workspace_proposals[index * 5 + 2] = workspace_proposals[a * 5 + 2] + w * feature_stride;
    workspace_proposals[index * 5 + 3] = workspace_proposals[a * 5 + 3] + h * feature_stride;
    workspace_proposals[index * 5 + 4] =
      scores[((b * (2 * num_anchors) + a + num_anchors) * height + h) * width + w];
  }
}
// boxes are (b, h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (b, h * w * anchor, 5)
// count should be total anchors numbers, b * h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
//
// Center/size box regression: shift the centre by (dx, dy), scale the size
// by exp(dw)/exp(dh), clip to the image, and mark cells outside the
// per-image valid feature extent with score -1.
template<typename Dtype>
__global__ void BBoxPredKernel(const int count,
                               const int num_anchors,
                               const int feat_height,
                               const int feat_width,
                               const int feature_stride,
                               const Dtype* im_infos,
                               const Dtype* boxes,
                               const Dtype* deltas,
                               Dtype* out_pred_boxes) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x;
       index < count;
       index += blockDim.x * gridDim.x) {
    // Flat index -> (batch b, cell h/w, anchor a).
    int a = index % num_anchors;
    int w = (index / num_anchors) % feat_width;
    int h = (index / num_anchors / feat_width) % feat_height;
    int b = index / num_anchors / feat_width / feat_height;
    // im_infos row: [height, width, ...].
    float im_height = im_infos[b * 3];
    float im_width = im_infos[b * 3 + 1];
    int real_height = static_cast<int>(im_height / feature_stride);
    int real_width = static_cast<int>(im_width / feature_stride);
    // +1 pixel width/height convention.
    float width = boxes[index * 5 + 2] - boxes[index * 5 + 0] + 1.0f;
    float height = boxes[index * 5 + 3] - boxes[index * 5 + 1] + 1.0f;
    float ctr_x = boxes[index * 5 + 0] + 0.5f * (width - 1.0f);
    float ctr_y = boxes[index * 5 + 1] + 0.5f * (height - 1.0f);
    // 4 consecutive delta channels (dx, dy, dw, dh) per (batch, anchor).
    int ba = (b * num_anchors + a);
    float dx = deltas[((ba * 4) * feat_height + h) * feat_width + w];
    float dy = deltas[((ba * 4 + 1) * feat_height + h) * feat_width + w];
    float dw = deltas[((ba * 4 + 2) * feat_height + h) * feat_width + w];
    float dh = deltas[((ba * 4 + 3) * feat_height + h) * feat_width + w];
    float pred_ctr_x = dx * width + ctr_x;
    float pred_ctr_y = dy * height + ctr_y;
    float pred_w = exp(dw) * width;
    float pred_h = exp(dh) * height;
    float pred_x1 = pred_ctr_x - 0.5f * (pred_w - 1.0f);
    float pred_y1 = pred_ctr_y - 0.5f * (pred_h - 1.0f);
    float pred_x2 = pred_ctr_x + 0.5f * (pred_w - 1.0f);
    float pred_y2 = pred_ctr_y + 0.5f * (pred_h - 1.0f);
    // Clip to [0, im_size - 1].
    pred_x1 = max(min(pred_x1, im_width - 1.0f), 0.0f);
    pred_y1 = max(min(pred_y1, im_height - 1.0f), 0.0f);
    pred_x2 = max(min(pred_x2, im_width - 1.0f), 0.0f);
    pred_y2 = max(min(pred_y2, im_height - 1.0f), 0.0f);
    out_pred_boxes[index * 5 + 0] = pred_x1;
    out_pred_boxes[index * 5 + 1] = pred_y1;
    out_pred_boxes[index * 5 + 2] = pred_x2;
    out_pred_boxes[index * 5 + 3] = pred_y2;
    // Invalidate cells outside the real feature extent of this image.
    if (h >= real_height || w >= real_width) {
      out_pred_boxes[index * 5 + 4] = -1.0f;
    }
  }
}
// boxes are (b, h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (b, h * w * anchor, 5)
// count should be total anchors numbers, b * h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
//
// iou_loss variant of BBoxPredKernel: deltas are added directly to the four
// corners (no centre/size/exp parameterisation), then clipped to the image.
template<typename Dtype>
__global__ void IoUPredKernel(const int count,
                              const int num_anchors,
                              const int feat_height,
                              const int feat_width,
                              const int feature_stride,
                              const Dtype* im_infos,
                              const Dtype* boxes,
                              const Dtype* deltas,
                              Dtype* out_pred_boxes) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x;
       index < count;
       index += blockDim.x * gridDim.x) {
    // Flat index -> (batch b, cell h/w, anchor a).
    int a = index % num_anchors;
    int w = (index / num_anchors) % feat_width;
    int h = (index / num_anchors / feat_width) % feat_height;
    int b = index / num_anchors / feat_width / feat_height;
    // im_infos row: [height, width, ...].
    float im_height = im_infos[b * 3];
    float im_width = im_infos[b * 3 + 1];
    int real_height = static_cast<int>(im_height / feature_stride);
    int real_width = static_cast<int>(im_width / feature_stride);
    float x1 = boxes[index * 5 + 0];
    float y1 = boxes[index * 5 + 1];
    float x2 = boxes[index * 5 + 2];
    float y2 = boxes[index * 5 + 3];
    // 4 consecutive per-corner offset channels per (batch, anchor).
    int ba = (b * num_anchors + a);
    float dx1 = deltas[((ba * 4) * feat_height + h) * feat_width + w];
    float dy1 = deltas[((ba * 4 + 1) * feat_height + h) * feat_width + w];
    float dx2 = deltas[((ba * 4 + 2) * feat_height + h) * feat_width + w];
    float dy2 = deltas[((ba * 4 + 3) * feat_height + h) * feat_width + w];
    // Shift each corner and clip to [0, im_size - 1].
    float pred_x1 = max(min(x1 + dx1, im_width - 1.0f), 0.0f);
    float pred_y1 = max(min(y1 + dy1, im_height - 1.0f), 0.0f);
    float pred_x2 = max(min(x2 + dx2, im_width - 1.0f), 0.0f);
    float pred_y2 = max(min(y2 + dy2, im_height - 1.0f), 0.0f);
    out_pred_boxes[index * 5 + 0] = pred_x1;
    out_pred_boxes[index * 5 + 1] = pred_y1;
    out_pred_boxes[index * 5 + 2] = pred_x2;
    out_pred_boxes[index * 5 + 3] = pred_y2;
    // Invalidate cells outside the real feature extent of this image.
    if (h >= real_height || w >= real_width) {
      out_pred_boxes[index * 5 + 4] = -1.0f;
    }
  }
}
// Suppress proposals smaller than the scaled minimum size.
// dets is (b, n, 5); a too-small box is inflated by min_size/2 on every side
// and its score is set to -1 so later stages ignore it.
// im_infos[b*3 + 2] scales original_min_size per image (presumably the image
// scale factor — third field of im_info; confirm at caller).
template<typename Dtype>
__global__ void FilterBoxKernel(const int count,
                                const int count_anchors,
                                const float original_min_size,
                                const Dtype* im_infos,
                                Dtype* dets) {
  const int stride = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += stride) {
    Dtype* box = dets + i * 5;
    const int batch = i / count_anchors;
    const float min_size = original_min_size * im_infos[batch * 3 + 2];
    const float box_w = box[2] - box[0] + 1.0f;
    const float box_h = box[3] - box[1] + 1.0f;
    if (box_w < min_size || box_h < min_size) {
      const float half = min_size / 2;
      box[0] -= half;
      box[1] -= half;
      box[2] += half;
      box[3] += half;
      box[4] = -1.0f;
    }
  }
}
// Extract the score column (dets[:, 4]) into a dense array and initialise
// the companion order array with identity indices 0..count-1, ready for a
// subsequent key/value sort.
template<typename Dtype>
__global__ void CopyScoreKernel(const int count,
                                const Dtype* dets,
                                Dtype* score,
                                int* order) {
  const int stride = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += stride) {
    order[i] = i;
    score[i] = dets[i * 5 + 4];
  }
}
// Gather the top `count` proposals by sorted rank:
// dets[i] = prev_dets[order[i]], copying all 5 values per row.
template<typename Dtype>
__global__ void ReorderProposalsKernel(const int count,
                                       const Dtype* prev_dets,
                                       const int* order,
                                       Dtype* dets) {
  const int stride = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += stride) {
    const Dtype* src = prev_dets + order[i] * 5;
    Dtype* dst = dets + i * 5;
    for (int j = 0; j < 5; ++j) {
      dst[j] = src[j];
    }
  }
}
// Intersection-over-union of two boxes laid out as (x1, y1, x2, y2, ...).
// Widths/heights use the same +1 pixel convention as the rest of this file.
__device__ inline float devIoU(float const * const a, float const * const b) {
  const float ix1 = max(a[0], b[0]);
  const float iy1 = max(a[1], b[1]);
  const float ix2 = min(a[2], b[2]);
  const float iy2 = min(a[3], b[3]);
  const float iw = max(ix2 - ix1 + 1, 0.f);
  const float ih = max(iy2 - iy1 + 1, 0.f);
  const float inter = iw * ih;
  const float area_a = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  const float area_b = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  return inter / (area_a + area_b - inter);
}
// Pairwise NMS overlap mask. Boxes are partitioned into 64-wide groups
// (threadsPerBlock == 64, the bit width of uint64_t); grid block
// (col_start, row_start) compares every box of the row group against the
// column group cached in shared memory. Bit i of
// dev_mask[box * col_blocks + col_start] marks suppression by box
// (col_start*64 + i).
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
                           const float *dev_boxes, uint64_t *dev_mask) {
  const int threadsPerBlock = sizeof(uint64_t) * 8;
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;

  // if (row_start > col_start) return;

  // Last group may be short; clamp to the remaining boxes.
  const int row_size =
        min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
  const int col_size =
        min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);

  // Stage the column group's boxes (5 floats each) in shared memory.
  __shared__ float block_boxes[threadsPerBlock * 5];
  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 5 + 0] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
    block_boxes[threadIdx.x * 5 + 1] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
    block_boxes[threadIdx.x * 5 + 2] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
    block_boxes[threadIdx.x * 5 + 3] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
    block_boxes[threadIdx.x * 5 + 4] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
  }
  __syncthreads();  // all column boxes staged before any thread reads them

  if (threadIdx.x < row_size) {
    const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
    const float *cur_box = dev_boxes + cur_box_idx * 5;
    int i = 0;
    uint64_t t = 0;
    int start = 0;
    // On the diagonal block, compare only against later boxes.
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
    dev_mask[cur_box_idx * col_blocks + col_start] = t;
  }
}
// Host-side non-maximum suppression driver.
// boxes:   (n, 5) device tensor, rows (x1, y1, x2, y2, score); callers sort
//          by descending score first, so the greedy scan visits boxes
//          best-first.
// keep:    host array receiving indices of surviving boxes.
// num_out: number of survivors written (<= rpn_post_nms_top_n).
void _nms(const mshadow::Tensor<gpu, 2>& boxes,
          const float nms_overlap_thresh,
          const int rpn_post_nms_top_n,
          int *keep,
          int *num_out) {
  const int threadsPerBlock = sizeof(uint64_t) * 8;  // 64 boxes per mask word
  const int boxes_num = boxes.size(0);
  const int boxes_dim = boxes.size(1);  // unused; rows are always 5 wide
  (void)boxes_dim;
  float* boxes_dev = boxes.dptr_;
  uint64_t* mask_dev = NULL;

  const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
  FRCNN_CUDA_CHECK(hipMalloc(&mask_dev,
                             boxes_num * col_blocks * sizeof(uint64_t)));

  dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
              DIVUP(boxes_num, threadsPerBlock));
  dim3 threads(threadsPerBlock);
  // BUG FIX: this launch previously invoked ProposalGridKernel with symbols
  // (dimGrid, dimBlock, count, num_anchors, scores, param_, ...) that do not
  // exist in this function. The overlap mask must be produced by nms_kernel.
  hipLaunchKernelGGL((nms_kernel), dim3(blocks), dim3(threads), 0, 0,
                     boxes_num,
                     nms_overlap_thresh,
                     boxes_dev,
                     mask_dev);
  FRCNN_CUDA_CHECK(hipPeekAtLastError());

  // Bring the pairwise overlap bitmask back to the host (hipMemcpy also
  // synchronizes with the kernel above).
  std::vector<uint64_t> mask_host(boxes_num * col_blocks);
  FRCNN_CUDA_CHECK(hipMemcpy(&mask_host[0],
                             mask_dev,
                             sizeof(uint64_t) * boxes_num * col_blocks,
                             hipMemcpyDeviceToHost));

  // Greedy scan in score order: a box survives iff no previously kept box
  // suppressed it; a survivor's mask row then suppresses later boxes.
  std::vector<uint64_t> remv(col_blocks);
  memset(&remv[0], 0, sizeof(uint64_t) * col_blocks);

  int num_to_keep = 0;
  for (int i = 0; i < boxes_num; i++) {
    int nblock = i / threadsPerBlock;
    int inblock = i % threadsPerBlock;
    if (!(remv[nblock] & (1ULL << inblock))) {
      keep[num_to_keep++] = i;
      if (num_to_keep >= rpn_post_nms_top_n) break;
      uint64_t *p = &mask_host[0] + i * col_blocks;
      // nms_kernel only fills bits for boxes after i, so start at i's block.
      for (int j = nblock; j < col_blocks; j++) {
        remv[j] |= p[j];
      }
    }
  }
  *num_out = num_to_keep;

  FRCNN_CUDA_CHECK(hipFree(mask_dev));
}
// Write the kept proposals of one image into the output tensors.
// Each out row is (image_index, x1, y1, x2, y2); score gets the box score.
// When NMS kept fewer than `count` boxes (index >= out_size), survivors are
// recycled round-robin so every output row is populated.
template<typename Dtype>
__global__ void PrepareOutput(const int count,
                              const Dtype* dets,
                              const int* keep,
                              const int out_size,
                              const int image_index,
                              Dtype* out,
                              Dtype* score) {
  const int stride = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += stride) {
    out[i * 5] = image_index;
    // Past the number of survivors, wrap around and reuse kept boxes.
    const int keep_i = (i < out_size) ? keep[i] : keep[i % out_size];
    const Dtype* src = dets + keep_i * 5;
    for (int j = 0; j < 4; ++j) {
      out[i * 5 + j + 1] = src[j];
    }
    score[i] = src[4];
  }
}
} // namespace multi_proposal
} // namespace cuda
} // namespace mshadow
namespace mxnet {
namespace op {
// GPU operator generating RPN region proposals for a whole batch of images:
// tiles anchors over the score map, applies bbox regression, filters small
// boxes, sorts by score, runs NMS per image and writes the top proposals.
template<typename xpu>
class MultiProposalGPUOp : public Operator {
 public:
  explicit MultiProposalGPUOp(MultiProposalParam param) {
    this->param_ = param;
  }

  // Inputs : cls_prob (n, 2*A, H, W), bbox_pred (n, 4*A, H, W), im_info (n, 3)
  //          — the 2*A channel split is implied by num_anchors = size(1) / 2.
  // Outputs: rois (n*post_nms_top_n, 5), scores (n*post_nms_top_n, 1).
  virtual void Forward(const OpContext &ctx,
                       const std::vector<TBlob> &in_data,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &out_data,
                       const std::vector<TBlob> &aux_states) {
    using namespace mshadow;
    using namespace mshadow::expr;
    using namespace mshadow::cuda;
    using namespace mshadow::cuda::multi_proposal;
    CHECK_EQ(in_data.size(), 3);
    CHECK_EQ(out_data.size(), 2);
    CHECK_GT(req.size(), 1);
    CHECK_EQ(req[proposal::kOut], kWriteTo);
    /*CHECK_EQ(in_data[proposal::kClsProb].shape_[0], 1)
      << "Sorry, multiple images each device is not implemented.";*/

    Stream<xpu> *s = ctx.get_stream<xpu>();

    Tensor<xpu, 4> scores = in_data[proposal::kClsProb].get<xpu, 4, real_t>(s);
    Tensor<xpu, 4> bbox_deltas = in_data[proposal::kBBoxPred].get<xpu, 4, real_t>(s);
    Tensor<xpu, 2> im_info = in_data[proposal::kImInfo].get<xpu, 2, real_t>(s);
    Tensor<xpu, 2> out = out_data[proposal::kOut].get<xpu, 2, real_t>(s);
    Tensor<xpu, 2> out_score = out_data[proposal::kScore].get<xpu, 2, real_t>(s);

    int num_images = scores.size(0);
    int num_anchors = scores.size(1) / 2;
    int height = scores.size(2);
    int width = scores.size(3);
    int count_anchors = num_anchors * height * width;  // anchors per image
    int count = num_images * count_anchors;            // anchors in the batch

    // rpn_pre_nms_top_n <= 0 means "keep all anchors"; clamp both top-n
    // values so they never exceed what is actually available.
    int rpn_pre_nms_top_n = (param_.rpn_pre_nms_top_n > 0) ? param_.rpn_pre_nms_top_n
                                                           : count_anchors;
    rpn_pre_nms_top_n = std::min(rpn_pre_nms_top_n, count_anchors);
    int rpn_post_nms_top_n = std::min(param_.rpn_post_nms_top_n, rpn_pre_nms_top_n);

    // Generate the base anchors on the host from the feature stride.
    std::vector<float> base_anchor(4);
    base_anchor[0] = 0.0;
    base_anchor[1] = 0.0;
    base_anchor[2] = param_.feature_stride - 1.0;
    base_anchor[3] = param_.feature_stride - 1.0;
    CHECK_EQ(num_anchors, param_.ratios.ndim() * param_.scales.ndim());
    std::vector<float> anchors;
    utils::GenerateAnchors(base_anchor,
                           param_.ratios,
                           param_.scales,
                           &anchors);

    // Device workspace holding (num_images, count_anchors, 5) proposals;
    // the host-generated anchors seed the first rows.
    float* workspace_proposals_ptr = NULL;
    FRCNN_CUDA_CHECK(hipMalloc(&workspace_proposals_ptr,
                               sizeof(float) * num_images * count_anchors * 5));
    Tensor<xpu, 3> workspace_proposals(workspace_proposals_ptr,
                                       Shape3(num_images, count_anchors, 5));
    FRCNN_CUDA_CHECK(hipMemcpy(workspace_proposals.dptr_, &anchors[0],
                               sizeof(float) * anchors.size(), hipMemcpyHostToDevice));

    // Tile the anchors over every feature-map location and attach scores.
    dim3 dimGrid((count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock);
    dim3 dimBlock(kMaxThreadsPerBlock);
    CheckLaunchParam(dimGrid, dimBlock, "ProposalGrid");
    hipLaunchKernelGGL((ProposalGridKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
                       count, num_anchors, height, width, param_.feature_stride,
                       scores.dptr_, workspace_proposals.dptr_);
    FRCNN_CUDA_CHECK(hipPeekAtLastError());

    // Transform anchors with bbox_deltas into image-space boxes (in place).
    CheckLaunchParam(dimGrid, dimBlock, "BBoxPred");
    if (param_.iou_loss) {
      hipLaunchKernelGGL((IoUPredKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
                         count, num_anchors, height, width, param_.feature_stride,
                         im_info.dptr_, workspace_proposals.dptr_,
                         bbox_deltas.dptr_, workspace_proposals.dptr_);
    } else {
      hipLaunchKernelGGL((BBoxPredKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
                         count, num_anchors, height, width, param_.feature_stride,
                         im_info.dptr_, workspace_proposals.dptr_,
                         bbox_deltas.dptr_, workspace_proposals.dptr_);
    }
    FRCNN_CUDA_CHECK(hipPeekAtLastError());

    // Suppress boxes smaller than rpn_min_size (scaled by im_info).
    CheckLaunchParam(dimGrid, dimBlock, "FilterBox");
    hipLaunchKernelGGL((FilterBoxKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
                       count, count_anchors, static_cast<float>(param_.rpn_min_size),
                       im_info.dptr_, workspace_proposals.dptr_);
    FRCNN_CUDA_CHECK(hipPeekAtLastError());

    // Per-image work below uses a count_anchors-sized launch.
    dimGrid = dim3((count_anchors + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock);
    dimBlock = dim3(kMaxThreadsPerBlock);

    // Scratch buffers reused for every image in the batch.
    float* score_ptr = NULL;
    FRCNN_CUDA_CHECK(hipMalloc(&score_ptr, sizeof(float) * count_anchors));
    Tensor<xpu, 1> score(score_ptr, Shape1(count_anchors));
    int* order_ptr = NULL;
    FRCNN_CUDA_CHECK(hipMalloc(&order_ptr, sizeof(int) * count_anchors));
    Tensor<xpu, 1, int> order(order_ptr, Shape1(count_anchors));
    float* workspace_ordered_proposals_ptr = NULL;
    FRCNN_CUDA_CHECK(hipMalloc(&workspace_ordered_proposals_ptr,
                               sizeof(float) * rpn_pre_nms_top_n * 5));
    Tensor<xpu, 2> workspace_ordered_proposals(workspace_ordered_proposals_ptr,
                                               Shape2(rpn_pre_nms_top_n, 5));
    int* keep;
    FRCNN_CUDA_CHECK(hipMalloc(&keep, sizeof(int) * rpn_pre_nms_top_n));

    for (int b = 0; b < num_images; b++) {
      // Copy scores of image b into a contiguous buffer and init order 0..N-1.
      // BUG FIX: source address was "workspace_proposals.dptr_b * ..." — a
      // typo fusing the pointer name with the batch offset; it must be the
      // batch-b slice of the proposal workspace.
      CheckLaunchParam(dimGrid, dimBlock, "CopyScore");
      hipLaunchKernelGGL(HIP_KERNEL_NAME(CopyScoreKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
                         count_anchors,
                         workspace_proposals.dptr_ + b * count_anchors * 5,
                         score.dptr_, order.dptr_);
      FRCNN_CUDA_CHECK(hipPeekAtLastError());

      // Descending argsort of the scores; `order` becomes the permutation.
      thrust::stable_sort_by_key(thrust::device,
                                 score.dptr_,
                                 score.dptr_ + score.size(0),
                                 order.dptr_,
                                 thrust::greater<real_t>());
      FRCNN_CUDA_CHECK(hipPeekAtLastError());

      // Gather the top rpn_pre_nms_top_n proposals in score order.
      dimGrid.x = (rpn_pre_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
      CheckLaunchParam(dimGrid, dimBlock, "ReorderProposals");
      hipLaunchKernelGGL(HIP_KERNEL_NAME(ReorderProposalsKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
                         rpn_pre_nms_top_n,
                         workspace_proposals.dptr_ + b * count_anchors * 5,
                         order.dptr_, workspace_ordered_proposals.dptr_);
      FRCNN_CUDA_CHECK(hipPeekAtLastError());

      // Host-coordinated NMS; _keep receives surviving indices, out_size
      // how many of them are valid.
      std::vector<int> _keep(workspace_ordered_proposals.size(0));
      int out_size = 0;
      _nms(workspace_ordered_proposals,
           param_.threshold,
           rpn_post_nms_top_n,
           &_keep[0],
           &out_size);

      FRCNN_CUDA_CHECK(hipMemcpy(keep, &_keep[0], sizeof(int) * _keep.size(),
                                 hipMemcpyHostToDevice));

      // Write exactly param_.rpn_post_nms_top_n rows for image b (padding by
      // repeating kept proposals when out_size is smaller).
      dimGrid.x = (param_.rpn_post_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
      CheckLaunchParam(dimGrid, dimBlock, "PrepareOutput");
      hipLaunchKernelGGL(HIP_KERNEL_NAME(PrepareOutput), dim3(dimGrid), dim3(dimBlock), 0, 0,
                         param_.rpn_post_nms_top_n,
                         workspace_ordered_proposals.dptr_, keep, out_size, b,
                         out.dptr_ + b * param_.rpn_post_nms_top_n * 5,
                         out_score.dptr_ + b * param_.rpn_post_nms_top_n);
      FRCNN_CUDA_CHECK(hipPeekAtLastError());
    }

    // Free temporary device memory.
    FRCNN_CUDA_CHECK(hipFree(keep));
    FRCNN_CUDA_CHECK(hipFree(workspace_ordered_proposals_ptr));
    FRCNN_CUDA_CHECK(hipFree(workspace_proposals_ptr));
    FRCNN_CUDA_CHECK(hipFree(score_ptr));
    FRCNN_CUDA_CHECK(hipFree(order_ptr));
  }

  // Proposal generation is not differentiable; zero all input gradients
  // (cannot assume they are already zero under kAddTo/kWriteTo semantics).
  virtual void Backward(const OpContext &ctx,
                        const std::vector<TBlob> &out_grad,
                        const std::vector<TBlob> &in_data,
                        const std::vector<TBlob> &out_data,
                        const std::vector<OpReqType> &req,
                        const std::vector<TBlob> &in_grad,
                        const std::vector<TBlob> &aux_states) {
    using namespace mshadow;
    using namespace mshadow::expr;
    CHECK_EQ(in_grad.size(), 3);
    Stream<xpu> *s = ctx.get_stream<xpu>();
    Tensor<xpu, 4> gscores = in_grad[proposal::kClsProb].get<xpu, 4, real_t>(s);
    Tensor<xpu, 4> gbbox = in_grad[proposal::kBBoxPred].get<xpu, 4, real_t>(s);
    Tensor<xpu, 2> ginfo = in_grad[proposal::kImInfo].get<xpu, 2, real_t>(s);
    Assign(gscores, req[proposal::kClsProb], 0);
    Assign(gbbox, req[proposal::kBBoxPred], 0);
    Assign(ginfo, req[proposal::kImInfo], 0);
  }

 private:
  MultiProposalParam param_;
};  // class MultiProposalGPUOp
// GPU factory specialization: constructs the CUDA/HIP implementation of the
// MultiProposal operator (caller takes ownership of the returned pointer).
template<>
Operator* CreateOp<gpu>(MultiProposalParam param) {
return new MultiProposalGPUOp<gpu>(param);
}
} // namespace op
} // namespace mxnet
|
149712025a6c33ad8de79e081d39685c3b7f0e63.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Elementwise exponential: target[i] = exp(mat[i]) for i in [0, len), using
// the reduced-precision __expf intrinsic. Grid-stride loop, so any launch
// configuration covers all elements; mat and target may alias element-wise.
__global__ void kExp(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = __expf(mat[i]);
}
} | 149712025a6c33ad8de79e081d39685c3b7f0e63.cu | #include "includes.h"
// Elementwise exponential: target[i] = exp(mat[i]) for i in [0, len), using
// the reduced-precision __expf intrinsic. Grid-stride loop, so any launch
// configuration covers all elements.
__global__ void kExp(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = __expf(mat[i]);
}
} |
8c4344f25ff83fe6cb9404cc0338bfbac40f2e6f.hip | // !!! This is a file automatically generated by hipify!!!
#include "Renderer.h"
#include "cuda_gl_interop.h"
#include <iostream>
#include <time.h>
#ifdef DEBUG
// Debug builds: evaluate the HIP call, print its error string and abort the
// process (after a pause) on failure.
#define HANDLE_ERROR(x){\
hipError_t cudaStatus = (x); \
if (cudaStatus != hipSuccess){\
fprintf(stdout, ": %s\n", hipGetErrorString(cudaStatus)); \
system("Pause"); \
exit(1); \
} \
}
#else
// Release builds: evaluate x but silently discard its status.
// NOTE(review): errors are swallowed entirely here — consider at least logging.
#define HANDLE_ERROR(x) x
#endif
// GLFW error callback: report the error code and description on stderr.
void glfw_error_callback(int error, const char* description){
fprintf(stderr, "Glfw Error %d: %s\n", error, description);
}
// Drains up to 5 pending OpenGL errors, printing a readable name plus the
// numeric code and the caller-supplied file/line for each. Stops early once
// glGetError reports GL_NO_ERROR.
void newGLCheckError(const char *filename, const int line){
    GLenum err;
    char str[64];
    for (int i = 0; i < 5; i++){
        if ((err = glGetError()) != GL_NO_ERROR){
            switch (err){
            case GL_INVALID_ENUM:
                strcpy_s(str, "GL_INVALID_ENUM");
                break;
            case GL_INVALID_VALUE:
                strcpy_s(str, "GL_INVALID_VALUE");
                break;
            case GL_INVALID_OPERATION:
                strcpy_s(str, "GL_INVALID_OPERATION");
                break;
            case GL_STACK_OVERFLOW:
                strcpy_s(str, "GL_STACK_OVERFLOW");
                break;
            case GL_STACK_UNDERFLOW:
                strcpy_s(str, "GL_STACK_UNDERFLOW");
                break;
            case GL_OUT_OF_MEMORY:
                strcpy_s(str, "GL_OUT_OF_MEMORY");
                break;
            /*case GL_INVALID_FRAMEBUFFER_OPERATION:
                strcpy_s(str, "GL_INVALID_FRAMEBUFFER_OPERATION");
                break;*/
            default:
                // BUG FIX: `str` was previously left uninitialized for
                // unrecognized codes and then printed (undefined behavior).
                strcpy_s(str, "UNKNOWN_GL_ERROR");
                break;
            }
            // BUG FIX: format was "%ss", printing a stray 's' after the name.
            printf("GL error %s %#x in file %s in line %d \n", str, err, filename, line);
        }
        else break;
    }
}
// Fit an imgW x imgH image into a screenW x screenH viewport while keeping
// the image aspect ratio. Returns the normalized size (w, h in [0, 1]) and
// offset (always 0, 0 — the draw shader centers via the MVP).
ImgSizePos getCorrectSize(int screenW, int screenH, int imgW, int imgH){
    const float screenAspect = float(screenW) / float(screenH);
    const float imageAspect = float(imgW) / float(imgH);
    ImgSizePos fitted;
    fitted.x = 0.0f;
    fitted.y = 0.0f;
    if (imageAspect > screenAspect){
        // Image is relatively wider: use the full width, shrink the height.
        fitted.w = 1.0f;
        fitted.h = screenAspect / imageAspect;
    }
    else{
        // Image is relatively taller (or equal): full height, shrink width.
        fitted.w = imageAspect / screenAspect;
        fitted.h = 1.0f;
    }
    return fitted;
}
// Clamp x into [0, 1].
inline float clamp(float x){
    if (x < 0.0f) return 0.0f;
    if (x > 1.0f) return 1.0f;
    return x;
}
// Map a [0, 1] float channel to an 8-bit value with round-to-nearest.
inline int toInt(float x){ return static_cast<int>(clamp(x) * 255 + .5); }
// Tone-maps the HDR accumulation buffer (BUFFER1) into the offscreen target
// (FBO2) using exposure and the accumulated ray count, then overlays the
// scene objects as line/circle outlines. Leaves FBO2 bound.
void Renderer::postProcess(){
glBindFramebuffer(GL_FRAMEBUFFER, fbos[FBO::FBO2]);
glViewport(0, 0, width, height);
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textures[TEXTURE::BUFFER1]);
glBindVertexArray(vaos[VAO::ScreenGeomVao]);
programs[ShaderEnum::DrawBuffer].bind();
// 2^exposure scaling; `rays` normalizes the additive-blend accumulation.
glUniform1f(programs[ShaderEnum::DrawBuffer].getUniformLocation("exposure"), glm::pow(2.0f, exposure));
glUniform1f(programs[ShaderEnum::DrawBuffer].getUniformLocation("rays"), float(rays));
glUniform1i(programs[ShaderEnum::DrawBuffer].getUniformLocation("buffer"), 0);
glDrawArrays(GL_TRIANGLES, 0, 6);
glEnable(GL_LINE_SMOOTH);
programs[ShaderEnum::DrawShape].bind();
glUniform1f(programs[ShaderEnum::DrawShape].getUniformLocation("aspectRatio"), float(width) / height);
for (int i = 0; i < objects.size(); i++){
// Each object's inverse transform positions its unit shape in the scene.
glUniformMatrix3fv(programs[ShaderEnum::DrawShape].getUniformLocation("matrix"), 1, GL_FALSE, glm::value_ptr(objects[i].t.inv));
// objectType 0 = line segment, 1 = circle (tessellated loop).
if (objects[i].objectType == 0){
glBindVertexArray(vaos[VAO::LineShapeVao]);
glDrawArrays(GL_LINES, 0, 2);
}
else if (objects[i].objectType == 1){
glBindVertexArray(vaos[VAO::CircleShapeVao]);
glDrawArrays(GL_LINE_LOOP, 0, CIRCLE_TESSELATION);
}
}
}
// Runs post-processing, reads back the offscreen texture and writes it as
// "<filename>.png" (24-bit) via FreeImage. Channels are clamped to [0, 1]
// by toInt; no gamma correction is applied here.
void Renderer::saveFrame(std::string filename){
postProcess();
std::vector<float3> data(width * height);
glBindTexture(GL_TEXTURE_2D, textures[TEXTURE::OFFSCREEN]);
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGB, GL_FLOAT, &data[0]);
FreeImage_Initialise();
FIBITMAP *bitmap = FreeImage_Allocate(width, height, 24);
for (int j = 0; j < height; j++)
for (int i = 0; i < width; i++){
RGBQUAD rgb;
float3 frgb = data[i + j*width];
rgb.rgbRed = (BYTE)toInt(frgb.x);
rgb.rgbGreen = (BYTE)toInt(frgb.y);
rgb.rgbBlue = (BYTE)toInt(frgb.z);
FreeImage_SetPixelColor(bitmap, i, j, &rgb);
}
filename += ".png";
FreeImage_Save(FREE_IMAGE_FORMAT::FIF_PNG, bitmap, filename.c_str());
FreeImage_Unload(bitmap);
FreeImage_DeInitialise();
}
// Renders `frames` full-line animation frames at `framerate` and saves each
// as "<filename><n>.png". Per-frame GPU buffers are allocated once up front
// and released at the end.
void Renderer::saveLineFrames(std::string filename, int framerate, int frames){
    initLineBuffer();
    setupFrame();
    for (int i = 0; i < frames; i++){
        printf("Frame : %d\n", i);
        // Reset per-frame ray state and the accumulated ray counter.
        HANDLE_ERROR(hipMemset(dev_rayInfo, 0x00, rayBatchSize * sizeof(RayInfo)));
        HANDLE_ERROR(hipMemset(dev_rays, 0x00, sizeof(int)));
        float _time = float(i) / framerate;
        renderLineFrame(_time);
        saveFrame(filename + std::to_string(i + 1));
    }
    HANDLE_ERROR(hipGraphicsUnregisterResource(resourceVBO));
    // Consistency fix: check hipFree results like every other HIP call here.
    HANDLE_ERROR(hipFree(dev_rayInfo));
    HANDLE_ERROR(hipFree(dev_states));
}
//render pulse animation
// Renders `frames` pulse-animation frames (light pulses of duration dt) at
// `framerate` and saves each as "<filename><n>.png".
void Renderer::savePulseFrames(std::string filename, int framerate, int frames){
    initLineBuffer();
    setupFrame();
    for (int i = 0; i < frames; i++){
        printf("Frame : %d\n", i);
        // Reset per-frame ray state and the accumulated ray counter.
        HANDLE_ERROR(hipMemset(dev_rayInfo, 0x00, rayBatchSize * sizeof(RayInfo)));
        HANDLE_ERROR(hipMemset(dev_rays, 0x00, sizeof(int)));
        float _time = float(i) / framerate;
        renderPulseFrame(_time, dt);
        saveFrame(filename + std::to_string(i + 1));
    }
    HANDLE_ERROR(hipGraphicsUnregisterResource(resourceVBO));
    // Consistency fix: check hipFree results like every other HIP call here.
    HANDLE_ERROR(hipFree(dev_rayInfo));
    HANDLE_ERROR(hipFree(dev_states));
}
/*
single frame rendering
*/
// Renders one frame into FBO1 by tracing rayBatchCount batches of
// rayBatchSize rays: each batch maps the shared GL line VBO into CUDA,
// traces ray segments into it, then additively blends the lines into the
// accumulation buffer. Requires initLineBuffer() and setupFrame() first.
void Renderer::renderLineFrame(float time){
int seed = rand();
dim3 block(64, 1, 1);
dim3 grid((int)ceil(float(rayBatchSize) / block.x), 1, 1);
// Seed one RNG state per ray.
curandInitKernel << <grid, block >> >(dev_states, seed, rayBatchSize);
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_ERROR(hipGetLastError());
glBindFramebuffer(GL_FRAMEBUFFER, fbos[FBO::FBO1]);
glViewport(0, 0, width, height);
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT);
for (int i = 0; i < rayBatchCount; i++){
//run cuda
size_t num_bytes = 0;
// Map the GL VBO so the kernel can write line vertices directly into it.
HANDLE_ERROR(hipGraphicsMapResources(1, &resourceVBO, 0));
HANDLE_ERROR(hipGraphicsResourceGetMappedPointer((void **)&dev_vbo,
&num_bytes, resourceVBO));
kernelLine << < grid, block >> >(dev_rayInfo, dev_vbo, cudaScene, dev_states, dev_rays, rayBatchSize, time);
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_ERROR(hipGetLastError());
if (i != rayBatchCount - 1){
printf("\rProgress %d ", i + 1);
// Throttle to keep the GPU/driver responsive between batches.
Sleep(sleepAmount);
}
HANDLE_ERROR(hipGraphicsUnmapResources(1, &resourceVBO, 0));
// Additive blending accumulates light contributions across batches.
glLineWidth((float)size);
glDisable(GL_DEPTH_TEST);
glDisable(GL_CULL_FACE);
//glEnable(GL_LINE_SMOOTH);
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ONE);
glBlendEquation(GL_FUNC_ADD);
programs[1].bind();
glUniform1f(programs[1].getUniformLocation("aspectRatio"), float(width) / height);
glUniform2f(programs[1].getUniformLocation("screenSize"), (float)width, (float)height);
glBindVertexArray(vaos[VAO::LineVao]);
glDrawArrays(GL_LINES, 0, rayBatchSize * 2);
}
// Total rays traced, used later by postProcess() for normalization.
HANDLE_ERROR(hipMemcpy((void*)&rays, dev_rays, sizeof(int), hipMemcpyDeviceToHost));
printf("\n");
}
// Same batched tracing loop as renderLineFrame, but uses kernelLinePulse,
// which additionally takes a pulse duration dt (light emitted as pulses
// rather than continuous lines).
void Renderer::renderPulseFrame(float time, float dt){
int seed = rand();
dim3 block(64, 1, 1);
dim3 grid((int)ceil(float(rayBatchSize) / block.x), 1, 1);
// Seed one RNG state per ray.
curandInitKernel << <grid, block >> >(dev_states, seed, rayBatchSize);
glBindFramebuffer(GL_FRAMEBUFFER, fbos[FBO::FBO1]);
glViewport(0, 0, width, height);
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT);
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_ERROR(hipGetLastError());
for (int i = 0; i < rayBatchCount; i++){
//run cuda
size_t num_bytes = 0;
// Map the GL VBO so the kernel can write line vertices directly into it.
HANDLE_ERROR(hipGraphicsMapResources(1, &resourceVBO, 0));
HANDLE_ERROR(hipGraphicsResourceGetMappedPointer((void **)&dev_vbo,
&num_bytes, resourceVBO));
kernelLinePulse << < grid, block >> >(dev_rayInfo, dev_vbo, cudaScene, dev_states, dev_rays, rayBatchSize, time, dt);
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_ERROR(hipGetLastError());
if (i != rayBatchCount - 1){
printf("\rProgress %d ", i + 1);
// Throttle to keep the GPU/driver responsive between batches.
Sleep(sleepAmount);
}
HANDLE_ERROR(hipGraphicsUnmapResources(1, &resourceVBO, 0));
// Additive blending accumulates light contributions across batches.
glLineWidth((float)size);
glDisable(GL_DEPTH_TEST);
glDisable(GL_CULL_FACE);
//glEnable(GL_LINE_SMOOTH);
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ONE);
glBlendEquation(GL_FUNC_ADD);
programs[1].bind();
glUniform1f(programs[1].getUniformLocation("aspectRatio"), float(width) / height);
glUniform2f(programs[1].getUniformLocation("screenSize"), (float)width, (float)height);
glBindVertexArray(vaos[VAO::LineVao]);
glDrawArrays(GL_LINES, 0, rayBatchSize * 2);
}
// Total rays traced, used later by postProcess() for normalization.
HANDLE_ERROR(hipMemcpy((void*)&rays, dev_rays, sizeof(int), hipMemcpyDeviceToHost));
printf("\n");
}
//render full lines
// Renders a single full-line frame at `time`, then releases the per-frame
// GPU resources acquired by initLineBuffer()/setupFrame().
void Renderer::renderLines(float time){
    initLineBuffer();
    setupFrame();
    renderLineFrame(time);
    HANDLE_ERROR(hipGraphicsUnregisterResource(resourceVBO));
    // Consistency fix: check hipFree results like every other HIP call here.
    HANDLE_ERROR(hipFree(dev_rayInfo));
    HANDLE_ERROR(hipFree(dev_states));
}
//render pulses
// Renders a single pulse frame at `time` with pulse duration `dt`, then
// releases the per-frame GPU resources.
void Renderer::renderPulses(float time, float dt){
    initLineBuffer();
    setupFrame();
    renderPulseFrame(time, dt);
    HANDLE_ERROR(hipGraphicsUnregisterResource(resourceVBO));
    // Consistency fix: check hipFree results like every other HIP call here.
    HANDLE_ERROR(hipFree(dev_rayInfo));
    HANDLE_ERROR(hipFree(dev_states));
}
// The device-side scene arrays start out empty; they are (re)allocated by
// initCudaScene(), and NULL is safe to pass to hipFree in the destructor.
Renderer::Renderer(){
    cudaScene.materials = NULL;
    cudaScene.objects = NULL;
    cudaScene.lights = NULL;
}
// Releases GL objects and device-side buffers. hipFree(NULL) is a no-op, so
// never-allocated scene arrays are safe to free here.
// NOTE(review): assumes a current GL context still exists at destruction time.
Renderer::~Renderer(){
glDeleteTextures(TEXTURECOUNT, textures);
glDeleteFramebuffers(FBOCOUNT, fbos);
hipFree(dev_rays);
hipFree(cudaScene.lights);
hipFree(cudaScene.objects);
hipFree(cudaScene.materials);
}
// Application entry point: creates the GLFW window + GL 3.3 core context,
// initializes GLEW and ImGui, then runs the UI/render loop until the window
// closes. Rendering is user-triggered from the ImGui buttons; each loop
// iteration re-runs postProcess() and blits the offscreen image to screen.
void Renderer::start(){
glfwSetErrorCallback(glfw_error_callback);
if (!glfwInit())
return;
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
GLFWwindow* window = glfwCreateWindow(1024, 1024, "Cuda forward tracing 2d", NULL, NULL);
if (window == NULL){
glfwTerminate();
return;
}
glfwMakeContextCurrent(window);
glewExperimental = GL_TRUE;
GLenum glew_status = glewInit();
if (glew_status != GLEW_OK){
fprintf(stdout, "Error: %s\n", glewGetErrorString(glew_status));
return;
}
// Swallow the benign GL_INVALID_ENUM that glewInit can leave pending.
glGetError();
glfwSwapInterval(1);
IMGUI_CHECKVERSION();
ImGui::CreateContext();
ImGuiIO& io = ImGui::GetIO();
const char* glsl_version = "#version 330";
ImGui::StyleColorsDark();
ImGui_ImplGlfw_InitForOpenGL(window, true);
ImGui_ImplOpenGL3_Init(glsl_version);
init();
initScene();
// Default render settings, adjustable via the ImGui widgets below.
exposure = 1.0f;
rayBatchSize = 2048;
rayBatchCount = 100;
sleepAmount = 20;
size = 1;
timeFrame = 0.0;
frameRate = 24;
frames = 240;
w = 1280;
h = 720;
dt = 100.0f;
srand(time(NULL));
char buf[64];
memset(buf, 0x00, sizeof(char)* 64);
while (!glfwWindowShouldClose(window)){
glfwPollEvents();
ImGui_ImplOpenGL3_NewFrame();
ImGui_ImplGlfw_NewFrame();
ImGui::NewFrame();
{
// "main" window: render settings and single-frame render triggers.
ImGui::Begin("main");
ImGui::SliderFloat("exposure", &exposure, -100.0f, 100.0f);
ImGui::DragInt("width", &w, 1.0f, 200, 1920);
ImGui::DragInt("height", &h, 1.0f, 200, 1080);
ImGui::DragInt("ray batch size", &rayBatchSize, 1.0f, 64, 8192);
ImGui::DragInt("ray batch count", &rayBatchCount, 1.0f, 1, 10000);
ImGui::DragFloat("time frame", &timeFrame, 0.1f, 0.0f, 1000.0f);
ImGui::DragFloat("dt", &dt, 0.1f, 0.0f, 1000.0f);
if (ImGui::Button("load scene")){
loadScene();
}
if (ImGui::Button("render frame")){
width = w;
height = h;
renderLines(timeFrame);
}
if (ImGui::Button("render frame pulse")){
width = w;
height = h;
renderPulses(timeFrame, dt);
}
ImGui::End();
//render fbo
// "anim" window: animation settings and batch render/save triggers.
ImGui::Begin("anim");
ImGui::DragInt("framerate", &frameRate, 1.0f, 1, 60);
ImGui::DragInt("frames", &frames, 1.0f, 1, 10000);
ImGui::InputText("filename", buf, IM_ARRAYSIZE(buf));
if (ImGui::Button("render full line anim")){
width = w;
height = h;
saveLineFrames(std::string(buf), frameRate, frames);
}
if (ImGui::Button("render pulse anim")){
width = w;
height = h;
savePulseFrames(std::string(buf), frameRate, frames);
}
ImGui::End();
}
ImGui::Render();
glfwMakeContextCurrent(window);
int screenWidth, screenHeight;
glfwGetFramebufferSize(window, &screenWidth, &screenHeight);
postProcess();
// Blit the tone-mapped offscreen texture to the window, aspect-fitted.
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glViewport(0, 0, screenWidth, screenHeight);
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textures[TEXTURE::OFFSCREEN]);
programs[ShaderEnum::DrawToScreen].bind();
ImgSizePos imgSizePos = getCorrectSize(screenWidth, screenHeight, width, height);
glm::mat3 mvp = glm::mat3(1.0f);
mvp = glm::translate(mvp, glm::vec2(imgSizePos.x, imgSizePos.y));
mvp = glm::scale(mvp, glm::vec2(imgSizePos.w, imgSizePos.h));
//mvp = glm::inverse(mvp);
glBindVertexArray(vaos[VAO::ScreenGeomVao]);
glUniform1i(programs[ShaderEnum::DrawToScreen].getUniformLocation("buffer"), 0);
glUniformMatrix3fv(programs[ShaderEnum::DrawToScreen].getUniformLocation("mvp"), 1, GL_FALSE, glm::value_ptr(mvp));
glDrawArrays(GL_TRIANGLES, 0, 6);
programs[2].unbind();
//!!
ImGui_ImplOpenGL3_RenderDrawData(ImGui::GetDrawData());
glfwMakeContextCurrent(window);
glfwSwapBuffers(window);
}
ImGui_ImplOpenGL3_Shutdown();
ImGui_ImplGlfw_Shutdown();
ImGui::DestroyContext();
glfwDestroyWindow(window);
glfwTerminate();
}
// One-time GPU resource setup: loads/links all shader programs, creates the
// FBO textures, and builds every VAO/VBO (ray lines, fullscreen quad, and
// the unit line/circle shape outlines). Aborts the process if a shader
// fails to load. Requires a current GL context.
void Renderer::init(){
const char* shaderNames[] = {
"Shaders/drawBuffer.vert", "Shaders/drawBuffer.frag",
"Shaders/lineShader.vert", "Shaders/lineShader.frag",
"Shaders/drawScreen.vert", "Shaders/drawScreen.frag",
"Shaders/drawShape.vert", "Shaders/drawShape.frag"
};
for (int i = 0; i < SHADERCOUNT; i++){
if (!programs[i].loadShaderProgram(shaderNames[i * 2], shaderNames[i * 2 + 1])){
printf("Error loading shader file.\n");
system("pause");
exit(1);
}
}
// Init FBOs and textures (real sizes are set later in setupFrame()).
glGenFramebuffers(FBOCOUNT, fbos);
glGenTextures(TEXTURE::TEXTURECOUNT, textures);
float white[3] = { 1.0f, 1.0f, 1.0f };
width = 1;
height = 1;
glBindTexture(GL_TEXTURE_2D, textures[TEXTURE::BUFFER1]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB32F, width, height, 0, GL_RGB, GL_FLOAT, white);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
// Create VAOs and VBOs. LineVao carries interleaved VBOInfo vertices:
// pos (2f), color (3f at byte offset 16), angle (1f at byte offset 28).
glGenVertexArrays(VAOCOUNT, vaos);
glGenBuffers(VBOCOUNT, vbos);
glBindVertexArray(vaos[LineVao]);
glBindBuffer(GL_ARRAY_BUFFER, vbos[LightInfoVBO]);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(VBOInfo), 0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(VBOInfo), (void *)(4 * sizeof(float)));
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 1, GL_FLOAT, GL_FALSE, sizeof(VBOInfo), (void *)(7 * sizeof(float)));
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
programs[LineShader].bindAttribLocation(0, "in_pos");
programs[LineShader].bindAttribLocation(1, "in_color");
programs[LineShader].bindAttribLocation(2, "in_angle");
programs[LineShader].link();
/*
programs[PointShader].bindAttribLocation(0, "in_pos");
programs[PointShader].bindAttribLocation(1, "in_color");
programs[PointShader].bindAttribLocation(2, "in_angle");
programs[PointShader].link();*/
// Fullscreen quad as two triangles in NDC.
float fullscreenRect[] = { -1.0, -1.0
, -1.0, 1.0
, 1.0, 1.0
, -1.0, -1.0
, 1.0, 1.0
, 1.0, -1.0
};
glBindVertexArray(vaos[ScreenGeomVao]);
glBindBuffer(GL_ARRAY_BUFFER, vbos[VBO::ScreenGeomVBO]);
glBufferData(GL_ARRAY_BUFFER, sizeof(float)* 12, fullscreenRect, GL_STATIC_DRAW);
programs[0].bindAttribLocation(0, "pos");
programs[0].link();
programs[2].bindAttribLocation(0, "pos");
programs[2].link();
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
programs[DrawShape].bindAttribLocation(0, "in_pos");
programs[DrawShape].link();
// Unit line segment used to outline line-type scene objects.
glBindVertexArray(vaos[VAO::LineShapeVao]);
glBindBuffer(GL_ARRAY_BUFFER, vbos[VBO::LineVBO]);
float line[] = { 0.0f, 0.0f, 1.0f, 0.0f };
glBufferData(GL_ARRAY_BUFFER, sizeof(float)* 4, line, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
// Unit circle (CIRCLE_TESSELATION vertices) for circle-type objects.
glBindVertexArray(vaos[VAO::CircleShapeVao]);
glBindBuffer(GL_ARRAY_BUFFER, vbos[VBO::CircleVBO]);
std::vector<float> circleVectors(CIRCLE_TESSELATION * 2);
for (int i = 0; i < CIRCLE_TESSELATION; i++){
circleVectors[i * 2] = glm::cos(i*glm::two_pi<float>() / CIRCLE_TESSELATION);
circleVectors[i * 2 + 1] = glm::sin(i*glm::two_pi<float>() / CIRCLE_TESSELATION);
}
glBufferData(GL_ARRAY_BUFFER, sizeof(float)* 2 * CIRCLE_TESSELATION, &circleVectors[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
CheckGl();
// Device counter for the total number of rays traced per frame.
HANDLE_ERROR(hipMalloc((void**)&dev_rays, sizeof(int)));
}
// Builds the default scene — a single small yellow point light at the
// origin — and uploads it to the GPU via initCudaScene().
void Renderer::initScene(){
    Light light;
    light.lightType = _POINT;
    light.color = glm::vec3(1.0, 1., 0.);
    light.angle = 0.0;
    light.t = createTransform(glm::vec2(0.0f, 0.0f), glm::radians(0.0f), glm::vec2(0.01f));

    SceneDescription scene;
    scene.lights.push_back(light);
    initCudaScene(scene);
}
// Uploads a scene description to the GPU, replacing any previous scene.
// Also caches the object list on the host for the outline overlay pass.
void Renderer::initCudaScene(const SceneDescription& scene){
    objects.clear();
    // Free the previous device arrays (may be NULL, which hipFree accepts).
    // BUG FIX: reset the pointers after freeing — previously, when a new
    // count was 0 the corresponding pointer kept its freed address and the
    // destructor (or a later call here) would double-free it.
    hipFree(cudaScene.lights);
    cudaScene.lights = NULL;
    hipFree(cudaScene.objects);
    cudaScene.objects = NULL;
    hipFree(cudaScene.materials);
    cudaScene.materials = NULL;
    for (auto iter = scene.objects.begin(); iter != scene.objects.end(); ++iter){
        objects.push_back(*iter);
    }
    cudaScene.lightCount = scene.lights.size();
    cudaScene.matCount = scene.materials.size();
    cudaScene.objCount = scene.objects.size();
    cudaScene.minDepth = 3;
    cudaScene.maxDepth = 8;
    // Allocate and copy only the non-empty arrays.
    if (cudaScene.matCount > 0)
        HANDLE_ERROR(hipMalloc((void**)&(cudaScene.materials), cudaScene.matCount * sizeof(Material)));
    if (cudaScene.objCount > 0)
        HANDLE_ERROR(hipMalloc((void**)&(cudaScene.objects), cudaScene.objCount * sizeof(Object)));
    if (cudaScene.lightCount > 0)
        HANDLE_ERROR(hipMalloc((void**)&(cudaScene.lights), cudaScene.lightCount * sizeof(Light)));
    if (cudaScene.matCount > 0)
        HANDLE_ERROR(hipMemcpy(cudaScene.materials, scene.materials.data(), cudaScene.matCount * sizeof(Material), hipMemcpyHostToDevice));
    if (cudaScene.objCount > 0)
        HANDLE_ERROR(hipMemcpy(cudaScene.objects, scene.objects.data(), cudaScene.objCount * sizeof(Object), hipMemcpyHostToDevice));
    if (cudaScene.lightCount > 0)
        HANDLE_ERROR(hipMemcpy(cudaScene.lights, scene.lights.data(), cudaScene.lightCount * sizeof(Light), hipMemcpyHostToDevice));
}
// Sizes the shared GL line VBO for one batch (2 VBOInfo vertices per ray)
// and registers it with the HIP/CUDA interop layer as write-discard, so
// render kernels can write vertices directly into it.
void Renderer::initLineBuffer(){
glBindBuffer(GL_ARRAY_BUFFER, vbos[VBO::LightInfoVBO]);
glBufferData(GL_ARRAY_BUFFER, sizeof(VBOInfo)* rayBatchSize * 2, NULL, GL_DYNAMIC_DRAW);
resourceVBO = NULL;
HANDLE_ERROR(hipGraphicsGLRegisterBuffer(&resourceVBO, vbos[VBO::LightInfoVBO], hipGraphicsMapFlagsWriteDiscard));
}
// (Re)creates the accumulation (BUFFER1, RGBA32F) and offscreen (OFFSCREEN,
// RGB32F) render targets at the current width/height, attaches them to
// FBO1/FBO2, and allocates the per-batch device ray/RNG buffers. Aborts the
// process if an FBO is incomplete. Call after initLineBuffer() and before
// any render*Frame() call.
void Renderer::setupFrame(){
    glBindTexture(GL_TEXTURE_2D, textures[TEXTURE::BUFFER1]);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, height, 0, GL_RGBA, GL_FLOAT, NULL);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glBindTexture(GL_TEXTURE_2D, textures[TEXTURE::OFFSCREEN]);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB32F, width, height, 0, GL_RGB, GL_FLOAT, NULL);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glBindFramebuffer(GL_FRAMEBUFFER, fbos[FBO::FBO1]);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textures[TEXTURE::BUFFER1], 0);
    if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE){
        // FIX: removed stray line-continuation backslashes (macro residue)
        // that spliced these statements together, and fixed the message typo.
        fprintf(stdout, "fbo1 attachment failed\n");
        system("pause");
        exit(1);
    }
    glBindFramebuffer(GL_FRAMEBUFFER, fbos[FBO::FBO2]);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textures[TEXTURE::OFFSCREEN], 0);
    if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE){
        fprintf(stdout, "fbo2 attachment failed\n");
        system("pause");
        exit(1);
    }
    // Per-batch CUDA scratch: one RayInfo and one RNG state per ray.
    HANDLE_ERROR(hipMalloc((void**)&dev_rayInfo, rayBatchSize * sizeof(RayInfo)));
    HANDLE_ERROR(hipMalloc((void**)&dev_states, rayBatchSize*sizeof(hiprandState_t)));
    HANDLE_ERROR(hipMemset(dev_rayInfo, 0x00, rayBatchSize * sizeof(RayInfo)));
    HANDLE_ERROR(hipMemset(dev_rays, 0x00, sizeof(int)));
}
// Placeholder: loading a scene from disk is not implemented yet.
// NOTE(review): throws a const char*; the "load scene" button in start()
// does not catch it, so triggering this terminates the application.
void Renderer::loadScene(){
throw "Not implemented";
}
| 8c4344f25ff83fe6cb9404cc0338bfbac40f2e6f.cu | #include "Renderer.h"
#include "cuda_gl_interop.h"
#include <iostream>
#include <time.h>
#ifdef DEBUG
// Debug builds: evaluate the CUDA call, print its error string and abort the
// process (after a pause) on failure.
#define HANDLE_ERROR(x){\
cudaError_t cudaStatus = (x); \
if (cudaStatus != cudaSuccess){\
fprintf(stdout, ": %s\n", cudaGetErrorString(cudaStatus)); \
system("Pause"); \
exit(1); \
} \
}
#else
// Release builds: evaluate x but silently discard its status.
// NOTE(review): errors are swallowed entirely here — consider at least logging.
#define HANDLE_ERROR(x) x
#endif
// GLFW error callback: report the error code and description on stderr.
void glfw_error_callback(int error, const char* description){
fprintf(stderr, "Glfw Error %d: %s\n", error, description);
}
// Drains up to 5 pending OpenGL errors, printing a readable name plus the
// numeric code and the caller-supplied file/line for each. Stops early once
// glGetError reports GL_NO_ERROR.
void newGLCheckError(const char *filename, const int line){
    GLenum err;
    char str[64];
    for (int i = 0; i < 5; i++){
        if ((err = glGetError()) != GL_NO_ERROR){
            switch (err){
            case GL_INVALID_ENUM:
                strcpy_s(str, "GL_INVALID_ENUM");
                break;
            case GL_INVALID_VALUE:
                strcpy_s(str, "GL_INVALID_VALUE");
                break;
            case GL_INVALID_OPERATION:
                strcpy_s(str, "GL_INVALID_OPERATION");
                break;
            case GL_STACK_OVERFLOW:
                strcpy_s(str, "GL_STACK_OVERFLOW");
                break;
            case GL_STACK_UNDERFLOW:
                strcpy_s(str, "GL_STACK_UNDERFLOW");
                break;
            case GL_OUT_OF_MEMORY:
                strcpy_s(str, "GL_OUT_OF_MEMORY");
                break;
            /*case GL_INVALID_FRAMEBUFFER_OPERATION:
                strcpy_s(str, "GL_INVALID_FRAMEBUFFER_OPERATION");
                break;*/
            default:
                // BUG FIX: `str` was previously left uninitialized for
                // unrecognized codes and then printed (undefined behavior).
                strcpy_s(str, "UNKNOWN_GL_ERROR");
                break;
            }
            // BUG FIX: format was "%ss", printing a stray 's' after the name.
            printf("GL error %s %#x in file %s in line %d \n", str, err, filename, line);
        }
        else break;
    }
}
// Fit an imgW x imgH image into a screenW x screenH viewport while keeping
// the image aspect ratio. Returns the normalized size (w, h in [0, 1]) and
// offset (always 0, 0 — the draw shader centers via the MVP).
ImgSizePos getCorrectSize(int screenW, int screenH, int imgW, int imgH){
    const float screenAspect = float(screenW) / float(screenH);
    const float imageAspect = float(imgW) / float(imgH);
    ImgSizePos fitted;
    fitted.x = 0.0f;
    fitted.y = 0.0f;
    if (imageAspect > screenAspect){
        // Image is relatively wider: use the full width, shrink the height.
        fitted.w = 1.0f;
        fitted.h = screenAspect / imageAspect;
    }
    else{
        // Image is relatively taller (or equal): full height, shrink width.
        fitted.w = imageAspect / screenAspect;
        fitted.h = 1.0f;
    }
    return fitted;
}
// Clamp x into [0, 1].
inline float clamp(float x){
    if (x < 0.0f) return 0.0f;
    if (x > 1.0f) return 1.0f;
    return x;
}
// Map a [0, 1] float channel to an 8-bit value with round-to-nearest.
inline int toInt(float x){ return static_cast<int>(clamp(x) * 255 + .5); }
// Tone-maps the HDR accumulation buffer (BUFFER1) into the offscreen target
// (FBO2) using exposure and the accumulated ray count, then overlays the
// scene objects as line/circle outlines. Leaves FBO2 bound.
void Renderer::postProcess(){
glBindFramebuffer(GL_FRAMEBUFFER, fbos[FBO::FBO2]);
glViewport(0, 0, width, height);
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textures[TEXTURE::BUFFER1]);
glBindVertexArray(vaos[VAO::ScreenGeomVao]);
programs[ShaderEnum::DrawBuffer].bind();
// 2^exposure scaling; `rays` normalizes the additive-blend accumulation.
glUniform1f(programs[ShaderEnum::DrawBuffer].getUniformLocation("exposure"), glm::pow(2.0f, exposure));
glUniform1f(programs[ShaderEnum::DrawBuffer].getUniformLocation("rays"), float(rays));
glUniform1i(programs[ShaderEnum::DrawBuffer].getUniformLocation("buffer"), 0);
glDrawArrays(GL_TRIANGLES, 0, 6);
glEnable(GL_LINE_SMOOTH);
programs[ShaderEnum::DrawShape].bind();
glUniform1f(programs[ShaderEnum::DrawShape].getUniformLocation("aspectRatio"), float(width) / height);
for (int i = 0; i < objects.size(); i++){
// Each object's inverse transform positions its unit shape in the scene.
glUniformMatrix3fv(programs[ShaderEnum::DrawShape].getUniformLocation("matrix"), 1, GL_FALSE, glm::value_ptr(objects[i].t.inv));
// objectType 0 = line segment, 1 = circle (tessellated loop).
if (objects[i].objectType == 0){
glBindVertexArray(vaos[VAO::LineShapeVao]);
glDrawArrays(GL_LINES, 0, 2);
}
else if (objects[i].objectType == 1){
glBindVertexArray(vaos[VAO::CircleShapeVao]);
glDrawArrays(GL_LINE_LOOP, 0, CIRCLE_TESSELATION);
}
}
}
// Runs post-processing, reads back the offscreen texture and writes it as
// "<filename>.png" (24-bit) via FreeImage. Channels are clamped to [0, 1]
// by toInt; no gamma correction is applied here.
void Renderer::saveFrame(std::string filename){
postProcess();
std::vector<float3> data(width * height);
glBindTexture(GL_TEXTURE_2D, textures[TEXTURE::OFFSCREEN]);
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGB, GL_FLOAT, &data[0]);
FreeImage_Initialise();
FIBITMAP *bitmap = FreeImage_Allocate(width, height, 24);
for (int j = 0; j < height; j++)
for (int i = 0; i < width; i++){
RGBQUAD rgb;
float3 frgb = data[i + j*width];
rgb.rgbRed = (BYTE)toInt(frgb.x);
rgb.rgbGreen = (BYTE)toInt(frgb.y);
rgb.rgbBlue = (BYTE)toInt(frgb.z);
FreeImage_SetPixelColor(bitmap, i, j, &rgb);
}
filename += ".png";
FreeImage_Save(FREE_IMAGE_FORMAT::FIF_PNG, bitmap, filename.c_str());
FreeImage_Unload(bitmap);
FreeImage_DeInitialise();
}
// Renders `frames` frames of the full-line animation at `framerate` fps and
// saves each one as "<filename><index>.png". Releases the CUDA-side
// resources when done.
void Renderer::saveLineFrames(std::string filename, int framerate, int frames){
	initLineBuffer();
	setupFrame();
	for (int i = 0; i < frames; i++){
		printf("Frame : %d\n", i);
		// Reset per-frame accumulation state (ray records and ray counter).
		HANDLE_ERROR(cudaMemset(dev_rayInfo, 0x00, rayBatchSize * sizeof(RayInfo)));
		HANDLE_ERROR(cudaMemset(dev_rays, 0x00, sizeof(int)));
		// Animation time of this frame, in seconds.
		float _time = float(i) / framerate;
		renderLineFrame(_time);
		saveFrame(filename + std::to_string(i + 1));
	}
	HANDLE_ERROR(cudaGraphicsUnregisterResource(resourceVBO));
	cudaFree(dev_rayInfo);
	cudaFree(dev_states);
}
// Renders `frames` frames of the pulse animation (pulse width `dt`) at
// `framerate` fps and saves each as "<filename><index>.png". Releases the
// CUDA-side resources when done.
void Renderer::savePulseFrames(std::string filename, int framerate, int frames){
	initLineBuffer();
	setupFrame();
	for (int i = 0; i < frames; i++){
		printf("Frame : %d\n", i);
		// Reset per-frame accumulation state (ray records and ray counter).
		HANDLE_ERROR(cudaMemset(dev_rayInfo, 0x00, rayBatchSize * sizeof(RayInfo)));
		HANDLE_ERROR(cudaMemset(dev_rays, 0x00, sizeof(int)));
		// Animation time of this frame, in seconds.
		float _time = float(i) / framerate;
		renderPulseFrame(_time, dt);
		saveFrame(filename + std::to_string(i + 1));
	}
	HANDLE_ERROR(cudaGraphicsUnregisterResource(resourceVBO));
	cudaFree(dev_rayInfo);
	cudaFree(dev_states);
}
/*
single frame rendering
*/
// Traces one frame of full-length light rays at animation time `time`:
// seeds curand once, then runs `rayBatchCount` batches. Each batch writes
// `rayBatchSize` ray segments into the mapped GL line VBO (CUDA/GL interop)
// and additively blends them into FBO1/BUFFER1.
void Renderer::renderLineFrame(float time){
	int seed = rand();
	dim3 block(64, 1, 1);
	dim3 grid((int)ceil(float(rayBatchSize) / block.x), 1, 1);
	curandInitKernel << <grid, block >> >(dev_states, seed, rayBatchSize);
	HANDLE_ERROR(cudaDeviceSynchronize());
	HANDLE_ERROR(cudaGetLastError());
	glBindFramebuffer(GL_FRAMEBUFFER, fbos[FBO::FBO1]);
	glViewport(0, 0, width, height);
	glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
	glClear(GL_COLOR_BUFFER_BIT);
	for (int i = 0; i < rayBatchCount; i++){
		//run cuda
		size_t num_bytes = 0;
		// Map the GL line VBO so the kernel can write vertices directly.
		HANDLE_ERROR(cudaGraphicsMapResources(1, &resourceVBO, 0));
		HANDLE_ERROR(cudaGraphicsResourceGetMappedPointer((void **)&dev_vbo,
			&num_bytes, resourceVBO));
		kernelLine << < grid, block >> >(dev_rayInfo, dev_vbo, cudaScene, dev_states, dev_rays, rayBatchSize, time);
		HANDLE_ERROR(cudaDeviceSynchronize());
		HANDLE_ERROR(cudaGetLastError());
		if (i != rayBatchCount - 1){
			printf("\rProgress %d ", i + 1);
			// Presumably throttles the GPU between batches to keep the
			// desktop responsive / avoid the driver watchdog — TODO confirm.
			Sleep(sleepAmount);
		}
		HANDLE_ERROR(cudaGraphicsUnmapResources(1, &resourceVBO, 0));
		// Additive blending accumulates ray contributions across batches.
		glLineWidth((float)size);
		glDisable(GL_DEPTH_TEST);
		glDisable(GL_CULL_FACE);
		//glEnable(GL_LINE_SMOOTH);
		glEnable(GL_BLEND);
		glBlendFunc(GL_ONE, GL_ONE);
		glBlendEquation(GL_FUNC_ADD);
		// NOTE(review): index 1 is presumably LineShader — confirm enum order.
		programs[1].bind();
		glUniform1f(programs[1].getUniformLocation("aspectRatio"), float(width) / height);
		glUniform2f(programs[1].getUniformLocation("screenSize"), (float)width, (float)height);
		glBindVertexArray(vaos[VAO::LineVao]);
		glDrawArrays(GL_LINES, 0, rayBatchSize * 2);
	}
	// Total traced ray count; postProcess() uploads it as the "rays" uniform.
	HANDLE_ERROR(cudaMemcpy((void*)&rays, dev_rays, sizeof(int), cudaMemcpyDeviceToHost));
	printf("\n");
}
// Traces one frame of the pulse animation at time `time` with pulse width
// `dt`. Same batching/accumulation scheme as renderLineFrame(), but the rays
// are drawn by kernelLinePulse as short pulses instead of full lines.
void Renderer::renderPulseFrame(float time, float dt){
	int seed = rand();
	dim3 block(64, 1, 1);
	dim3 grid((int)ceil(float(rayBatchSize) / block.x), 1, 1);
	curandInitKernel << <grid, block >> >(dev_states, seed, rayBatchSize);
	glBindFramebuffer(GL_FRAMEBUFFER, fbos[FBO::FBO1]);
	glViewport(0, 0, width, height);
	glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
	glClear(GL_COLOR_BUFFER_BIT);
	HANDLE_ERROR(cudaDeviceSynchronize());
	HANDLE_ERROR(cudaGetLastError());
	for (int i = 0; i < rayBatchCount; i++){
		//run cuda
		size_t num_bytes = 0;
		// Map the GL line VBO so the kernel can write vertices directly.
		HANDLE_ERROR(cudaGraphicsMapResources(1, &resourceVBO, 0));
		HANDLE_ERROR(cudaGraphicsResourceGetMappedPointer((void **)&dev_vbo,
			&num_bytes, resourceVBO));
		kernelLinePulse << < grid, block >> >(dev_rayInfo, dev_vbo, cudaScene, dev_states, dev_rays, rayBatchSize, time, dt);
		HANDLE_ERROR(cudaDeviceSynchronize());
		HANDLE_ERROR(cudaGetLastError());
		if (i != rayBatchCount - 1){
			printf("\rProgress %d ", i + 1);
			// Presumably throttles the GPU between batches — TODO confirm.
			Sleep(sleepAmount);
		}
		HANDLE_ERROR(cudaGraphicsUnmapResources(1, &resourceVBO, 0));
		// Additive blending accumulates ray contributions across batches.
		glLineWidth((float)size);
		glDisable(GL_DEPTH_TEST);
		glDisable(GL_CULL_FACE);
		//glEnable(GL_LINE_SMOOTH);
		glEnable(GL_BLEND);
		glBlendFunc(GL_ONE, GL_ONE);
		glBlendEquation(GL_FUNC_ADD);
		// NOTE(review): index 1 is presumably LineShader — confirm enum order.
		programs[1].bind();
		glUniform1f(programs[1].getUniformLocation("aspectRatio"), float(width) / height);
		glUniform2f(programs[1].getUniformLocation("screenSize"), (float)width, (float)height);
		glBindVertexArray(vaos[VAO::LineVao]);
		glDrawArrays(GL_LINES, 0, rayBatchSize * 2);
	}
	// Total traced ray count; postProcess() uploads it as the "rays" uniform.
	HANDLE_ERROR(cudaMemcpy((void*)&rays, dev_rays, sizeof(int), cudaMemcpyDeviceToHost));
	printf("\n");
}
//render full lines
// Convenience wrapper: allocates the interop/ray buffers, renders one
// full-line frame at `time` into FBO1/BUFFER1, then releases the CUDA-side
// resources again.
void Renderer::renderLines(float time){
	initLineBuffer();
	setupFrame();
	renderLineFrame(time);
	HANDLE_ERROR(cudaGraphicsUnregisterResource(resourceVBO));
	cudaFree(dev_rayInfo);
	cudaFree(dev_states);
}
//render pulses
// Convenience wrapper: allocates the interop/ray buffers, renders one pulse
// frame at `time` (pulse width `dt`), then releases the CUDA-side resources.
void Renderer::renderPulses(float time, float dt){
	initLineBuffer();
	setupFrame();
	renderPulseFrame(time, dt);
	HANDLE_ERROR(cudaGraphicsUnregisterResource(resourceVBO));
	cudaFree(dev_rayInfo);
	cudaFree(dev_states);
}
// Construct with an empty device-side scene. initCudaScene() frees these
// pointers before reallocating, so they must start out null.
Renderer::Renderer(){
	cudaScene.materials = NULL;
	cudaScene.objects = NULL;
	cudaScene.lights = NULL;
}
// Releases the GL objects and device-side scene/ray-counter buffers.
// NOTE(review): dev_rayInfo/dev_states are freed by the render*/save* paths,
// not here — confirm nothing leaks if a render is interrupted.
Renderer::~Renderer(){
	glDeleteTextures(TEXTURECOUNT, textures);
	glDeleteFramebuffers(FBOCOUNT, fbos);
	cudaFree(dev_rays);
	// Safe even when the scene arrays were never allocated (null pointers).
	cudaFree(cudaScene.lights);
	cudaFree(cudaScene.objects);
	cudaFree(cudaScene.materials);
}
// Application entry point: creates the GLFW window with a GL 3.3 core
// context, initialises GLEW and ImGui, sets up GL/CUDA resources and the
// default scene, then runs the UI/render loop until the window is closed.
void Renderer::start(){
	glfwSetErrorCallback(glfw_error_callback);
	if (!glfwInit())
		return;
	glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
	glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
	glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
	GLFWwindow* window = glfwCreateWindow(1024, 1024, "Cuda forward tracing 2d", NULL, NULL);
	if (window == NULL){
		glfwTerminate();
		return;
	}
	glfwMakeContextCurrent(window);
	glewExperimental = GL_TRUE;
	GLenum glew_status = glewInit();
	if (glew_status != GLEW_OK){
		fprintf(stdout, "Error: %s\n", glewGetErrorString(glew_status));
		return;
	}
	// Presumably clears the stray GL error glewInit can leave behind —
	// a common GLEW workaround; confirm.
	glGetError();
	glfwSwapInterval(1);	// vsync
	// Dear ImGui setup.
	IMGUI_CHECKVERSION();
	ImGui::CreateContext();
	ImGuiIO& io = ImGui::GetIO();
	const char* glsl_version = "#version 330";
	ImGui::StyleColorsDark();
	ImGui_ImplGlfw_InitForOpenGL(window, true);
	ImGui_ImplOpenGL3_Init(glsl_version);
	init();
	initScene();
	// Default render parameters (mirrored by the UI widgets below).
	exposure = 1.0f;
	rayBatchSize = 2048;
	rayBatchCount = 100;
	sleepAmount = 20;
	size = 1;
	timeFrame = 0.0;
	frameRate = 24;
	frames = 240;
	w = 1280;
	h = 720;
	dt = 100.0f;
	srand(time(NULL));
	// Animation filename input buffer for the "anim" panel.
	char buf[64];
	memset(buf, 0x00, sizeof(char)* 64);
	while (!glfwWindowShouldClose(window)){
		glfwPollEvents();
		ImGui_ImplOpenGL3_NewFrame();
		ImGui_ImplGlfw_NewFrame();
		ImGui::NewFrame();
		{
			// Main panel: render parameters and one-shot renders.
			ImGui::Begin("main");
			ImGui::SliderFloat("exposure", &exposure, -100.0f, 100.0f);
			ImGui::DragInt("width", &w, 1.0f, 200, 1920);
			ImGui::DragInt("height", &h, 1.0f, 200, 1080);
			ImGui::DragInt("ray batch size", &rayBatchSize, 1.0f, 64, 8192);
			ImGui::DragInt("ray batch count", &rayBatchCount, 1.0f, 1, 10000);
			ImGui::DragFloat("time frame", &timeFrame, 0.1f, 0.0f, 1000.0f);
			ImGui::DragFloat("dt", &dt, 0.1f, 0.0f, 1000.0f);
			if (ImGui::Button("load scene")){
				loadScene();
			}
			if (ImGui::Button("render frame")){
				// Resolution is only applied when a render is triggered.
				width = w;
				height = h;
				renderLines(timeFrame);
			}
			if (ImGui::Button("render frame pulse")){
				width = w;
				height = h;
				renderPulses(timeFrame, dt);
			}
			ImGui::End();
			//render fbo
			// Animation panel: batch-render numbered PNG frames.
			ImGui::Begin("anim");
			ImGui::DragInt("framerate", &frameRate, 1.0f, 1, 60);
			ImGui::DragInt("frames", &frames, 1.0f, 1, 10000);
			ImGui::InputText("filename", buf, IM_ARRAYSIZE(buf));
			if (ImGui::Button("render full line anim")){
				width = w;
				height = h;
				saveLineFrames(std::string(buf), frameRate, frames);
			}
			if (ImGui::Button("render pulse anim")){
				width = w;
				height = h;
				savePulseFrames(std::string(buf), frameRate, frames);
			}
			ImGui::End();
		}
		ImGui::Render();
		glfwMakeContextCurrent(window);
		int screenWidth, screenHeight;
		glfwGetFramebufferSize(window, &screenWidth, &screenHeight);
		// Tone-map the latest render, then letterbox it onto the window.
		postProcess();
		glBindFramebuffer(GL_FRAMEBUFFER, 0);
		glViewport(0, 0, screenWidth, screenHeight);
		glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
		glClear(GL_COLOR_BUFFER_BIT);
		glActiveTexture(GL_TEXTURE0);
		glBindTexture(GL_TEXTURE_2D, textures[TEXTURE::OFFSCREEN]);
		programs[ShaderEnum::DrawToScreen].bind();
		// Aspect-preserving placement of the rendered image in the window.
		ImgSizePos imgSizePos = getCorrectSize(screenWidth, screenHeight, width, height);
		glm::mat3 mvp = glm::mat3(1.0f);
		mvp = glm::translate(mvp, glm::vec2(imgSizePos.x, imgSizePos.y));
		mvp = glm::scale(mvp, glm::vec2(imgSizePos.w, imgSizePos.h));
		//mvp = glm::inverse(mvp);
		glBindVertexArray(vaos[VAO::ScreenGeomVao]);
		glUniform1i(programs[ShaderEnum::DrawToScreen].getUniformLocation("buffer"), 0);
		glUniformMatrix3fv(programs[ShaderEnum::DrawToScreen].getUniformLocation("mvp"), 1, GL_FALSE, glm::value_ptr(mvp));
		glDrawArrays(GL_TRIANGLES, 0, 6);
		// NOTE(review): unbinds programs[2] — presumably the same program as
		// ShaderEnum::DrawToScreen; confirm the enum value.
		programs[2].unbind();
		//!!
		ImGui_ImplOpenGL3_RenderDrawData(ImGui::GetDrawData());
		glfwMakeContextCurrent(window);
		glfwSwapBuffers(window);
	}
	// Orderly shutdown of ImGui and GLFW.
	ImGui_ImplOpenGL3_Shutdown();
	ImGui_ImplGlfw_Shutdown();
	ImGui::DestroyContext();
	glfwDestroyWindow(window);
	glfwTerminate();
}
// One-time GL/CUDA setup: compiles the shader programs, creates FBOs and
// textures, builds the VAOs/VBOs (CUDA-written line batch, full-screen quad,
// unit line and unit circle shapes) and allocates the device ray counter.
void Renderer::init(){
	// Vertex/fragment shader pairs, indexed in ShaderEnum order.
	const char* shaderNames[] = {
		"Shaders/drawBuffer.vert", "Shaders/drawBuffer.frag",
		"Shaders/lineShader.vert", "Shaders/lineShader.frag",
		"Shaders/drawScreen.vert", "Shaders/drawScreen.frag",
		"Shaders/drawShape.vert", "Shaders/drawShape.frag"
	};
	for (int i = 0; i < SHADERCOUNT; i++){
		if (!programs[i].loadShaderProgram(shaderNames[i * 2], shaderNames[i * 2 + 1])){
			printf("Error loading shader file.\n");
			system("pause");
			exit(1);
		}
	}
	//INIT FBO AND TEXURES
	glGenFramebuffers(FBOCOUNT, fbos);
	glGenTextures(TEXTURE::TEXTURECOUNT, textures);
	// Placeholder 1x1 white texture; setupFrame() resizes it per render.
	float white[3] = { 1.0f, 1.0f, 1.0f };
	width = 1;
	height = 1;
	glBindTexture(GL_TEXTURE_2D, textures[TEXTURE::BUFFER1]);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB32F, width, height, 0, GL_RGB, GL_FLOAT, white);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	//create vaos and vbos
	glGenVertexArrays(VAOCOUNT, vaos);
	glGenBuffers(VBOCOUNT, vbos);
	// Line batch VAO: interleaved VBOInfo vertices written by the CUDA
	// kernels; the buffer storage itself is allocated in initLineBuffer().
	// NOTE(review): position is 2 floats at offset 0 but color starts at
	// float 4 — presumably VBOInfo has padding/extra fields; confirm layout.
	glBindVertexArray(vaos[LineVao]);
	glBindBuffer(GL_ARRAY_BUFFER, vbos[LightInfoVBO]);
	glEnableVertexAttribArray(0);
	glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(VBOInfo), 0);
	glEnableVertexAttribArray(1);
	glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(VBOInfo), (void *)(4 * sizeof(float)));
	glEnableVertexAttribArray(2);
	glVertexAttribPointer(2, 1, GL_FLOAT, GL_FALSE, sizeof(VBOInfo), (void *)(7 * sizeof(float)));
	glBindBuffer(GL_ARRAY_BUFFER, 0);
	glBindVertexArray(0);
	programs[LineShader].bindAttribLocation(0, "in_pos");
	programs[LineShader].bindAttribLocation(1, "in_color");
	programs[LineShader].bindAttribLocation(2, "in_angle");
	programs[LineShader].link();
	/*
	programs[PointShader].bindAttribLocation(0, "in_pos");
	programs[PointShader].bindAttribLocation(1, "in_color");
	programs[PointShader].bindAttribLocation(2, "in_angle");
	programs[PointShader].link();*/
	// Full-screen quad (two triangles) shared by the buffer/screen passes.
	float fullscreenRect[] = { -1.0, -1.0
		, -1.0, 1.0
		, 1.0, 1.0
		, -1.0, -1.0
		, 1.0, 1.0
		, 1.0, -1.0
	};
	glBindVertexArray(vaos[ScreenGeomVao]);
	glBindBuffer(GL_ARRAY_BUFFER, vbos[VBO::ScreenGeomVBO]);
	glBufferData(GL_ARRAY_BUFFER, sizeof(float)* 12, fullscreenRect, GL_STATIC_DRAW);
	programs[0].bindAttribLocation(0, "pos");
	programs[0].link();
	programs[2].bindAttribLocation(0, "pos");
	programs[2].link();
	glEnableVertexAttribArray(0);
	glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, 0);
	glBindBuffer(GL_ARRAY_BUFFER, 0);
	glBindVertexArray(0);
	programs[DrawShape].bindAttribLocation(0, "in_pos");
	programs[DrawShape].link();
	// Unit line segment used by postProcess() for line-type scene objects.
	glBindVertexArray(vaos[VAO::LineShapeVao]);
	glBindBuffer(GL_ARRAY_BUFFER, vbos[VBO::LineVBO]);
	float line[] = { 0.0f, 0.0f, 1.0f, 0.0f };
	glBufferData(GL_ARRAY_BUFFER, sizeof(float)* 4, line, GL_STATIC_DRAW);
	glEnableVertexAttribArray(0);
	glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, 0);
	glBindBuffer(GL_ARRAY_BUFFER, 0);
	glBindVertexArray(0);
	// Unit circle (line loop) used for circle-type scene objects.
	glBindVertexArray(vaos[VAO::CircleShapeVao]);
	glBindBuffer(GL_ARRAY_BUFFER, vbos[VBO::CircleVBO]);
	std::vector<float> circleVectors(CIRCLE_TESSELATION * 2);
	for (int i = 0; i < CIRCLE_TESSELATION; i++){
		circleVectors[i * 2] = glm::cos(i*glm::two_pi<float>() / CIRCLE_TESSELATION);
		circleVectors[i * 2 + 1] = glm::sin(i*glm::two_pi<float>() / CIRCLE_TESSELATION);
	}
	glBufferData(GL_ARRAY_BUFFER, sizeof(float)* 2 * CIRCLE_TESSELATION, &circleVectors[0], GL_STATIC_DRAW);
	glEnableVertexAttribArray(0);
	glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, 0);
	glBindBuffer(GL_ARRAY_BUFFER, 0);
	glBindVertexArray(0);
	CheckGl();
	// Device-side global ray counter, reset each frame by setupFrame().
	HANDLE_ERROR(cudaMalloc((void**)&dev_rays, sizeof(int)));
}
// Builds the default startup scene: a single yellow point light at the
// origin, with no objects or materials, and uploads it to the device.
void Renderer::initScene(){
	SceneDescription scene;
	Light light;
	light.lightType = _POINT;
	light.color = glm::vec3(1.0,1.,0.);	// yellow
	light.angle = 0.0;
	light.t = createTransform(glm::vec2(0.0f, 0.0f), glm::radians(0.0f), glm::vec2(0.01f));
	scene.lights.push_back(light);
	initCudaScene(scene);
}
void Renderer::initCudaScene(const SceneDescription& scene){
objects.clear();
//could be null pointers
cudaFree(cudaScene.lights);
cudaFree(cudaScene.objects);
cudaFree(cudaScene.materials);
for (auto iter = scene.objects.begin(); iter != scene.objects.end(); ++iter){
objects.push_back(*iter);
}
cudaScene.lightCount = scene.lights.size();
cudaScene.matCount = scene.materials.size();
cudaScene.objCount = scene.objects.size();
cudaScene.minDepth = 3;
cudaScene.maxDepth = 8;
if (cudaScene.matCount>0)
HANDLE_ERROR(cudaMalloc((void**)&(cudaScene.materials), cudaScene.matCount * sizeof(Material)));
if (cudaScene.objCount>0)
HANDLE_ERROR(cudaMalloc((void**)&(cudaScene.objects), cudaScene.objCount * sizeof(Object)));
if (cudaScene.lightCount>0)
HANDLE_ERROR(cudaMalloc((void**)&(cudaScene.lights), cudaScene.lightCount * sizeof(Light)));
if (cudaScene.matCount>0)
HANDLE_ERROR(cudaMemcpy(cudaScene.materials, scene.materials.data(), cudaScene.matCount * sizeof(Material), cudaMemcpyHostToDevice));
if (cudaScene.objCount>0)
HANDLE_ERROR(cudaMemcpy(cudaScene.objects, scene.objects.data(), cudaScene.objCount * sizeof(Object), cudaMemcpyHostToDevice));
if (cudaScene.lightCount>0)
HANDLE_ERROR(cudaMemcpy(cudaScene.lights, scene.lights.data(), cudaScene.lightCount * sizeof(Light), cudaMemcpyHostToDevice));
}
// (Re)allocates the line VBO to hold two VBOInfo vertices per ray in the
// current batch and registers it with CUDA for write-discard interop.
void Renderer::initLineBuffer(){
	glBindBuffer(GL_ARRAY_BUFFER, vbos[VBO::LightInfoVBO]);
	glBufferData(GL_ARRAY_BUFFER, sizeof(VBOInfo)* rayBatchSize * 2, NULL, GL_DYNAMIC_DRAW);
	resourceVBO = NULL;
	// WriteDiscard: CUDA fully rewrites the buffer each map, so GL need not
	// preserve its previous contents.
	HANDLE_ERROR(cudaGraphicsGLRegisterBuffer(&resourceVBO, vbos[VBO::LightInfoVBO], cudaGraphicsMapFlagsWriteDiscard));
}
// Resizes the render-target textures to the current width/height, attaches
// them to their FBOs, and allocates + zeroes the per-frame CUDA buffers.
// Fix: removed stray line-continuation backslashes (leftovers from a macro)
// inside the framebuffer-status error branches.
void Renderer::setupFrame(){
	// BUFFER1: HDR accumulation target for additive line blending.
	glBindTexture(GL_TEXTURE_2D, textures[TEXTURE::BUFFER1]);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, height, 0, GL_RGBA, GL_FLOAT, NULL);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	// OFFSCREEN: tone-mapped output written by postProcess().
	glBindTexture(GL_TEXTURE_2D, textures[TEXTURE::OFFSCREEN]);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB32F, width, height, 0, GL_RGB, GL_FLOAT, NULL);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glBindFramebuffer(GL_FRAMEBUFFER, fbos[FBO::FBO1]);
	glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textures[TEXTURE::BUFFER1], 0);
	if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE){
		fprintf(stdout, "fbo1 attachement failed\n");
		system("pause");
		exit(1);
	}
	glBindFramebuffer(GL_FRAMEBUFFER, fbos[FBO::FBO2]);
	glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textures[TEXTURE::OFFSCREEN], 0);
	if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE){
		fprintf(stdout, "fbo2 attachement failed\n");
		system("pause");
		exit(1);
	}
	//setup cuda
	// Per-batch ray records and curand states, zeroed along with the
	// global ray counter.
	HANDLE_ERROR(cudaMalloc((void**)&dev_rayInfo, rayBatchSize * sizeof(RayInfo)));
	HANDLE_ERROR(cudaMalloc((void**)&dev_states, rayBatchSize*sizeof(curandState_t)));
	HANDLE_ERROR(cudaMemset(dev_rayInfo, 0x00, rayBatchSize * sizeof(RayInfo)));
	HANDLE_ERROR(cudaMemset(dev_rays, 0x00, sizeof(int)));
}
// Placeholder: scene loading from disk is not implemented yet.
// NOTE(review): throws a const char*, which std::exception handlers cannot
// catch — consider std::logic_error once callers are audited.
void Renderer::loadScene(){
	throw "Not implemented";
}
|
2e82ec8ec620c1914955c14010eb31ab623ab4f1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "hip/hip_runtime.h"
#include "utility/src/utils.cuh"
DREAMPLACE_BEGIN_NAMESPACE
// One thread per pin: atomically folds the pin's x coordinate into the
// running maximum of its net. Pins of masked-out nets contribute 0, because
// the coordinate is multiplied by net_mask (0 or 1) before the update.
template <typename T>
__global__ void computeHPWLMax(
        const T *x,
        const int *pin2net_map,
        const unsigned char *net_mask,
        int num_pins,
        T *partial_hpwl_x_max)
{
    const int pin = blockIdx.x * blockDim.x + threadIdx.x;
    if (pin >= num_pins)
        return;
    const int net = pin2net_map[pin];
    atomicMax(&partial_hpwl_x_max[net], (T)net_mask[net] * x[pin]);
}
// One thread per pin: atomically folds the pin's x coordinate into the
// running minimum of its net. Masked-out nets contribute 0 via the
// net_mask multiplication, mirroring computeHPWLMax above.
template <typename T>
__global__ void computeHPWLMin(
        const T *x,
        const int *pin2net_map,
        const unsigned char *net_mask,
        int num_pins,
        T *partial_hpwl_x_min)
{
    const int pin = blockIdx.x * blockDim.x + threadIdx.x;
    if (pin >= num_pins)
        return;
    const int net = pin2net_map[pin];
    atomicMin(&partial_hpwl_x_min[net], (T)net_mask[net] * x[pin]);
}
// Fused variant: one thread per pin updates the per-net max and min for both
// the x and y coordinate arrays in a single pass (four atomics per pin).
// Masked-out nets contribute 0 via the net_mask multiplication.
template <typename T>
__global__ void computeHPWLMaxMin(
        const T *x, const T *y,
        const int *pin2net_map,
        const unsigned char *net_mask,
        int num_pins,
        T *partial_hpwl_x_max, T *partial_hpwl_x_min,
        T *partial_hpwl_y_max, T *partial_hpwl_y_min)
{
    const int pin = blockIdx.x * blockDim.x + threadIdx.x;
    if (pin >= num_pins)
        return;
    const int net = pin2net_map[pin];
    const T mask = (T)net_mask[net];
    const T px = mask * x[pin];
    atomicMax(&partial_hpwl_x_max[net], px);
    atomicMin(&partial_hpwl_x_min[net], px);
    const T py = mask * y[pin];
    atomicMax(&partial_hpwl_y_max[net], py);
    atomicMin(&partial_hpwl_y_min[net], py);
}
// Host launcher: one thread per pin folds every pin's x/y coordinate into
// per-net running max/min arrays with the fused kernel above.
// partial_hpwl_max / partial_hpwl_min are laid out as [x nets | y nets],
// hence the `+ num_nets` offsets for the y halves. The buffers are
// presumably pre-initialized by the caller — the kernel only folds values
// in; TODO confirm upstream.
template <typename T>
int computeHPWLCudaAtomicLauncher(
    const T *x, const T *y,
    const int *pin2net_map,
    const unsigned char *net_mask,
    int num_nets,
    int num_pins,
    T *partial_hpwl_max,
    T *partial_hpwl_min)
{
    const int thread_count = 64;
    // ceil(num_pins / thread_count) blocks cover all pins.
    const int block_count_pins = (num_pins + thread_count - 1) / thread_count;
    hipLaunchKernelGGL(( computeHPWLMaxMin), dim3(block_count_pins), dim3(thread_count), 0, 0,
        x, y,
        pin2net_map,
        net_mask,
        num_pins,
        partial_hpwl_max, partial_hpwl_min,
        partial_hpwl_max + num_nets, partial_hpwl_min + num_nets);
    //printArray(partial_hpwl, num_nets, "partial_hpwl")
    // I move out the summation to use ATen
    // significant speedup is observed
    //sumArray<<<1, 1>>>(partial_hpwl, num_nets, hpwl);
    return 0;
}
// manually instantiate the template function
// Only integer element types are registered: atomicMax/atomicMin have no
// native floating-point overloads, so coordinates are presumably fixed-point
// scaled by the caller — TODO confirm upstream.
#define REGISTER_KERNEL_LAUNCHER(type) \
    template int computeHPWLCudaAtomicLauncher<type>( \
        const type *x, const type *y, \
        const int *pin2net_map, \
        const unsigned char *net_mask, \
        int num_nets, \
        int num_pins, \
        type *partial_hpwl_max, \
        type *partial_hpwl_min);
REGISTER_KERNEL_LAUNCHER(int);
REGISTER_KERNEL_LAUNCHER(long long int);
DREAMPLACE_END_NAMESPACE
| 2e82ec8ec620c1914955c14010eb31ab623ab4f1.cu | #include <stdio.h>
#include <math.h>
#include <float.h>
#include "cuda_runtime.h"
#include "utility/src/utils.cuh"
DREAMPLACE_BEGIN_NAMESPACE
// One thread per pin: atomically folds the pin's x coordinate into the
// running maximum of its net. Pins of masked-out nets contribute 0, because
// the coordinate is multiplied by net_mask (0 or 1) before the update.
template <typename T>
__global__ void computeHPWLMax(
    const T *x,
    const int *pin2net_map,
    const unsigned char *net_mask,
    int num_pins,
    T *partial_hpwl_x_max)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < num_pins)
    {
        int net_id = pin2net_map[i];
        atomicMax(&partial_hpwl_x_max[net_id], (T)net_mask[net_id] * x[i]);
    }
}
// One thread per pin: atomically folds the pin's x coordinate into the
// running minimum of its net. Masked-out nets contribute 0 via the
// net_mask multiplication, mirroring computeHPWLMax above.
template <typename T>
__global__ void computeHPWLMin(
    const T *x,
    const int *pin2net_map,
    const unsigned char *net_mask,
    int num_pins,
    T *partial_hpwl_x_min)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < num_pins)
    {
        int net_id = pin2net_map[i];
        atomicMin(&partial_hpwl_x_min[net_id], (T)net_mask[net_id] * x[i]);
    }
}
// Fused variant: one thread per pin updates the per-net max and min for both
// the x and y coordinate arrays in a single pass (four atomics per pin).
// Masked-out nets contribute 0 via the net_mask multiplication.
template <typename T>
__global__ void computeHPWLMaxMin(
    const T *x, const T *y,
    const int *pin2net_map,
    const unsigned char *net_mask,
    int num_pins,
    T *partial_hpwl_x_max, T *partial_hpwl_x_min,
    T *partial_hpwl_y_max, T *partial_hpwl_y_min)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < num_pins)
    {
        int net_id = pin2net_map[i];
        T xx = (T)net_mask[net_id] * x[i];
        atomicMax(&partial_hpwl_x_max[net_id], xx);
        atomicMin(&partial_hpwl_x_min[net_id], xx);
        T yy = (T)net_mask[net_id] * y[i];
        atomicMax(&partial_hpwl_y_max[net_id], yy);
        atomicMin(&partial_hpwl_y_min[net_id], yy);
    }
}
// Host launcher: one thread per pin folds every pin's x/y coordinate into
// per-net running max/min arrays with the fused kernel above.
// partial_hpwl_max / partial_hpwl_min are laid out as [x nets | y nets],
// hence the `+ num_nets` offsets for the y halves. The buffers are
// presumably pre-initialized by the caller — the kernel only folds values
// in; TODO confirm upstream.
template <typename T>
int computeHPWLCudaAtomicLauncher(
    const T *x, const T *y,
    const int *pin2net_map,
    const unsigned char *net_mask,
    int num_nets,
    int num_pins,
    T *partial_hpwl_max,
    T *partial_hpwl_min)
{
    const int thread_count = 64;
    // ceil(num_pins / thread_count) blocks cover all pins.
    const int block_count_pins = (num_pins + thread_count - 1) / thread_count;
    computeHPWLMaxMin<<<block_count_pins, thread_count>>>(
        x, y,
        pin2net_map,
        net_mask,
        num_pins,
        partial_hpwl_max, partial_hpwl_min,
        partial_hpwl_max + num_nets, partial_hpwl_min + num_nets);
    //printArray(partial_hpwl, num_nets, "partial_hpwl");
    // I move out the summation to use ATen
    // significant speedup is observed
    //sumArray<<<1, 1>>>(partial_hpwl, num_nets, hpwl);
    return 0;
}
// manually instantiate the template function
// Only integer element types are registered: atomicMax/atomicMin have no
// native floating-point overloads, so coordinates are presumably fixed-point
// scaled by the caller — TODO confirm upstream.
#define REGISTER_KERNEL_LAUNCHER(type) \
    template int computeHPWLCudaAtomicLauncher<type>( \
        const type *x, const type *y, \
        const int *pin2net_map, \
        const unsigned char *net_mask, \
        int num_nets, \
        int num_pins, \
        type *partial_hpwl_max, \
        type *partial_hpwl_min);
REGISTER_KERNEL_LAUNCHER(int);
REGISTER_KERNEL_LAUNCHER(long long int);
DREAMPLACE_END_NAMESPACE
|
b407d0af9a528e3c46521b5f0e96cad7284ae81d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <time.h>
#include <stdbool.h>
#define GRID_SIZE 8
#define BLOCK_SIZE 1024
#define min(a, b) (a < b ? a : b)
#define abs(a) (a > 0 ? a : -1 * a)
// Each thread computes squared Euclidean distances for a TILE_WIDTH x
// TILE_WIDTH tile of (data point, cluster) pairs into d_distances
// (row-major [data][cluster]).
// NOTE(review): TILE_WIDTH and d (point dimensionality) are not defined in
// this translation unit — they must come from a shared header; confirm.
// There is also no bounds guard against n_data/n_clusters — TODO add once
// the launch geometry is settled.
// Fixes over the original (which did not compile): comma instead of ';' in
// the inner for-header, missing ';' after the distance initializer, and the
// d_clustes/d_clusters typo.
__global__ void kMeansStep1(float *d_data, float* d_clusters, float* d_assignments, float *d_distances, int n_clusters){
	int start_data = blockDim.x * blockIdx.x + threadIdx.x * TILE_WIDTH;
	int end_data = start_data + TILE_WIDTH;
	int start_cluster = blockDim.y * blockIdx.y + threadIdx.y * TILE_WIDTH;
	int end_cluster = start_cluster + TILE_WIDTH;
	for (int data = start_data; data < end_data; data++){
		for (int cluster = start_cluster; cluster < end_cluster; cluster++){
			float distance = 0.0f;
			for (int j = 0; j < d; j++){
				float diff = d_clusters[cluster * d + j] - d_data[data * d + j];
				distance += diff * diff;
			}
			d_distances[data * n_clusters + cluster] = distance;
		}
	}
}
// Intended to accumulate each data point's coordinates into its assigned
// cluster's centroid sum.
// NOTE(review): this kernel is an unfinished stub — the while loop has an
// empty body (and never advances `start`, so it spins forever whenever
// start < n_clusters), and `tid`, `cluster`, `d_data` and `d` are not
// derived from the parameters. Only the syntax is repaired here
// ("for int(j..." and the 2-D indexing of a flat float*, assumed row-major
// [cluster][dim] — confirm); the logic still needs to be written before
// this kernel can be used.
__global__ void kMeansStep2(float *assignments, float *d_clusters, int n_data, int n_clusters){
	int data = blockIdx.x;
	int start = threadIdx.x;
	while (start < n_clusters){
	}
	//reduction
	__syncthreads();
	if (tid == 0){
		for (int j = 0; j < d; j++)
			atomicAdd(&d_clusters[cluster * d + j], d_data[data * d + j]);
	}
}
// Compares the previous and new centroid arrays tile-by-tile and clears
// *converged when any coordinate moved by more than the 0.1 threshold.
// NOTE(review): repaired from non-compiling code — `__global` lacked the
// trailing underscores, the raw pointers were subtracted instead of being
// indexed, and the undeclared name `not_converged` was used instead of the
// `converged` parameter (with an atomicAnd on a bool*, which is invalid).
// Indexing assumes row-major [cluster][dim] with `d` in scope, like the
// other kernels in this file — TODO confirm against the shared header.
__global__ void checkConverged(float *d_prev_clusters, float *d_new_clusters, bool *converged){
	int start_dim = blockDim.x * blockIdx.x + threadIdx.x * TILE_WIDTH;
	int end_dim = start_dim + TILE_WIDTH;
	int start_cluster = blockDim.y * blockIdx.y + threadIdx.y * TILE_WIDTH;
	int end_cluster = start_cluster + TILE_WIDTH;
	for (int dim = start_dim; dim < end_dim; dim++){
		for (int cluster = start_cluster; cluster < end_cluster; cluster++){
			int idx = cluster * d + dim;
			if (abs(d_prev_clusters[idx] - d_new_clusters[idx]) > 0.1f)
				*converged = false;	// benign race: every writer stores false
		}
	}
}
// Empty placeholder kernel; never launched from this file — presumably a
// debugging leftover. Candidate for removal once confirmed unused.
__global__ void salam(){
}
// Host driver for the (unfinished) k-means pipeline: allocates host/device
// buffers, uploads the data, then ping-pongs two centroid buffers between
// the assignment (step 1) and update (step 2) kernels.
// NOTE(review): repaired from non-compiling code — n/k/d were declared twice
// with the uninitialized copies used as allocation sizes, several calls had
// missing parentheses/semicolons, h_data elements (floats) were assigned
// malloc'ed pointers, d_clusters was seeded from h_data with the (larger)
// data size, and the swap referenced undeclared d_in/d_out. The kernel
// launch argument lists still do not match the kernel signatures above and
// the loop has no exit condition — this file needs finishing, not just
// fixing.
int main(){
	int n_data = 10;	// number of data points
	int n_clusters = 5;	// number of centroids
	int d = 4;	// dimension of each point
	int size_data = sizeof(float) * n_data * d;
	int size_clusters = sizeof(float) * n_clusters * d;
	int size_distances = sizeof(float) * n_data * n_clusters;
	float *h_data = (float *)malloc(size_data);
	float *h_clusters = (float *)malloc(size_clusters);
	int *h_assignments = (int *)malloc(n_data * sizeof(int));
	// TODO(review): h_data/h_clusters are never filled with real input.
	float *d_data, *d_clusters, *d_prev_clusters, *d_assignments, *d_distances;
	bool *d_converged;
	hipMalloc((void **)&d_data, size_data);
	hipMalloc((void **)&d_clusters, size_clusters);
	hipMalloc((void **)&d_prev_clusters, size_clusters);
	hipMalloc((void **)&d_assignments, n_data * sizeof(int));
	hipMalloc((void **)&d_distances, size_distances);
	hipMalloc((void **)&d_converged, sizeof(bool));
	hipMemcpy(d_data, h_data, size_data, hipMemcpyHostToDevice);
	hipMemcpy(d_clusters, h_clusters, size_clusters, hipMemcpyHostToDevice);
	float *d1 = d_clusters;	// centroids read this iteration
	float *d2 = d_prev_clusters;	// centroids written this iteration
	while (1){
		hipLaunchKernelGGL(( kMeansStep1), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, d_data, d1, d_assignments, d_distances, n_clusters);
		hipDeviceSynchronize();
		hipMemset(d2, 0, size_clusters);
		hipLaunchKernelGGL(( kMeansStep2), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, d_assignments, d2, n_data, n_clusters);
		hipLaunchKernelGGL(( checkConverged), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, d1, d2, d_converged);
		// TODO(review): copy *d_converged back and break when it stays true.
		float *tmp = d1;
		d1 = d2;
		d2 = tmp;
	}
	return 0;
} | b407d0af9a528e3c46521b5f0e96cad7284ae81d.cu | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <time.h>
#include <stdbool.h>
#define GRID_SIZE 8
#define BLOCK_SIZE 1024
#define min(a, b) (a < b ? a : b)
#define abs(a) (a > 0 ? a : -1 * a)
// Each thread computes squared Euclidean distances for a TILE_WIDTH x
// TILE_WIDTH tile of (data point, cluster) pairs into d_distances
// (row-major [data][cluster]).
// NOTE(review): TILE_WIDTH and d (point dimensionality) are not defined in
// this translation unit — they must come from a shared header; confirm.
// There is also no bounds guard against n_data/n_clusters — TODO add once
// the launch geometry is settled.
// Fixes over the original (which did not compile): comma instead of ';' in
// the inner for-header, missing ';' after the distance initializer, and the
// d_clustes/d_clusters typo.
__global__ void kMeansStep1(float *d_data, float* d_clusters, float* d_assignments, float *d_distances, int n_clusters){
	int start_data = blockDim.x * blockIdx.x + threadIdx.x * TILE_WIDTH;
	int end_data = start_data + TILE_WIDTH;
	int start_cluster = blockDim.y * blockIdx.y + threadIdx.y * TILE_WIDTH;
	int end_cluster = start_cluster + TILE_WIDTH;
	for (int data = start_data; data < end_data; data++){
		for (int cluster = start_cluster; cluster < end_cluster; cluster++){
			float distance = 0.0f;
			for (int j = 0; j < d; j++){
				float diff = d_clusters[cluster * d + j] - d_data[data * d + j];
				distance += diff * diff;
			}
			d_distances[data * n_clusters + cluster] = distance;
		}
	}
}
// Intended to accumulate each data point's coordinates into its assigned
// cluster's centroid sum.
// NOTE(review): this kernel is an unfinished stub — the while loop has an
// empty body (and never advances `start`, so it spins forever whenever
// start < n_clusters), and `tid`, `cluster`, `d_data` and `d` are not
// derived from the parameters. Only the syntax is repaired here
// ("for int(j..." and the 2-D indexing of a flat float*, assumed row-major
// [cluster][dim] — confirm); the logic still needs to be written before
// this kernel can be used.
__global__ void kMeansStep2(float *assignments, float *d_clusters, int n_data, int n_clusters){
	int data = blockIdx.x;
	int start = threadIdx.x;
	while (start < n_clusters){
	}
	//reduction
	__syncthreads();
	if (tid == 0){
		for (int j = 0; j < d; j++)
			atomicAdd(&d_clusters[cluster * d + j], d_data[data * d + j]);
	}
}
// Compares the previous and new centroid arrays tile-by-tile and clears
// *converged when any coordinate moved by more than the 0.1 threshold.
// NOTE(review): repaired from non-compiling code — `__global` lacked the
// trailing underscores, the raw pointers were subtracted instead of being
// indexed, and the undeclared name `not_converged` was used instead of the
// `converged` parameter (with an atomicAnd on a bool*, which is invalid).
// Indexing assumes row-major [cluster][dim] with `d` in scope, like the
// other kernels in this file — TODO confirm against the shared header.
__global__ void checkConverged(float *d_prev_clusters, float *d_new_clusters, bool *converged){
	int start_dim = blockDim.x * blockIdx.x + threadIdx.x * TILE_WIDTH;
	int end_dim = start_dim + TILE_WIDTH;
	int start_cluster = blockDim.y * blockIdx.y + threadIdx.y * TILE_WIDTH;
	int end_cluster = start_cluster + TILE_WIDTH;
	for (int dim = start_dim; dim < end_dim; dim++){
		for (int cluster = start_cluster; cluster < end_cluster; cluster++){
			int idx = cluster * d + dim;
			if (abs(d_prev_clusters[idx] - d_new_clusters[idx]) > 0.1f)
				*converged = false;	// benign race: every writer stores false
		}
	}
}
// Empty placeholder kernel; never launched from this file — presumably a
// debugging leftover. Candidate for removal once confirmed unused.
__global__ void salam(){
}
// Host driver for the (unfinished) k-means pipeline: allocates host/device
// buffers, uploads the data, then ping-pongs two centroid buffers between
// the assignment (step 1) and update (step 2) kernels.
// NOTE(review): repaired from non-compiling code — n/k/d were declared twice
// with the uninitialized copies used as allocation sizes, several calls had
// missing parentheses/semicolons, h_data elements (floats) were assigned
// malloc'ed pointers, d_clusters was seeded from h_data with the (larger)
// data size, the deprecated cudaThreadSynchronize was used, and the swap
// referenced undeclared d_in/d_out. The kernel launch argument lists still
// do not match the kernel signatures above and the loop has no exit
// condition — this file needs finishing, not just fixing.
int main(){
	int n_data = 10;	// number of data points
	int n_clusters = 5;	// number of centroids
	int d = 4;	// dimension of each point
	int size_data = sizeof(float) * n_data * d;
	int size_clusters = sizeof(float) * n_clusters * d;
	int size_distances = sizeof(float) * n_data * n_clusters;
	float *h_data = (float *)malloc(size_data);
	float *h_clusters = (float *)malloc(size_clusters);
	int *h_assignments = (int *)malloc(n_data * sizeof(int));
	// TODO(review): h_data/h_clusters are never filled with real input.
	float *d_data, *d_clusters, *d_prev_clusters, *d_assignments, *d_distances;
	bool *d_converged;
	cudaMalloc((void **)&d_data, size_data);
	cudaMalloc((void **)&d_clusters, size_clusters);
	cudaMalloc((void **)&d_prev_clusters, size_clusters);
	cudaMalloc((void **)&d_assignments, n_data * sizeof(int));
	cudaMalloc((void **)&d_distances, size_distances);
	cudaMalloc((void **)&d_converged, sizeof(bool));
	cudaMemcpy(d_data, h_data, size_data, cudaMemcpyHostToDevice);
	cudaMemcpy(d_clusters, h_clusters, size_clusters, cudaMemcpyHostToDevice);
	float *d1 = d_clusters;	// centroids read this iteration
	float *d2 = d_prev_clusters;	// centroids written this iteration
	while (1){
		kMeansStep1 <<<GRID_SIZE, BLOCK_SIZE>>> (d_data, d1, d_assignments, d_distances, n_clusters);
		cudaDeviceSynchronize();
		cudaMemset(d2, 0, size_clusters);
		kMeansStep2 <<<GRID_SIZE, BLOCK_SIZE>>> (d_assignments, d2, n_data, n_clusters);
		checkConverged <<<GRID_SIZE, BLOCK_SIZE>>> (d1, d2, d_converged);
		// TODO(review): copy *d_converged back and break when it stays true.
		float *tmp = d1;
		d1 = d2;
		d2 = tmp;
	}
	return 0;
} |
412caae9e47aa0f57565f6c71b051203363db232.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
// @author Yurii Shyrma (iuriish@yahoo.com)
//
#include <system/op_boilerplate.h>
#include <loops/reduce_long.h>
#include <loops/legacy_ops.h>
#include <helpers/DebugHelper.h>
#include <types/types.h>
#include <execution/LaunchContext.h>
#include <exceptions/cuda_exception.h>
#include <loops/scalar.h>
using namespace simdOps;
////////////////////////////////////////////////////////////////////////
// Kernel wrapper: forwards a TAD-wise (tensor-along-dimension) reduction to
// ReduceLongFunction::transformCudaXD for the concrete op type.
template <typename X, typename Z, typename OpType>
__global__ void simpleReduce(const void *x, const Nd4jLong *outerXTadShapeInfo, const Nd4jLong *innerXTadShapeInfo,
                                void *extraParams, void *vreductionBuffer, void *z, const Nd4jLong *zShapeInfo) {
    functions::reduce::ReduceLongFunction<X,Z>::template transformCudaXD<OpType>(x, outerXTadShapeInfo, innerXTadShapeInfo, extraParams, vreductionBuffer, z, zShapeInfo);
}
////////////////////////////////////////////////////////////////////////
// Device-side helper: whole-array (scalar) reduction entry point shared by
// the __global__ wrapper below. Note that dimension/dimensionLength are
// accepted but not forwarded — the scalar path reduces everything.
template <typename X, typename Z, typename OpType>
__device__ void reduceScalarGeneric(const void *x, const Nd4jLong *xShapeInfo,
                                    void *extraParams,
                                    void *z, const Nd4jLong *zShapeInfo,
                                    int *dimension, int dimensionLength,
                                    void *reductionBuffer,
                                    const Nd4jLong *tadOnlyShapeInfo) {
    functions::reduce::ReduceLongFunction<X, Z>::template execScalarCuda<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, reductionBuffer, tadOnlyShapeInfo);
}
////////////////////////////////////////////////////////////////////////
// Kernel wrapper around reduceScalarGeneric: reduces the whole input array
// to a single scalar result for the concrete op type.
template <typename X, typename Z, typename OpType>
__global__ void simpleScalar(const void *x, const Nd4jLong *xShapeInfo,
                            void *extraParams,
                            void *z, const Nd4jLong *zShapeInfo,
                            int *dimension, int dimensionLength,
                            void *reductionBuffer,
                            const Nd4jLong *tadOnlyShapeInfo) {
    reduceScalarGeneric<X, Z, OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo);
}
namespace functions {
namespace reduce {
////////////////////////////////////////////////////////////////////////
// In-place shared-memory tree reduction of the first numItems elements of
// vsPartials; thread `tid` cooperates with the rest of its block. The
// combined value ends up in sPartials[0].
template <typename X, typename Z>
template <typename OpType>
__device__ void ReduceLongFunction<X,Z>::aggregatePartials(void *vsPartials, Nd4jLong tid, Nd4jLong numItems, void *vextraParams) {
    // start the shared memory loop on the next power of 2 less
    // than the block size. If block size is not a power of 2,
    // accumulate the intermediate sums in the remainder range.
    auto sPartials = reinterpret_cast<Z*>(vsPartials);
    auto extraParams = reinterpret_cast<X*>(vextraParams);
    Nd4jLong floorPow2 = numItems;
    if (floorPow2 & (floorPow2 - 1)) {
        // numItems is not a power of two: strip bits until only the highest
        // remains, then fold the tail [floorPow2, numItems) onto the front so
        // the tree loop below can assume a power-of-two element count.
        while (floorPow2 & (floorPow2 - 1))
            floorPow2 &= floorPow2 - 1;
        if (tid >= floorPow2)
            sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParams);
        // floorPow2 is uniform across the block, so this barrier is safe.
        __syncthreads();
    }
    // Standard halving tree reduction over the power-of-two prefix.
    for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
        if (tid < activeThreads && tid + activeThreads < numItems)
            sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParams);
        __syncthreads();
    }
}
////////////////////////////////////////////////////////////////////////
// Reduces each inner TAD (tensor-along-dimension slice) of x down to one
// element of z. Blocks stride over TADs (grid-stride on blockIdx.x); the
// threads of a block stride over the elements of one TAD and then combine
// their partials via aggregatePartials.
template <typename X, typename Z>
template <typename OpType>
__device__ void ReduceLongFunction<X,Z>::transformCudaXD(const void *vx, const Nd4jLong *outerXTadShapeInfo, const Nd4jLong *innerXTadShapeInfo,
                                                        void *vextraParams, void *vreductionBuffer,
                                                        void *vz, const Nd4jLong *zShapeInfo) {
    auto x = reinterpret_cast<const X*>(vx);
    auto z = reinterpret_cast<Z*>(vz);
    auto extraParams = reinterpret_cast<X*>(vextraParams);
    //shared memory space for storing intermediate results
    __shared__ Z sPartials[CUDA_BLOCK_SIZE];
    __shared__ int tadLen, numTads;
    __shared__ bool sameOffsets;
    if (threadIdx.x == 0) {
        // If z and the outer-TAD shape share shape and strides, the same
        // offset serves both and the second getOffset below is skipped.
        sameOffsets = shape::haveSameShapeAndStrides(zShapeInfo, outerXTadShapeInfo);
        tadLen = shape::length(innerXTadShapeInfo);
        numTads = shape::length(outerXTadShapeInfo);
    }
    __syncthreads();
    int coords[MAX_RANK];
    for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
        shape::index2coords(r, outerXTadShapeInfo, coords);
        const auto outerOffset = shape::getOffset(outerXTadShapeInfo, coords);
        const auto zOffset = sameOffsets ? outerOffset : shape::getOffset(zShapeInfo, coords);
        const X* xTad = x + outerOffset;
        // Each thread seeds its partial, then strides over the TAD elements.
        sPartials[threadIdx.x] = OpType::startingValue(xTad);
        for (int i = threadIdx.x; i < tadLen; i += blockDim.x)
            sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(xTad[shape::getIndexOffset(i, innerXTadShapeInfo)], extraParams), extraParams);
        __syncthreads();
        // aggregate. do NOT reduce for elements > tadLen
        aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, tadLen), extraParams);
        __syncthreads();
        if (threadIdx.x == 0)
            z[zOffset] = OpType::postProcess(sPartials[threadIdx.x], tadLen, extraParams);
    }
}
////////////////////////////////////////////////////////////////////////
// Full reduction of x into the single scalar z[0]. Each block reduces its
// grid-stride slice into a per-block partial; when the grid has more than
// one block, partials are published to vreductionBuffer and the last block
// to finish (elected via an atomic ticket at tc[16384]) combines them.
// tadOnlyShapeInfo is unused here (kept for a uniform signature).
template <typename X, typename Z>
template <typename OpType>
__device__ void ReduceLongFunction<X,Z>::execScalarCuda(const void *vx, const Nd4jLong *xShapeInfo,
void *vextraParams,
void *vz, const Nd4jLong *zShapeInfo,
void *vreductionBuffer,
const Nd4jLong *tadOnlyShapeInfo) {
auto x = reinterpret_cast<const X*>(vx);
auto z = reinterpret_cast<Z*>(vz);
auto extraParams = reinterpret_cast<X*>(vextraParams);
auto reductionBuffer = reinterpret_cast<Z*>(vreductionBuffer);
auto tid = blockDim.x * blockIdx.x + threadIdx.x;
//shared memory space for storing intermediate results
__shared__ Z sPartials[CUDA_BLOCK_SIZE];
__shared__ Nd4jLong xEws;
__shared__ Nd4jLong len;
if(threadIdx.x == 0) {
xEws = shape::elementWiseStride(xShapeInfo);
len = shape::length(xShapeInfo);
}
__syncthreads();
sPartials[threadIdx.x] = OpType::startingValue(x);
// fast path: positive element-wise stride allows direct strided indexing;
// otherwise fall back to full index-to-offset translation
if (xEws > 0)
for (int i = tid; i < len; i += (blockDim.x * gridDim.x))
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[i * xEws], extraParams), extraParams);
else
for (int i = tid; i < len; i += blockDim.x * gridDim.x)
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[shape::getIndexOffset(i, xShapeInfo)], extraParams), extraParams);
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, len), extraParams);
__syncthreads();
if (gridDim.x > 1) {
// tc aliases the reduction buffer; slot 16384 is the ticket counter
auto tc = reinterpret_cast<unsigned int *>(reductionBuffer);
__shared__ bool amLast;
tid = threadIdx.x;  // NOTE(review): reassigned but not read afterwards
if (threadIdx.x == 0)
reductionBuffer[blockIdx.x] = sPartials[0];//this->postProcess(sPartials[0],len,extraParams);
// make the published partial visible to other blocks before ticketing
__threadfence();
__syncthreads();
if (threadIdx.x == 0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x - 1);
}
__syncthreads();
// only the last block to arrive performs the final combine
if (amLast) {
tc[16384] = 0;
sPartials[threadIdx.x] = OpType::startingValue(x);
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x)
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraParams);
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(gridDim.x, blockDim.x), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
z[0] = OpType::postProcess(sPartials[0], len, extraParams);
}
}
}
else {
// single block: its partial is already the global result
if (threadIdx.x == 0) {
auto tc = reinterpret_cast<unsigned int*>(reductionBuffer);
tc[16384] = 0;
z[0] = OpType::postProcess(sPartials[0], len, extraParams);
}
}
}
////////////////////////////////////////////////////////////////////////
// Host wrapper for a dimension-wise reduction. Empty input: z is filled with
// the op's starting value (via a device-side scalar assign). Otherwise the
// first zRank entries of dims select the output ("outer") dimensions and the
// rest select the reduced ("inner"/TAD) dimensions, and simpleReduce is
// launched on the given stream.
template <typename X, typename Z>
template<typename OpType>
__host__ void ReduceLongFunction<X,Z>::intermediateXD(dim3 launchDims, hipStream_t *stream,
const void *x, const Nd4jLong *dXShapeInfo, const Nd4jLong *hXShapeInfo,
void *extraParams, void *vreductionBuffer,
void *z, const Nd4jLong *dZShapeInfo, const Nd4jLong *hZShapeInfo, const int* dims) {
if(shape::isEmpty(hXShapeInfo)) {
if(shape::isEmpty(hZShapeInfo))
return;
// stage the starting value on the device through the context scratch scalar
const auto startingVal = static_cast<Z>(OpType::startingValue(reinterpret_cast<const X*>(x)));
auto res = hipMemcpyAsync(sd::LaunchContext::defaultContext()->getScalarPointer(), &startingVal, sizeof(Z), hipMemcpyHostToDevice, *stream);
if (res != 0)
throw sd::cuda_exception::build("ReduceLongFunction<X,Z>::intermediateXD: failed to copy temporary scalar", res);
auto ptr = sd::LaunchContext::defaultContext()->getScalarPointer();
// scalar assign
functions::scalar::ScalarTransform<Z, Z, Z>::executeCudaShaped(launchDims, stream, 14, z, dZShapeInfo, hXShapeInfo, z, dZShapeInfo, hZShapeInfo, ptr, nullptr);
}
else {
// split x's shape into the kept (outer) part and the reduced (inner) part
const int zRank = shape::rank(hZShapeInfo);
const int tadRank = shape::rank(hXShapeInfo) - zRank;
auto outerPack = sd::ConstantShapeHelper::getInstance().createSubArrShapeInfo(hXShapeInfo, dims, zRank);
auto innerPack = sd::ConstantShapeHelper::getInstance().createSubArrShapeInfo(hXShapeInfo, dims+zRank, tadRank);
hipLaunchKernelGGL(( simpleReduce<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, reinterpret_cast<Nd4jLong const*>(outerPack.special()), reinterpret_cast<Nd4jLong const*>(innerPack.special()), extraParams, vreductionBuffer, z, dZShapeInfo);
}
}
////////////////////////////////////////////////////////////////////////
// Host wrapper for a full (scalar) reduction. Empty input: the op's starting
// value is copied straight into the device output z. Otherwise the
// simpleScalar kernel is launched on the given stream.
template <typename X, typename Z>
template<typename OpType>
__host__ void ReduceLongFunction<X,Z>::intermediateScalar(dim3 launchDims, hipStream_t *stream,
const void *x, const Nd4jLong *xShapeInfo, const Nd4jLong *hXShapeInfo,
void *extraParams,
void *z, const Nd4jLong *zShapeInfo, const Nd4jLong *hZShapeInfo,
int *dimension, int dimensionLength,
void *reductionBuffer,
const Nd4jLong *tadOnlyShapeInfo) {
if (shape::isEmpty(hXShapeInfo)) {
if (shape::isEmpty(hZShapeInfo))
return;
const auto startingVal = static_cast<Z>(OpType::startingValue(reinterpret_cast<const X*>(x)));
auto res = hipMemcpyAsync(z, &startingVal, sizeof(Z), hipMemcpyHostToDevice, *stream);
if (res != 0)
throw sd::cuda_exception::build("ReduceLongFunction<X,Z>::intermediateScalar: failed to copy resulting scalar", res);
}
else {
hipLaunchKernelGGL(( simpleScalar<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo);
}
}
////////////////////////////////////////////////////////////////////////
// Host dispatcher for full (scalar) long reductions: resolves the concrete
// OpType from opNum via DISPATCH_BY_OPNUM_TT and runs intermediateScalar on
// the given stream, then validates the stream's error state.
template <typename X, typename Y>
_CUDA_H void ReduceLongFunction<X,Y>::execReduceScalar(dim3 launchDims, hipStream_t *stream,
const int opNum,
const void *x, const Nd4jLong *xShapeInfo, const Nd4jLong* hXShapeInfo,
void *extraParams,
void *z, const Nd4jLong *zShapeInfo, const Nd4jLong* hZShapeInfo,
int *dimension, int dimensionLength,
void *reductionBuffer,
const Nd4jLong *tadOnlyShapeInfo) {
DISPATCH_BY_OPNUM_TT(intermediateScalar, PARAMS(launchDims, stream, x, xShapeInfo, hXShapeInfo, extraParams, z, zShapeInfo, hZShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_LONG_OPS));
// fix: the tag previously read "execReduceScalarFloat", a copy-paste from
// the float variant; report this (long) function's name in diagnostics.
sd::DebugHelper::checkErrorCode(stream, "execReduceScalarLong(...) failed");
}
////////////////////////////////////////////////////////////////////////
// Host dispatcher for dimension-wise long reductions. A length-1 output is
// routed to the scalar path; otherwise intermediateXD is dispatched for the
// concrete OpType selected by opNum.
template <typename X, typename Y>
_CUDA_H void ReduceLongFunction<X,Y>::execReduceXD(dim3 launchDims, hipStream_t *stream, const int opNum,
const void *x, const Nd4jLong *dXShapeInfo, const Nd4jLong *hXShapeInfo,
void *extraParams, void *vreductionBuffer,
void *z, const Nd4jLong *dZShapeInfo, const Nd4jLong *hZShapeInfo, const int *dims) {
if(shape::length(hZShapeInfo) == 1) {
// reducing everything to one element: reuse the scalar implementation
ReduceLongFunction<X,Y>::execReduceScalar(launchDims, stream, opNum, x, dXShapeInfo, hXShapeInfo, extraParams, z, dZShapeInfo, hZShapeInfo, nullptr, 0, vreductionBuffer, nullptr);
}
else {
DISPATCH_BY_OPNUM_TT(intermediateXD, PARAMS(launchDims, stream, x, dXShapeInfo, hXShapeInfo, extraParams, vreductionBuffer, z, dZShapeInfo, hZShapeInfo, dims), OPS_A(REDUCE_LONG_OPS));
}
DEBUG_KERNEL(stream, opNum);
}
////////////////////////////////////////////////////////////////////////
// Fill a scratch buffer with extraParams[0].
// sMemSize is the buffer size in bytes; the calling thread writes every one
// of the sMemSize / sizeof(X) elements reachable through *sPartials.
template <typename X>
__device__ void initializeShared(X *extraParams, X **sPartials, int sMemSize) {
    const int count = sMemSize / sizeof(X);
    X *buffer = (X *) *sPartials;
    for (int idx = 0; idx != count; ++idx)
        buffer[idx] = extraParams[0];
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT ReduceLongFunction, , LIBND4J_TYPES, LONG_TYPES);
}
}
| 412caae9e47aa0f57565f6c71b051203363db232.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
// @author Yurii Shyrma (iuriish@yahoo.com)
//
#include <system/op_boilerplate.h>
#include <loops/reduce_long.h>
#include <loops/legacy_ops.h>
#include <helpers/DebugHelper.h>
#include <types/types.h>
#include <execution/LaunchContext.h>
#include <exceptions/cuda_exception.h>
#include <loops/scalar.h>
using namespace simdOps;
////////////////////////////////////////////////////////////////////////
// Kernel entry point for a dimension-wise reduction: forwards to
// ReduceLongFunction::transformCudaXD, which reduces each inner TAD of x
// into one element of z.
template <typename X, typename Z, typename OpType>
__global__ void simpleReduce(const void *x, const Nd4jLong *outerXTadShapeInfo, const Nd4jLong *innerXTadShapeInfo,
void *extraParams, void *vreductionBuffer, void *z, const Nd4jLong *zShapeInfo) {
functions::reduce::ReduceLongFunction<X,Z>::template transformCudaXD<OpType>(x, outerXTadShapeInfo, innerXTadShapeInfo, extraParams, vreductionBuffer, z, zShapeInfo);
}
////////////////////////////////////////////////////////////////////////
// Device-side trampoline for the full (scalar) reduction: forwards to
// ReduceLongFunction::execScalarCuda, which folds all of x into z[0].
template <typename X, typename Z, typename OpType>
__device__ void reduceScalarGeneric(const void *x, const Nd4jLong *xShapeInfo,
void *extraParams,
void *z, const Nd4jLong *zShapeInfo,
int *dimension, int dimensionLength,
void *reductionBuffer,
const Nd4jLong *tadOnlyShapeInfo) {
functions::reduce::ReduceLongFunction<X, Z>::template execScalarCuda<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, reductionBuffer, tadOnlyShapeInfo);
}
////////////////////////////////////////////////////////////////////////
// Kernel entry point for a full (scalar) reduction: forwards to
// reduceScalarGeneric. X = input type, Z = output type, OpType = reduce op.
template <typename X, typename Z, typename OpType>
__global__ void simpleScalar(const void *x, const Nd4jLong *xShapeInfo,
void *extraParams,
void *z, const Nd4jLong *zShapeInfo,
int *dimension, int dimensionLength,
void *reductionBuffer,
const Nd4jLong *tadOnlyShapeInfo) {
reduceScalarGeneric<X, Z, OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo);
}
namespace functions {
namespace reduce {
////////////////////////////////////////////////////////////////////////
// Tree-reduce the first numItems entries of the shared-memory buffer
// vsPartials into vsPartials[0] via OpType::update. Must be called by all
// threads of the block together (uses __syncthreads()).
template <typename X, typename Z>
template <typename OpType>
__device__ void ReduceLongFunction<X,Z>::aggregatePartials(void *vsPartials, Nd4jLong tid, Nd4jLong numItems, void *vextraParams) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
auto sPartials = reinterpret_cast<Z*>(vsPartials);
auto extraParams = reinterpret_cast<X*>(vextraParams);
Nd4jLong floorPow2 = numItems;
if (floorPow2 & (floorPow2 - 1)) {
// round numItems down to a power of two
while (floorPow2 & (floorPow2 - 1))
floorPow2 &= floorPow2 - 1;
// fold the tail [floorPow2, numItems) onto the head; the condition is
// uniform across the block, so the barrier below is safe
if (tid >= floorPow2)
sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParams);
__syncthreads();
}
// power-of-two tree reduction; result ends up in sPartials[0]
for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numItems)
sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParams);
__syncthreads();
}
}
////////////////////////////////////////////////////////////////////////
// Dimension-wise (XD) reduction: each TAD of x (layout given by
// innerXTadShapeInfo) is reduced to one element of z. Blocks walk the TADs
// grid-stride style; the threads of a block reduce one TAD cooperatively.
template <typename X, typename Z>
template <typename OpType>
__device__ void ReduceLongFunction<X,Z>::transformCudaXD(const void *vx, const Nd4jLong *outerXTadShapeInfo, const Nd4jLong *innerXTadShapeInfo,
void *vextraParams, void *vreductionBuffer,
void *vz, const Nd4jLong *zShapeInfo) {
auto x = reinterpret_cast<const X*>(vx);
auto z = reinterpret_cast<Z*>(vz);
auto extraParams = reinterpret_cast<X*>(vextraParams);
//shared memory space for storing intermediate results
__shared__ Z sPartials[CUDA_BLOCK_SIZE];
__shared__ int tadLen, numTads;
__shared__ bool sameOffsets;
// thread 0 caches shape metadata for the whole block
if (threadIdx.x == 0) {
sameOffsets = shape::haveSameShapeAndStrides(zShapeInfo, outerXTadShapeInfo);
tadLen = shape::length(innerXTadShapeInfo);
numTads = shape::length(outerXTadShapeInfo);
}
__syncthreads();
int coords[MAX_RANK];
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
// locate the r-th TAD in x and the matching output slot in z
shape::index2coords(r, outerXTadShapeInfo, coords);
const auto outerOffset = shape::getOffset(outerXTadShapeInfo, coords);
const auto zOffset = sameOffsets ? outerOffset : shape::getOffset(zShapeInfo, coords);
const X* xTad = x + outerOffset;
// each thread accumulates its strided slice of the TAD
sPartials[threadIdx.x] = OpType::startingValue(xTad);
for (int i = threadIdx.x; i < tadLen; i += blockDim.x)
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(xTad[shape::getIndexOffset(i, innerXTadShapeInfo)], extraParams), extraParams);
__syncthreads();
// aggregate. do NOT reduce for elements > tadLen
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, tadLen), extraParams);
__syncthreads();
if (threadIdx.x == 0)
z[zOffset] = OpType::postProcess(sPartials[threadIdx.x], tadLen, extraParams);
}
}
////////////////////////////////////////////////////////////////////////
// Full reduction of x into the scalar z[0]. Each block reduces its
// grid-stride slice; with multiple blocks, partials go to vreductionBuffer
// and the last block to finish (elected via the atomic ticket at tc[16384])
// performs the final combine. tadOnlyShapeInfo is unused here.
template <typename X, typename Z>
template <typename OpType>
__device__ void ReduceLongFunction<X,Z>::execScalarCuda(const void *vx, const Nd4jLong *xShapeInfo,
void *vextraParams,
void *vz, const Nd4jLong *zShapeInfo,
void *vreductionBuffer,
const Nd4jLong *tadOnlyShapeInfo) {
auto x = reinterpret_cast<const X*>(vx);
auto z = reinterpret_cast<Z*>(vz);
auto extraParams = reinterpret_cast<X*>(vextraParams);
auto reductionBuffer = reinterpret_cast<Z*>(vreductionBuffer);
auto tid = blockDim.x * blockIdx.x + threadIdx.x;
//shared memory space for storing intermediate results
__shared__ Z sPartials[CUDA_BLOCK_SIZE];
__shared__ Nd4jLong xEws;
__shared__ Nd4jLong len;
if(threadIdx.x == 0) {
xEws = shape::elementWiseStride(xShapeInfo);
len = shape::length(xShapeInfo);
}
__syncthreads();
sPartials[threadIdx.x] = OpType::startingValue(x);
// fast path: positive element-wise stride permits direct strided indexing
if (xEws > 0)
for (int i = tid; i < len; i += (blockDim.x * gridDim.x))
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[i * xEws], extraParams), extraParams);
else
for (int i = tid; i < len; i += blockDim.x * gridDim.x)
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[shape::getIndexOffset(i, xShapeInfo)], extraParams), extraParams);
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, len), extraParams);
__syncthreads();
if (gridDim.x > 1) {
// tc aliases the reduction buffer; slot 16384 is the ticket counter
auto tc = reinterpret_cast<unsigned int *>(reductionBuffer);
__shared__ bool amLast;
tid = threadIdx.x;  // NOTE(review): reassigned but not read afterwards
if (threadIdx.x == 0)
reductionBuffer[blockIdx.x] = sPartials[0];//this->postProcess(sPartials[0],len,extraParams);
// publish the partial before taking a ticket
__threadfence();
__syncthreads();
if (threadIdx.x == 0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x - 1);
}
__syncthreads();
// only the last block to arrive folds all per-block partials together
if (amLast) {
tc[16384] = 0;
sPartials[threadIdx.x] = OpType::startingValue(x);
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x)
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraParams);
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(gridDim.x, blockDim.x), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
z[0] = OpType::postProcess(sPartials[0], len, extraParams);
}
}
}
else {
// single block: the block partial is already the global result
if (threadIdx.x == 0) {
auto tc = reinterpret_cast<unsigned int*>(reductionBuffer);
tc[16384] = 0;
z[0] = OpType::postProcess(sPartials[0], len, extraParams);
}
}
}
////////////////////////////////////////////////////////////////////////
// Host wrapper for a dimension-wise reduction. Empty input: z is assigned
// the op's starting value via ScalarTransform. Otherwise dims is split into
// kept (outer, first zRank entries) and reduced (inner) dimensions and
// simpleReduce is launched on the stream.
template <typename X, typename Z>
template<typename OpType>
__host__ void ReduceLongFunction<X,Z>::intermediateXD(dim3 launchDims, cudaStream_t *stream,
const void *x, const Nd4jLong *dXShapeInfo, const Nd4jLong *hXShapeInfo,
void *extraParams, void *vreductionBuffer,
void *z, const Nd4jLong *dZShapeInfo, const Nd4jLong *hZShapeInfo, const int* dims) {
if(shape::isEmpty(hXShapeInfo)) {
if(shape::isEmpty(hZShapeInfo))
return;
// stage the starting value on the device via the context scratch scalar
const auto startingVal = static_cast<Z>(OpType::startingValue(reinterpret_cast<const X*>(x)));
auto res = cudaMemcpyAsync(sd::LaunchContext::defaultContext()->getScalarPointer(), &startingVal, sizeof(Z), cudaMemcpyHostToDevice, *stream);
if (res != 0)
throw sd::cuda_exception::build("ReduceLongFunction<X,Z>::intermediateXD: failed to copy temporary scalar", res);
auto ptr = sd::LaunchContext::defaultContext()->getScalarPointer();
// scalar assign
functions::scalar::ScalarTransform<Z, Z, Z>::executeCudaShaped(launchDims, stream, 14, z, dZShapeInfo, hXShapeInfo, z, dZShapeInfo, hZShapeInfo, ptr, nullptr);
}
else {
const int zRank = shape::rank(hZShapeInfo);
const int tadRank = shape::rank(hXShapeInfo) - zRank;
auto outerPack = sd::ConstantShapeHelper::getInstance().createSubArrShapeInfo(hXShapeInfo, dims, zRank);
auto innerPack = sd::ConstantShapeHelper::getInstance().createSubArrShapeInfo(hXShapeInfo, dims+zRank, tadRank);
simpleReduce<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, reinterpret_cast<Nd4jLong const*>(outerPack.special()), reinterpret_cast<Nd4jLong const*>(innerPack.special()), extraParams, vreductionBuffer, z, dZShapeInfo);
}
}
////////////////////////////////////////////////////////////////////////
// Host wrapper for a full (scalar) reduction. Empty input: the op's starting
// value is copied directly into the device output z; otherwise the
// simpleScalar kernel is launched on the stream.
template <typename X, typename Z>
template<typename OpType>
__host__ void ReduceLongFunction<X,Z>::intermediateScalar(dim3 launchDims, cudaStream_t *stream,
const void *x, const Nd4jLong *xShapeInfo, const Nd4jLong *hXShapeInfo,
void *extraParams,
void *z, const Nd4jLong *zShapeInfo, const Nd4jLong *hZShapeInfo,
int *dimension, int dimensionLength,
void *reductionBuffer,
const Nd4jLong *tadOnlyShapeInfo) {
if (shape::isEmpty(hXShapeInfo)) {
if (shape::isEmpty(hZShapeInfo))
return;
const auto startingVal = static_cast<Z>(OpType::startingValue(reinterpret_cast<const X*>(x)));
auto res = cudaMemcpyAsync(z, &startingVal, sizeof(Z), cudaMemcpyHostToDevice, *stream);
if (res != 0)
throw sd::cuda_exception::build("ReduceLongFunction<X,Z>::intermediateScalar: failed to copy resulting scalar", res);
}
else {
simpleScalar<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo);
}
}
////////////////////////////////////////////////////////////////////////
// Host dispatcher for full (scalar) long reductions: resolves the concrete
// OpType from opNum via DISPATCH_BY_OPNUM_TT, runs intermediateScalar on the
// given stream, then validates the stream's error state.
template <typename X, typename Y>
_CUDA_H void ReduceLongFunction<X,Y>::execReduceScalar(dim3 launchDims, cudaStream_t *stream,
const int opNum,
const void *x, const Nd4jLong *xShapeInfo, const Nd4jLong* hXShapeInfo,
void *extraParams,
void *z, const Nd4jLong *zShapeInfo, const Nd4jLong* hZShapeInfo,
int *dimension, int dimensionLength,
void *reductionBuffer,
const Nd4jLong *tadOnlyShapeInfo) {
DISPATCH_BY_OPNUM_TT(intermediateScalar, PARAMS(launchDims, stream, x, xShapeInfo, hXShapeInfo, extraParams, z, zShapeInfo, hZShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_LONG_OPS));
// fix: the tag previously read "execReduceScalarFloat", a copy-paste from
// the float variant; report this (long) function's name in diagnostics.
sd::DebugHelper::checkErrorCode(stream, "execReduceScalarLong(...) failed");
}
////////////////////////////////////////////////////////////////////////
// Host dispatcher for dimension-wise long reductions. A length-1 output is
// routed to the scalar path; otherwise intermediateXD is dispatched for the
// concrete OpType selected by opNum.
template <typename X, typename Y>
_CUDA_H void ReduceLongFunction<X,Y>::execReduceXD(dim3 launchDims, cudaStream_t *stream, const int opNum,
const void *x, const Nd4jLong *dXShapeInfo, const Nd4jLong *hXShapeInfo,
void *extraParams, void *vreductionBuffer,
void *z, const Nd4jLong *dZShapeInfo, const Nd4jLong *hZShapeInfo, const int *dims) {
if(shape::length(hZShapeInfo) == 1) {
// reducing everything to one element: reuse the scalar implementation
ReduceLongFunction<X,Y>::execReduceScalar(launchDims, stream, opNum, x, dXShapeInfo, hXShapeInfo, extraParams, z, dZShapeInfo, hZShapeInfo, nullptr, 0, vreductionBuffer, nullptr);
}
else {
DISPATCH_BY_OPNUM_TT(intermediateXD, PARAMS(launchDims, stream, x, dXShapeInfo, hXShapeInfo, extraParams, vreductionBuffer, z, dZShapeInfo, hZShapeInfo, dims), OPS_A(REDUCE_LONG_OPS));
}
DEBUG_KERNEL(stream, opNum);
}
////////////////////////////////////////////////////////////////////////
// Fill a scratch buffer with extraParams[0]. sMemSize is the buffer size in
// bytes; the calling thread writes all sMemSize / sizeof(X) elements
// reachable through *sPartials.
template <typename X>
__device__ void initializeShared(X *extraParams, X **sPartials, int sMemSize) {
int sPartialsLength = sMemSize / sizeof(X);
X *sPartialsDeref = (X *) *sPartials;
for (int i = 0; i < sPartialsLength; i++)
sPartialsDeref[i] = extraParams[0];
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT ReduceLongFunction, , LIBND4J_TYPES, LONG_TYPES);
}
}
|
5c30a58c4e447cb5f9fe6504e8b13051b7921d61.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "nodes/resize.h"
#include "core/common_cu.h"
// Nearest-neighbour resize (NCHW layout), one thread per element of Y.
// Each output element (n, c, h, w) copies the input element at spatial
// position (h / height_scale, w / width_scale), truncated to int and
// clamped to the input bounds.
__global__ void NearestNeighborKernel(
const int y_size,
const int num_channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const float height_scale,
const float width_scale,
const float* X,
float* Y) {
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= y_size)
        return;
    // decompose the flat output index into NCHW coordinates
    int rest = gid;
    const int w = rest % output_width;
    rest /= output_width;
    const int h = rest % output_height;
    rest /= output_height;
    const int c = rest % num_channels;
    rest /= num_channels;
    const int n = rest;
    // source coordinates: float division, then truncation via the int cast
    const int in_y = fminf(h / height_scale, input_height - 1);
    const int in_x = fminf(w / width_scale, input_width - 1);
    const int src = ((n * num_channels + c) * input_height + in_y) * input_width + in_x;
    Y[gid] = X[src];
}
// Gradient of the nearest-neighbour resize: one thread per element of dY,
// scatter-adding each dY element into the dX element it was copied from in
// the forward pass. dX must be zeroed before launch (atomicAdd accumulates).
// NOTE(review): at the call site (Resize::backward) the "input_*" parameters
// describe dY (the forward output tensor) and "output_*" describe dX (the
// forward input tensor) — i.e. the naming is relative to this kernel's own
// read/write direction, not to the forward pass.
__global__ void NearestNeighborGradientKernel(
const int dy_size,
const int num_channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const float height_scale,
const float width_scale,
const float* dY,
float* dX) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < dy_size) {
// decompose the flat dY index into NCHW coordinates
int indexTemp = i;
const int x = indexTemp % input_width;
indexTemp /= input_width;
const int y = indexTemp % input_height;
indexTemp /= input_height;
const int c = indexTemp % num_channels;
indexTemp /= num_channels;
const int n = indexTemp;
// same coordinate mapping as the forward gather, clamped to bounds
const int out_y = fminf(y / height_scale, output_height - 1);
const int out_x = fminf(x / width_scale, output_width - 1);
const int out_index =
((n * num_channels + c) * output_height + out_y) * output_width + out_x;
// __ldg routes the read through the read-only cache where available
#if __CUDA_ARCH__ >= 350
atomicAdd(dX + out_index, __ldg(dY + i));
#else
atomicAdd(dX + out_index, *(dY + i));
#endif
}
}
// Construct the node from its protobuf parameters; a NodeParam without a
// resize_param is a hard (fatal) configuration error.
Resize::Resize(deepflow::NodeParam *param) : Node(param) {
LOG_IF(FATAL, param->has_resize_param() == false) << "param.has_resize_param() == false";
}
// Read the scale factors and allocate the output value/diff tensors:
// output NCHW = (N, C, H * height_scale, W * width_scale).
void Resize::init() {
auto param = _param->resize_param();
m_height_scale = param.height_scale();
m_width_scale = param.width_scale();
auto input_dims = _inputs[0]->value()->dims();
// fractional products are truncated toward zero by the int casts
_outputs[0]->initValue({input_dims[0], input_dims[1], (int) (input_dims[2] * m_height_scale), (int) (input_dims[3] * m_width_scale) });
_outputs[0]->initDiff();
}
// Forward pass: launch the nearest-neighbour gather kernel, one thread per
// output element.
void Resize::forward() {
auto size = _outputs[0]->value()->size();
auto dims = _inputs[0]->value()->dims();
// note: dims[2] * m_height_scale / dims[3] * m_width_scale are float
// products narrowed to int by the kernel's int parameters
NearestNeighborKernel << < numOfBlocks(size), maxThreadsPerBlock >> > (
size,
dims[1],
dims[2],
dims[3],
dims[2] * m_height_scale,
dims[3] * m_width_scale,
m_height_scale,
m_width_scale,
_inputs[0]->value()->gpu_data(),
(float*)_outputs[0]->value()->gpu_data());
DF_KERNEL_CHECK();
}
// Backward pass: zero the input gradient, then scatter-add each output
// gradient element into its forward source position. Skipped entirely when
// the input does not require a gradient.
void Resize::backward() {
if (_inputs[0]->diff()) {
// one thread per element of the output gradient (dY)
auto size = _outputs[0]->value()->size();
auto output_dims = _outputs[0]->value()->dims();
auto input_dims = _inputs[0]->value()->dims();
// the kernel accumulates with atomicAdd, so dX must start at zero
DF_CUDA_CHECK(hipMemset(_inputs[0]->diff()->gpu_data(), 0, _inputs[0]->diff()->bytes()));
// the kernel's "input_*" params describe dY, "output_*" describe dX
NearestNeighborGradientKernel << < numOfBlocks(size), maxThreadsPerBlock >> > (
size,
output_dims[1],
output_dims[2],
output_dims[3],
input_dims[2],
input_dims[3],
m_height_scale,
m_width_scale,
_outputs[0]->diff()->gpu_data(),
(float*)_inputs[0]->diff()->gpu_data());
DF_KERNEL_CHECK();
}
}
std::string Resize::to_cpp() const
{
std::string cpp = "auto " + _name + " = df.resize(" + _input_name_for_cpp(0) + ", ";
cpp += std::to_string(m_height_scale) + ", ";
cpp += std::to_string(m_width_scale) + ", ";
cpp += "\"" + _name + "\");";
return cpp;
} | 5c30a58c4e447cb5f9fe6504e8b13051b7921d61.cu | #include "nodes/resize.h"
#include "core/common_cu.h"
// Nearest-neighbour resize (NCHW layout), one thread per element of Y.
// Each output element (n, c, h, w) copies the input element at spatial
// position (h / height_scale, w / width_scale), truncated to int and
// clamped to the input bounds.
__global__ void NearestNeighborKernel(
const int y_size,
const int num_channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const float height_scale,
const float width_scale,
const float* X,
float* Y) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < y_size) {
// decompose the flat output index into NCHW coordinates
int indexTemp = i;
const int w = indexTemp % output_width;
indexTemp /= output_width;
const int h = indexTemp % output_height;
indexTemp /= output_height;
const int c = indexTemp % num_channels;
indexTemp /= num_channels;
const int n = indexTemp;
// source coordinates: float division, truncated by the int assignment
const int in_y = fminf(h / height_scale, input_height - 1);
const int in_x = fminf(w / width_scale, input_width - 1);
Y[i] =
X[((n * num_channels + c) * input_height + in_y) * input_width + in_x];
}
}
// Gradient of the nearest-neighbour resize: one thread per element of dY,
// scatter-adding each dY element into the dX element it was gathered from.
// dX must be zeroed before launch (atomicAdd accumulates).
// NOTE(review): at the call site (Resize::backward) the "input_*" parameters
// describe dY (the forward output) and "output_*" describe dX (the forward
// input) — the naming follows this kernel's own read/write direction.
__global__ void NearestNeighborGradientKernel(
const int dy_size,
const int num_channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const float height_scale,
const float width_scale,
const float* dY,
float* dX) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < dy_size) {
// decompose the flat dY index into NCHW coordinates
int indexTemp = i;
const int x = indexTemp % input_width;
indexTemp /= input_width;
const int y = indexTemp % input_height;
indexTemp /= input_height;
const int c = indexTemp % num_channels;
indexTemp /= num_channels;
const int n = indexTemp;
// same coordinate mapping as the forward gather, clamped to bounds
const int out_y = fminf(y / height_scale, output_height - 1);
const int out_x = fminf(x / width_scale, output_width - 1);
const int out_index =
((n * num_channels + c) * output_height + out_y) * output_width + out_x;
// __ldg routes the read through the read-only cache on cc >= 3.5
#if __CUDA_ARCH__ >= 350
atomicAdd(dX + out_index, __ldg(dY + i));
#else
atomicAdd(dX + out_index, *(dY + i));
#endif
}
}
// Construct the node from its protobuf parameters; a NodeParam without a
// resize_param is a hard (fatal) configuration error.
Resize::Resize(deepflow::NodeParam *param) : Node(param) {
LOG_IF(FATAL, param->has_resize_param() == false) << "param.has_resize_param() == false";
}
// Read the scale factors and allocate the output value/diff tensors:
// output NCHW = (N, C, H * height_scale, W * width_scale).
void Resize::init() {
auto param = _param->resize_param();
m_height_scale = param.height_scale();
m_width_scale = param.width_scale();
auto input_dims = _inputs[0]->value()->dims();
// fractional products are truncated toward zero by the int casts
_outputs[0]->initValue({input_dims[0], input_dims[1], (int) (input_dims[2] * m_height_scale), (int) (input_dims[3] * m_width_scale) });
_outputs[0]->initDiff();
}
// Forward pass: launch the nearest-neighbour gather kernel, one thread per
// output element.
void Resize::forward() {
auto size = _outputs[0]->value()->size();
auto dims = _inputs[0]->value()->dims();
// note: dims[2] * m_height_scale / dims[3] * m_width_scale are float
// products narrowed to int by the kernel's int parameters
NearestNeighborKernel << < numOfBlocks(size), maxThreadsPerBlock >> > (
size,
dims[1],
dims[2],
dims[3],
dims[2] * m_height_scale,
dims[3] * m_width_scale,
m_height_scale,
m_width_scale,
_inputs[0]->value()->gpu_data(),
(float*)_outputs[0]->value()->gpu_data());
DF_KERNEL_CHECK();
}
// Backward pass: zero the input gradient, then scatter-add each output
// gradient element into its forward source position. Skipped entirely when
// the input does not require a gradient.
void Resize::backward() {
if (_inputs[0]->diff()) {
// one thread per element of the output gradient (dY)
auto size = _outputs[0]->value()->size();
auto output_dims = _outputs[0]->value()->dims();
auto input_dims = _inputs[0]->value()->dims();
// the kernel accumulates with atomicAdd, so dX must start at zero
DF_CUDA_CHECK(cudaMemset(_inputs[0]->diff()->gpu_data(), 0, _inputs[0]->diff()->bytes()));
// the kernel's "input_*" params describe dY, "output_*" describe dX
NearestNeighborGradientKernel << < numOfBlocks(size), maxThreadsPerBlock >> > (
size,
output_dims[1],
output_dims[2],
output_dims[3],
input_dims[2],
input_dims[3],
m_height_scale,
m_width_scale,
_outputs[0]->diff()->gpu_data(),
(float*)_inputs[0]->diff()->gpu_data());
DF_KERNEL_CHECK();
}
}
std::string Resize::to_cpp() const
{
std::string cpp = "auto " + _name + " = df.resize(" + _input_name_for_cpp(0) + ", ";
cpp += std::to_string(m_height_scale) + ", ";
cpp += std::to_string(m_width_scale) + ", ";
cpp += "\"" + _name + "\");";
return cpp;
} |
98b9db4227a19b409949632abfad0e2ff76cbe9d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file csv-reader.cu code to read csv data
*
* CSV Reader
*/
#include <hip/hip_runtime.h>
#include <iostream>
#include <vector>
#include <string>
#include <stdio.h>
#include <iostream>
#include <iomanip>
#include <vector>
#include <unordered_map>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <thrust/scan.h>
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/host_vector.h>
#include "type_conversion.cuh"
#include "date-time-parser.cuh"
#include <gdf/gdf.h>
#include <gdf/errorutils.h>
#include <gdf/gdf_io.h>
#include <rmm.h>
#include "NVStrings.h"
constexpr int32_t HASH_SEED = 33;
using namespace std;
//-- Internal working state for one CSV parse: the raw device buffer, record
//-- bookkeeping, and per-column parsing metadata shared by the kernels below.
typedef struct raw_csv_ {
char * data; // on-device: the raw unprocessed CSV data - loaded as a large char * array
unsigned long long* d_num_records; // on-device: Number of records.
unsigned long long* recStart; // on-device: Starting position of the records.
char delimiter; // host: the delimiter
char terminator; // host: the line terminator
long num_bytes; // host: the number of bytes in the data
long num_bits; // host: the number of 64-bit bitmaps (different than valid)
unsigned long long num_records; // host: number of records (per column)
// int num_cols; // host: number of columns
int num_active_cols; // host: number of columns that will be return to user.
int num_actual_cols; // host: number of columns in the file --- based on the number of columns in header
vector<gdf_dtype> dtypes; // host: array of dtypes (since gdf_columns are not created until end)
vector<string> col_names; // host: array of column names
bool* h_parseCol; // host : array of booleans stating if column should be parsed in reading process: parseCol[x]=false means that the column x needs to be filtered out.
bool* d_parseCol; // device : array of booleans stating if column should be parsed in reading process: parseCol[x]=false means that the column x needs to be filtered out.
long header_row; // Row id of the header
bool dayfirst; // host: day-first date parsing flag (forwarded to the conversion kernel)
} raw_csv_t;
// Per-column tally of how many fields parse as each candidate type; filled
// on-device by dataTypeDetection and used to pick each column's gdf_dtype.
typedef struct column_data_{
unsigned long long countFloat; // fields that look like floating point
unsigned long long countDateAndTime; // fields that look like dates/timestamps
unsigned long long countString; // fields that only fit a string column
unsigned long long countInt8; // integer fields fitting 8 bits
unsigned long long countInt16; // integer fields fitting 16 bits
unsigned long long countInt32; // integer fields fitting 32 bits
unsigned long long countInt64; // integer fields fitting 64 bits
unsigned long long countNULL; // empty/missing fields
}column_data_t;
// (pointer into the raw buffer, length) pair used to hand string fields to NVStrings.
using string_pair = std::pair<const char*,size_t>;
//
//---------------create and process ---------------------------------------------
//
// Host-side helper prototypes.
gdf_error parseArguments(csv_read_arg *args, raw_csv_t *csv);
// gdf_error getColNamesAndTypes(const char **col_names, const char **dtypes, raw_csv_t *d);
gdf_error updateRawCsv( const char * data, long num_bytes, raw_csv_t * csvData );
gdf_error allocateGdfDataSpace(gdf_column *);
gdf_dtype convertStringToDtype(std::string &dtype);
// Print the error and return it from the enclosing function. NOTE(review): as a
// bare if-statement macro this is not brace-safe in unbraced else branches.
#define checkError(error, txt) if ( error != GDF_SUCCESS) { cerr << "ERROR: " << error << " in " << txt << endl; return error; }
//
//---------------CUDA Kernel ---------------------------------------------
//
// Kernel launch wrappers and the kernels themselves.
__device__ int findSetBit(int tid, long num_bits, uint64_t *f_bits, int x);
gdf_error launch_countRecords(raw_csv_t * csvData);
gdf_error launch_storeRecordStart(raw_csv_t * csvData);
gdf_error launch_dataConvertColumns(raw_csv_t * raw_csv, void** d_gdf, gdf_valid_type** valid, gdf_dtype* d_dtypes, string_pair **str_cols, long row_offset, unsigned long long *);
gdf_error launch_dataTypeDetection(raw_csv_t * raw_csv, long row_offset, column_data_t* d_columnData);
__global__ void countRecords(char *data, const char delim, const char terminator, long num_bytes, long num_bits, unsigned long long* num_records);
__global__ void storeRecordStart(char *data, const char delim, const char terminator, long num_bytes, long num_bits, unsigned long long* num_records,unsigned long long* recStart) ;
__global__ void convertCsvToGdf(char *csv,char delim, char terminator, unsigned long long num_records, int num_columns,bool *parseCol,unsigned long long *recStart,gdf_dtype *dtype,void **gdf_data,gdf_valid_type **valid,string_pair **str_cols,unsigned long long row_offset, long header_row,bool dayfirst,unsigned long long *num_valid);
__global__ void dataTypeDetection( char *raw_csv, char delim, char terminator, unsigned long long num_records, int num_columns, bool *parseCol, unsigned long long *recStart, unsigned long long row_offset, long header_row, column_data_t* d_columnData);
//
//---------------CUDA Valid (8 blocks of 8-bits) Bitmap Kernels ---------------------------------------------
//
// Index of the gdf_valid_type byte that holds the validity bit for this record
// (8 validity bits per byte).
__device__ int whichBitmap(int record)
{
	return record / 8;
}
// Position (0..7) of the record's validity bit within its bitmap byte.
__device__ int whichBit(int bit)
{
	return bit % 8;
}
// Atomically OR `val` into the byte at `address`. Hardware atomicOr operates on
// aligned 32-bit words, so: round the address down to its 4-byte-aligned base,
// shift the byte value into the byte's lane within that word, and OR the word.
// NOTE(review): this reads/writes the full 4-byte window containing `address`;
// allocateGdfDataSpace over-allocates the valid bitmap so the window stays in bounds.
__inline__ __device__ void validAtomicOR(gdf_valid_type* address, gdf_valid_type val)
{
int32_t *base_address = (int32_t*)((gdf_valid_type*)address - ((size_t)address & 3));
int32_t int_val = (int32_t)val << (((size_t) address & 3) * 8);
atomicOr(base_address, int_val);
}
// Atomically set bit `bit` (0..7) in the validity byte at `address`.
__device__ void setBit(gdf_valid_type* address, int bit) {
	validAtomicOR(address, (gdf_valid_type)(1 << bit));
}
// Human-readable name for a gdf_dtype. Unrecognized types fall through to
// "long" (kept for backward compatibility with existing callers).
std::string stringType(gdf_dtype dt)
{
	switch (dt) {
		case GDF_STRING:   return "str";
		case GDF_DATE64:   return "date64";
		case GDF_CATEGORY: return "category";
		case GDF_FLOAT64:  return "float64";
		case GDF_INT8:     return "int8";
		case GDF_INT16:    return "int16";
		case GDF_INT32:    return "int32";
		case GDF_INT64:    return "int64";
		default:           return "long";
	}
}
/**
* @brief read in a CSV file
*
* Read in a CSV file, extract all fields, and return a GDF (array of gdf_columns)
*
* @param[in and out] args the input arguments, but this also contains the returned data
*
* Arguments:
*
* Required Arguments
* file_path - file location to read from - currently the file cannot be compressed
* num_cols - number of columns in the names and dtype arrays
* names - ordered List of column names, this is a required field
* dtype - ordered List of data types, this is required
*
* Optional
* lineterminator - define the line terminator character. Default is '\n'
* delimiter - define the field separator, default is ',' This argument is also called 'sep'
* delim_whitespace - use white space as the delimiter - default is false. This overrides the delimiter argument
* skipinitialspace - skip white spaces after the delimiter - default is false
*
* skiprows - number of rows at the start of the files to skip, default is 0
* skipfooter - number of rows at the bottom of the file to skip - default is 0
*
* dayfirst - is the first value the day? DD/MM versus MM/DD
*
*
* Output
* num_cols_out - Out: return the number of columns read in
* num_rows_out - Out: return the number of rows read in
* gdf_column **data - Out: return the array of *gdf_columns
*
*
* @return gdf_error
*
*/
// Top-level CSV reader. Phases: (1) fill in raw_csv_t from args, (2) mmap the
// file, (3) copy it to the device and count/locate records, (4) determine column
// names and which columns to parse, (5) auto-detect or parse dtypes, (6) allocate
// output gdf_columns, (7) run the conversion kernel, (8) finalize string columns
// and null counts, (9) release temporaries and return results through args.
gdf_error read_csv(csv_read_arg *args)
{
gdf_error error = gdf_error::GDF_SUCCESS;
//-----------------------------------------------------------------------------
// create the CSV data structure - this will be filled in as the CSV data is processed.
// Done first to validate data types
raw_csv_t * raw_csv = new raw_csv_t;
// error = parseArguments(args, raw_csv);
raw_csv->num_actual_cols = args->num_cols;
raw_csv->num_active_cols = args->num_cols;
raw_csv->num_records = 0;
// delim_whitespace overrides any explicit delimiter
if ( args->delim_whitespace == true) {
raw_csv->delimiter = ' ';
} else {
raw_csv->delimiter = args->delimiter;
}
if(args->windowslinetermination)
raw_csv->terminator = '\n';
else
raw_csv->terminator = args->lineterminator;
raw_csv->dayfirst = args->dayfirst;
//-----------------------------------------------------------------------------
// memory map in the data
void * map_data = NULL;
struct stat st;
int fd;
fd = open(args->file_path, O_RDONLY );
if (fd < 0) { close(fd); checkError(GDF_FILE_ERROR, "Error opening file"); }
if (fstat(fd, &st)) { close(fd); checkError(GDF_FILE_ERROR, "cannot stat file"); }
raw_csv->num_bytes = st.st_size;
map_data = mmap(0, raw_csv->num_bytes, PROT_READ, MAP_PRIVATE, fd, 0);
if (map_data == MAP_FAILED || raw_csv->num_bytes==0) { close(fd); checkError(GDF_C_ERROR, "Error mapping file"); }
//-----------------------------------------------------------------------------
//--- create a structure to hold variables used to parse the CSV data
// copies the mapped file into device memory and allocates the record counter
error = updateRawCsv( (const char *)map_data, (long)raw_csv->num_bytes, raw_csv );
checkError(error, "call to createRawCsv");
//-----------------------------------------------------------------------------
// find the record and fields points (in bitmaps)
hipDeviceSynchronize();
error = launch_countRecords(raw_csv);
checkError(error, "call to record counter");
//-----------------------------------------------------------------------------
//-- Allocate space to hold the record starting point
// +1 so that record i is always delimited by recStart[i]..recStart[i+1]
RMM_TRY( rmmAlloc((void**)&(raw_csv->recStart), (sizeof(unsigned long long) * (raw_csv->num_records + 1)), 0) );
CUDA_TRY( hipMemset(raw_csv->d_num_records, 0, (sizeof(unsigned long long) )) ) ;
//-----------------------------------------------------------------------------
//-- Scan data and set the starting positions
error = launch_storeRecordStart(raw_csv);
checkError(error, "call to record initial position store");
hipDeviceSynchronize();
// the kernel writes offsets in arbitrary order; sort so they are ascending
thrust::sort(thrust::device,raw_csv->recStart, raw_csv->recStart + raw_csv->num_records + 1);
//-----------------------------------------------------------------------------
//-- Acquire header row of
int h_num_cols=0, h_dup_cols_removed=0;
int skip_header=0;
// Check if the user gave us a list of column names
if(args->names==NULL){
// Getting the first row of data from the file. We will parse the data to find lineterminator as
// well as the column delimiter.
char* cmap_data = (char *)map_data;
unsigned long long c=0;
raw_csv->header_row=0;
if (args->header>=0){
raw_csv->header_row = args->header;
}
if(raw_csv->header_row > (long)raw_csv->num_records){
checkError(GDF_FILE_ERROR, "Number of records is smaller than the id of the specified header row");
}
// fetch the byte range [start, stop) of the header row from the device offsets
unsigned long long headerPositions[2];
CUDA_TRY( hipMemcpy(headerPositions,raw_csv->recStart + raw_csv->header_row, sizeof(unsigned long long)*2, hipMemcpyDeviceToHost));
unsigned long long start = headerPositions[0];
unsigned long long stop = headerPositions[1];
// first pass: count the columns by counting delimiters up to the line end
// NOTE(review): compares against args->lineterminator, not raw_csv->terminator - confirm intended for windowslinetermination
c=start;
while(c<stop){
if (cmap_data[c]==args->lineterminator){
h_num_cols++;
break;
}
else if(cmap_data[c] == '\r' && (c+1L)<(unsigned long long)raw_csv->num_bytes && cmap_data[c+1] == '\n'){
h_num_cols++;
break;
}else if (cmap_data[c]==args->delimiter)
h_num_cols++;
c++;
}
unsigned long long prev=0;
c=start;
raw_csv->col_names.clear();
if(args->header>=0){
h_num_cols=0;
// Storing the names of the columns into a vector of strings
while(c<=stop){
if (cmap_data[c]==args->delimiter || cmap_data[c]==args->lineterminator){
std::string colName(cmap_data +prev,c-prev );
prev=c+1;
raw_csv->col_names.push_back(colName);
h_num_cols++;
}
c++;
}
skip_header=1;
}else{
// no header row: synthesize names "0", "1", ...
for (int i = 0; i<h_num_cols; i++){
std::string newColName = std::to_string(i);
raw_csv->col_names.push_back(newColName);
}
}
// Allocating a boolean array that will use to state if a column needs to read or filtered.
raw_csv->h_parseCol = (bool*)malloc(sizeof(bool) * (h_num_cols));
RMM_TRY( rmmAlloc ((void**)&raw_csv->d_parseCol,(sizeof(bool) * (h_num_cols)),0 ) );
for (int i = 0; i<h_num_cols; i++)
raw_csv->h_parseCol[i]=true;
// Looking for duplicates
for (auto it = raw_csv->col_names.begin(); it != raw_csv->col_names.end(); it++){
bool found_dupe = false;
for (auto it2 = (it+1); it2 != raw_csv->col_names.end(); it2++){
if (*it==*it2){
found_dupe=true;
break;
}
}
if(found_dupe){
int count=1;
for (auto it2 = (it+1); it2 != raw_csv->col_names.end(); it2++){
if (*it==*it2){
if(args->mangle_dupe_cols){
// Replace all the duplicates of column X with X.1,X.2,... First appearance stays as X.
std::string newColName = *it2;
newColName += "." + std::to_string(count);
count++;
*it2 = newColName;
} else{
// All duplicate fields will be ignored.
int pos=std::distance(raw_csv->col_names.begin(), it2);
raw_csv->h_parseCol[pos]=false;
h_dup_cols_removed++;
}
}
}
}
}
raw_csv->num_actual_cols = h_num_cols; // Actual number of columns in the CSV file
raw_csv->num_active_cols = h_num_cols-h_dup_cols_removed; // Number of fields that need to be processed based on duplication of fields
CUDA_TRY(hipMemcpy(raw_csv->d_parseCol, raw_csv->h_parseCol, sizeof(bool) * (h_num_cols), hipMemcpyHostToDevice));
}
else {
// user supplied the column names; parse every column by default
raw_csv->h_parseCol = (bool*)malloc(sizeof(bool) * (args->num_cols));
RMM_TRY( rmmAlloc ((void**)&raw_csv->d_parseCol,(sizeof(bool) * (args->num_cols)),0 ) );
for (int i = 0; i<raw_csv->num_actual_cols; i++){
raw_csv->h_parseCol[i]=true;
std::string col_name = args->names[i];
raw_csv->col_names.push_back(col_name);
}
CUDA_TRY(hipMemcpy(raw_csv->d_parseCol, raw_csv->h_parseCol, sizeof(bool) * (args->num_cols), hipMemcpyHostToDevice));
}
// restrict parsing to the user-requested subset of columns (by index or by name)
if (args->use_cols_int!=NULL || args->use_cols_char!=NULL){
if(args->use_cols_int!=NULL){
for (int i = 0; i<raw_csv->num_actual_cols; i++)
raw_csv->h_parseCol[i]=false;
for(int i=0; i < args->use_cols_int_len; i++){
int pos = args->use_cols_int[i];
raw_csv->h_parseCol[pos]=true;
}
raw_csv->num_active_cols = args->use_cols_int_len;
}else{
for (int i = 0; i<raw_csv->num_actual_cols; i++)
raw_csv->h_parseCol[i]=false;
int countFound=0;
for(int i=0; i < args->use_cols_char_len; i++){
std::string colName(args->use_cols_char[i]);
for (auto it = raw_csv->col_names.begin(); it != raw_csv->col_names.end(); it++){
if(colName==*it){
countFound++;
int pos=std::distance(raw_csv->col_names.begin(), it);
raw_csv->h_parseCol[pos]=true;
break;
}
}
}
raw_csv->num_active_cols = countFound;
}
CUDA_TRY(hipMemcpy(raw_csv->d_parseCol, raw_csv->h_parseCol, sizeof(bool) * (raw_csv->num_actual_cols), hipMemcpyHostToDevice));
}
// drop skipped rows (and the header row, if one was consumed) from the record count
raw_csv->num_records -= (args->skiprows + args->skipfooter);
if(skip_header==0){
raw_csv->header_row=-1;
}else{
raw_csv->num_records-=1;
}
//-----------------------------------------------------------------------------
//--- done with host data
close(fd);
munmap(map_data, raw_csv->num_bytes);
//-----------------------------------------------------------------------------
//--- Auto detect types of the vectors
// if(args->dtype==NULL){
// NOTE(review): auto-detection keys off args->names==NULL, not args->dtype==NULL - confirm intended
if(args->names==NULL){
column_data_t *d_ColumnData,*h_ColumnData;
h_ColumnData = (column_data_t*)malloc(sizeof(column_data_t) * (raw_csv->num_active_cols));
RMM_TRY( rmmAlloc ((void**)&d_ColumnData,(sizeof(column_data_t) * (raw_csv->num_active_cols)),0 ) );
CUDA_TRY( hipMemset(d_ColumnData, 0, (sizeof(column_data_t) * (raw_csv->num_active_cols)) ) ) ;
launch_dataTypeDetection(raw_csv, args->skiprows, d_ColumnData);
CUDA_TRY( hipMemcpy(h_ColumnData,d_ColumnData, sizeof(column_data_t) * (raw_csv->num_active_cols), hipMemcpyDeviceToHost));
vector<gdf_dtype> d_detectedTypes; // host: array of dtypes (since gdf_columns are not created until end)
raw_csv->dtypes.clear();
for(int col = 0; col < raw_csv->num_active_cols; col++){
unsigned long long countInt = h_ColumnData[col].countInt8+h_ColumnData[col].countInt16+
h_ColumnData[col].countInt32+h_ColumnData[col].countInt64;
if (h_ColumnData[col].countNULL == raw_csv->num_records){
d_detectedTypes.push_back(GDF_INT8); // Entire column is NULL. Allocating the smallest amount of memory
} else if(h_ColumnData[col].countString>0L){
d_detectedTypes.push_back(GDF_CATEGORY); // For auto-detection, we are currently not supporting strings.
} else if(h_ColumnData[col].countDateAndTime>0L){
d_detectedTypes.push_back(GDF_DATE64);
} else if(h_ColumnData[col].countFloat > 0L ||
(h_ColumnData[col].countFloat==0L && countInt >0L && h_ColumnData[col].countNULL >0L) ) {
// The second condition has been added to conform to PANDAS which states that a colum of
// integers with a single NULL record need to be treated as floats.
d_detectedTypes.push_back(GDF_FLOAT64);
}
else {
d_detectedTypes.push_back(GDF_INT64);
}
}
raw_csv->dtypes=d_detectedTypes;
free(h_ColumnData);
RMM_TRY( rmmFree ( d_ColumnData, 0 ) );
}
else{
// dtypes supplied by the caller, as strings, one per actual column
for ( int x = 0; x < raw_csv->num_actual_cols; x++) {
std::string temp_type = args->dtype[x];
gdf_dtype col_dtype = convertStringToDtype( temp_type );
if (col_dtype == GDF_invalid)
return GDF_UNSUPPORTED_DTYPE;
raw_csv->dtypes.push_back(col_dtype);
}
}
//-----------------------------------------------------------------------------
//--- allocate space for the results
gdf_column **cols = (gdf_column **)malloc( sizeof(gdf_column *) * raw_csv->num_active_cols);
void **d_data,**h_data;
gdf_valid_type **d_valid,**h_valid;
unsigned long long *d_valid_count,*h_valid_count;
gdf_dtype *d_dtypes,*h_dtypes;
h_dtypes = (gdf_dtype*)malloc ( sizeof(gdf_dtype)* (raw_csv->num_active_cols));
h_valid_count = (unsigned long long*)malloc ( sizeof(unsigned long long)* (raw_csv->num_active_cols));
h_data = (void**)malloc ( sizeof(void*)* (raw_csv->num_active_cols));
h_valid = (gdf_valid_type**)malloc ( sizeof(gdf_valid_type*)* (raw_csv->num_active_cols));
RMM_TRY( rmmAlloc ((void**)&d_dtypes, (sizeof(gdf_dtype) * raw_csv->num_active_cols), 0 ) );
RMM_TRY( rmmAlloc ((void**)&d_data, (sizeof(void *) * raw_csv->num_active_cols), 0 ) );
RMM_TRY( rmmAlloc ((void**)&d_valid, (sizeof(gdf_valid_type *) * raw_csv->num_active_cols), 0 ) );
RMM_TRY( rmmAlloc ((void**)&d_valid_count, (sizeof(unsigned long long) * raw_csv->num_active_cols), 0 ) );
CUDA_TRY( hipMemset(d_valid_count, 0, (sizeof(unsigned long long) * raw_csv->num_active_cols)) );
// string columns get a (ptr,len) staging array instead of fixed-width device data
int stringColCount=0;
for (int col = 0; col < raw_csv->num_active_cols; col++) {
if(raw_csv->dtypes[col]==gdf_dtype::GDF_STRING)
stringColCount++;
}
string_pair **h_str_cols = NULL, **d_str_cols = NULL;
if (stringColCount > 0 ) {
h_str_cols = (string_pair**) malloc ((sizeof(string_pair *) * stringColCount));
RMM_TRY( rmmAlloc ((void**)&d_str_cols, (sizeof(string_pair *) * stringColCount), 0) );
for (int col = 0; col < stringColCount; col++) {
RMM_TRY( rmmAlloc ((void**)(h_str_cols + col), sizeof(string_pair) * (raw_csv->num_records), 0) );
}
CUDA_TRY(hipMemcpy(d_str_cols, h_str_cols, sizeof(string_pair *) * stringColCount, hipMemcpyHostToDevice));
}
// build one gdf_column per active column and collect its device pointers
for (int col = 0; col < raw_csv->num_active_cols; col++) {
gdf_column *gdf = (gdf_column *)malloc(sizeof(gdf_column) * 1);
gdf->size = raw_csv->num_records;
gdf->dtype = raw_csv->dtypes[col];
gdf->null_count = 0; // will be filled in later
//--- column name
std::string str = raw_csv->col_names[col];
int len = str.length() + 1;
gdf->col_name = (char *)malloc(sizeof(char) * len);
memcpy(gdf->col_name, str.c_str(), len);
gdf->col_name[len -1] = '\0';
allocateGdfDataSpace(gdf);
cols[col] = gdf;
h_dtypes[col] = raw_csv->dtypes[col];
h_data[col] = gdf->data;
h_valid[col] = gdf->valid;
}
CUDA_TRY( hipMemcpy(d_dtypes,h_dtypes, sizeof(gdf_dtype) * (raw_csv->num_active_cols), hipMemcpyHostToDevice));
CUDA_TRY( hipMemcpy(d_data,h_data, sizeof(void*) * (raw_csv->num_active_cols), hipMemcpyHostToDevice));
CUDA_TRY( hipMemcpy(d_valid,h_valid, sizeof(gdf_valid_type*) * (raw_csv->num_active_cols), hipMemcpyHostToDevice));
free(h_dtypes);
free(h_valid);
free(h_data);
// convert all fields into the typed device buffers
launch_dataConvertColumns(raw_csv,d_data, d_valid, d_dtypes,d_str_cols, args->skiprows, d_valid_count);
hipDeviceSynchronize();
// build NVStrings objects from the (ptr,len) pairs gathered by the kernel
stringColCount=0;
for (int col = 0; col < raw_csv->num_active_cols; col++) {
gdf_column *gdf = cols[col];
if (gdf->dtype != gdf_dtype::GDF_STRING)
continue;
gdf->data = (void*)NVStrings::create_from_index(h_str_cols[stringColCount],size_t(raw_csv->num_records));
RMM_TRY( rmmFree ( h_str_cols [stringColCount], 0 ) );
stringColCount++;
}
CUDA_TRY( hipMemcpy(h_valid_count,d_valid_count, sizeof(unsigned long long) * (raw_csv->num_active_cols), hipMemcpyDeviceToHost));
//--- set the null count
for ( int col = 0; col < raw_csv->num_active_cols; col++) {
cols[col]->null_count = raw_csv->num_records - h_valid_count[col];
}
free(h_valid_count);
// free up space that is no longer needed
if (h_str_cols != NULL)
free ( h_str_cols);
free(raw_csv->h_parseCol);
if (d_str_cols != NULL)
RMM_TRY( rmmFree ( d_str_cols, 0 ) );
RMM_TRY( rmmFree ( d_valid, 0 ) );
RMM_TRY( rmmFree ( d_valid_count, 0 ) );
RMM_TRY( rmmFree ( d_dtypes, 0 ) );
RMM_TRY( rmmFree ( d_data, 0 ) );
RMM_TRY( rmmFree ( raw_csv->recStart, 0 ) );
RMM_TRY( rmmFree ( raw_csv->d_parseCol, 0 ) );
RMM_TRY( rmmFree ( raw_csv->d_num_records, 0 ) );
CUDA_TRY( hipFree ( raw_csv->data) );
// hand the results back through the argument struct
args->data = cols;
args->num_cols_out = raw_csv->num_active_cols;
args->num_rows_out = raw_csv->num_records;
delete raw_csv;
return error;
}
/*
* What is passed in is the data type as a string, need to convert that into gdf_dtype enum
*/
/*
 * Map a user-supplied dtype name to its gdf_dtype enum value.
 * Returns GDF_invalid for any name not in the table.
 */
gdf_dtype convertStringToDtype(std::string &dtype) {
	static const struct {
		const char* name;
		gdf_dtype type;
	} kTypeTable[] = {
		{ "str",       GDF_STRING    },
		{ "date",      GDF_DATE64    },
		{ "date32",    GDF_DATE32    },
		{ "date64",    GDF_DATE64    },
		{ "timestamp", GDF_TIMESTAMP },
		{ "category",  GDF_CATEGORY  },
		{ "float",     GDF_FLOAT32   },
		{ "float32",   GDF_FLOAT32   },
		{ "float64",   GDF_FLOAT64   },
		{ "double",    GDF_FLOAT64   },
		{ "short",     GDF_INT16     },
		{ "int",       GDF_INT32     },
		{ "int32",     GDF_INT32     },
		{ "int64",     GDF_INT64     },
		{ "long",      GDF_INT64     },
	};
	for (const auto& entry : kTypeTable) {
		if (dtype == entry.name)
			return entry.type;
	}
	return GDF_invalid;
}
/*
* Create the raw_csv_t structure and allocate space on the GPU
*/
/*
 * Allocate the device-side buffers of the raw_csv_t structure and copy the raw
 * file contents onto the GPU.
 *
 * @param[in]  data      host pointer to the (memory-mapped) file contents
 * @param[in]  num_bytes size of the file in bytes
 * @param[out] raw       structure whose data / d_num_records / num_bits are filled in
 *
 * @return GDF_SUCCESS, or the first allocation/copy error via the TRY macros
 */
gdf_error updateRawCsv( const char * data, long num_bytes, raw_csv_t * raw ) {
	// number of 64-byte chunks the kernels will iterate over (ceil division);
	// 'long' to avoid overflow for very large files
	long num_bits = (num_bytes + 63) / 64;
	CUDA_TRY( hipMallocManaged ((void**)&raw->data, (sizeof(char) * num_bytes)));
	// RMM_TRY( rmmAlloc ((void**)&raw->data, (sizeof(char) * num_bytes),0 ));
	RMM_TRY( rmmAlloc((void**)&raw->d_num_records, sizeof(unsigned long long),0) );
	CUDA_TRY( hipMemcpy(raw->data, data, num_bytes, hipMemcpyHostToDevice));
	// zero the full counter - it is an unsigned long long, so clear exactly that
	// many bytes (the old sizeof(long) was too small on LLP64 platforms)
	CUDA_TRY( hipMemset(raw->d_num_records, 0, sizeof(unsigned long long)) );
	raw->num_bits = num_bits;
	return GDF_SUCCESS;
}
/*
 * For each of the gdf_columns, create the on-device space. The on-host fields should already be filled in.
 */
/*
 * For each of the gdf_columns, create the on-device space. The on-host fields should already be filled in.
 */
gdf_error allocateGdfDataSpace(gdf_column *gdf) {
long N = gdf->size;
// ceil(N/8) validity bytes plus up to 3 bytes of slack; the slack keeps the
// 4-byte atomicOr window used by validAtomicOR inside the allocation even when
// the last validity byte is not 4-byte aligned (NOTE(review): presumed intent -
// the old "8 bytes per bitmap" comment did not match the arithmetic)
long num_bitmaps = (N + 31) / 8; // 8 bytes per bitmap
//--- allocate space for the valid bitmaps
RMM_TRY( rmmAlloc((void**)&(gdf->valid), (sizeof(gdf_valid_type) * num_bitmaps), 0) );
// all bits start as 0 (null); the conversion kernel sets bits for valid fields
CUDA_TRY(hipMemset(gdf->valid, 0, (sizeof(gdf_valid_type) * num_bitmaps)) );
int elementSize=0;
//--- Allocate space for the data
switch(gdf->dtype) {
case gdf_dtype::GDF_INT8:
elementSize = sizeof(int8_t);
break;
case gdf_dtype::GDF_INT16:
elementSize = sizeof(int16_t);
break;
case gdf_dtype::GDF_INT32:
elementSize = sizeof(int32_t);
break;
case gdf_dtype::GDF_INT64:
elementSize = sizeof(int64_t);
break;
case gdf_dtype::GDF_FLOAT32:
elementSize = sizeof(float);
break;
case gdf_dtype::GDF_FLOAT64:
elementSize = sizeof(double);
break;
case gdf_dtype::GDF_DATE32:
elementSize = sizeof(gdf_date32);
break;
case gdf_dtype::GDF_DATE64:
elementSize = sizeof(gdf_date64);
break;
case gdf_dtype::GDF_TIMESTAMP:
elementSize = sizeof(int64_t);
break;
case gdf_dtype::GDF_CATEGORY:
elementSize = sizeof(gdf_category);
break;
case gdf_dtype::GDF_STRING:
return gdf_error::GDF_SUCCESS;
// Memory for gdf->data allocated by string class eventually
default:
return GDF_UNSUPPORTED_DTYPE;
}
RMM_TRY( rmmAlloc((void**)&(gdf->data), (elementSize * N), 0) );
return gdf_error::GDF_SUCCESS;
}
//----------------------------------------------------------------------------------------------------------------
// CUDA Kernels
//----------------------------------------------------------------------------------------------------------------
/*
 * Launch the countRecords kernel and read back the resulting record count.
 *
 * Grid layout: one thread per 64-byte chunk of the input (csvData->num_bits
 * chunks total), 1024 threads per block.
 *
 * @param[in,out] csvData  num_records is set from the device counter
 * @return GDF_SUCCESS, or the first CUDA error via the TRY macros
 */
gdf_error launch_countRecords(raw_csv_t * csvData) {
	char *data = csvData->data;
	long num_bytes = csvData->num_bytes;
	long numBitmaps = csvData->num_bits;
	char delim = csvData->delimiter;
	unsigned long long *d_num_records = csvData->d_num_records;
	char terminator = csvData->terminator;
	/*
	 * Each bitmap is for a 64-byte chunk,
	 *
	 * Note: could do one thread per byte, but that would require a lock on the bit map
	 *
	 */
	int64_t threads = 1024;
	// Using the number of bitmaps as the size - data index is bitmap ID * 64
	int64_t blocks = (numBitmaps + (threads -1)) / threads ;
	hipLaunchKernelGGL(( countRecords) , dim3(blocks), dim3(threads) , 0, 0, data, delim, terminator, num_bytes, numBitmaps, d_num_records);
	CUDA_TRY(hipGetLastError());
	// copy the counter back with its real type/size; the old 'long recs' +
	// sizeof(long) copy silently truncated on platforms where long is 32-bit
	unsigned long long recs = 0;
	CUDA_TRY(hipMemcpy(&recs, d_num_records, sizeof(unsigned long long), hipMemcpyDeviceToHost));
	csvData->num_records = recs;
	return GDF_SUCCESS;
}
/*
 * Kernel: count the number of line terminators in the file.
 *
 * Grid layout: one thread per 64-byte chunk (num_bits chunks); each thread
 * scans its chunk and atomically adds its local count to *num_records.
 * Both the plain terminator character and the two-byte "\r\n" sequence end a
 * record; for "\r\n" the '\n' is skipped so the pair counts once.
 * NOTE(review): a "\r\n" pair straddling a 64-byte chunk boundary is still
 * seen by two threads (pre-existing limitation, unchanged here).
 */
__global__ void countRecords(char *data, const char delim, const char terminator, long num_bytes, long num_bits, unsigned long long* num_records) {
	// thread IDs range per block, so also need the block id
	long tid = threadIdx.x + (blockDim.x * blockIdx.x);
	if ( tid >= num_bits)
		return;
	// data ID is a multiple of 64
	long did = tid * 64L;
	char *raw = (data + did);
	// last chunk may be shorter than 64 bytes
	long byteToProcess = ((did + 64L) < num_bytes) ? 64L : (num_bytes - did);
	// process the data
	long x = 0;
	long newLinesFound=0;
	for (x = 0; x < byteToProcess; x++) {
		// records
		if (raw[x] == terminator) {
			newLinesFound++;
		// raw[x+1] is data[did+x+1]: bound the lookahead by the absolute file
		// size (the old '(x+1L)<num_bytes' test allowed a 1-byte OOB read when
		// the file ends with '\r')
		} else if (raw[x] == '\r' && (did + x + 1L) < num_bytes && raw[x + 1] == '\n') {
			x++;
			newLinesFound++;
		}
	}
	atomicAdd((unsigned long long int*)num_records,(unsigned long long int)newLinesFound);
}
/*
 * Launch the storeRecordStart kernel, which records the starting byte offset
 * of every record into csvData->recStart (in arbitrary order; the caller
 * sorts afterwards).
 *
 * Grid layout: one thread per 64-byte chunk, 1024 threads per block.
 */
gdf_error launch_storeRecordStart(raw_csv_t * csvData) {
	/*
	 * Each thread handles one 64-byte chunk of the file.
	 * Note: one thread per byte would require a lock on the bitmap.
	 */
	const long threadsPerBlock = 1024;
	// ceil-divide the chunk count by the block size
	const long blockCount = (csvData->num_bits + (threadsPerBlock - 1)) / threadsPerBlock;
	hipLaunchKernelGGL(( storeRecordStart) , dim3(blockCount), dim3(threadsPerBlock) , 0, 0,
		csvData->data,
		csvData->delimiter,
		csvData->terminator,
		csvData->num_bytes,
		csvData->num_bits,
		csvData->d_num_records,
		csvData->recStart);
	CUDA_TRY(hipGetLastError());
	return GDF_SUCCESS;
}
/*
 * Kernel: write the starting byte offset of every record into recStart.
 *
 * Grid layout: one thread per 64-byte chunk (num_bits chunks). Each terminator
 * found contributes the offset of the byte AFTER it; thread 0 additionally
 * contributes offset 0 for the first record. Slots in recStart are claimed by
 * atomically incrementing *num_records, so the offsets come out unsorted and
 * the host sorts them afterwards. *num_records must be zeroed before launch.
 */
__global__ void storeRecordStart(char *data, const char delim, const char terminator, long num_bytes, long num_bits, unsigned long long* num_records,unsigned long long* recStart) {
	// thread IDs range per block, so also need the block id
	long tid = threadIdx.x + (blockDim.x * blockIdx.x);
	if ( tid >= num_bits)
		return;
	// data ID - multiple of 64
	long did = tid * 64L;
	char *raw = (data + did);
	// last chunk may be shorter than 64 bytes
	long byteToProcess = ((did + 64L) < num_bytes) ? 64L : (num_bytes - did);
	// the first record starts at offset 0; let thread 0 record it
	if(tid==0){
		long pos = atomicAdd((unsigned long long int*)num_records,(unsigned long long int)1);
		recStart[pos]=did+0;
	}
	// process the data
	long x = 0;
	for (x = 0; x < byteToProcess; x++) {
		// records
		if (raw[x] == terminator) {
			long pos = atomicAdd((unsigned long long int*)num_records,(unsigned long long int)1);
			recStart[pos]=did+x+1;
		// raw[x+1] is data[did+x+1]: bound the lookahead by the absolute file
		// size (the old '(x+1L)<num_bytes' test allowed a 1-byte OOB read when
		// the file ends with '\r')
		} else if (raw[x] == '\r' && (did + x + 1L) < num_bytes && raw[x + 1] == '\n') {
			x++;
			long pos = atomicAdd((unsigned long long int*)num_records,(unsigned long long int)1);
			recStart[pos]=did+x+1;
		}
	}
}
//----------------------------------------------------------------------------------------------------------------
/*
 * Launch the convertCsvToGdf kernel, which parses every record into the typed
 * per-column device buffers.
 *
 * Grid layout: one thread per record, 1024 threads per block.
 *
 * @param[in]  raw_csv    parsing state (device data, record offsets, flags)
 * @param[out] gdf        device array of per-column output data pointers
 * @param[out] valid      device array of per-column validity bitmaps
 * @param[in]  d_dtypes   device array of per-column dtypes
 * @param[out] str_cols   device array of (ptr,len) staging arrays for string columns
 * @param[in]  row_offset number of leading records to skip
 * @param[out] num_valid  device array of per-column valid-field counters
 */
gdf_error launch_dataConvertColumns(raw_csv_t * raw_csv, void **gdf, gdf_valid_type** valid, gdf_dtype* d_dtypes,string_pair **str_cols, long row_offset, unsigned long long *num_valid) {
	const int64_t threadsPerBlock = 1024;
	// ceil-divide the record count by the block size
	const int64_t blockCount = (raw_csv->num_records + (threadsPerBlock - 1)) / threadsPerBlock;
	hipLaunchKernelGGL(( convertCsvToGdf) , dim3(blockCount), dim3(threadsPerBlock) , 0, 0,
		raw_csv->data,
		raw_csv->delimiter,
		raw_csv->terminator,
		raw_csv->num_records,
		raw_csv->num_actual_cols,
		raw_csv->d_parseCol,
		raw_csv->recStart,
		d_dtypes,
		gdf,
		valid,
		str_cols,
		row_offset,
		raw_csv->header_row,
		raw_csv->dayfirst,
		num_valid
	);
	return GDF_SUCCESS;
}
/*
* Data is processed in one row\record at a time - so the number of total threads (tid) is equal to the number of rows.
*
*/
/*
 * Kernel: parse one full CSV record per thread into the typed output columns.
 *
 * Grid layout: one thread per record. For each field the thread finds the
 * field boundary (delimiter / terminator / "\r\n"), trims surrounding
 * whitespace for non-string types, converts the text to the column's dtype,
 * sets the validity bit, and bumps the column's valid counter. Empty fields
 * are left as null (bit stays 0). String columns are not converted here;
 * their (pointer,length) pairs are staged in str_cols for NVStrings.
 */
__global__ void convertCsvToGdf(
char *raw_csv,
char delim,
char terminator,
unsigned long long num_records,
int num_columns,
bool *parseCol,
unsigned long long *recStart,
gdf_dtype *dtype,
void **gdf_data,
gdf_valid_type **valid,
string_pair **str_cols,
unsigned long long row_offset,
long header_row,
bool dayfirst,
unsigned long long *num_valid
)
{
// thread IDs range per block, so also need the block id
long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); // this is entry into the field array - tid is an elements within the num_entries array
// we can have more threads than data, make sure we are not past the end of the data
if ( rec_id >= num_records)
return;
// skip over the header row in the offsets table, if one exists
long extraOff=0;
if(rec_id>=header_row && header_row>=0)
extraOff=1;
// byte range [start, stop) of this record in the raw buffer
long start = recStart[rec_id + row_offset + extraOff];
long stop = recStart[rec_id + 1 + row_offset + extraOff];
long pos = start;
int col = 0;
int actual_col = 0;
int stringCol = 0;
while(col<num_columns){
if(start>stop)
break;
// advance pos to the end of the current field
while(true){
if(raw_csv[pos]==delim){
break;
}
else if (raw_csv[pos] == terminator){
break;
}
else if(raw_csv[pos] == '\r' && ((pos+1) < stop && raw_csv[pos+1]=='\n')){
stop--;
break;
}
if(pos>=stop)
break;
pos++;
}
if(parseCol[col]==true){
// tempPos is the last character of the field (inclusive)
long tempPos=pos-1;
// trim leading/trailing whitespace for all types that parse numerically
if(dtype[col] != gdf_dtype::GDF_CATEGORY && dtype[col] != gdf_dtype::GDF_STRING){
removePrePostWhiteSpaces2(raw_csv, &start, &tempPos);
}
if(start<=(tempPos)) { // Empty strings are not legal values
switch(dtype[col]) {
case gdf_dtype::GDF_INT8:
{
int8_t *gdf_out = (int8_t *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoInt<int8_t>(raw_csv, start, tempPos);
}
break;
case gdf_dtype::GDF_INT16: {
int16_t *gdf_out = (int16_t *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoInt<int16_t>(raw_csv, start, tempPos);
}
break;
case gdf_dtype::GDF_INT32:
{
int32_t *gdf_out = (int32_t *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoInt<int32_t>(raw_csv, start, tempPos);
}
break;
case gdf_dtype::GDF_INT64:
{
int64_t *gdf_out = (int64_t *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoInt<int64_t>(raw_csv, start, tempPos);
}
break;
case gdf_dtype::GDF_FLOAT32:
{
float *gdf_out = (float *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoFloat<float>(raw_csv, start, tempPos);
}
break;
case gdf_dtype::GDF_FLOAT64:
{
double *gdf_out = (double *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoFloat<double>(raw_csv, start, tempPos);
}
break;
case gdf_dtype::GDF_DATE32:
{
gdf_date32 *gdf_out = (gdf_date32 *)gdf_data[actual_col];
gdf_out[rec_id] = parseDateFormat(raw_csv, start, tempPos, dayfirst);
}
break;
case gdf_dtype::GDF_DATE64:
{
gdf_date64 *gdf_out = (gdf_date64 *)gdf_data[actual_col];
gdf_out[rec_id] = parseDateTimeFormat(raw_csv, start, tempPos, dayfirst);
}
break;
case gdf_dtype::GDF_TIMESTAMP:
{
int64_t *gdf_out = (int64_t *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoInt<int64_t>(raw_csv, start, tempPos);
}
break;
case gdf_dtype::GDF_CATEGORY:
{
// category hashes the untrimmed field text [start, pos)
gdf_category *gdf_out = (gdf_category *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoHash(raw_csv, start, pos, HASH_SEED);
}
break;
case gdf_dtype::GDF_STRING:{
// stage pointer/length for NVStrings::create_from_index on the host
str_cols[stringCol][rec_id].first = raw_csv+start;
str_cols[stringCol][rec_id].second = size_t(pos-start);
stringCol++;
}
break;
default:
break;
}
// set the valid bitmap - all bits were set to 0 to start
int bitmapIdx = whichBitmap(rec_id); // which bitmap
int bitIdx = whichBit(rec_id); // which bit - over an 8-bit index
setBit(valid[col]+bitmapIdx, bitIdx); // This is done with atomics
// NOTE(review): the bitmap above indexes valid[col] while the counter below
// indexes num_valid[col] (not actual_col) - confirm against the launch wrapper
atomicAdd((unsigned long long int*)&num_valid[col],(unsigned long long int)1);
}
else if(dtype[col]==gdf_dtype::GDF_STRING){
// empty string field: record a null entry so indices stay aligned
str_cols[stringCol][rec_id].first = NULL;
str_cols[stringCol][rec_id].second = 0;
stringCol++;
}
actual_col++;
}
// step over the delimiter/terminator into the next field
pos++;
start=pos;
col++;
}
}
//----------------------------------------------------------------------------------------------------------------
/*
 * Launch the dataTypeDetection kernel, which tallies what each field of each
 * column looks like (int width, float, date, string, null) into d_columnData.
 *
 * Grid layout: one thread per record, 1024 threads per block.
 *
 * @param[in]  raw_csv      parsing state (device data, record offsets, flags)
 * @param[in]  row_offset   number of leading records to skip
 * @param[out] d_columnData per-column counters (must be zeroed before launch)
 */
gdf_error launch_dataTypeDetection(
	raw_csv_t * raw_csv,
	long row_offset,
	column_data_t* d_columnData)
{
	const int64_t threadsPerBlock = 1024;
	// ceil-divide the record count by the block size
	const int64_t blockCount = (raw_csv->num_records + (threadsPerBlock - 1)) / threadsPerBlock;
	hipLaunchKernelGGL(( dataTypeDetection) , dim3(blockCount), dim3(threadsPerBlock) , 0, 0,
		raw_csv->data,
		raw_csv->delimiter,
		raw_csv->terminator,
		raw_csv->num_records,
		raw_csv->num_actual_cols,
		raw_csv->d_parseCol,
		raw_csv->recStart,
		row_offset,
		raw_csv->header_row,
		d_columnData
	);
	return GDF_SUCCESS;
}
/*
*/
/**
 * @brief CUDA kernel: scan one CSV record per thread and tally, per active
 * column, how many fields look like integers (by required width), floats,
 * date/times, strings, or NULLs. The host later chooses each column's dtype
 * from these counters.
 *
 * Launch: 1-D grid, at least num_records total threads.
 *
 * Fix: integer fields are now sized by their magnitude. Previously a negative
 * value failed every `i >= (1<<k)` test and was always tallied as int8, even
 * when it needed 16/32/64 bits. The shift constants are also `1LL` so the
 * comparisons are 64-bit on every platform (plain `long` is 32-bit on LLP64).
 *
 * @param raw_csv      device buffer holding the raw CSV text
 * @param delim        field separator
 * @param terminator   record terminator
 * @param num_records  number of records to examine
 * @param num_columns  number of columns per record
 * @param parseCol     per-column flags; false means the column is skipped
 * @param recStart     sorted record start offsets (num_records + 1 entries)
 * @param row_offset   number of leading rows to skip (skiprows)
 * @param header_row   row id of the header, or negative when there is none
 * @param d_columnData per-active-column counters, updated with atomics
 */
__global__ void dataTypeDetection(
		char *raw_csv,
		char delim,
		char terminator,
		unsigned long long num_records,
		int num_columns,
		bool *parseCol,
		unsigned long long *recStart,
		unsigned long long row_offset,
		long header_row,
		column_data_t* d_columnData
		)
{
	// thread IDs range per block, so also need the block id
	long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); // this is entry into the field array - tid is an elements within the num_entries array
	// we can have more threads than data, make sure we are not past the end of the data
	if ( rec_id >= num_records)
		return;
	// Skip over the header row when it falls inside the scanned range.
	long extraOff=0;
	if(rec_id>=header_row && header_row>=0)
		extraOff=1;
	long start = recStart[rec_id + row_offset + extraOff];
	long stop = recStart[rec_id + 1 + row_offset + extraOff];
	long pos = start;
	int col = 0;
	int actual_col = 0;
	// Going through all the columns of a given record
	while(col<num_columns){
		if(start>stop)
			break;
		// Finding the breaking point for each column
		while(true){
			if(raw_csv[pos]==delim){
				break;
			}
			else if (raw_csv[pos] == terminator){
				break;
			}
			else if(raw_csv[pos] == '\r' && ((pos+1) < stop && raw_csv[pos+1]=='\n')){
				stop--;
				break;
			}
			// NOTE(review): raw_csv[pos] is read above before this bound check, so
			// pos can reach stop (the start of the next record) -- for the final
			// record that byte may be one past the end of the buffer. Confirm that
			// recStart's last entry never equals num_bytes, or guard here.
			if(pos>stop)
				break;
			pos++;
		}
		// Checking if this is a column that the user wants --- user can filter columns
		if(parseCol[col]==true){
			long tempPos=pos-1;
			// Checking if the record is NULL
			if(start>(tempPos)){
				atomicAdd(& d_columnData[actual_col].countNULL, 1L);
				pos++;
				start=pos;
				col++;
				actual_col++;
				continue;
			}
			long countNumber=0;
			long countDecimal=0;
			long countSlash=0;
			long countDash=0;
			long countColon=0;
			long countString=0;
			// NOTE(review): strLen is measured before whitespace trimming, while the
			// character counters below run over the trimmed range; a padded numeric
			// field therefore classifies as string. Preserved as-is.
			long strLen=pos-start;
			// Remove all pre and post white-spaces. We might find additional NULL fields if the entire entry is made up of only spaces.
			removePrePostWhiteSpaces2(raw_csv, &start, &tempPos);
			for(long startPos=start; startPos<=tempPos; startPos++){
				if(raw_csv[startPos]>= '0' && raw_csv[startPos] <= '9'){
					countNumber++;
					continue;
				}
				// Looking for unique characters that will help identify column types.
				switch (raw_csv[startPos]){
					case '.':
						countDecimal++;break;
					case '-':
						countDash++; break;
					case '/':
						countSlash++;break;
					case ':':
						countColon++;break;
					default:
						countString++;
						break;
				}
			}
			if(strLen==0) // Removed spaces ' ' in the pre-processing and thus we can have an empty string.
				atomicAdd(& d_columnData[actual_col].countNULL, 1L);
			// Integers have to have the length of the string or can be off by one if they start with a minus sign
			else if(countNumber==(strLen) || ( strLen>1 && countNumber==(strLen-1) && raw_csv[start]=='-') ){
				// Checking to see if the integer value requires 8,16,32,64 bits.
				// This will allow us to allocate the exact amount of memory.
				int64_t i = convertStrtoInt<int64_t>(raw_csv, start, tempPos);
				// Size by magnitude so negative values are classified correctly;
				// previously any negative value fell through to the int8 counter.
				int64_t magnitude = (i < 0) ? -i : i;
				if(magnitude >= (1LL<<31)){
					atomicAdd(& d_columnData[actual_col].countInt64, 1L);
				}
				else if(magnitude >= (1LL<<15)){
					atomicAdd(& d_columnData[actual_col].countInt32, 1L);
				}
				else if(magnitude >= (1LL<<7)){
					atomicAdd(& d_columnData[actual_col].countInt16, 1L);
				}
				else
					atomicAdd(& d_columnData[actual_col].countInt8, 1L);
			}
			// Floating point numbers are made up of numerical strings, have to have a decimal sign, and can have a minus sign.
			else if((countNumber==(strLen-1) && countDecimal==1) || (strLen>2 && countNumber==(strLen-2) && raw_csv[start]=='-')){
				atomicAdd(& d_columnData[actual_col].countFloat, 1L);
			}
			// The date-time field cannot have more than 3 strings. As such if an entry has more than 3 string characters, it is not
			// a data-time field. Also, if a string has multiple decimals, then is not a legit number.
			else if(countString > 3 || countDecimal > 1){
				atomicAdd(& d_columnData[actual_col].countString, 1L);
			}
			else {
				// A date field can have either one or two '-' or '\'. A legal combination will only have one of them.
				// To simplify the process of auto column detection, we are not covering all the date-time formation permutations.
				if((countDash>0 && countDash<=2 && countSlash==0)|| (countDash==0 && countSlash>0 && countSlash<=2) ){
					if((countColon<=2)){
						atomicAdd(& d_columnData[actual_col].countDateAndTime, 1L);
					}
					else{
						atomicAdd(& d_columnData[actual_col].countString, 1L);
					}
				}
				// Default field is string type.
				else{
					atomicAdd(& d_columnData[actual_col].countString, 1L);
				}
			}
			actual_col++;
		}
		pos++;
		start=pos;
		col++;
	}
}
//----------------------------------------------------------------------------------------------------------------
/*
 * Return the absolute bit offset (within the whole bitmap array) of the x-th
 * set bit, scanning forward from r_bits[tid].
 * x is the occurrence: 1 = first, 2 = second, ...
 */
// Locate the x-th set bit, scanning 64-bit words from r_bits[tid] onward.
// Returns the bit's offset relative to the start of word tid, or -1 when
// x == 0 or the remaining words run out of set bits.
__device__ int findSetBit(int tid, long num_bits, uint64_t *r_bits, int x) {
	if (x == 0)
		return -1;
	int idx = tid;
	uint64_t bits = r_bits[idx];
	int wordBase = 0;   // bit offset of the current word relative to word tid
	int bitPos = 0;     // 1-based count of bits examined in the current word
	for (int seen = 0; seen < x; ) {
		if (bits == 0) {
			// Exhausted this word; move to the next one (if any remain).
			++idx;
			if (idx >= num_bits)
				return -1;
			bits = r_bits[idx];
			wordBase += 64;
			bitPos = 0;
		}
		if (bits & 1)
			++seen;
		bits >>= 1;
		++bitPos;
	}
	return wordBase + bitPos - 1;
}
| 98b9db4227a19b409949632abfad0e2ff76cbe9d.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file csv-reader.cu code to read csv data
*
* CSV Reader
*/
#include <cuda_runtime.h>
#include <iostream>
#include <vector>
#include <string>
#include <stdio.h>
#include <iostream>
#include <iomanip>
#include <vector>
#include <unordered_map>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <thrust/scan.h>
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/host_vector.h>
#include "type_conversion.cuh"
#include "date-time-parser.cuh"
#include <gdf/gdf.h>
#include <gdf/errorutils.h>
#include <gdf/gdf_io.h>
#include <rmm.h>
#include "NVStrings.h"
// Seed reserved for hashing column values (currently unused in this file).
constexpr int32_t HASH_SEED = 33;

using namespace std;

//-- define the structure for raw data handling - for internal use
// Holds the raw device buffer plus all per-file parsing state shared between
// the host driver (read_csv) and the CUDA kernels below.
typedef struct raw_csv_ {
	char * data; // on-device: the raw unprocessed CSV data - loaded as a large char * array
	unsigned long long* d_num_records; // on-device: Number of records.
	unsigned long long* recStart; // on-device: Starting position of the records.
	char delimiter; // host: the delimiter
	char terminator; // host: the line terminator
	long num_bytes; // host: the number of bytes in the data
	long num_bits; // host: the number of 64-bit bitmaps (different than valid)
	unsigned long long num_records; // host: number of records (per column)
	// int num_cols; // host: number of columns
	int num_active_cols; // host: number of columns that will be return to user.
	int num_actual_cols; // host: number of columns in the file --- based on the number of columns in header
	vector<gdf_dtype> dtypes; // host: array of dtypes (since gdf_columns are not created until end)
	vector<string> col_names; // host: array of column names
	bool* h_parseCol; // host : array of booleans stating if column should be parsed in reading process: parseCol[x]=false means that the column x needs to be filtered out.
	bool* d_parseCol; // device : array of booleans stating if column should be parsed in reading process: parseCol[x]=false means that the column x needs to be filtered out.
	long header_row; // Row id of the header
	bool dayfirst; // host: parse dates as DD/MM rather than MM/DD
} raw_csv_t;
// Per-column tallies filled in by the dataTypeDetection kernel; the host picks
// each column's dtype from these counters when the user supplies no dtypes.
typedef struct column_data_{
	unsigned long long countFloat;        // fields that look like floating point
	unsigned long long countDateAndTime;  // fields that look like date/time
	unsigned long long countString;       // fields that are neither numeric nor date-like
	unsigned long long countInt8;         // integers fitting in 8 bits
	unsigned long long countInt16;        // integers fitting in 16 bits
	unsigned long long countInt32;        // integers fitting in 32 bits
	unsigned long long countInt64;        // integers needing 64 bits
	unsigned long long countNULL;         // empty / all-whitespace fields
}column_data_t;
// (pointer into the raw device buffer, length) pair used to hand string cells
// over to NVStrings at the end of parsing.
using string_pair = std::pair<const char*,size_t>;
//
//---------------create and process ---------------------------------------------
//
// Host-side helpers; definitions appear after read_csv below.
gdf_error parseArguments(csv_read_arg *args, raw_csv_t *csv);
// gdf_error getColNamesAndTypes(const char **col_names, const char **dtypes, raw_csv_t *d);
gdf_error updateRawCsv( const char * data, long num_bytes, raw_csv_t * csvData );
gdf_error allocateGdfDataSpace(gdf_column *);
gdf_dtype convertStringToDtype(std::string &dtype);
#define checkError(error, txt) if ( error != GDF_SUCCESS) { cerr << "ERROR: " << error << " in " << txt << endl; return error; }
//
//---------------CUDA Kernel ---------------------------------------------
//
// Device helper and kernel-launcher prototypes; definitions appear below.
__device__ int findSetBit(int tid, long num_bits, uint64_t *f_bits, int x);
gdf_error launch_countRecords(raw_csv_t * csvData);
gdf_error launch_storeRecordStart(raw_csv_t * csvData);
gdf_error launch_dataConvertColumns(raw_csv_t * raw_csv, void** d_gdf, gdf_valid_type** valid, gdf_dtype* d_dtypes, string_pair **str_cols, long row_offset, unsigned long long *);
gdf_error launch_dataTypeDetection(raw_csv_t * raw_csv, long row_offset, column_data_t* d_columnData);
__global__ void countRecords(char *data, const char delim, const char terminator, long num_bytes, long num_bits, unsigned long long* num_records);
__global__ void storeRecordStart(char *data, const char delim, const char terminator, long num_bytes, long num_bits, unsigned long long* num_records,unsigned long long* recStart) ;
__global__ void convertCsvToGdf(char *csv,char delim, char terminator, unsigned long long num_records, int num_columns,bool *parseCol,unsigned long long *recStart,gdf_dtype *dtype,void **gdf_data,gdf_valid_type **valid,string_pair **str_cols,unsigned long long row_offset, long header_row,bool dayfirst,unsigned long long *num_valid);
__global__ void dataTypeDetection( char *raw_csv, char delim, char terminator, unsigned long long num_records, int num_columns, bool *parseCol, unsigned long long *recStart, unsigned long long row_offset, long header_row, column_data_t* d_columnData);
//
//---------------CUDA Valid (8 blocks of 8-bits) Bitmap Kernels ---------------------------------------------
//
// Byte index inside the valid bitmap that holds the bit for `record` (8 bits per byte).
__device__ int whichBitmap(int record) { return (record/8); }
// Bit position of `bit` within its bitmap byte.
__device__ int whichBit(int bit) { return (bit % 8); }

// Atomically OR `val` into the bitmap byte at `address`. CUDA has no byte-wide
// atomicOr, so the address is aligned down to its containing 4-byte word and a
// shifted 32-bit value is ORed in. NOTE(review): this assumes the valid buffer
// is padded to a 4-byte multiple so the word write stays in bounds -- see the
// over-allocation in allocateGdfDataSpace.
__inline__ __device__ void validAtomicOR(gdf_valid_type* address, gdf_valid_type val)
{
	int32_t *base_address = (int32_t*)((gdf_valid_type*)address - ((size_t)address & 3));
	int32_t int_val = (int32_t)val << (((size_t) address & 3) * 8);
	atomicOr(base_address, int_val);
}

// Atomically set bit `bit` (0-7) within the bitmap byte at `address`.
__device__ void setBit(gdf_valid_type* address, int bit) {
	gdf_valid_type bitMask[8] = {1, 2, 4, 8, 16, 32, 64, 128};
	validAtomicOR(address, bitMask[bit]);
}
/*
 * Map a gdf_dtype to its textual name (the inverse of convertStringToDtype).
 *
 * Fix: GDF_DATE32, GDF_TIMESTAMP and GDF_FLOAT32 previously fell through to
 * the default case and were reported as "long"; they now map to their own
 * names, keeping this function consistent with convertStringToDtype.
 */
std::string stringType(gdf_dtype dt){
	switch (dt){
		case GDF_STRING: return std::string("str");
		case GDF_DATE32: return std::string("date32");
		case GDF_DATE64: return std::string("date64");
		case GDF_TIMESTAMP: return std::string("timestamp");
		case GDF_CATEGORY: return std::string("category");
		case GDF_FLOAT32: return std::string("float32");
		case GDF_FLOAT64: return std::string("float64");
		case GDF_INT8: return std::string("int8");
		case GDF_INT16: return std::string("int16");
		case GDF_INT32: return std::string("int32");
		case GDF_INT64: return std::string("int64");
		default:
			// Unknown/unhandled dtype: keep the legacy fallback name.
			return "long";
	}
}
/**
* @brief read in a CSV file
*
* Read in a CSV file, extract all fields, and return a GDF (array of gdf_columns)
*
 * @param[in,out] args the input arguments; the parsed columns are also returned through this structure
*
* Arguments:
*
* Required Arguments
* file_path - file location to read from - currently the file cannot be compressed
* num_cols - number of columns in the names and dtype arrays
* names - ordered List of column names, this is a required field
* dtype - ordered List of data types, this is required
*
* Optional
* lineterminator - define the line terminator character. Default is '\n'
* delimiter - define the field separator, default is ',' This argument is also called 'sep'
* delim_whitespace - use white space as the delimiter - default is false. This overrides the delimiter argument
* skipinitialspace - skip white spaces after the delimiter - default is false
*
* skiprows - number of rows at the start of the files to skip, default is 0
* skipfooter - number of rows at the bottom of the file to skip - default is 0
*
* dayfirst - is the first value the day? DD/MM versus MM/DD
*
*
* Output
* num_cols_out - Out: return the number of columns read in
* num_rows_out - Out: return the number of rows read in
* gdf_column **data - Out: return the array of *gdf_columns
*
*
* @return gdf_error
*
*/
gdf_error read_csv(csv_read_arg *args)
{
	gdf_error error = gdf_error::GDF_SUCCESS;

	//-----------------------------------------------------------------------------
	// create the CSV data structure - this will be filled in as the CSV data is processed.
	// Done first to validate data types
	raw_csv_t * raw_csv = new raw_csv_t;
	// error = parseArguments(args, raw_csv);
	raw_csv->num_actual_cols = args->num_cols;
	raw_csv->num_active_cols = args->num_cols;
	raw_csv->num_records = 0;

	// delim_whitespace overrides any explicit delimiter.
	if ( args->delim_whitespace == true) {
		raw_csv->delimiter = ' ';
	} else {
		raw_csv->delimiter = args->delimiter;
	}

	// Windows line termination ("\r\n") is handled by treating '\n' as the
	// terminator and letting the kernels skip the preceding '\r'.
	if(args->windowslinetermination)
		raw_csv->terminator = '\n';
	else
		raw_csv->terminator = args->lineterminator;

	raw_csv->dayfirst = args->dayfirst;

	//-----------------------------------------------------------------------------
	// memory map in the data
	// NOTE(review): the early-return paths below leak raw_csv and (after mmap
	// succeeds) the mapping itself; acceptable for fatal errors, but worth noting.
	void * map_data = NULL;
	struct stat st;
	int fd;

	fd = open(args->file_path, O_RDONLY );
	if (fd < 0) { close(fd); checkError(GDF_FILE_ERROR, "Error opening file"); }
	if (fstat(fd, &st)) { close(fd); checkError(GDF_FILE_ERROR, "cannot stat file"); }

	raw_csv->num_bytes = st.st_size;

	map_data = mmap(0, raw_csv->num_bytes, PROT_READ, MAP_PRIVATE, fd, 0);
	if (map_data == MAP_FAILED || raw_csv->num_bytes==0) { close(fd); checkError(GDF_C_ERROR, "Error mapping file"); }

	//-----------------------------------------------------------------------------
	//--- create a structure to hold variables used to parse the CSV data
	error = updateRawCsv( (const char *)map_data, (long)raw_csv->num_bytes, raw_csv );
	checkError(error, "call to createRawCsv");

	//-----------------------------------------------------------------------------
	// find the record and fields points (in bitmaps)
	cudaDeviceSynchronize();
	error = launch_countRecords(raw_csv);
	checkError(error, "call to record counter");

	//-----------------------------------------------------------------------------
	//-- Allocate space to hold the record starting point
	// One extra slot: storeRecordStart also records offset 0 for the first record.
	RMM_TRY( rmmAlloc((void**)&(raw_csv->recStart), (sizeof(unsigned long long) * (raw_csv->num_records + 1)), 0) );
	CUDA_TRY( cudaMemset(raw_csv->d_num_records, 0, (sizeof(unsigned long long) )) ) ;

	//-----------------------------------------------------------------------------
	//-- Scan data and set the starting positions
	error = launch_storeRecordStart(raw_csv);
	checkError(error, "call to record initial position store");

	cudaDeviceSynchronize();
	// Offsets are emitted in arbitrary order by the kernel; sort so that
	// recStart[i] is the start of record i.
	thrust::sort(thrust::device,raw_csv->recStart, raw_csv->recStart + raw_csv->num_records + 1);

	//-----------------------------------------------------------------------------
	//-- Acquire header row of
	int h_num_cols=0, h_dup_cols_removed=0;
	int skip_header=0;

	// Check if the user gave us a list of column names
	if(args->names==NULL){
		// Getting the first row of data from the file. We will parse the data to find lineterminator as
		// well as the column delimiter.
		char* cmap_data = (char *)map_data;
		unsigned long long c=0;
		raw_csv->header_row=0;

		if (args->header>=0){
			raw_csv->header_row = args->header;
		}

		if(raw_csv->header_row > (long)raw_csv->num_records){
			checkError(GDF_FILE_ERROR, "Number of records is smaller than the id of the specified header row");
		}

		// Count the columns by scanning the header row for delimiters.
		unsigned long long headerPositions[2];
		CUDA_TRY( cudaMemcpy(headerPositions,raw_csv->recStart + raw_csv->header_row, sizeof(unsigned long long)*2, cudaMemcpyDeviceToHost));
		unsigned long long start = headerPositions[0];
		unsigned long long stop = headerPositions[1];

		c=start;
		while(c<stop){
			if (cmap_data[c]==args->lineterminator){
				h_num_cols++;
				break;
			}
			else if(cmap_data[c] == '\r' && (c+1L)<(unsigned long long)raw_csv->num_bytes && cmap_data[c+1] == '\n'){
				h_num_cols++;
				break;
			}else if (cmap_data[c]==args->delimiter)
				h_num_cols++;
			c++;
		}

		unsigned long long prev=0;
		c=start;

		raw_csv->col_names.clear();

		if(args->header>=0){
			h_num_cols=0;
			// Storing the names of the columns into a vector of strings
			while(c<=stop){
				if (cmap_data[c]==args->delimiter || cmap_data[c]==args->lineterminator){
					std::string colName(cmap_data +prev,c-prev );
					prev=c+1;
					raw_csv->col_names.push_back(colName);
					h_num_cols++;
				}
				c++;
			}
			skip_header=1;
		}else{
			// No header: synthesize "0", "1", ... as column names.
			for (int i = 0; i<h_num_cols; i++){
				std::string newColName = std::to_string(i);
				raw_csv->col_names.push_back(newColName);
			}
		}

		// Allocating a boolean array that will use to state if a column needs to read or filtered.
		raw_csv->h_parseCol = (bool*)malloc(sizeof(bool) * (h_num_cols));
		RMM_TRY( rmmAlloc ((void**)&raw_csv->d_parseCol,(sizeof(bool) * (h_num_cols)),0 ) );
		for (int i = 0; i<h_num_cols; i++)
			raw_csv->h_parseCol[i]=true;

		// Looking for duplicates
		for (auto it = raw_csv->col_names.begin(); it != raw_csv->col_names.end(); it++){
			bool found_dupe = false;
			for (auto it2 = (it+1); it2 != raw_csv->col_names.end(); it2++){
				if (*it==*it2){
					found_dupe=true;
					break;
				}
			}
			if(found_dupe){
				int count=1;
				for (auto it2 = (it+1); it2 != raw_csv->col_names.end(); it2++){
					if (*it==*it2){
						if(args->mangle_dupe_cols){
							// Replace all the duplicates of column X with X.1,X.2,... First appearance stays as X.
							std::string newColName = *it2;
							newColName += "." + std::to_string(count);
							count++;
							*it2 = newColName;
						} else{
							// All duplicate fields will be ignored.
							int pos=std::distance(raw_csv->col_names.begin(), it2);
							raw_csv->h_parseCol[pos]=false;
							h_dup_cols_removed++;
						}
					}
				}
			}
		}

		raw_csv->num_actual_cols = h_num_cols; // Actual number of columns in the CSV file
		raw_csv->num_active_cols = h_num_cols-h_dup_cols_removed; // Number of fields that need to be processed based on duplication of fields

		CUDA_TRY(cudaMemcpy(raw_csv->d_parseCol, raw_csv->h_parseCol, sizeof(bool) * (h_num_cols), cudaMemcpyHostToDevice));
	}
	else {
		// Column names supplied by the caller: parse all of them by default.
		raw_csv->h_parseCol = (bool*)malloc(sizeof(bool) * (args->num_cols));
		RMM_TRY( rmmAlloc ((void**)&raw_csv->d_parseCol,(sizeof(bool) * (args->num_cols)),0 ) );

		for (int i = 0; i<raw_csv->num_actual_cols; i++){
			raw_csv->h_parseCol[i]=true;
			std::string col_name = args->names[i];
			raw_csv->col_names.push_back(col_name);
		}
		CUDA_TRY(cudaMemcpy(raw_csv->d_parseCol, raw_csv->h_parseCol, sizeof(bool) * (args->num_cols), cudaMemcpyHostToDevice));
	}

	// User-requested column subset, either by index or by name.
	if (args->use_cols_int!=NULL || args->use_cols_char!=NULL){
		if(args->use_cols_int!=NULL){
			for (int i = 0; i<raw_csv->num_actual_cols; i++)
				raw_csv->h_parseCol[i]=false;
			for(int i=0; i < args->use_cols_int_len; i++){
				int pos = args->use_cols_int[i];
				raw_csv->h_parseCol[pos]=true;
			}
			raw_csv->num_active_cols = args->use_cols_int_len;
		}else{
			for (int i = 0; i<raw_csv->num_actual_cols; i++)
				raw_csv->h_parseCol[i]=false;
			int countFound=0;
			for(int i=0; i < args->use_cols_char_len; i++){
				std::string colName(args->use_cols_char[i]);
				for (auto it = raw_csv->col_names.begin(); it != raw_csv->col_names.end(); it++){
					if(colName==*it){
						countFound++;
						int pos=std::distance(raw_csv->col_names.begin(), it);
						raw_csv->h_parseCol[pos]=true;
						break;
					}
				}
			}
			raw_csv->num_active_cols = countFound;
		}
		CUDA_TRY(cudaMemcpy(raw_csv->d_parseCol, raw_csv->h_parseCol, sizeof(bool) * (raw_csv->num_actual_cols), cudaMemcpyHostToDevice));
	}

	raw_csv->num_records -= (args->skiprows + args->skipfooter);

	if(skip_header==0){
		raw_csv->header_row=-1;
	}else{
		// The header row itself is not a data record.
		raw_csv->num_records-=1;
	}

	//-----------------------------------------------------------------------------
	//--- done with host data
	close(fd);
	munmap(map_data, raw_csv->num_bytes);

	//-----------------------------------------------------------------------------
	//--- Auto detect types of the vectors
	// if(args->dtype==NULL){
	if(args->names==NULL){
		column_data_t *d_ColumnData,*h_ColumnData;

		h_ColumnData = (column_data_t*)malloc(sizeof(column_data_t) * (raw_csv->num_active_cols));
		RMM_TRY( rmmAlloc ((void**)&d_ColumnData,(sizeof(column_data_t) * (raw_csv->num_active_cols)),0 ) );
		CUDA_TRY( cudaMemset(d_ColumnData, 0, (sizeof(column_data_t) * (raw_csv->num_active_cols)) ) ) ;

		launch_dataTypeDetection(raw_csv, args->skiprows, d_ColumnData);

		CUDA_TRY( cudaMemcpy(h_ColumnData,d_ColumnData, sizeof(column_data_t) * (raw_csv->num_active_cols), cudaMemcpyDeviceToHost));

		vector<gdf_dtype> d_detectedTypes; // host: array of dtypes (since gdf_columns are not created until end)
		raw_csv->dtypes.clear();

		for(int col = 0; col < raw_csv->num_active_cols; col++){
			unsigned long long countInt = h_ColumnData[col].countInt8+h_ColumnData[col].countInt16+
			h_ColumnData[col].countInt32+h_ColumnData[col].countInt64;

			if (h_ColumnData[col].countNULL == raw_csv->num_records){
				d_detectedTypes.push_back(GDF_INT8); // Entire column is NULL. Allocating the smallest amount of memory
			} else if(h_ColumnData[col].countString>0L){
				d_detectedTypes.push_back(GDF_CATEGORY); // For auto-detection, we are currently not supporting strings.
			} else if(h_ColumnData[col].countDateAndTime>0L){
				d_detectedTypes.push_back(GDF_DATE64);
			} else if(h_ColumnData[col].countFloat > 0L ||
				(h_ColumnData[col].countFloat==0L && countInt >0L && h_ColumnData[col].countNULL >0L) ) {
				// The second condition has been added to conform to PANDAS which states that a column of
				// integers with a single NULL record need to be treated as floats.
				d_detectedTypes.push_back(GDF_FLOAT64);
			}
			else {
				d_detectedTypes.push_back(GDF_INT64);
			}
		}

		raw_csv->dtypes=d_detectedTypes;

		free(h_ColumnData);
		RMM_TRY( rmmFree ( d_ColumnData, 0 ) );
	}
	else{
		// User-supplied dtypes: translate each dtype string.
		for ( int x = 0; x < raw_csv->num_actual_cols; x++) {
			std::string temp_type = args->dtype[x];
			gdf_dtype col_dtype = convertStringToDtype( temp_type );

			if (col_dtype == GDF_invalid)
				return GDF_UNSUPPORTED_DTYPE;

			raw_csv->dtypes.push_back(col_dtype);
		}
	}

	//-----------------------------------------------------------------------------
	//--- allocate space for the results
	gdf_column **cols = (gdf_column **)malloc( sizeof(gdf_column *) * raw_csv->num_active_cols);

	void **d_data,**h_data;
	gdf_valid_type **d_valid,**h_valid;
	unsigned long long *d_valid_count,*h_valid_count;
	gdf_dtype *d_dtypes,*h_dtypes;

	h_dtypes = (gdf_dtype*)malloc ( sizeof(gdf_dtype)* (raw_csv->num_active_cols));
	h_valid_count = (unsigned long long*)malloc ( sizeof(unsigned long long)* (raw_csv->num_active_cols));
	h_data = (void**)malloc ( sizeof(void*)* (raw_csv->num_active_cols));
	h_valid = (gdf_valid_type**)malloc ( sizeof(gdf_valid_type*)* (raw_csv->num_active_cols));

	RMM_TRY( rmmAlloc ((void**)&d_dtypes, (sizeof(gdf_dtype) * raw_csv->num_active_cols), 0 ) );
	RMM_TRY( rmmAlloc ((void**)&d_data, (sizeof(void *) * raw_csv->num_active_cols), 0 ) );
	RMM_TRY( rmmAlloc ((void**)&d_valid, (sizeof(gdf_valid_type *) * raw_csv->num_active_cols), 0 ) );
	RMM_TRY( rmmAlloc ((void**)&d_valid_count, (sizeof(unsigned long long) * raw_csv->num_active_cols), 0 ) );
	CUDA_TRY( cudaMemset(d_valid_count, 0, (sizeof(unsigned long long) * raw_csv->num_active_cols)) );

	// String columns collect (pointer, length) pairs first; NVStrings builds
	// the actual column afterwards.
	int stringColCount=0;
	for (int col = 0; col < raw_csv->num_active_cols; col++) {
		if(raw_csv->dtypes[col]==gdf_dtype::GDF_STRING)
			stringColCount++;
	}

	string_pair **h_str_cols = NULL, **d_str_cols = NULL;

	if (stringColCount > 0 ) {
		h_str_cols = (string_pair**) malloc ((sizeof(string_pair *) * stringColCount));
		RMM_TRY( rmmAlloc ((void**)&d_str_cols, (sizeof(string_pair *) * stringColCount), 0) );

		for (int col = 0; col < stringColCount; col++) {
			RMM_TRY( rmmAlloc ((void**)(h_str_cols + col), sizeof(string_pair) * (raw_csv->num_records), 0) );
		}

		CUDA_TRY(cudaMemcpy(d_str_cols, h_str_cols, sizeof(string_pair *) * stringColCount, cudaMemcpyHostToDevice));
	}

	for (int col = 0; col < raw_csv->num_active_cols; col++) {

		gdf_column *gdf = (gdf_column *)malloc(sizeof(gdf_column) * 1);

		gdf->size = raw_csv->num_records;
		gdf->dtype = raw_csv->dtypes[col];
		gdf->null_count = 0; // will be filled in later

		//--- column name
		std::string str = raw_csv->col_names[col];
		int len = str.length() + 1;
		gdf->col_name = (char *)malloc(sizeof(char) * len);
		memcpy(gdf->col_name, str.c_str(), len);
		gdf->col_name[len -1] = '\0';

		allocateGdfDataSpace(gdf);

		cols[col] = gdf;
		h_dtypes[col] = raw_csv->dtypes[col];
		h_data[col] = gdf->data;
		h_valid[col] = gdf->valid;
	}

	CUDA_TRY( cudaMemcpy(d_dtypes,h_dtypes, sizeof(gdf_dtype) * (raw_csv->num_active_cols), cudaMemcpyHostToDevice));
	CUDA_TRY( cudaMemcpy(d_data,h_data, sizeof(void*) * (raw_csv->num_active_cols), cudaMemcpyHostToDevice));
	CUDA_TRY( cudaMemcpy(d_valid,h_valid, sizeof(gdf_valid_type*) * (raw_csv->num_active_cols), cudaMemcpyHostToDevice));

	free(h_dtypes);
	free(h_valid);
	free(h_data);

	launch_dataConvertColumns(raw_csv,d_data, d_valid, d_dtypes,d_str_cols, args->skiprows, d_valid_count);
	cudaDeviceSynchronize();

	// Hand the collected (pointer, length) pairs to NVStrings for each string column.
	stringColCount=0;
	for (int col = 0; col < raw_csv->num_active_cols; col++) {

		gdf_column *gdf = cols[col];

		if (gdf->dtype != gdf_dtype::GDF_STRING)
			continue;

		gdf->data = (void*)NVStrings::create_from_index(h_str_cols[stringColCount],size_t(raw_csv->num_records));

		RMM_TRY( rmmFree ( h_str_cols [stringColCount], 0 ) );
		stringColCount++;
	}

	CUDA_TRY( cudaMemcpy(h_valid_count,d_valid_count, sizeof(unsigned long long) * (raw_csv->num_active_cols), cudaMemcpyDeviceToHost));

	//--- set the null count
	for ( int col = 0; col < raw_csv->num_active_cols; col++) {
		cols[col]->null_count = raw_csv->num_records - h_valid_count[col];
	}
	free(h_valid_count);

	// free up space that is no longer needed
	if (h_str_cols != NULL)
		free ( h_str_cols);

	free(raw_csv->h_parseCol);

	if (d_str_cols != NULL)
		RMM_TRY( rmmFree ( d_str_cols, 0 ) );

	RMM_TRY( rmmFree ( d_valid, 0 ) );
	RMM_TRY( rmmFree ( d_valid_count, 0 ) );
	RMM_TRY( rmmFree ( d_dtypes, 0 ) );
	RMM_TRY( rmmFree ( d_data, 0 ) );

	RMM_TRY( rmmFree ( raw_csv->recStart, 0 ) );
	RMM_TRY( rmmFree ( raw_csv->d_parseCol, 0 ) );
	RMM_TRY( rmmFree ( raw_csv->d_num_records, 0 ) );
	CUDA_TRY( cudaFree ( raw_csv->data) );

	args->data = cols;
	args->num_cols_out = raw_csv->num_active_cols;
	args->num_rows_out = raw_csv->num_records;

	delete raw_csv;
	return error;
}
/*
* What is passed in is the data type as a string, need to convert that into gdf_dtype enum
*/
/*
 * Convert a user-supplied dtype name into the gdf_dtype enum.
 *
 * Fix: "int8" and "int16" aliases were missing even though GDF_INT8/GDF_INT16
 * are valid column types (and are produced by automatic type detection);
 * requesting them by name used to yield GDF_invalid. All previously accepted
 * names are unchanged.
 *
 * Returns GDF_invalid for unrecognized names.
 */
gdf_dtype convertStringToDtype(std::string &dtype) {
	if (dtype.compare( "str") == 0) return GDF_STRING;
	if (dtype.compare( "date") == 0) return GDF_DATE64;
	if (dtype.compare( "date32") == 0) return GDF_DATE32;
	if (dtype.compare( "date64") == 0) return GDF_DATE64;
	if (dtype.compare( "timestamp") == 0) return GDF_TIMESTAMP;
	if (dtype.compare( "category") == 0) return GDF_CATEGORY;
	if (dtype.compare( "float") == 0) return GDF_FLOAT32;
	if (dtype.compare( "float32") == 0) return GDF_FLOAT32;
	if (dtype.compare( "float64") == 0) return GDF_FLOAT64;
	if (dtype.compare( "double") == 0) return GDF_FLOAT64;
	if (dtype.compare( "int8") == 0) return GDF_INT8;
	if (dtype.compare( "short") == 0) return GDF_INT16;
	if (dtype.compare( "int16") == 0) return GDF_INT16;
	if (dtype.compare( "int") == 0) return GDF_INT32;
	if (dtype.compare( "int32") == 0) return GDF_INT32;
	if (dtype.compare( "int64") == 0) return GDF_INT64;
	if (dtype.compare( "long") == 0) return GDF_INT64;
	return GDF_invalid;
}
/*
* Create the raw_csv_t structure and allocate space on the GPU
*/
/*
 * Allocate the device-side members of raw_csv_t and copy the memory-mapped
 * CSV text onto the device.
 *
 * Fix: the record counter is an `unsigned long long`, but it was zeroed with
 * `sizeof(long)`, which is only 4 bytes on LLP64 platforms (e.g. 64-bit
 * Windows) and would leave the upper half of the counter uninitialized.
 */
gdf_error updateRawCsv( const char * data, long num_bytes, raw_csv_t * raw ) {
	// One 64-byte chunk per scanning thread in the record kernels.
	int num_bits = (num_bytes + 63) / 64;
	CUDA_TRY( cudaMallocManaged ((void**)&raw->data, (sizeof(char) * num_bytes)));
	// RMM_TRY( rmmAlloc ((void**)&raw->data, (sizeof(char) * num_bytes),0 ));
	RMM_TRY( rmmAlloc((void**)&raw->d_num_records, sizeof(unsigned long long),0) );
	CUDA_TRY( cudaMemcpy(raw->data, data, num_bytes, cudaMemcpyHostToDevice));
	CUDA_TRY( cudaMemset(raw->d_num_records, 0, sizeof(unsigned long long)) );
	raw->num_bits = num_bits;
	return GDF_SUCCESS;
}
/*
 * For each of the gdf_columns, create the on-device space. The on-host fields should already be filled in.
 */
// Allocate the device-side valid bitmap and (for non-string dtypes) the data
// buffer of a gdf_column whose host-side fields are already filled in.
gdf_error allocateGdfDataSpace(gdf_column *gdf) {
	long N = gdf->size;
	// NOTE(review): (N + 31) / 8 over-allocates relative to the minimal
	// (N + 7) / 8 bytes; the padding appears intentional so the 4-byte-aligned
	// word writes performed by validAtomicOR stay inside the buffer -- confirm
	// before shrinking.
	long num_bitmaps = (N + 31) / 8;
	//--- allocate space for the valid bitmaps
	RMM_TRY( rmmAlloc((void**)&(gdf->valid), (sizeof(gdf_valid_type) * num_bitmaps), 0) );
	CUDA_TRY(cudaMemset(gdf->valid, 0, (sizeof(gdf_valid_type) * num_bitmaps)) );
	int elementSize=0;
	//--- Allocate space for the data
	switch(gdf->dtype) {
		case gdf_dtype::GDF_INT8:
			elementSize = sizeof(int8_t);
			break;
		case gdf_dtype::GDF_INT16:
			elementSize = sizeof(int16_t);
			break;
		case gdf_dtype::GDF_INT32:
			elementSize = sizeof(int32_t);
			break;
		case gdf_dtype::GDF_INT64:
			elementSize = sizeof(int64_t);
			break;
		case gdf_dtype::GDF_FLOAT32:
			elementSize = sizeof(float);
			break;
		case gdf_dtype::GDF_FLOAT64:
			elementSize = sizeof(double);
			break;
		case gdf_dtype::GDF_DATE32:
			elementSize = sizeof(gdf_date32);
			break;
		case gdf_dtype::GDF_DATE64:
			elementSize = sizeof(gdf_date64);
			break;
		case gdf_dtype::GDF_TIMESTAMP:
			elementSize = sizeof(int64_t);
			break;
		case gdf_dtype::GDF_CATEGORY:
			elementSize = sizeof(gdf_category);
			break;
		case gdf_dtype::GDF_STRING:
			return gdf_error::GDF_SUCCESS;
			// Memory for gdf->data allocated by string class eventually
		default:
			return GDF_UNSUPPORTED_DTYPE;
	}
	RMM_TRY( rmmAlloc((void**)&(gdf->data), (elementSize * N), 0) );
	return gdf_error::GDF_SUCCESS;
}
//----------------------------------------------------------------------------------------------------------------
// CUDA Kernels
//----------------------------------------------------------------------------------------------------------------
/*
 * Launch the countRecords kernel and read the resulting record count back
 * into csvData->num_records.
 *
 * Fix: the device counter is an `unsigned long long`, but it was read back
 * into a `long` with `sizeof(long)` -- a 4-byte partial copy on LLP64
 * platforms that left the destination's upper half at its initial value.
 */
gdf_error launch_countRecords(raw_csv_t * csvData) {

	char *data = csvData->data;
	long num_bytes = csvData->num_bytes;
	long numBitmaps = csvData->num_bits;
	char delim = csvData->delimiter;
	unsigned long long *d_num_records = csvData->d_num_records;
	char terminator = csvData->terminator;

	/*
	 * Each thread scans one 64-byte chunk of the input; using the number of
	 * bitmaps as the launch size keeps the chunk <-> thread mapping 1:1.
	 * Note: could do one thread per byte, but that would require a lock on the bit map
	 */
	int64_t threads = 1024;
	// Using the number of bitmaps as the size - data index is bitmap ID * 64
	int64_t blocks = (numBitmaps + (threads -1)) / threads ;

	countRecords <<< blocks, threads >>> (data, delim, terminator, num_bytes, numBitmaps, d_num_records);
	CUDA_TRY(cudaGetLastError());

	// cudaMemcpy synchronizes, so the kernel has completed before the read.
	unsigned long long recs = 0;
	CUDA_TRY(cudaMemcpy(&recs, d_num_records, sizeof(unsigned long long), cudaMemcpyDeviceToHost));
	csvData->num_records = recs;

	CUDA_TRY(cudaGetLastError());
	return GDF_SUCCESS;
}
/*
 * CUDA kernel counting record terminators. Each thread scans one 64-byte
 * chunk of the input and accumulates its tally into *num_records with a
 * single atomicAdd.
 *
 * Fix: a "\r\n" pair is now attributed to the thread whose chunk contains the
 * '\n'. The previous version detected the pair at the '\r', which (a) read
 * data[did + x + 1] past the end of the buffer when '\r' was the file's last
 * byte (the guard compared the chunk-local x+1 against the global num_bytes),
 * and (b) double-counted a pair straddling a chunk boundary: the '\r' thread
 * counted it and the next thread counted the same '\n' again as a terminator.
 * Reading raw[x-1] is always in bounds because (did + x) > 0 is checked
 * first; the `raw[x-1] != terminator` guard keeps behavior unchanged when the
 * configured terminator is itself '\r'. storeRecordStart mirrors this logic
 * exactly so both kernels always agree on the record count.
 */
__global__ void countRecords(char *data, const char delim, const char terminator, long num_bytes, long num_bits, unsigned long long* num_records) {

	// thread IDs range per block, so also need the block id
	long tid = threadIdx.x + (blockDim.x * blockIdx.x);

	if ( tid >= num_bits)
		return;

	// data ID is a multiple of 64
	long did = tid * 64L;

	char *raw = (data + did);

	long byteToProcess = ((did + 64L) < num_bytes) ? 64L : (num_bytes - did);

	// process the data
	long newLinesFound = 0;
	for (long x = 0; x < byteToProcess; x++) {
		if (raw[x] == terminator) {
			newLinesFound++;
		} else if (raw[x] == '\n' && (did + x) > 0L &&
		           raw[x - 1] == '\r' && raw[x - 1] != terminator) {
			// Windows-style "\r\n" pair while a custom terminator is configured.
			newLinesFound++;
		}
	}
	atomicAdd((unsigned long long int*)num_records,(unsigned long long int)newLinesFound);
}
/*
 * Launch the storeRecordStart kernel: one thread per 64-byte chunk of the
 * input (the chunk count equals csvData->num_bits), each recording the start
 * offsets of the records whose terminators fall inside its chunk.
 */
gdf_error launch_storeRecordStart(raw_csv_t * csvData) {

	const long blockSize = 1024;
	// One thread per bitmap/chunk; data index is chunk ID * 64.
	const long gridSize = (csvData->num_bits + (blockSize - 1)) / blockSize;

	storeRecordStart <<< gridSize, blockSize >>> (csvData->data,
	                                              csvData->delimiter,
	                                              csvData->terminator,
	                                              csvData->num_bytes,
	                                              csvData->num_bits,
	                                              csvData->d_num_records,
	                                              csvData->recStart);

	CUDA_TRY(cudaGetLastError());
	return GDF_SUCCESS;
}
/*
 * CUDA kernel recording the start offset of every record into recStart.
 * Thread 0 additionally stores offset 0 for the first record; offsets are
 * emitted in arbitrary order (slots claimed via atomicAdd on *num_records)
 * and the caller sorts recStart afterwards.
 *
 * Fix: mirrors the "\r\n" handling fix in countRecords -- the pair is now
 * detected at the '\n', which removes the out-of-bounds read of data[x+1] at
 * the end of the buffer and the duplicate offset stored when a "\r\n" pair
 * straddled a 64-byte chunk boundary. The recorded offset (one past the
 * '\n') is identical to the previous in-chunk behavior, and the
 * `raw[x-1] != terminator` guard preserves behavior when the terminator is
 * itself '\r'. Keeping the two kernels symmetric guarantees the number of
 * stored offsets matches the earlier count (recStart holds count + 1 slots).
 */
__global__ void storeRecordStart(char *data, const char delim, const char terminator, long num_bytes, long num_bits, unsigned long long* num_records,unsigned long long* recStart) {

	// thread IDs range per block, so also need the block id
	long tid = threadIdx.x + (blockDim.x * blockIdx.x);

	if ( tid >= num_bits)
		return;

	// data ID - multiple of 64
	long did = tid * 64L;

	char *raw = (data + did);

	long byteToProcess = ((did + 64L) < num_bytes) ? 64L : (num_bytes - did);

	if(tid==0){
		// The first record always starts at offset 0.
		long pos = atomicAdd((unsigned long long int*)num_records,(unsigned long long int)1);
		recStart[pos]=did+0;
	}

	// process the data
	for (long x = 0; x < byteToProcess; x++) {
		if (raw[x] == terminator ||
		    (raw[x] == '\n' && (did + x) > 0L &&
		     raw[x - 1] == '\r' && raw[x - 1] != terminator)) {
			// The next record starts right after this terminator / "\r\n" pair.
			long pos = atomicAdd((unsigned long long int*)num_records,(unsigned long long int)1);
			recStart[pos]=did+x+1;
		}
	}
}
//----------------------------------------------------------------------------------------------------------------
/*
 * Launch the convertCsvToGdf kernel: one thread per record, converting each
 * field of the active columns into the pre-allocated gdf buffers and setting
 * the corresponding valid bits / valid counts.
 */
gdf_error launch_dataConvertColumns(raw_csv_t * raw_csv, void **gdf, gdf_valid_type** valid, gdf_dtype* d_dtypes,string_pair **str_cols, long row_offset, unsigned long long *num_valid) {

	const int64_t blockSize = 1024;
	const int64_t gridSize = (raw_csv->num_records + (blockSize - 1)) / blockSize;

	convertCsvToGdf <<< gridSize, blockSize >>> (raw_csv->data,
	                                             raw_csv->delimiter,
	                                             raw_csv->terminator,
	                                             raw_csv->num_records,
	                                             raw_csv->num_actual_cols,
	                                             raw_csv->d_parseCol,
	                                             raw_csv->recStart,
	                                             d_dtypes,
	                                             gdf,
	                                             valid,
	                                             str_cols,
	                                             row_offset,
	                                             raw_csv->header_row,
	                                             raw_csv->dayfirst,
	                                             num_valid);

	return GDF_SUCCESS;
}
/*
 * Kernel: convert raw CSV text into typed gdf columns.
 *
 * Data is processed one row\record at a time - so the number of total
 * threads (tid) is equal to the number of rows.  Each thread walks its
 * record, splits it into fields on `delim`, converts every requested field
 * into the destination gdf type, and marks non-empty values in the valid
 * bitmap.
 *
 * raw_csv     - device buffer holding the whole CSV text
 * delim       - field delimiter
 * terminator  - record terminator character
 * num_records - number of records to convert
 * num_columns - number of columns in the file (before user filtering)
 * parseCol    - per-raw-column flag: true when the user wants the column
 * recStart    - sorted byte offsets of record starts (see storeRecordStart)
 * dtype       - destination gdf type per raw column
 * gdf_data    - output buffers, indexed by *parsed* column (actual_col)
 * valid       - valid bitmaps (see NOTE below about indexing)
 * str_cols    - output (pointer,length) pairs, one array per string column
 * row_offset  - number of leading records to skip
 * header_row  - record index of the header (-1 when there is none)
 * dayfirst    - day-before-month flag for date parsing
 * num_valid   - per-column count of valid (non-null) values
 */
__global__ void convertCsvToGdf(
	char *raw_csv,
	char delim,
	char terminator,
	unsigned long long num_records,
	int num_columns,
	bool *parseCol,
	unsigned long long *recStart,
	gdf_dtype *dtype,
	void **gdf_data,
	gdf_valid_type **valid,
	string_pair **str_cols,
	unsigned long long row_offset,
	long header_row,
	bool dayfirst,
	unsigned long long *num_valid
)
{
	// thread IDs range per block, so also need the block id
	long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); // this is entry into the field array - tid is an elements within the num_entries array
	// we can have more threads than data, make sure we are not past the end of the data
	if ( rec_id >= num_records)
		return;
	// skip one extra recStart entry once at/past the header record
	long extraOff=0;
	if(rec_id>=header_row && header_row>=0)
		extraOff=1;
	long start = recStart[rec_id + row_offset + extraOff];
	long stop = recStart[rec_id + 1 + row_offset + extraOff];
	long pos = start;
	int col = 0;          // index into the raw file columns
	int actual_col = 0;   // index into the parsed (output) columns
	int stringCol = 0;    // index into str_cols (string columns only)
	while(col<num_columns){
		if(start>stop)
			break;
		// advance pos to the first byte past the current field
		while(true){
			if(raw_csv[pos]==delim){
				break;
			}
			else if (raw_csv[pos] == terminator){
				break;
			}
			else if(raw_csv[pos] == '\r' && ((pos+1) < stop && raw_csv[pos+1]=='\n')){
				// "\r\n" line ending: shrink the record so the extra byte is
				// excluded from subsequent fields
				stop--;
				break;
			}
			if(pos>=stop)
				break;
			pos++;
		}
		if(parseCol[col]==true){
			long tempPos=pos-1;  // last byte of the field
			// trim surrounding whitespace for fixed-width conversions;
			// category hashing and strings keep the raw bytes
			if(dtype[col] != gdf_dtype::GDF_CATEGORY && dtype[col] != gdf_dtype::GDF_STRING){
				removePrePostWhiteSpaces2(raw_csv, &start, &tempPos);
			}
			if(start<=(tempPos)) { // Empty strings are not legal values
				switch(dtype[col]) {
					case gdf_dtype::GDF_INT8:
					{
						int8_t *gdf_out = (int8_t *)gdf_data[actual_col];
						gdf_out[rec_id] = convertStrtoInt<int8_t>(raw_csv, start, tempPos);
					}
						break;
					case gdf_dtype::GDF_INT16: {
						int16_t *gdf_out = (int16_t *)gdf_data[actual_col];
						gdf_out[rec_id] = convertStrtoInt<int16_t>(raw_csv, start, tempPos);
					}
						break;
					case gdf_dtype::GDF_INT32:
					{
						int32_t *gdf_out = (int32_t *)gdf_data[actual_col];
						gdf_out[rec_id] = convertStrtoInt<int32_t>(raw_csv, start, tempPos);
					}
						break;
					case gdf_dtype::GDF_INT64:
					{
						int64_t *gdf_out = (int64_t *)gdf_data[actual_col];
						gdf_out[rec_id] = convertStrtoInt<int64_t>(raw_csv, start, tempPos);
					}
						break;
					case gdf_dtype::GDF_FLOAT32:
					{
						float *gdf_out = (float *)gdf_data[actual_col];
						gdf_out[rec_id] = convertStrtoFloat<float>(raw_csv, start, tempPos);
					}
						break;
					case gdf_dtype::GDF_FLOAT64:
					{
						double *gdf_out = (double *)gdf_data[actual_col];
						gdf_out[rec_id] = convertStrtoFloat<double>(raw_csv, start, tempPos);
					}
						break;
					case gdf_dtype::GDF_DATE32:
					{
						gdf_date32 *gdf_out = (gdf_date32 *)gdf_data[actual_col];
						gdf_out[rec_id] = parseDateFormat(raw_csv, start, tempPos, dayfirst);
					}
						break;
					case gdf_dtype::GDF_DATE64:
					{
						gdf_date64 *gdf_out = (gdf_date64 *)gdf_data[actual_col];
						gdf_out[rec_id] = parseDateTimeFormat(raw_csv, start, tempPos, dayfirst);
					}
						break;
					case gdf_dtype::GDF_TIMESTAMP:
					{
						int64_t *gdf_out = (int64_t *)gdf_data[actual_col];
						gdf_out[rec_id] = convertStrtoInt<int64_t>(raw_csv, start, tempPos);
					}
						break;
					case gdf_dtype::GDF_CATEGORY:
					{
						// NOTE(review): hashes up to `pos` (untrimmed end), not
						// tempPos, consistent with the no-trim rule above
						gdf_category *gdf_out = (gdf_category *)gdf_data[actual_col];
						gdf_out[rec_id] = convertStrtoHash(raw_csv, start, pos, HASH_SEED);
					}
						break;
					case gdf_dtype::GDF_STRING:{
						str_cols[stringCol][rec_id].first	= raw_csv+start;
						str_cols[stringCol][rec_id].second	= size_t(pos-start);
						stringCol++;
					}
						break;
					default:
						break;
				}
				// set the valid bitmap - all bits were set to 0 to start
				int bitmapIdx 	= whichBitmap(rec_id);  	// which bitmap
				int bitIdx		= whichBit(rec_id);		// which bit - over an 8-bit index
				// NOTE(review): valid/num_valid are indexed by the raw column
				// index `col`, while gdf_data uses `actual_col`; when columns
				// are filtered out these diverge -- confirm valid arrays are
				// allocated per raw column, otherwise this should be actual_col.
				setBit(valid[col]+bitmapIdx, bitIdx);		// This is done with atomics
				atomicAdd((unsigned long long int*)&num_valid[col],(unsigned long long int)1);
			}
			else if(dtype[col]==gdf_dtype::GDF_STRING){
				// empty string field: record an explicit null entry
				str_cols[stringCol][rec_id].first 	= NULL;
				str_cols[stringCol][rec_id].second 	= 0;
				stringCol++;
			}
			actual_col++;
		}
		pos++;
		start=pos;
		col++;
	}
}
//----------------------------------------------------------------------------------------------------------------
// Host-side wrapper: launches one dataTypeDetection thread per CSV record
// to accumulate per-column type-vote histograms in d_columnData.
//
// raw_csv      - parsed-file metadata (device pointers, record count)
// row_offset   - number of leading records to skip
// d_columnData - per-column counters updated atomically by the kernel
//
// Returns GDF_SUCCESS (kernel launch errors are not surfaced here).
gdf_error launch_dataTypeDetection(
	raw_csv_t * raw_csv,
	long row_offset,
	column_data_t* d_columnData)
{
	// Nothing to inspect for an empty input; also avoids requesting an
	// invalid zero-block kernel launch.
	if (raw_csv->num_records == 0)
		return GDF_SUCCESS;

	int64_t threads = 1024;
	// ceil-divide so the trailing partial block is covered
	int64_t blocks = ( raw_csv->num_records + (threads -1)) / threads ;

	dataTypeDetection <<< blocks, threads >>>(
		raw_csv->data,
		raw_csv->delimiter,
		raw_csv->terminator,
		raw_csv->num_records,
		raw_csv->num_actual_cols,
		raw_csv->d_parseCol,
		raw_csv->recStart,
		row_offset,
		raw_csv->header_row,
		d_columnData
	);
	return GDF_SUCCESS;
}
/*
 * Kernel: per-record type detection for CSV auto-typing.
 *
 * One thread per record.  Each field is scanned for digits, decimal points,
 * dashes, slashes and colons; the character histogram is used to cast a
 * "vote" (atomic counter increment in d_columnData) for the most plausible
 * type of the field's column: null, int8/16/32/64, float, date-time or
 * string.  The host later picks each column's type from the vote totals.
 */
__global__ void dataTypeDetection(
	char *raw_csv,
	char delim,
	char terminator,
	unsigned long long num_records,
	int num_columns,
	bool *parseCol,
	unsigned long long *recStart,
	unsigned long long row_offset,
	long header_row,
	column_data_t* d_columnData
)
{
	// thread IDs range per block, so also need the block id
	long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); // this is entry into the field array - tid is an elements within the num_entries array
	// we can have more threads than data, make sure we are not past the end of the data
	if ( rec_id >= num_records)
		return;
	// skip one extra recStart entry once at/past the header record
	long extraOff=0;
	if(rec_id>=header_row && header_row>=0)
		extraOff=1;
	long start = recStart[rec_id + row_offset + extraOff];
	long stop = recStart[rec_id + 1 + row_offset + extraOff];
	long pos = start;
	int col = 0;
	int actual_col = 0;
	// Going through all the columns of a given record
	while(col<num_columns){
		if(start>stop)
			break;
		// Finding the breaking point for each column
		// NOTE(review): this loop exits on `pos>stop` while convertCsvToGdf
		// uses `pos>=stop` -- confirm the off-by-one difference is intended.
		while(true){
			if(raw_csv[pos]==delim){
				break;
			}
			else if (raw_csv[pos] == terminator){
				break;
			}
			else if(raw_csv[pos] == '\r' && ((pos+1) < stop && raw_csv[pos+1]=='\n')){
				stop--;
				break;
			}
			if(pos>stop)
				break;
			pos++;
		}
		// Checking if this is a column that the user wants --- user can filter columns
		if(parseCol[col]==true){
			long tempPos=pos-1;  // last byte of the field
			// Checking if the record is NULL
			if(start>(tempPos)){
				atomicAdd(& d_columnData[actual_col].countNULL, 1L);
				pos++;
				start=pos;
				col++;
				actual_col++;
				continue;
			}
			long countNumber=0;
			long countDecimal=0;
			long countSlash=0;
			long countDash=0;
			long countColon=0;
			long countString=0;
			// Remove all pre and post white-spaces. We might find additional NULL fields if the entire entry is made up of only spaces.
			removePrePostWhiteSpaces2(raw_csv, &start, &tempPos);
			// Field length measured *after* trimming so padded values such as
			// " 123 " are classified by their content, and an all-whitespace
			// field trims to length 0 and is counted as NULL below.
			long strLen=tempPos-start+1;
			if(strLen<0)
				strLen=0;
			for(long startPos=start; startPos<=tempPos; startPos++){
				if(raw_csv[startPos]>= '0' && raw_csv[startPos] <= '9'){
					countNumber++;
					continue;
				}
				// Looking for unique characters that will help identify column types.
				switch (raw_csv[startPos]){
					case '.':
						countDecimal++;break;
					case '-':
						countDash++; break;
					case '/':
						countSlash++;break;
					case ':':
						countColon++;break;
					default:
						countString++;
						break;
				}
			}
			if(strLen==0) // Removed spaces ' ' in the pre-processing and thus we can have an empty string.
				atomicAdd(& d_columnData[actual_col].countNULL, 1L);
			// Integers have to have the length of the string or can be off by one if they start with a minus sign
			else if(countNumber==(strLen) || ( strLen>1 && countNumber==(strLen-1) && raw_csv[start]=='-') ){
				// Checking to see if we the integer value requires 8,16,32,64 bits.
				// This will allow us to allocate the exact amount of memory.
				// NOTE(review): `i` is signed; a negative value fails every
				// `>=` threshold below and always votes int8 -- confirm widths
				// are re-validated elsewhere for negative data.
				int64_t i = convertStrtoInt<int64_t>(raw_csv, start, tempPos);
				if(i >= (1L<<31)){
					atomicAdd(& d_columnData[actual_col].countInt64, 1L);
				}
				else if(i >= (1L<<15)){
					atomicAdd(& d_columnData[actual_col].countInt32, 1L);
				}
				else if(i >= (1L<<7)){
					atomicAdd(& d_columnData[actual_col].countInt16, 1L);
				}
				else
					atomicAdd(& d_columnData[actual_col].countInt8, 1L);
			}
			// Floating point numbers are made up of numerical strings, have to have a decimal sign, and can have a minus sign.
			// Both branches require exactly one decimal point; the second
			// additionally allows a leading minus sign.
			else if((countNumber==(strLen-1) && countDecimal==1) || (strLen>2 && countNumber==(strLen-2) && countDecimal==1 && raw_csv[start]=='-')){
				atomicAdd(& d_columnData[actual_col].countFloat, 1L);
			}
			// The date-time field cannot have more than 3 strings. As such if an entry has more than 3 string characters, it is not
			// a data-time field. Also, if a string has multiple decimals, then is not a legit number.
			else if(countString > 3 || countDecimal > 1){
				atomicAdd(& d_columnData[actual_col].countString, 1L);
			}
			else {
				// A date field can have either one or two '-' or '\'. A legal combination will only have one of them.
				// To simplify the process of auto column detection, we are not covering all the date-time formation permutations.
				if((countDash>0 && countDash<=2 && countSlash==0)|| (countDash==0 && countSlash>0 && countSlash<=2) ){
					if((countColon<=2)){
						atomicAdd(& d_columnData[actual_col].countDateAndTime, 1L);
					}
					else{
						atomicAdd(& d_columnData[actual_col].countString, 1L);
					}
				}
				// Default field is string type.
				else{
					atomicAdd(& d_columnData[actual_col].countString, 1L);
				}
			}
			actual_col++;
		}
		pos++;
		start=pos;
		col++;
	}
}
//----------------------------------------------------------------------------------------------------------------
/*
 * Return which bit is set.
 * x is the occurrence: 1 = first, 2 = second, ...
 *
 * Scans the 64-bit words of r_bits starting at word index `tid` and returns
 * the bit offset (relative to bit 0 of r_bits[tid]) of the x-th set bit.
 * Returns -1 when x == 0 or when the word index runs past `num_bits`.
 *
 * NOTE(review): `num_bits` is compared against a *word* index, so it appears
 * to be the number of 64-bit words rather than bits -- confirm at call site.
 */
__device__ int findSetBit(int tid, long num_bits, uint64_t *r_bits, int x) {
	int idx = tid;

	if ( x == 0 )
		return -1;

	int withinBitCount = 0;   // bits consumed within the current word
	int offset = 0;           // bit offset of the current word's bit 0
	int found  = 0;           // set bits seen so far

	uint64_t bitmap = r_bits[idx];

	while (found != x)
	{
		// current word exhausted (all remaining bits zero): move to the next
		// word; `offset` advances by a full word regardless of how many bits
		// were consumed, since the skipped remainder contains no set bits
		if(bitmap == 0)
		{
			idx++;
			if (idx >= num_bits)
				return -1;
			bitmap = r_bits[idx];
			offset += 64;
			withinBitCount = 0;
		}

		// consume one bit from the low end of the word
		if ( bitmap & 1 ) {
			found++;			//found a set bit
		}

		bitmap >>= 1;
		++withinBitCount;
	}

	// withinBitCount counted the found bit itself, hence the -1
	offset += withinBitCount -1;

	return offset;
}
|
fd3e03e2891a47d51810784fc27b858d0fadecdc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <hipfftXt.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "common.h"
extern const int SigLen;
extern const int FFTRun;
extern const int Trials;
// Benchmark of batched 1-D real<->complex hipFFT transforms.
//
// Runs 100 iterations each of forward (R2C) and inverse (C2R) FFTs for
// three configurations -- 64 batches of length 2048, 64 batches of length
// 4096, and 64 batches of a 4096-point plan applied to 4098-element rows --
// and prints the average per-iteration GPU time.
//
// NOTE(review): return values of hipGetDeviceProperties/hipSetDevice and the
// host malloc calls are unchecked, and the early `return -N` error paths
// leak device allocations and plans; acceptable for a one-shot benchmark.
int main(int argc, char **argv)
{
	int targetDev = 0;
	hipDeviceProp_t prop;
	hipGetDeviceProperties(&prop, targetDev);
	printf("Device name: %s\n", prop.name);

	hipSetDevice(targetDev);

	/*
	int sig_len = SigLen;
	int fft_run = FFTRun;
	int trials  = Trials;

	//-------------------//
	// read cmd options
	//-------------------//
	int i = 0;
	while(i < argc)
	{
		if(argv[i][0]=='-')
		{
			if(argv[i][1]=='-'){
				// read long options
				if(moreopt(argv[i]))
					fprintf(stderr,"unknown verbose option : %s\n", argv[i]);
			}

			// read short options
			switch(argv[i][1])
			{
				case 'u':
					usage(argv[0]);
					exit(EXIT_FAILURE);
				case 'l':
					i=read_opt(argc, argv, i, &sig_len, "int");
					break;
				case 'r':
					i=read_opt(argc, argv, i, &fft_run, "int");
					break;
				case 't':
					i=read_opt(argc, argv, i, &trials, "int");
					break;
			}
		}
		i++;
	}

	printf("[LOG] FFT Length: %d\n", sig_len);
	printf("[LOG] Runs:       %d\n", fft_run);
	printf("[LOG] Trials:     %d\n", trials);
	*/

	printf("[LOG] Start 1d-fft GPU.\n");

	//------------------------------------------------------------------------//
	// host memory: 64 rows each of 2048 / 4096 / 4098 floats, filled with
	// the ascending ramp 1..rowlen
	//------------------------------------------------------------------------//
	float *h1 = (float*) malloc(sizeof(float) * 2048 * 64);
	float *h2 = (float*) malloc(sizeof(float) * 4096 * 64);
	float *h3 = (float*) malloc(sizeof(float) * 4098 * 64);

	for(int i=0;i<64;i++){
		for(int j=0;j<2048;j++){
			h1[i*2048 + j] = (float)(j+1);
		}
	}

	for(int i=0;i<64;i++){
		for(int j=0;j<4096;j++){
			h2[i*4096 + j] = (float)(j+1);
		}
	}

	for(int i=0;i<64;i++){
		for(int j=0;j<4098;j++){
			h3[i*4098 + j] = (float)(j+1);
		}
	}

	//------------------------------------------------------------------------//
	// gpu timer
	//------------------------------------------------------------------------//
	hipEvent_t start, stop;
	hipEventCreate(&start);
	hipEventCreate(&stop);

	//------------------------------------------------------------------------//
	// device memory
	//------------------------------------------------------------------------//
	float *d1,*d2,*d3;
	checkCuda( hipMalloc((void**)&d1, sizeof(float) * 2048 * 64) );
	checkCuda( hipMalloc((void**)&d2, sizeof(float) * 4096 * 64) );
	checkCuda( hipMalloc((void**)&d3, sizeof(float) * 4098 * 64) );

	hipfftComplex *d1_complex, *d2_complex, *d3_complex;
	checkCuda( hipMalloc((void**)&d1_complex, sizeof(hipfftComplex) * 2048 * 64) );
	checkCuda( hipMalloc((void**)&d2_complex, sizeof(hipfftComplex) * 4096 * 64) );
	checkCuda( hipMalloc((void**)&d3_complex, sizeof(hipfftComplex) * 4098 * 64) );

	//------------------------------------------------------------------------//
	// copy data from host to device
	//------------------------------------------------------------------------//
	checkCuda( hipMemcpy(d1, h1, sizeof(float)*64*2048, hipMemcpyHostToDevice) );
	checkCuda( hipMemcpy(d2, h2, sizeof(float)*64*4096, hipMemcpyHostToDevice) );
	checkCuda( hipMemcpy(d3, h3, sizeof(float)*64*4098, hipMemcpyHostToDevice) );

	//------------------------------------------------------------------------//
	// Create FFT plans (forward R2C and inverse C2R for each configuration)
	//------------------------------------------------------------------------//
	printf("[LOG] 2k fft plan.\n");
	hipfftHandle plan2k;
	if (hipfftPlan1d(&plan2k, 2048, HIPFFT_R2C, 64) != HIPFFT_SUCCESS){
		fprintf(stderr, "CUFFT error: 2k Plan creation failed");
		return -1;
	}

	printf("[LOG] 2k inverse fft plan.\n");
	hipfftHandle plan2ki;
	if (hipfftPlan1d(&plan2ki, 2048, HIPFFT_C2R, 64) != HIPFFT_SUCCESS){
		fprintf(stderr, "CUFFT error: ifft 2k Plan creation failed");
		return -10;
	}

	printf("[LOG] 4k fft plan.\n");
	hipfftHandle plan4k;
	if (hipfftPlan1d(&plan4k, 4096, HIPFFT_R2C, 64) != HIPFFT_SUCCESS){
		fprintf(stderr, "CUFFT error: 4k Plan creation failed");
		return -2;
	}

	printf("[LOG] 4k inverse fft plan.\n");
	hipfftHandle plan4ki;
	if (hipfftPlan1d(&plan4ki, 4096, HIPFFT_C2R, 64) != HIPFFT_SUCCESS){
		fprintf(stderr, "CUFFT error: ifft 4k Plan creation failed");
		return -20;
	}

	printf("[LOG] 4k_a fft plan.\n");
	hipfftHandle plan4k_a;
	if (hipfftPlan1d(&plan4k_a, 4096, HIPFFT_R2C, 64) != HIPFFT_SUCCESS){
		fprintf(stderr, "CUFFT error: 4k_a Plan creation failed");
		return -3;
	}

	printf("[LOG] 4k_a_i fft plan.\n");
	hipfftHandle plan4k_a_i;
	if (hipfftPlan1d(&plan4k_a_i, 4096, HIPFFT_C2R, 64) != HIPFFT_SUCCESS){
		fprintf(stderr, "CUFFT error: ifft 4k_a Plan creation failed");
		return -30;
	}

	//------------------------------------------------------------------------//
	// run forward FFT
	// Each section times 100 executions; "* 0.01" below reports the average
	// per-iteration time in milliseconds.
	//------------------------------------------------------------------------//
	float gputime_ms;

	//--------//
	// 2k fft
	//--------//
	printf("[LOG] Test R2C 2048 fft.\n");
	gputime_ms = 0.f;
	hipEventRecord(start, 0);
	for (int i = 0; i < 100; i++) {
		if (hipfftExecR2C(plan2k, (hipfftReal*)d1, (hipfftComplex*)d1_complex) != HIPFFT_SUCCESS){
			fprintf(stderr, "CUFFT error: ExecR2C 2K Forward failed");
			return -4;
		}
	}
	hipEventRecord(stop, 0);
	hipEventSynchronize(stop);
	hipEventElapsedTime(&gputime_ms, start, stop);
	printf("runtime = %lf (ms)\n", gputime_ms * 0.01);

	//--------//
	// 2k ifft
	//--------//
	printf("[ifft] 2048.\n");
	gputime_ms = 0.f;
	hipEventRecord(start, 0);
	for (int i = 0; i < 100; i++) {
		if (hipfftExecC2R(plan2ki, (hipfftComplex*)d1_complex, (hipfftReal*)d1) != HIPFFT_SUCCESS){
			fprintf(stderr, "CUFFT error: ExecC2R 2K Inverse failed");
			return -40;
		}
	}
	hipEventRecord(stop, 0);
	hipEventSynchronize(stop);
	hipEventElapsedTime(&gputime_ms, start, stop);
	printf("runtime = %lf (ms)\n", gputime_ms * 0.01);

	//--------//
	// 4k fft
	//--------//
	printf("[LOG] Test R2C 4096 fft.\n");
	gputime_ms = 0.f;
	hipEventRecord(start, 0);
	for (int i = 0; i < 100; i++) {
		if (hipfftExecR2C(plan4k, (hipfftReal*)d2, (hipfftComplex*)d2_complex) != HIPFFT_SUCCESS){
			fprintf(stderr, "CUFFT error: ExecR2C 4K Forward failed");
			return -5;
		}
	}
	hipEventRecord(stop, 0);
	hipEventSynchronize(stop);
	hipEventElapsedTime(&gputime_ms, start, stop);
	printf("runtime = %lf (ms)\n", gputime_ms * 0.01);

	//--------//
	// 4k ifft
	//--------//
	printf("[ifft] 4096\n");
	gputime_ms = 0.f;
	hipEventRecord(start, 0);
	for (int i = 0; i < 100; i++) {
		if (hipfftExecC2R(plan4ki, (hipfftComplex*)d2_complex, (hipfftReal*)d2) != HIPFFT_SUCCESS){
			fprintf(stderr, "CUFFT error: ExecC2R 4K Inverse failed");
			return -50;
		}
	}
	hipEventRecord(stop, 0);
	hipEventSynchronize(stop);
	hipEventElapsedTime(&gputime_ms, start, stop);
	printf("runtime = %lf (ms)\n", gputime_ms * 0.01);

	//--------//
	// 4k fft on 4098 (4096-point plan striding over 4098-element rows)
	//--------//
	printf("[LOG] Test R2C 4096 fft on 4098.\n");
	gputime_ms = 0.f;
	hipEventRecord(start, 0);
	for (int i = 0; i < 100; i++) {
		if (hipfftExecR2C(plan4k_a, (hipfftReal*)d3, (hipfftComplex*)d3_complex) != HIPFFT_SUCCESS){
			fprintf(stderr, "CUFFT error: ExecR2C 4K_a Forward failed");
			return -6;
		}
	}
	hipEventRecord(stop, 0);
	hipEventSynchronize(stop);
	hipEventElapsedTime(&gputime_ms, start, stop);
	printf("runtime = %lf (ms)\n", gputime_ms * 0.01);

	//--------//
	// 4k ifft on 4098
	//--------//
	printf("[ifft] 4096 on 4098\n");
	gputime_ms = 0.f;
	hipEventRecord(start, 0);
	for (int i = 0; i < 100; i++) {
		if (hipfftExecC2R(plan4k_a_i, (hipfftComplex*)d3_complex, (hipfftReal*)d3) != HIPFFT_SUCCESS){
			fprintf(stderr, "CUFFT error: ExecC2R 4K_a Inverse failed");
			return -60;
		}
	}
	hipEventRecord(stop, 0);
	hipEventSynchronize(stop);
	hipEventElapsedTime(&gputime_ms, start, stop);
	printf("runtime = %lf (ms)\n", gputime_ms * 0.01);

	//------------------------------------------------------------------------//
	// free
	//------------------------------------------------------------------------//
	checkCudaErrors(hipEventDestroy(start));
	checkCudaErrors(hipEventDestroy(stop));

	hipfftDestroy(plan2k);
	hipfftDestroy(plan2ki);
	hipfftDestroy(plan4k);
	hipfftDestroy(plan4ki);
	hipfftDestroy(plan4k_a);
	hipfftDestroy(plan4k_a_i);

	hipFree(d1);
	hipFree(d2);
	hipFree(d3);

	hipFree(d1_complex);
	hipFree(d2_complex);
	hipFree(d3_complex);

	free(h1);
	free(h2);
	free(h3);

	return 0;
}
| fd3e03e2891a47d51810784fc27b858d0fadecdc.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <time.h>
#include <cuda_runtime.h>
#include <cufft.h>
#include <cufftXt.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "common.h"
extern const int SigLen;
extern const int FFTRun;
extern const int Trials;
// Benchmark of batched 1-D real<->complex cuFFT transforms.
//
// Runs 100 iterations each of forward (R2C) and inverse (C2R) FFTs for
// three configurations -- 64 batches of length 2048, 64 batches of length
// 4096, and 64 batches of a 4096-point plan applied to 4098-element rows --
// and prints the average per-iteration GPU time.
//
// NOTE(review): return values of cudaGetDeviceProperties/cudaSetDevice and
// the host malloc calls are unchecked, and the early `return -N` error paths
// leak device allocations and plans; acceptable for a one-shot benchmark.
int main(int argc, char **argv)
{
	int targetDev = 0;
	cudaDeviceProp prop;
	cudaGetDeviceProperties(&prop, targetDev);
	printf("Device name: %s\n", prop.name);

	cudaSetDevice(targetDev);

	/*
	int sig_len = SigLen;
	int fft_run = FFTRun;
	int trials  = Trials;

	//-------------------//
	// read cmd options
	//-------------------//
	int i = 0;
	while(i < argc)
	{
		if(argv[i][0]=='-')
		{
			if(argv[i][1]=='-'){
				// read long options
				if(moreopt(argv[i]))
					fprintf(stderr,"unknown verbose option : %s\n", argv[i]);
			}

			// read short options
			switch(argv[i][1])
			{
				case 'u':
					usage(argv[0]);
					exit(EXIT_FAILURE);
				case 'l':
					i=read_opt(argc, argv, i, &sig_len, "int");
					break;
				case 'r':
					i=read_opt(argc, argv, i, &fft_run, "int");
					break;
				case 't':
					i=read_opt(argc, argv, i, &trials, "int");
					break;
			}
		}
		i++;
	}

	printf("[LOG] FFT Length: %d\n", sig_len);
	printf("[LOG] Runs:       %d\n", fft_run);
	printf("[LOG] Trials:     %d\n", trials);
	*/

	printf("[LOG] Start 1d-fft GPU.\n");

	//------------------------------------------------------------------------//
	// host memory: 64 rows each of 2048 / 4096 / 4098 floats, filled with
	// the ascending ramp 1..rowlen
	//------------------------------------------------------------------------//
	float *h1 = (float*) malloc(sizeof(float) * 2048 * 64);
	float *h2 = (float*) malloc(sizeof(float) * 4096 * 64);
	float *h3 = (float*) malloc(sizeof(float) * 4098 * 64);

	for(int i=0;i<64;i++){
		for(int j=0;j<2048;j++){
			h1[i*2048 + j] = (float)(j+1);
		}
	}

	for(int i=0;i<64;i++){
		for(int j=0;j<4096;j++){
			h2[i*4096 + j] = (float)(j+1);
		}
	}

	for(int i=0;i<64;i++){
		for(int j=0;j<4098;j++){
			h3[i*4098 + j] = (float)(j+1);
		}
	}

	//------------------------------------------------------------------------//
	// gpu timer
	//------------------------------------------------------------------------//
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);

	//------------------------------------------------------------------------//
	// device memory
	//------------------------------------------------------------------------//
	float *d1,*d2,*d3;
	checkCuda( cudaMalloc((void**)&d1, sizeof(float) * 2048 * 64) );
	checkCuda( cudaMalloc((void**)&d2, sizeof(float) * 4096 * 64) );
	checkCuda( cudaMalloc((void**)&d3, sizeof(float) * 4098 * 64) );

	cufftComplex *d1_complex, *d2_complex, *d3_complex;
	checkCuda( cudaMalloc((void**)&d1_complex, sizeof(cufftComplex) * 2048 * 64) );
	checkCuda( cudaMalloc((void**)&d2_complex, sizeof(cufftComplex) * 4096 * 64) );
	checkCuda( cudaMalloc((void**)&d3_complex, sizeof(cufftComplex) * 4098 * 64) );

	//------------------------------------------------------------------------//
	// copy data from host to device
	//------------------------------------------------------------------------//
	checkCuda( cudaMemcpy(d1, h1, sizeof(float)*64*2048, cudaMemcpyHostToDevice) );
	checkCuda( cudaMemcpy(d2, h2, sizeof(float)*64*4096, cudaMemcpyHostToDevice) );
	checkCuda( cudaMemcpy(d3, h3, sizeof(float)*64*4098, cudaMemcpyHostToDevice) );

	//------------------------------------------------------------------------//
	// Create FFT plans (forward R2C and inverse C2R for each configuration)
	//------------------------------------------------------------------------//
	printf("[LOG] 2k fft plan.\n");
	cufftHandle plan2k;
	if (cufftPlan1d(&plan2k, 2048, CUFFT_R2C, 64) != CUFFT_SUCCESS){
		fprintf(stderr, "CUFFT error: 2k Plan creation failed");
		return -1;
	}

	printf("[LOG] 2k inverse fft plan.\n");
	cufftHandle plan2ki;
	if (cufftPlan1d(&plan2ki, 2048, CUFFT_C2R, 64) != CUFFT_SUCCESS){
		fprintf(stderr, "CUFFT error: ifft 2k Plan creation failed");
		return -10;
	}

	printf("[LOG] 4k fft plan.\n");
	cufftHandle plan4k;
	if (cufftPlan1d(&plan4k, 4096, CUFFT_R2C, 64) != CUFFT_SUCCESS){
		fprintf(stderr, "CUFFT error: 4k Plan creation failed");
		return -2;
	}

	printf("[LOG] 4k inverse fft plan.\n");
	cufftHandle plan4ki;
	if (cufftPlan1d(&plan4ki, 4096, CUFFT_C2R, 64) != CUFFT_SUCCESS){
		fprintf(stderr, "CUFFT error: ifft 4k Plan creation failed");
		return -20;
	}

	printf("[LOG] 4k_a fft plan.\n");
	cufftHandle plan4k_a;
	if (cufftPlan1d(&plan4k_a, 4096, CUFFT_R2C, 64) != CUFFT_SUCCESS){
		fprintf(stderr, "CUFFT error: 4k_a Plan creation failed");
		return -3;
	}

	printf("[LOG] 4k_a_i fft plan.\n");
	cufftHandle plan4k_a_i;
	if (cufftPlan1d(&plan4k_a_i, 4096, CUFFT_C2R, 64) != CUFFT_SUCCESS){
		fprintf(stderr, "CUFFT error: ifft 4k_a Plan creation failed");
		return -30;
	}

	//------------------------------------------------------------------------//
	// run forward FFT
	// Each section times 100 executions; "* 0.01" below reports the average
	// per-iteration time in milliseconds.
	//------------------------------------------------------------------------//
	float gputime_ms;

	//--------//
	// 2k fft
	//--------//
	printf("[LOG] Test R2C 2048 fft.\n");
	gputime_ms = 0.f;
	cudaEventRecord(start, 0);
	for (int i = 0; i < 100; i++) {
		if (cufftExecR2C(plan2k, (cufftReal*)d1, (cufftComplex*)d1_complex) != CUFFT_SUCCESS){
			fprintf(stderr, "CUFFT error: ExecR2C 2K Forward failed");
			return -4;
		}
	}
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&gputime_ms, start, stop);
	printf("runtime = %lf (ms)\n", gputime_ms * 0.01);

	//--------//
	// 2k ifft
	//--------//
	printf("[ifft] 2048.\n");
	gputime_ms = 0.f;
	cudaEventRecord(start, 0);
	for (int i = 0; i < 100; i++) {
		if (cufftExecC2R(plan2ki, (cufftComplex*)d1_complex, (cufftReal*)d1) != CUFFT_SUCCESS){
			fprintf(stderr, "CUFFT error: ExecC2R 2K Inverse failed");
			return -40;
		}
	}
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&gputime_ms, start, stop);
	printf("runtime = %lf (ms)\n", gputime_ms * 0.01);

	//--------//
	// 4k fft
	//--------//
	printf("[LOG] Test R2C 4096 fft.\n");
	gputime_ms = 0.f;
	cudaEventRecord(start, 0);
	for (int i = 0; i < 100; i++) {
		if (cufftExecR2C(plan4k, (cufftReal*)d2, (cufftComplex*)d2_complex) != CUFFT_SUCCESS){
			fprintf(stderr, "CUFFT error: ExecR2C 4K Forward failed");
			return -5;
		}
	}
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&gputime_ms, start, stop);
	printf("runtime = %lf (ms)\n", gputime_ms * 0.01);

	//--------//
	// 4k ifft
	//--------//
	printf("[ifft] 4096\n");
	gputime_ms = 0.f;
	cudaEventRecord(start, 0);
	for (int i = 0; i < 100; i++) {
		if (cufftExecC2R(plan4ki, (cufftComplex*)d2_complex, (cufftReal*)d2) != CUFFT_SUCCESS){
			fprintf(stderr, "CUFFT error: ExecC2R 4K Inverse failed");
			return -50;
		}
	}
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&gputime_ms, start, stop);
	printf("runtime = %lf (ms)\n", gputime_ms * 0.01);

	//--------//
	// 4k fft on 4098 (4096-point plan striding over 4098-element rows)
	//--------//
	printf("[LOG] Test R2C 4096 fft on 4098.\n");
	gputime_ms = 0.f;
	cudaEventRecord(start, 0);
	for (int i = 0; i < 100; i++) {
		if (cufftExecR2C(plan4k_a, (cufftReal*)d3, (cufftComplex*)d3_complex) != CUFFT_SUCCESS){
			fprintf(stderr, "CUFFT error: ExecR2C 4K_a Forward failed");
			return -6;
		}
	}
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&gputime_ms, start, stop);
	printf("runtime = %lf (ms)\n", gputime_ms * 0.01);

	//--------//
	// 4k ifft on 4098
	//--------//
	printf("[ifft] 4096 on 4098\n");
	gputime_ms = 0.f;
	cudaEventRecord(start, 0);
	for (int i = 0; i < 100; i++) {
		if (cufftExecC2R(plan4k_a_i, (cufftComplex*)d3_complex, (cufftReal*)d3) != CUFFT_SUCCESS){
			fprintf(stderr, "CUFFT error: ExecC2R 4K_a Inverse failed");
			return -60;
		}
	}
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&gputime_ms, start, stop);
	printf("runtime = %lf (ms)\n", gputime_ms * 0.01);

	//------------------------------------------------------------------------//
	// free
	//------------------------------------------------------------------------//
	checkCudaErrors(cudaEventDestroy(start));
	checkCudaErrors(cudaEventDestroy(stop));

	cufftDestroy(plan2k);
	cufftDestroy(plan2ki);
	cufftDestroy(plan4k);
	cufftDestroy(plan4ki);
	cufftDestroy(plan4k_a);
	cufftDestroy(plan4k_a_i);

	cudaFree(d1);
	cudaFree(d2);
	cudaFree(d3);

	cudaFree(d1_complex);
	cudaFree(d2_complex);
	cudaFree(d3_complex);

	free(h1);
	free(h2);
	free(h3);

	return 0;
}
|
e2ada3559e9a7b93429ebe646e55f4067196ef25.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Baseline SAXPY kernel: y = a*x + y over NUM_ITERS strided passes.
// Each thread handles one element per pass; the global element index is
// pass * (threads per grid) + this thread's flat position.
// NOTE(review): `timer_vals` is accepted but never used -- confirm whether
// per-thread timing was meant to be recorded here.
// NOTE(review): no bounds check on idx; presumably the buffers hold exactly
// NUM_ITERS * COMPUTE_THREADS_PER_CTA * CTA_COUNT floats -- verify at launch.
__global__ void saxpy_baseline ( float* y, float* x, float a, clock_t * timer_vals)
{
	for (int i=0; i < NUM_ITERS; i++) {
		unsigned int idx = i * COMPUTE_THREADS_PER_CTA * CTA_COUNT + blockIdx.x * COMPUTE_THREADS_PER_CTA + threadIdx.x;
		y[idx] = a * x[idx] + y[idx];
	}
} | e2ada3559e9a7b93429ebe646e55f4067196ef25.cu | #include "includes.h"
// Baseline SAXPY kernel: y = a*x + y over NUM_ITERS strided passes.
// Each thread handles one element per pass; the global element index is
// pass * (threads per grid) + this thread's flat position.
// NOTE(review): `timer_vals` is accepted but never used -- confirm whether
// per-thread timing was meant to be recorded here.
// NOTE(review): no bounds check on idx; presumably the buffers hold exactly
// NUM_ITERS * COMPUTE_THREADS_PER_CTA * CTA_COUNT floats -- verify at launch.
__global__ void saxpy_baseline ( float* y, float* x, float a, clock_t * timer_vals)
{
	for (int i=0; i < NUM_ITERS; i++) {
		unsigned int idx = i * COMPUTE_THREADS_PER_CTA * CTA_COUNT + blockIdx.x * COMPUTE_THREADS_PER_CTA + threadIdx.x;
		y[idx] = a * x[idx] + y[idx];
	}
} |
2a65285ff52edecc367a3f153ed8a1d188e1cd4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+//
// //
// cu_newton.cu //
// //
// D. C. Groothuizen Dijkema - April, 2020 //
//+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+//
// CUDA accelerated implementation for producing Newton's fractals
#include <cu_fractal.hpp>
__device__ thrust::pair<hipDoubleComplex,hipDoubleComplex> polynomial_and_deriv(const hipDoubleComplex &x, const double * const coeffs
  , const int degree)
{
  //
  // CUDA device function: evaluate a polynomial and its first derivative at
  // a point in a single pass, using Horner's scheme.
  //
  // parameters
  // ----------
  // x : const hipDoubleComplex &
  //  - the evaluation point
  // coeffs : const double * const
  //  - polynomial coefficients ordered lowest degree first, so that
  //    coeffs[k] is the coefficient of x^k (coeffs[degree] is the leading
  //    coefficient)
  // degree : const int
  //  - the degree of the polynomial
  //
  // returns
  // -------
  // thrust::pair<hipDoubleComplex,hipDoubleComplex>
  //  - (p(x), p'(x))
  //
  hipDoubleComplex value=make_cuDoubleComplex(coeffs[degree],0.);
  hipDoubleComplex slope=make_cuDoubleComplex(0.,0.);
  int k=degree;
  while (--k>=0)
  {
    // the derivative accumulator is updated with the *previous* value
    // accumulator, which mirrors differentiating Horner's recurrence
    slope=cuCadd(cuCmul(x,slope),value);
    value=cuCadd(cuCmul(x,value),make_cuDoubleComplex(coeffs[k],0.0));
  }
  return thrust::make_pair(value,slope);
}
__device__ hipDoubleComplex newton_root(const double * const coeffs, int * const itr_taken, hipDoubleComplex x, const int degree
  , const int max_itr, const double tol)
{
  //
  // CUDA device function: run Newton-Raphson iteration from a starting
  // point to locate a root of the given polynomial.
  //
  // parameters
  // ----------
  // coeffs : const double * const
  //  - polynomial coefficients, lowest degree first (see polynomial_and_deriv)
  // itr_taken : int * const
  //  - receives the number of iterations used, or NPP_MAX_32S on failure
  // x : hipDoubleComplex
  //  - the starting point of the iteration
  // degree : const int
  //  - the degree of the polynomial
  // max_itr : const int
  //  - iteration budget
  // tol : const double
  //  - |p(x)| threshold below which x is accepted as a root
  //
  // returns
  // -------
  // hipDoubleComplex
  //  - the root reached, or (inf,inf) when the iteration stalled or ran out
  //    of budget
  //
  int itr=0;
  while (itr<max_itr)
  {
    // current function value and derivative
    hipDoubleComplex f_x,g_x;
    thrust::tie(f_x,g_x)=polynomial_and_deriv(x,coeffs,degree);
    // close enough to a root: done
    if (cuCabs(f_x)<tol)
    {
      *itr_taken=itr;
      return x;
    }
    // flat derivative: the Newton step is undefined
    if (cuCreal(g_x)==0.&&cuCimag(g_x)==0.)
    {
      *itr_taken=NPP_MAX_32S;
      return make_cuDoubleComplex(CUDART_INF,CUDART_INF);
    }
    // Newton step: x <- x - f(x)/f'(x)
    x=cuCsub(x,cuCdiv(f_x,g_x));
    ++itr;
  }
  // budget exhausted without convergence
  *itr_taken=NPP_MAX_32S;
  return make_cuDoubleComplex(CUDART_INF,CUDART_INF);
}
__global__ void compute_newton(double * const d_re, double * const d_im, int * const d_itr, const double * const d_coeffs, const int max_itr
  , const int degree, const int xresolution, const int yresolution, const double startx, const double starty, const double deltax
  , const double deltay)
{
  //
  // CUDA kernel: for every point on a 2D grid over the complex plane, run
  // Newton's method on the given polynomial and record the root reached.
  //
  // Launch layout: 2D grid, one thread per sample; x indexes the real axis,
  // y the imaginary axis.
  //
  // parameters
  // ----------
  // d_re,d_im : double * const
  //  - flattened (yresolution x xresolution) outputs for the real and
  //    imaginary parts of the root each sample converged to
  // d_itr : int * const
  //  - flattened output for the iteration count (NPP_MAX_32S on failure)
  // d_coeffs : const double * const
  //  - polynomial coefficients, lowest degree first
  // max_itr : const int
  //  - iteration budget per sample
  // degree : const int
  //  - the degree of the polynomial
  // xresolution,yresolution : const int
  //  - grid dimensions along the real and imaginary axes
  // startx,starty : const double
  //  - bottom-left corner of the sampled region
  // deltax,deltay : const double
  //  - per-step increments along each axis
  //
  const int col=blockIdx.x*blockDim.x+threadIdx.x;
  const int row=blockIdx.y*blockDim.y+threadIdx.y;
  // guard against threads past the edge of the sample grid
  if (col>=xresolution||row>=yresolution) { return; }
  const int ind=row*xresolution+col;
  // complex coordinate of this sample
  const double re=startx+deltax*col,im=starty+deltay*row;
  // iterate to a root (fixed tolerance of 1e-6) and write the result out
  const hipDoubleComplex root=newton_root(d_coeffs,(d_itr+ind),make_cuDoubleComplex(re,im),degree,max_itr,1e-6);
  d_re[ind]=cuCreal(root);
  d_im[ind]=cuCimag(root);
}
int __declspec(dllexport) sample_newton(double * const h_re, double * const h_im, int * const h_itr, const double * const h_coeffs
  , const int max_itr, const int degree, const int xresolution, const int yresolution, const double startx, const double endx
  , const double starty, const double endy, const bool verbose)
{
  //
  // Determine the roots of a polynomial the numbers in a given subset of the complex plane converge to with Newton's method, with GPU
  // acceleration
  //
  // parameters
  // ----------
  // h_re,h_im : double * const
  //   - 1D flat arrays representing 2D arrays to write out the root which the numbers in the subset converged to
  // h_itr : int * const
  //   - 1D flat array representing a 2D array to write out either the number of iterations needed to reach a root or a marker that a
  //     root could not be reached
  // h_coeffs : const double * const
  //   - the coefficients of the polynomial given in order of the lowest degree to highest
  //     see polynomial_and_deriv() for requirements
  // max_itr : const int
  //   - the maximum number of iterations allowed
  // degree : const int
  //   - the degree of the polynomial
  // xresolution,yresolution : const int
  //   - the number of steps to take in the x- and y-direction (the real and imaginary components)
  // startx,endx,starty,endy : const double
  //   - the first and last values to sample at
  // verbose : bool
  //   - flag to control logging to console
  //
  // returns
  // -------
  // int
  //   - the sentinel value (NPP_MAX_32S) written into h_itr for points which could not be shown to converge to a root
  //
  // computation parameters
  const double deltax=(endx-startx)/xresolution,deltay=(endy-starty)/yresolution;
  const int total=xresolution*yresolution;
  // memory sizes: polynomial coefficients, double-valued outputs, int-valued outputs
  const size_t c_size=(degree+1)*sizeof(double),d_size=total*sizeof(double),i_size=total*sizeof(int);
  // device memory pointers
  double *d_re=nullptr,*d_im=nullptr,*d_coeffs=nullptr;
  int *d_itr=nullptr;
  // allocate device memory
  CUDA_REQUIRE_SUCCESS(hipMalloc(reinterpret_cast<void **>(&d_re),d_size));
  CUDA_REQUIRE_SUCCESS(hipMalloc(reinterpret_cast<void **>(&d_im),d_size));
  CUDA_REQUIRE_SUCCESS(hipMalloc(reinterpret_cast<void **>(&d_itr),i_size));
  CUDA_REQUIRE_SUCCESS(hipMalloc(reinterpret_cast<void **>(&d_coeffs),c_size));
  // copy polynomial coefficients over
  CUDA_REQUIRE_SUCCESS(hipMemcpy(d_coeffs,h_coeffs,c_size,hipMemcpyHostToDevice));
  // launch configuration: one thread per sample point on a 2D grid
  const dim3 dim_block(32,32),dim_grid((xresolution+dim_block.x-1)/dim_block.x,(yresolution+dim_block.y-1)/dim_block.y);
  // run and time
  float elapsed;
  hipEvent_t start,stop;
  CUDA_REQUIRE_SUCCESS(hipEventCreate(&start));
  CUDA_REQUIRE_SUCCESS(hipEventCreate(&stop));
  CUDA_REQUIRE_SUCCESS(hipEventRecord(start,0));
  hipLaunchKernelGGL(( compute_newton), dim3(dim_grid),dim3(dim_block), 0, 0, d_re,d_im,d_itr,d_coeffs,max_itr,degree,xresolution,yresolution,startx,starty,deltax,deltay);
  // check for launch errors, then execution errors at the synchronisation point
  CUDA_REQUIRE_SUCCESS(hipPeekAtLastError());
  CUDA_REQUIRE_SUCCESS(hipDeviceSynchronize());
  CUDA_REQUIRE_SUCCESS(hipEventRecord(stop,0));
  CUDA_REQUIRE_SUCCESS(hipEventSynchronize(stop));
  CUDA_REQUIRE_SUCCESS(hipEventElapsedTime(&elapsed,start,stop));
  if (verbose)
  {
    std::cout << total << " points processed." << std::endl
      << "Time taken: " << elapsed/1000 << "s." << std::endl;
  }
  // copy results back to the host
  CUDA_REQUIRE_SUCCESS(hipMemcpy(h_re,d_re,d_size,hipMemcpyDeviceToHost));
  CUDA_REQUIRE_SUCCESS(hipMemcpy(h_im,d_im,d_size,hipMemcpyDeviceToHost));
  CUDA_REQUIRE_SUCCESS(hipMemcpy(h_itr,d_itr,i_size,hipMemcpyDeviceToHost));
  // free GPU memory (bug fix: d_coeffs was previously never freed)
  CUDA_REQUIRE_SUCCESS(hipFree(d_re));
  CUDA_REQUIRE_SUCCESS(hipFree(d_im));
  CUDA_REQUIRE_SUCCESS(hipFree(d_itr));
  CUDA_REQUIRE_SUCCESS(hipFree(d_coeffs));
  // destroy the timing events (bug fix: previously leaked)
  CUDA_REQUIRE_SUCCESS(hipEventDestroy(start));
  CUDA_REQUIRE_SUCCESS(hipEventDestroy(stop));
  return NPP_MAX_32S;
}
| 2a65285ff52edecc367a3f153ed8a1d188e1cd4c.cu |
//+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+//
// //
// cu_newton.cu //
// //
// D. C. Groothuizen Dijkema - April, 2020 //
//+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+//
// CUDA accelerated implementation for producing Newton's fractals
#include <cu_fractal.hpp>
__device__ thrust::pair<cuDoubleComplex,cuDoubleComplex> polynomial_and_deriv(const cuDoubleComplex &x, const double * const coeffs
  , const int degree)
{
  //
  // CUDA device function evaluating a polynomial and its derivative at a point in a single Horner pass
  //
  // parameters
  // ----------
  // x : const cuDoubleComplex &
  //   - the point to evaluate at
  // coeffs : const double * const
  //   - polynomial coefficients ordered lowest degree first, i.e. coeffs[k] multiplies x^k,
  //     so the array must hold degree+1 entries
  // degree : const int
  //   - the degree of the polynomial
  //
  // returns
  // -------
  // thrust::pair<cuDoubleComplex,cuDoubleComplex>
  //   - (p(x), p'(x)) for the given polynomial p at the given point
  //
  cuDoubleComplex value=make_cuDoubleComplex(coeffs[degree],0.);
  cuDoubleComplex deriv=make_cuDoubleComplex(0.,0.);
  for (int k=degree-1;k>=0;--k)
  {
    // the derivative accumulator must fold in the value from the previous step first
    deriv=cuCadd(cuCmul(x,deriv),value);
    value=cuCadd(cuCmul(x,value),make_cuDoubleComplex(coeffs[k],0.));
  }
  return thrust::make_pair(value,deriv);
}
__device__ cuDoubleComplex newton_root(const double * const coeffs, int * const itr_taken, cuDoubleComplex x, const int degree
, const int max_itr, const double tol)
{
//
// CUDA device to apply the Newton-Raphson method to a given number to find the root of a given polynomial
//
// parameters
// ----------
// coeffs : const double * const
// - the coefficients of the polynomial given in order of the lowest degree to highest
// see polynomial_and_deriv() for requirements
// itr_taken : int * const
// - a pointer to write out the number of iterations needed to reach a root, or NPP_MAX_32S if no root was reached
// x : cuDoubleComplex
// - the number to start from
// degree : const int
// - the degree of the polynomial
// max_itr : const int
// - the maximum number of iterations allowed
// tol : const double
// - the tolerance within which a root will be deemed to have been reached
//
// returns
// -------
// cuDoubleComplex
// - the root reached from the given number, or (inf,inf) if no root could be reached
//
for (int itr=0;itr<max_itr;++itr)
{
// get the current function value and derivative
cuDoubleComplex f_x,g_x;
thrust::tie(f_x,g_x)=polynomial_and_deriv(x,coeffs,degree);
// converged to a root
if (cuCabs(f_x)<tol)
{
*itr_taken=itr;
return x;
}
// derivative is flat and we can't update: mark failure with the int-max sentinel
if (cuCreal(g_x)==0.&&cuCimag(g_x)==0.)
{
*itr_taken=NPP_MAX_32S;
return make_cuDoubleComplex(CUDART_INF,CUDART_INF);
}
// Newton step: x <- x - f(x)/f'(x)
x=cuCsub(x,cuCdiv(f_x,g_x));
}
// couldn't find a root in the given number of iterations
*itr_taken=NPP_MAX_32S ;
return make_cuDoubleComplex(CUDART_INF,CUDART_INF);
}
__global__ void compute_newton(double * const d_re, double * const d_im, int * const d_itr, const double * const d_coeffs, const int max_itr
, const int degree, const int xresolution, const int yresolution, const double startx, const double starty, const double deltax
, const double deltay)
{
//
// CUDA kernel to find the roots of a polynomial a given number in the complex plane converges to with Newton's method
//
// launch layout: one thread per sample point on a 2D grid; threads outside the
// xresolution-by-yresolution window exit immediately
//
// parameters
// ----------
// d_re,d_im : double * const
// - 1D flat arrays representing 2D arrays to write out the root which the numbers in the subset converged to
// d_itr : int * const
// - 1D flat array representing a 2D array to write out either the number of iterations needed to reach a root or a marker that a
// root could not be reached
// d_coeffs : const double * const
// - the coefficients of the polynomial given in order of the lowest degree to highest
// see polynomial_and_deriv() for requirements
// max_itr : const int
// - the maximum number of iterations allowed
// degree : const int
// - the degree of the polynomial
// xresolution,yresolution : const int
// - the number of steps to take in the x-direction (the real components) and the y-direction (the imaginary components)
// startx,starty : const double
// - the real and imaginary components of the number defining the bottom left corner of the entire space being sampled
// deltax,deltay : const double
// - the size of the step to take in the x- and y-direction
//
// determine where we are in memory (row-major: one row per imaginary step)
const int idy=blockIdx.y*blockDim.y+threadIdx.y,idx=blockIdx.x*blockDim.x+threadIdx.x,ind=idy*xresolution+idx;
// check we haven't gone out of bounds
if (idx>=xresolution||idy>=yresolution) { return; }
// determine the current point
const double imag=starty+deltay*idy,real=startx+deltax*idx;
// find the root we converge to from this point (convergence tolerance fixed at 1e-6)
cuDoubleComplex root=newton_root(d_coeffs,(d_itr+ind),make_cuDoubleComplex(real,imag),degree,max_itr,1e-6);
// write out
d_re[ind]=cuCreal(root);
d_im[ind]=cuCimag(root);
}
int __declspec(dllexport) sample_newton(double * const h_re, double * const h_im, int * const h_itr, const double * const h_coeffs
  , const int max_itr, const int degree, const int xresolution, const int yresolution, const double startx, const double endx
  , const double starty, const double endy, const bool verbose)
{
  //
  // Determine the roots of a polynomial the numbers in a given subset of the complex plane converge to with Newton's method, with CUDA
  // acceleration
  //
  // parameters
  // ----------
  // h_re,h_im : double * const
  //   - 1D flat arrays representing 2D arrays to write out the root which the numbers in the subset converged to
  // h_itr : int * const
  //   - 1D flat array representing a 2D array to write out either the number of iterations needed to reach a root or a marker that a
  //     root could not be reached
  // h_coeffs : const double * const
  //   - the coefficients of the polynomial given in order of the lowest degree to highest
  //     see polynomial_and_deriv() for requirements
  // max_itr : const int
  //   - the maximum number of iterations allowed
  // degree : const int
  //   - the degree of the polynomial
  // xresolution,yresolution : const int
  //   - the number of steps to take in the x- and y-direction (the real and imaginary components)
  // startx,endx,starty,endy : const double
  //   - the first and last values to sample at
  // verbose : bool
  //   - flag to control logging to console
  //
  // returns
  // -------
  // int
  //   - the sentinel value (NPP_MAX_32S) written into h_itr for points which could not be shown to converge to a root
  //
  // computation parameters
  const double deltax=(endx-startx)/xresolution,deltay=(endy-starty)/yresolution;
  const int total=xresolution*yresolution;
  // memory sizes: polynomial coefficients, double-valued outputs, int-valued outputs
  const size_t c_size=(degree+1)*sizeof(double),d_size=total*sizeof(double),i_size=total*sizeof(int);
  // device memory pointers
  double *d_re=nullptr,*d_im=nullptr,*d_coeffs=nullptr;
  int *d_itr=nullptr;
  // allocate device memory
  CUDA_REQUIRE_SUCCESS(cudaMalloc(reinterpret_cast<void **>(&d_re),d_size));
  CUDA_REQUIRE_SUCCESS(cudaMalloc(reinterpret_cast<void **>(&d_im),d_size));
  CUDA_REQUIRE_SUCCESS(cudaMalloc(reinterpret_cast<void **>(&d_itr),i_size));
  CUDA_REQUIRE_SUCCESS(cudaMalloc(reinterpret_cast<void **>(&d_coeffs),c_size));
  // copy polynomial coefficients over
  CUDA_REQUIRE_SUCCESS(cudaMemcpy(d_coeffs,h_coeffs,c_size,cudaMemcpyHostToDevice));
  // launch configuration: one thread per sample point on a 2D grid
  const dim3 dim_block(32,32),dim_grid((xresolution+dim_block.x-1)/dim_block.x,(yresolution+dim_block.y-1)/dim_block.y);
  // run and time
  float elapsed;
  cudaEvent_t start,stop;
  CUDA_REQUIRE_SUCCESS(cudaEventCreate(&start));
  CUDA_REQUIRE_SUCCESS(cudaEventCreate(&stop));
  CUDA_REQUIRE_SUCCESS(cudaEventRecord(start,0));
  compute_newton<<<dim_grid,dim_block>>>(d_re,d_im,d_itr,d_coeffs,max_itr,degree,xresolution,yresolution,startx,starty,deltax,deltay);
  // check for launch errors, then execution errors at the synchronisation point
  CUDA_REQUIRE_SUCCESS(cudaPeekAtLastError());
  CUDA_REQUIRE_SUCCESS(cudaDeviceSynchronize());
  CUDA_REQUIRE_SUCCESS(cudaEventRecord(stop,0));
  CUDA_REQUIRE_SUCCESS(cudaEventSynchronize(stop));
  CUDA_REQUIRE_SUCCESS(cudaEventElapsedTime(&elapsed,start,stop));
  if (verbose)
  {
    std::cout << total << " points processed." << std::endl
      << "Time taken: " << elapsed/1000 << "s." << std::endl;
  }
  // copy results back to the host
  CUDA_REQUIRE_SUCCESS(cudaMemcpy(h_re,d_re,d_size,cudaMemcpyDeviceToHost));
  CUDA_REQUIRE_SUCCESS(cudaMemcpy(h_im,d_im,d_size,cudaMemcpyDeviceToHost));
  CUDA_REQUIRE_SUCCESS(cudaMemcpy(h_itr,d_itr,i_size,cudaMemcpyDeviceToHost));
  // free GPU memory (bug fix: d_coeffs was previously never freed)
  CUDA_REQUIRE_SUCCESS(cudaFree(d_re));
  CUDA_REQUIRE_SUCCESS(cudaFree(d_im));
  CUDA_REQUIRE_SUCCESS(cudaFree(d_itr));
  CUDA_REQUIRE_SUCCESS(cudaFree(d_coeffs));
  // destroy the timing events (bug fix: previously leaked)
  CUDA_REQUIRE_SUCCESS(cudaEventDestroy(start));
  CUDA_REQUIRE_SUCCESS(cudaEventDestroy(stop));
  return NPP_MAX_32S;
}
|
c1728fbdf11766171a79675704949efd08fb9459.hip | // !!! This is a file automatically generated by hipify!!!
/**
* (C) Copyright 2020 IBM. All Rights Reserved.
*
* This code is licensed under the Apache License, Version 2.0. You may
* obtain a copy of this license in the LICENSE.txt file in the root directory
* of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
*
* Any modifications or derivative works of this code must retain this
* copyright notice, and modified files need to carry a notice indicating
* that they have been altered from the originals.
*/
#include "pwu_kernel_parameter.h"
#include "rpu_pulsed_meta_parameter.h"
#include "rpucuda_pulsed_device.h"
#include <memory>
namespace RPU {
/******************************************************************************************/
/* PulsedRPUDeviceCuda
Base class which maintains the basic hard bounds and dw_min
up/down and decays etc for the pulsed updates.
Note that it is still Abstract. Need to implement the getUpdateKernels in derived.
*/
// Construct a pulsed device of the given dimensions; the per-weight device
// buffers are allocated later by initialize().
template <typename T>
PulsedRPUDeviceCuda<T>::PulsedRPUDeviceCuda(CudaContext *c, int x_size, int d_size)
: PulsedRPUDeviceCudaBase<T>(c, x_size, d_size){};
// Allocate the per-weight parameter arrays on the device. dev_4params_ packs
// four floats per weight (min_bound, scale_down, max_bound, scale_up -- see
// populateFrom); the other arrays hold one T per weight.
template <typename T> void PulsedRPUDeviceCuda<T>::initialize() {
dev_4params_ = make_unique<CudaArray<float>>(this->context_, 4 * this->size_);
dev_diffusion_rate_ = make_unique<CudaArray<T>>(this->context_, this->size_);
dev_reset_bias_ = make_unique<CudaArray<T>>(this->context_, this->size_);
dev_decay_scale_ = make_unique<CudaArray<T>>(this->context_, this->size_);
this->context_->synchronize();
};
// Copy constructor: deep-copies the other device's parameter arrays into
// freshly allocated GPU buffers.
template <typename T>
PulsedRPUDeviceCuda<T>::PulsedRPUDeviceCuda(const PulsedRPUDeviceCuda<T> &other)
: PulsedRPUDeviceCudaBase<T>(other) {
initialize();
dev_4params_->assign(*other.dev_4params_);
dev_diffusion_rate_->assign(*other.dev_diffusion_rate_);
dev_reset_bias_->assign(*other.dev_reset_bias_);
dev_decay_scale_->assign(*other.dev_decay_scale_);
this->context_->synchronize();
};
// template <typename T>
// PulsedRPUDeviceCuda<T>& PulsedRPUDeviceCuda<T>::operator=(const PulsedRPUDeviceCuda<T>& other){
// PulsedRPUDeviceCuda<T> tmp(other);
// swap(*this,tmp);
// return *this;
// };
// template <typename T>
// PulsedRPUDeviceCuda<T>::PulsedRPUDeviceCuda(PulsedRPUDeviceCuda<T>&& other) {
// *this = std::move(other);
// };
// template <typename T>
// PulsedRPUDeviceCuda<T>& PulsedRPUDeviceCuda<T>::operator=(PulsedRPUDeviceCuda<T>&& other){
// PulsedRPUDeviceCudaBase<T>::operator=(std::move(other));
// dev_4params_ = std::move(other.dev_4params_);
// dev_diffusion_rate_ = std::move(other.dev_diffusion_rate_);
// dev_reset_bias_ = std::move(other.dev_reset_bias_);
// dev_decay_scale_ = std::move(other.dev_decay_scale_);
// return *this;
// };
template <typename T>
void PulsedRPUDeviceCuda<T>::populateFrom(const AbstractRPUDevice<T> &rpu_device_in) {
  // Populate the GPU-side parameter arrays (per-weight bounds/scales, decay,
  // diffusion, reset bias) from a CPU-side PulsedRPUDevice.
  //
  // Bug fix: dynamic_cast of a *reference* throws std::bad_cast on a type
  // mismatch, so the original `&rpu_device == nullptr` test could never fire.
  // Cast the address instead so the RPU_FATAL error path actually works.
  const auto *rpu_device = dynamic_cast<const PulsedRPUDevice<T> *>(&rpu_device_in);
  if (rpu_device == nullptr) {
    RPU_FATAL("populateFrom expects PulsedRPUDevice.");
  }

  int x_size = rpu_device->getXSize();
  int d_size = rpu_device->getDSize();
  int size = x_size * d_size;

  initialize();
  PulsedRPUDeviceCudaBase<T>::populateFrom(rpu_device_in);

  PulsedDPStruc<T> **sup = rpu_device->getDPStruc();

  // host staging buffers (unique_ptr: exception-safe, no manual delete[])
  auto tmp = std::make_unique<float[]>(4 * size);
  auto tmp_ds = std::make_unique<T[]>(size);
  auto tmp_df = std::make_unique<T[]>(size);
  auto tmp_rb = std::make_unique<T[]>(size);

  T *ds = rpu_device->getDecayScale()[0];
  T *df = rpu_device->getDiffusionRate()[0];
  T *rb = rpu_device->getResetBias()[0];

  for (int i = 0; i < d_size; ++i) {
    for (int j = 0; j < x_size; ++j) {
      // transposed: col major required by cuBLAS .. linear arrangement for now;
      // four parameters packed per weight
      int k = j * (d_size * 4) + 4 * i;
      tmp[k] = sup[i][j].min_bound;
      tmp[k + 1] = sup[i][j].scale_down;
      tmp[k + 2] = sup[i][j].max_bound;
      tmp[k + 3] = sup[i][j].scale_up;

      int l_t = j * (d_size) + i; // transposed (device) index
      int l = i * (x_size) + j;   // row-major (host) index
      tmp_ds[l_t] = ds[l];
      tmp_df[l_t] = df[l];
      tmp_rb[l_t] = rb[l];
    }
  }

  dev_4params_->assign(tmp.get());
  // other parameters
  dev_decay_scale_->assign(tmp_ds.get());
  dev_diffusion_rate_->assign(tmp_df.get());
  dev_reset_bias_->assign(tmp_rb.get());

  this->context_->synchronize();
}
// Apply a weight change and saturate the result into the per-weight
// [min_bound, max_bound] range stored in dev_4params_.
// NOTE(review): the exact in/out role of dw_and_current_weight_out is defined
// by elemaddcopysat -- confirm against its implementation.
template <typename T>
void PulsedRPUDeviceCuda<T>::applyWeightUpdate(T *weights, T *dw_and_current_weight_out) {
RPU::math::elemaddcopysat<T>(
this->context_, weights, dw_and_current_weight_out, this->size_,
dev_4params_->getDataConst());
}
// Elementwise weight decay scaled by alpha; when bias_no_decay is set, the
// last d_size_ entries (presumably the bias column in the transposed layout --
// confirm) are excluded from the decay.
template <typename T>
void PulsedRPUDeviceCuda<T>::decayWeights(T *weights, T alpha, bool bias_no_decay) {
RPU::math::elemscalealpha<T>(
this->context_, weights, bias_no_decay ? MAX(this->size_ - this->d_size_, 0) : this->size_,
dev_decay_scale_->getData(), dev_4params_->getData(), alpha);
}
// Elementwise weight decay (no alpha scaling); bias handling as in the
// alpha-scaled overload above.
template <typename T> void PulsedRPUDeviceCuda<T>::decayWeights(T *weights, bool bias_no_decay) {
RPU::math::elemscale<T>(
this->context_, weights, bias_no_decay ? MAX(this->size_ - this->d_size_, 0) : this->size_,
dev_decay_scale_->getData(), dev_4params_->getData());
}
// Add diffusion noise to the weights. The normal random buffer is generated
// lazily on first use; after consuming it, the next batch is pre-generated
// asynchronously on the RNG stream (fenced by the recordWaitEvent call) so it
// overlaps with subsequent work on the main stream.
template <typename T> void PulsedRPUDeviceCuda<T>::diffuseWeights(T *weights) {
if (this->dev_diffusion_nrnd_ == nullptr) {
this->initDiffusionRnd();
this->rnd_context_->randNormal(
this->dev_diffusion_nrnd_->getData(), this->dev_diffusion_nrnd_->getSize());
}
this->rnd_context_->synchronize();
RPU::math::elemasb02<T>(
this->context_, weights, this->size_, this->dev_diffusion_nrnd_->getData(),
dev_diffusion_rate_->getData(), dev_4params_->getData());
this->rnd_context_->recordWaitEvent(this->context_->getStream());
this->rnd_context_->randNormal(
this->dev_diffusion_nrnd_->getData(), this->dev_diffusion_nrnd_->getSize());
}
// Saturate weights into their per-weight bounds; additionally clip to the
// symmetric range given by `clip` when it is non-negative (negative clip
// means "no extra clipping").
template <typename T> void PulsedRPUDeviceCuda<T>::clipWeights(T *weights, T clip) {
RPU::math::elemsat<T>(this->context_, weights, this->size_, dev_4params_->getData());
if (clip >= 0) {
RPU::math::aclip<T>(this->context_, weights, this->size_, clip);
}
}
// Lazily set up the RNG context plus the normal-random and reset-flag buffers
// used by resetCols(). Buffer length is size_ rounded up to the next multiple
// of 32 (warp granularity).
template <typename T> void PulsedRPUDeviceCuda<T>::initResetRnd() {
  if (this->rnd_context_ == nullptr) {
    this->initRndContext();
  }
  const int n_rounded = (this->size_ + 31) / 32 * 32; // round up to a warp multiple
  // use make_unique for consistency with initialize() (and exception safety),
  // rather than wrapping a bare `new` in a unique_ptr constructor
  dev_reset_nrnd_ = make_unique<CudaArray<float>>(&*this->rnd_context_, n_rounded);
  dev_reset_flag_ = make_unique<CudaArray<float>>(&*this->rnd_context_, n_rounded);
  dev_reset_flag_->setConst(0);
  this->rnd_context_->synchronize();
}
// Reset n_cols columns of the weight matrix (starting at start_col) towards
// their reset bias, with gaussian noise of std getPar().reset_std, each entry
// reset with probability reset_prob. Columns are contiguous because the device
// layout is column major; the column range may wrap around the end of the
// matrix, hence the three cases below.
template <typename T>
void PulsedRPUDeviceCuda<T>::resetCols(T *weights, int start_col, int n_cols, T reset_prob) {
// col-major in CUDA.
if (dev_reset_nrnd_ == nullptr) {
initResetRnd();
}
int n = n_cols * this->d_size_;
int offset = start_col * this->d_size_;
// draw the reset noise (and, if probabilistic, the per-entry uniform flags)
// on the RNG stream; the main stream waits on it below
this->rnd_context_->randNormal(
dev_reset_nrnd_->getData(), n_cols * this->d_size_, 0.0, getPar().reset_std);
if (reset_prob < 1) {
this->rnd_context_->randUniform(dev_reset_flag_->getData(), n_cols * this->d_size_);
}
this->context_->recordWaitEvent(this->rnd_context_->getStream());
if (n >= this->size_) {
// reset whole matrix
RPU::math::elemresetsat<T>(
this->context_, weights, this->size_, dev_reset_bias_->getDataConst(),
dev_reset_nrnd_->getDataConst(), dev_reset_flag_->getDataConst(), reset_prob,
dev_4params_->getData());
} else if (offset + n <= this->size_) {
// one pass enough
RPU::math::elemresetsat<T>(
this->context_, weights + offset, n, dev_reset_bias_->getDataConst() + offset,
dev_reset_nrnd_->getDataConst(), dev_reset_flag_->getDataConst(), reset_prob,
dev_4params_->getData() + 4 * offset);
} else {
// two passes: tail of the matrix first, then wrap around to the beginning
int m = this->size_ - offset;
RPU::math::elemresetsat<T>(
this->context_, weights + offset, m, dev_reset_bias_->getDataConst() + offset,
dev_reset_nrnd_->getDataConst(), dev_reset_flag_->getDataConst(), reset_prob,
dev_4params_->getData() + 4 * offset);
RPU::math::elemresetsat<T>(
this->context_, weights, n - m, dev_reset_bias_->getDataConst(),
dev_reset_nrnd_->getDataConst() + m, dev_reset_flag_->getDataConst() + m, reset_prob,
dev_4params_->getData());
}
}
// Dispatch the pulsed-update kernel selected by kpars, forwarding this device
// (for its parameter arrays) together with the bit-line data and RNG states.
template <typename T>
void PulsedRPUDeviceCuda<T>::runUpdateKernel(
pwukp_t<T> kpars,
CudaContext *c,
T *dev_weights,
int m_batch,
const BitLineMaker<T> *blm,
const PulsedUpdateMetaParameter<T> &up,
hiprandState_t *dev_states,
int one_sided,
uint32_t *x_counts_chunk,
uint32_t *d_counts_chunk) {
kpars->run(
c->getStream(), dev_weights, m_batch, blm, this, up, dev_states, one_sided, x_counts_chunk,
d_counts_chunk);
}
template class PulsedRPUDeviceCuda<float>;
#ifdef RPU_USE_DOUBLE
template class PulsedRPUDeviceCuda<double>;
#endif
} // namespace RPU
| c1728fbdf11766171a79675704949efd08fb9459.cu | /**
* (C) Copyright 2020 IBM. All Rights Reserved.
*
* This code is licensed under the Apache License, Version 2.0. You may
* obtain a copy of this license in the LICENSE.txt file in the root directory
* of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
*
* Any modifications or derivative works of this code must retain this
* copyright notice, and modified files need to carry a notice indicating
* that they have been altered from the originals.
*/
#include "pwu_kernel_parameter.h"
#include "rpu_pulsed_meta_parameter.h"
#include "rpucuda_pulsed_device.h"
#include <memory>
namespace RPU {
/******************************************************************************************/
/* PulsedRPUDeviceCuda
Base class which maintains the basic hard bounds and dw_min
up/down and decays etc for the pulsed updates.
Note that it is still Abstract. Need to implement the getUpdateKernels in derived.
*/
// Construct a pulsed device of the given dimensions; the per-weight device
// buffers are allocated later by initialize().
template <typename T>
PulsedRPUDeviceCuda<T>::PulsedRPUDeviceCuda(CudaContext *c, int x_size, int d_size)
: PulsedRPUDeviceCudaBase<T>(c, x_size, d_size){};
// Allocate the per-weight parameter arrays on the device. dev_4params_ packs
// four floats per weight (min_bound, scale_down, max_bound, scale_up -- see
// populateFrom); the other arrays hold one T per weight.
template <typename T> void PulsedRPUDeviceCuda<T>::initialize() {
dev_4params_ = make_unique<CudaArray<float>>(this->context_, 4 * this->size_);
dev_diffusion_rate_ = make_unique<CudaArray<T>>(this->context_, this->size_);
dev_reset_bias_ = make_unique<CudaArray<T>>(this->context_, this->size_);
dev_decay_scale_ = make_unique<CudaArray<T>>(this->context_, this->size_);
this->context_->synchronize();
};
// Copy constructor: deep-copies the other device's parameter arrays into
// freshly allocated GPU buffers.
template <typename T>
PulsedRPUDeviceCuda<T>::PulsedRPUDeviceCuda(const PulsedRPUDeviceCuda<T> &other)
: PulsedRPUDeviceCudaBase<T>(other) {
initialize();
dev_4params_->assign(*other.dev_4params_);
dev_diffusion_rate_->assign(*other.dev_diffusion_rate_);
dev_reset_bias_->assign(*other.dev_reset_bias_);
dev_decay_scale_->assign(*other.dev_decay_scale_);
this->context_->synchronize();
};
// template <typename T>
// PulsedRPUDeviceCuda<T>& PulsedRPUDeviceCuda<T>::operator=(const PulsedRPUDeviceCuda<T>& other){
// PulsedRPUDeviceCuda<T> tmp(other);
// swap(*this,tmp);
// return *this;
// };
// template <typename T>
// PulsedRPUDeviceCuda<T>::PulsedRPUDeviceCuda(PulsedRPUDeviceCuda<T>&& other) {
// *this = std::move(other);
// };
// template <typename T>
// PulsedRPUDeviceCuda<T>& PulsedRPUDeviceCuda<T>::operator=(PulsedRPUDeviceCuda<T>&& other){
// PulsedRPUDeviceCudaBase<T>::operator=(std::move(other));
// dev_4params_ = std::move(other.dev_4params_);
// dev_diffusion_rate_ = std::move(other.dev_diffusion_rate_);
// dev_reset_bias_ = std::move(other.dev_reset_bias_);
// dev_decay_scale_ = std::move(other.dev_decay_scale_);
// return *this;
// };
template <typename T>
void PulsedRPUDeviceCuda<T>::populateFrom(const AbstractRPUDevice<T> &rpu_device_in) {
  // Populate the GPU-side parameter arrays (per-weight bounds/scales, decay,
  // diffusion, reset bias) from a CPU-side PulsedRPUDevice.
  //
  // Bug fix: dynamic_cast of a *reference* throws std::bad_cast on a type
  // mismatch, so the original `&rpu_device == nullptr` test could never fire.
  // Cast the address instead so the RPU_FATAL error path actually works.
  const auto *rpu_device = dynamic_cast<const PulsedRPUDevice<T> *>(&rpu_device_in);
  if (rpu_device == nullptr) {
    RPU_FATAL("populateFrom expects PulsedRPUDevice.");
  }

  int x_size = rpu_device->getXSize();
  int d_size = rpu_device->getDSize();
  int size = x_size * d_size;

  initialize();
  PulsedRPUDeviceCudaBase<T>::populateFrom(rpu_device_in);

  PulsedDPStruc<T> **sup = rpu_device->getDPStruc();

  // host staging buffers (unique_ptr: exception-safe, no manual delete[])
  auto tmp = std::make_unique<float[]>(4 * size);
  auto tmp_ds = std::make_unique<T[]>(size);
  auto tmp_df = std::make_unique<T[]>(size);
  auto tmp_rb = std::make_unique<T[]>(size);

  T *ds = rpu_device->getDecayScale()[0];
  T *df = rpu_device->getDiffusionRate()[0];
  T *rb = rpu_device->getResetBias()[0];

  for (int i = 0; i < d_size; ++i) {
    for (int j = 0; j < x_size; ++j) {
      // transposed: col major required by cuBLAS .. linear arrangement for now;
      // four parameters packed per weight
      int k = j * (d_size * 4) + 4 * i;
      tmp[k] = sup[i][j].min_bound;
      tmp[k + 1] = sup[i][j].scale_down;
      tmp[k + 2] = sup[i][j].max_bound;
      tmp[k + 3] = sup[i][j].scale_up;

      int l_t = j * (d_size) + i; // transposed (device) index
      int l = i * (x_size) + j;   // row-major (host) index
      tmp_ds[l_t] = ds[l];
      tmp_df[l_t] = df[l];
      tmp_rb[l_t] = rb[l];
    }
  }

  dev_4params_->assign(tmp.get());
  // other parameters
  dev_decay_scale_->assign(tmp_ds.get());
  dev_diffusion_rate_->assign(tmp_df.get());
  dev_reset_bias_->assign(tmp_rb.get());

  this->context_->synchronize();
}
// Apply a weight change and saturate the result into the per-weight
// [min_bound, max_bound] range stored in dev_4params_.
// NOTE(review): the exact in/out role of dw_and_current_weight_out is defined
// by elemaddcopysat -- confirm against its implementation.
template <typename T>
void PulsedRPUDeviceCuda<T>::applyWeightUpdate(T *weights, T *dw_and_current_weight_out) {
RPU::math::elemaddcopysat<T>(
this->context_, weights, dw_and_current_weight_out, this->size_,
dev_4params_->getDataConst());
}
// Elementwise weight decay scaled by alpha; when bias_no_decay is set, the
// last d_size_ entries (presumably the bias column in the transposed layout --
// confirm) are excluded from the decay.
template <typename T>
void PulsedRPUDeviceCuda<T>::decayWeights(T *weights, T alpha, bool bias_no_decay) {
RPU::math::elemscalealpha<T>(
this->context_, weights, bias_no_decay ? MAX(this->size_ - this->d_size_, 0) : this->size_,
dev_decay_scale_->getData(), dev_4params_->getData(), alpha);
}
// Elementwise weight decay (no alpha scaling); bias handling as in the
// alpha-scaled overload above.
template <typename T> void PulsedRPUDeviceCuda<T>::decayWeights(T *weights, bool bias_no_decay) {
RPU::math::elemscale<T>(
this->context_, weights, bias_no_decay ? MAX(this->size_ - this->d_size_, 0) : this->size_,
dev_decay_scale_->getData(), dev_4params_->getData());
}
// Add diffusion noise to the weights. The normal random buffer is generated
// lazily on first use; after consuming it, the next batch is pre-generated
// asynchronously on the RNG stream (fenced by the recordWaitEvent call) so it
// overlaps with subsequent work on the main stream.
template <typename T> void PulsedRPUDeviceCuda<T>::diffuseWeights(T *weights) {
if (this->dev_diffusion_nrnd_ == nullptr) {
this->initDiffusionRnd();
this->rnd_context_->randNormal(
this->dev_diffusion_nrnd_->getData(), this->dev_diffusion_nrnd_->getSize());
}
this->rnd_context_->synchronize();
RPU::math::elemasb02<T>(
this->context_, weights, this->size_, this->dev_diffusion_nrnd_->getData(),
dev_diffusion_rate_->getData(), dev_4params_->getData());
this->rnd_context_->recordWaitEvent(this->context_->getStream());
this->rnd_context_->randNormal(
this->dev_diffusion_nrnd_->getData(), this->dev_diffusion_nrnd_->getSize());
}
// Saturate weights into their per-weight bounds; additionally clip to the
// symmetric range given by `clip` when it is non-negative (negative clip
// means "no extra clipping").
template <typename T> void PulsedRPUDeviceCuda<T>::clipWeights(T *weights, T clip) {
RPU::math::elemsat<T>(this->context_, weights, this->size_, dev_4params_->getData());
if (clip >= 0) {
RPU::math::aclip<T>(this->context_, weights, this->size_, clip);
}
}
// Lazily set up the RNG context plus the normal-random and reset-flag buffers
// used by resetCols(). Buffer length is size_ rounded up to the next multiple
// of 32 (warp granularity).
template <typename T> void PulsedRPUDeviceCuda<T>::initResetRnd() {
  if (this->rnd_context_ == nullptr) {
    this->initRndContext();
  }
  const int n_rounded = (this->size_ + 31) / 32 * 32; // round up to a warp multiple
  // use make_unique for consistency with initialize() (and exception safety),
  // rather than wrapping a bare `new` in a unique_ptr constructor
  dev_reset_nrnd_ = make_unique<CudaArray<float>>(&*this->rnd_context_, n_rounded);
  dev_reset_flag_ = make_unique<CudaArray<float>>(&*this->rnd_context_, n_rounded);
  dev_reset_flag_->setConst(0);
  this->rnd_context_->synchronize();
}
// Reset n_cols columns of the weight matrix (starting at start_col) towards
// their reset bias, with gaussian noise of std getPar().reset_std, each entry
// reset with probability reset_prob. Columns are contiguous because the device
// layout is column major; the column range may wrap around the end of the
// matrix, hence the three cases below.
template <typename T>
void PulsedRPUDeviceCuda<T>::resetCols(T *weights, int start_col, int n_cols, T reset_prob) {
// col-major in CUDA.
if (dev_reset_nrnd_ == nullptr) {
initResetRnd();
}
int n = n_cols * this->d_size_;
int offset = start_col * this->d_size_;
// draw the reset noise (and, if probabilistic, the per-entry uniform flags)
// on the RNG stream; the main stream waits on it below
this->rnd_context_->randNormal(
dev_reset_nrnd_->getData(), n_cols * this->d_size_, 0.0, getPar().reset_std);
if (reset_prob < 1) {
this->rnd_context_->randUniform(dev_reset_flag_->getData(), n_cols * this->d_size_);
}
this->context_->recordWaitEvent(this->rnd_context_->getStream());
if (n >= this->size_) {
// reset whole matrix
RPU::math::elemresetsat<T>(
this->context_, weights, this->size_, dev_reset_bias_->getDataConst(),
dev_reset_nrnd_->getDataConst(), dev_reset_flag_->getDataConst(), reset_prob,
dev_4params_->getData());
} else if (offset + n <= this->size_) {
// one pass enough
RPU::math::elemresetsat<T>(
this->context_, weights + offset, n, dev_reset_bias_->getDataConst() + offset,
dev_reset_nrnd_->getDataConst(), dev_reset_flag_->getDataConst(), reset_prob,
dev_4params_->getData() + 4 * offset);
} else {
// two passes: tail of the matrix first, then wrap around to the beginning
int m = this->size_ - offset;
RPU::math::elemresetsat<T>(
this->context_, weights + offset, m, dev_reset_bias_->getDataConst() + offset,
dev_reset_nrnd_->getDataConst(), dev_reset_flag_->getDataConst(), reset_prob,
dev_4params_->getData() + 4 * offset);
RPU::math::elemresetsat<T>(
this->context_, weights, n - m, dev_reset_bias_->getDataConst(),
dev_reset_nrnd_->getDataConst() + m, dev_reset_flag_->getDataConst() + m, reset_prob,
dev_4params_->getData());
}
}
// Dispatch the pulsed-update kernel selected by kpars, forwarding this device
// (for its parameter arrays) together with the bit-line data and RNG states.
template <typename T>
void PulsedRPUDeviceCuda<T>::runUpdateKernel(
pwukp_t<T> kpars,
CudaContext *c,
T *dev_weights,
int m_batch,
const BitLineMaker<T> *blm,
const PulsedUpdateMetaParameter<T> &up,
curandState_t *dev_states,
int one_sided,
uint32_t *x_counts_chunk,
uint32_t *d_counts_chunk) {
kpars->run(
c->getStream(), dev_weights, m_batch, blm, this, up, dev_states, one_sided, x_counts_chunk,
d_counts_chunk);
}
template class PulsedRPUDeviceCuda<float>;
#ifdef RPU_USE_DOUBLE
template class PulsedRPUDeviceCuda<double>;
#endif
} // namespace RPU
|
7c1ab74409977f6c700d76c69137d120ef05f409.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
File name: bc_bottom_up_gpu.cu
Author: Yuede Ji
Last update: 1:15 10-28-2015
Description: GPU bc on small graph
() read begin position, csr, weight value from binary file
(2) betweenness centrality
(3) atomic lock
**/
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <string.h>
#include "wtime.h"
#include "graph.h"
using namespace std;
const char output_file[] = "/home/yuede/small_graph/result_bc/bc_bottom_up_gpu.bc";
const char sp_count_file[] = "/home/yuede/small_graph/result_bc/bc_bottom_up_gpu.sp_count";
const char dist_file[] = "/home/yuede/small_graph/result_bc/bc_bottom_up_gpu.dist";
const char sa_file[] = "/home/yuede/small_graph/result_bc/bc_bottom_up_gpu.sa";
const int INF = 0x7fffffff;
const int V = 218;
//const int startnum = 2;
//const int blocknum = 3;
path_t bc[V*V];
index_t sa[V];
index_t sp_count[V];
path_t local_dist[V];
index_t sa_global[V*V];
int sp_count_global[V*V];
path_t dist_global[V*V];
path_t bc_global[V*V];
/*void print_result()
{
FILE * fp = fopen(output_file, "w");
for(int i=0; i<V*V; ++i)
{
fprintf(fp, "%g\n", bc[i]);
//fprintf(fp, "%d %g\n", i, bc[i]);
}
fclose(fp);
}
*/
void print_debug()
{
  // Dump the global diagnostic arrays -- shortest-path counts, SSSP levels,
  // distances, and BC values -- one value per line, to their output files.
  FILE * fp = fopen(sp_count_file, "w");
  for(index_t i=0; i<V*V; ++i)
  {
    fprintf(fp, "%d\n", sp_count_global[i]);
  }
  fclose(fp);
  fp = fopen(sa_file, "w");
  for(index_t i=0; i<V*V; ++i)
  {
    fprintf(fp, "%u\n", sa_global[i]);
  }
  fclose(fp);
  fp = fopen(dist_file, "w");
  for(index_t i=0; i<V*V; ++i)
  {
    fprintf(fp, "%g\n", dist_global[i]);
  }
  fclose(fp);
  fp = fopen(output_file, "w");
  for(int i=0; i<V*V; ++i)
  {
    fprintf(fp, "%g\n", bc_global[i]);
  }
  fclose(fp);
}
// CUDA kernel folding the per-source BC partials stored in dev_bc (laid out
// as blockDim.x entries per source, one chunk per source) into the first
// blockDim.x entries. Thread `id` sums only indices congruent to id modulo
// blockDim.x, so each thread touches a disjoint slice and no atomics are
// needed. Intended for a single-block launch with blockDim.x == V --
// NOTE(review): confirm at the call site.
__global__ void bc_merge(path_t * dev_bc)
{
int id = threadIdx.x;
int bc_id = id + blockDim.x;
while(bc_id < blockDim.x * blockDim.x)
{
dev_bc[id] += dev_bc[bc_id];
bc_id += blockDim.x;
}
// barrier before kernel exit; has no observable effect here
__syncthreads();
}
__global__ void bc_all(index_t * dev_beg_pos,
index_t * dev_csr,
path_t *dev_weight,
path_t * dev_bc,
index_t * dev_sa_global,
int * dev_sp_count_global,
path_t * dev_dist_global,
int start_vert,
int end_vert)
{
//printf("block_dim = %d\n", blockDim.x);
__shared__ int shared_sp_count[V];
__shared__ path_t shared_bc_tmp[V];
__shared__ path_t shared_dist[V];
__shared__ index_t shared_sa[V];
__shared__ bool flag;
__shared__ int level;
index_t root = blockIdx.x;
index_t dest = threadIdx.x;
index_t id = threadIdx.x + blockIdx.x * blockDim.x;
//initialize
shared_dist[dest] = INF;
shared_sp_count[dest] = 0;
shared_bc_tmp[dest] = 0;
shared_sa[dest] = INF;
__syncthreads();
if(root < start_vert || root >= end_vert)
return;
if(dest == 0)
{
shared_dist[root] = 0;
shared_sp_count[root] = 1;
level = 0;
flag = true;
shared_sa[root] = 0;
//printf("%u, %u\n", root, id);
}
//printf("%u\n", id);
__syncthreads();
//Step: sssp
//printf("shared_dist[%u] = %g\n", root, shared_dist[root]);
int iteration=0;
assert(gridDim.x >= V);
while(flag)
{
__syncthreads();///the problem!
flag = false;
bool flag_one = false;
int prev = -1;
iteration++;
__syncthreads();
for(index_t j=dev_beg_pos[dest]; j<dev_beg_pos[dest+1]; ++j)
{
index_t nebr=dev_csr[j];
if(shared_dist[dest] > shared_dist[nebr] + dev_weight[j] && shared_sa[nebr] < iteration)
// if(shared_dist[dest] > shared_dist[nebr] + dev_weight[j])
{
shared_sa[dest] = iteration;
shared_dist[dest] = shared_dist[nebr] + dev_weight[j];
shared_sp_count[dest] = 0;
//shared_sp_count[dest] = shared_sp_count[nebr];
prev = nebr;
level = iteration;
flag = true;
flag_one = true;
}
}
__syncthreads();
// }
// __syncthreads();
// dev_dist_global[id] = shared_dist[dest];
// dev_sa_global[id] = shared_sa[dest];
// __syncthreads();
//}
if(flag_one)
{
for(index_t j=dev_beg_pos[dest]; j<dev_beg_pos[dest+1]; ++j)
{
index_t nebr=dev_csr[j];
if(shared_dist[dest] == shared_dist[dev_csr[j]] + dev_weight[j]) //&& dev_csr[j] != prev)
{
shared_sp_count[dest] += shared_sp_count[dev_csr[j]];
// shared_sa[dest] = shared_sa[dev_csr[j]] + ;
// if(shared_sa[dest] < shared_sa[dev_csr[j]] + )
// {
// shared_sa[dest] = shared_sa[dev_csr[j]] + ;
// if(shared_sa[dest] > level)
// level = shared_sa[dest];
// }
}
}
}
__syncthreads();
}
// dev_dist_global[id] = shared_dist[dest];
// __syncthreads();
// __syncthreads();
// if(shared_sa[dest] < INF -10 && shared_sa[dest] > level)
// {
// //printf("ok\n");
// //level = 0;
// //printf("%u\n", shared_sa[dest]);
// level = shared_sa[dest];
// }
__syncthreads();
//if(dest == 0)
// printf("level = %d\n", level);
// dev_bc[id] = shared_sp_count[dest];
dev_sa_global[id] = shared_sa[dest];
dev_sp_count_global[id] = shared_sp_count[dest];
dev_dist_global[id] = shared_dist[dest];
__syncthreads();
//printf("\n");
//Step 2: bc_one
//printf("%u %d\n", root, level);
while(level>=0)
{
//printf("level = %d\n", level);
if(shared_sa[dest] == level)
{
for(index_t j=dev_beg_pos[dest]; j<dev_beg_pos[dest+1]; ++j)
{
if(shared_dist[dev_csr[j]] == shared_dist[dest] + dev_weight[j])
{
///shared_bc_tmp[dev_csr[j]] may be changed
//printf("level = %d, sa[%u] = %u, sa[%u] = %u\n", level, dest, shared_sa[dest], dev_csr[j], shared_sa[dev_csr[j]]);
if(shared_sp_count[dev_csr[j]] != 0)
shared_bc_tmp[dest] += shared_sp_count[dest]*(1.0 + shared_bc_tmp[dev_csr[j]])/(shared_sp_count[dev_csr[j]]);
}
//__syncthreads();
}
}
__syncthreads();
if(dest == 0)
{
level = level - 1;
//printf("level = %d\n", level);
}
__syncthreads();
}
__syncthreads();
if(dest == root)
shared_bc_tmp[root] = 0;
//atomic 4
dev_bc[id] = shared_bc_tmp[dest];
}
void bc_gpu_launch(graph *g, int start_vert, int end_vert)
{
index_t v = g->vert_count;
index_t e = g->edge_count;
//cout<<"v = "<<v<<", e = "<<e<<endl;
//printf("gpu %d, %d\n", start_vert, end_vert);
index_t *dev_beg_pos;
vertex_t *dev_csr;
path_t *dev_weight;
path_t *dev_bc;
index_t *dev_sa_global;
int *dev_sp_count_global;
path_t *dev_bc_global;
path_t *dev_dist_global;
hipMalloc( (void **) &dev_beg_pos, (v+1)*sizeof(index_t));
hipMalloc( (void **) &dev_csr, e*sizeof(vertex_t));
hipMalloc( (void **) &dev_weight, e*sizeof(path_t));
hipMalloc( (void **) &dev_bc, V*V*sizeof(path_t));
hipMalloc( (void **) &dev_sa_global, V*V*sizeof(index_t));
hipMalloc( (void **) &dev_sp_count_global, V*V*sizeof(int));
hipMalloc( (void **) &dev_dist_global, V*V*sizeof(path_t));
hipMalloc( (void **) &dev_bc_global, V*V*sizeof(path_t));
hipMemcpy(dev_beg_pos, g->beg_pos, (v+1)*sizeof(index_t), hipMemcpyHostToDevice);
hipMemcpy(dev_csr, g->csr, e*sizeof(index_t),hipMemcpyHostToDevice);
hipMemcpy(dev_weight, g->weight, e*sizeof(path_t),hipMemcpyHostToDevice);
hipMemset(dev_bc, 0, V*V*sizeof(path_t));
hipMemset(dev_sa_global, 0, V*V*sizeof(index_t));
hipMemset(dev_sp_count_global, 0, V*V*sizeof(int));
hipMemset(dev_dist_global, 0, V*V*sizeof(path_t));
hipMemset(dev_bc_global, 0, V*V*sizeof(index_t));
hipLaunchKernelGGL(( bc_all), dim3(V), dim3(V), 0, 0, dev_beg_pos, dev_csr, dev_weight, dev_bc_global, dev_sa_global, dev_sp_count_global, dev_dist_global, start_vert, end_vert);//, dev_flag_traverse);
hipDeviceSynchronize();
hipLaunchKernelGGL(( bc_merge), dim3(1), dim3(V), 0, 0, dev_bc_global);
hipDeviceSynchronize();
hipMemcpy(sa_global, dev_sa_global, V*V*sizeof(index_t), hipMemcpyDeviceToHost);
hipMemcpy(sp_count_global, dev_sp_count_global, V*V*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(dist_global, dev_dist_global, V*V*sizeof(path_t), hipMemcpyDeviceToHost);
hipMemcpy(bc_global, dev_bc_global, V*V*sizeof(path_t), hipMemcpyDeviceToHost);
hipFree(dev_sa_global);
hipFree(dev_beg_pos);
hipFree(dev_csr);
hipFree(dev_weight);
hipFree(dev_bc);
}
int main(int args, char ** argv)
{
//printf("Input: ./bfs_small_graph /path/to/beg /path/to/csr thread-count\n");
if(args != 7)
exit(-1);
const char *beg_filename = argv[1];
const char *csr_filename = argv[2];
const char *weight_filename = argv[3];
const int thd_count = atoi(argv[4]);
const int start_vert = atoi(argv[5]);
const int end_vert = atoi(argv[6]);
graph *g = new graph(beg_filename, csr_filename, weight_filename);
bc_gpu_launch(g, start_vert, end_vert);// g->vert_count, g->edge_count);
//print_result();
print_debug();
return 0;
}
| 7c1ab74409977f6c700d76c69137d120ef05f409.cu | /**
File name: bc_bottom_up_gpu.cu
Author: Yuede Ji
Last update: 1:15 10-28-2015
Description: GPU bc on small graph
() read begin position, csr, weight value from binary file
(2) betweenness centrality
(3) atomic lock
**/
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <string.h>
#include "wtime.h"
#include "graph.h"
using namespace std;
const char output_file[] = "/home/yuede/small_graph/result_bc/bc_bottom_up_gpu.bc";
const char sp_count_file[] = "/home/yuede/small_graph/result_bc/bc_bottom_up_gpu.sp_count";
const char dist_file[] = "/home/yuede/small_graph/result_bc/bc_bottom_up_gpu.dist";
const char sa_file[] = "/home/yuede/small_graph/result_bc/bc_bottom_up_gpu.sa";
const int INF = 0x7fffffff;
const int V = 218;
//const int startnum = 2;
//const int blocknum = 3;
path_t bc[V*V];
index_t sa[V];
index_t sp_count[V];
path_t local_dist[V];
index_t sa_global[V*V];
int sp_count_global[V*V];
path_t dist_global[V*V];
path_t bc_global[V*V];
/*void print_result()
{
FILE * fp = fopen(output_file, "w");
for(int i=0; i<V*V; ++i)
{
fprintf(fp, "%g\n", bc[i]);
//fprintf(fp, "%d %g\n", i, bc[i]);
}
fclose(fp);
}
*/
void print_debug()
{
FILE * fp_count = fopen(sp_count_file, "w");
for(index_t i=0; i<V*V; ++i)
fprintf(fp_count, "%d\n", sp_count_global[i]);
fclose(fp_count);
FILE * fp_sa = fopen(sa_file, "w");
for(index_t i=0; i<V*V; ++i)
fprintf(fp_sa, "%u\n", sa_global[i]);
fclose(fp_sa);
FILE * fp_dist = fopen(dist_file, "w");
for(index_t i=0; i<V*V; ++i)
fprintf(fp_dist, "%g\n", dist_global[i]);
fclose(fp_dist);
FILE * fp_bc = fopen(output_file, "w");
for(int i=0; i<V*V; ++i)
{
fprintf(fp_bc, "%g\n", bc_global[i]);
//fprintf(fp, "%d %g\n", i, bc[i]);
}
fclose(fp_bc);
}
__global__ void bc_merge(path_t * dev_bc)
{
int id = threadIdx.x;
int bc_id = id + blockDim.x;
while(bc_id < blockDim.x * blockDim.x)
{
dev_bc[id] += dev_bc[bc_id];
bc_id += blockDim.x;
}
__syncthreads();
}
__global__ void bc_all(index_t * dev_beg_pos,
index_t * dev_csr,
path_t *dev_weight,
path_t * dev_bc,
index_t * dev_sa_global,
int * dev_sp_count_global,
path_t * dev_dist_global,
int start_vert,
int end_vert)
{
//printf("block_dim = %d\n", blockDim.x);
__shared__ int shared_sp_count[V];
__shared__ path_t shared_bc_tmp[V];
__shared__ path_t shared_dist[V];
__shared__ index_t shared_sa[V];
__shared__ bool flag;
__shared__ int level;
index_t root = blockIdx.x;
index_t dest = threadIdx.x;
index_t id = threadIdx.x + blockIdx.x * blockDim.x;
//initialize
shared_dist[dest] = INF;
shared_sp_count[dest] = 0;
shared_bc_tmp[dest] = 0;
shared_sa[dest] = INF;
__syncthreads();
if(root < start_vert || root >= end_vert)
return;
if(dest == 0)
{
shared_dist[root] = 0;
shared_sp_count[root] = 1;
level = 0;
flag = true;
shared_sa[root] = 0;
//printf("%u, %u\n", root, id);
}
//printf("%u\n", id);
__syncthreads();
//Step: sssp
//printf("shared_dist[%u] = %g\n", root, shared_dist[root]);
int iteration=0;
assert(gridDim.x >= V);
while(flag)
{
__syncthreads();///the problem!
flag = false;
bool flag_one = false;
int prev = -1;
iteration++;
__syncthreads();
for(index_t j=dev_beg_pos[dest]; j<dev_beg_pos[dest+1]; ++j)
{
index_t nebr=dev_csr[j];
if(shared_dist[dest] > shared_dist[nebr] + dev_weight[j] && shared_sa[nebr] < iteration)
// if(shared_dist[dest] > shared_dist[nebr] + dev_weight[j])
{
shared_sa[dest] = iteration;
shared_dist[dest] = shared_dist[nebr] + dev_weight[j];
shared_sp_count[dest] = 0;
//shared_sp_count[dest] = shared_sp_count[nebr];
prev = nebr;
level = iteration;
flag = true;
flag_one = true;
}
}
__syncthreads();
// }
// __syncthreads();
// dev_dist_global[id] = shared_dist[dest];
// dev_sa_global[id] = shared_sa[dest];
// __syncthreads();
//}
if(flag_one)
{
for(index_t j=dev_beg_pos[dest]; j<dev_beg_pos[dest+1]; ++j)
{
index_t nebr=dev_csr[j];
if(shared_dist[dest] == shared_dist[dev_csr[j]] + dev_weight[j]) //&& dev_csr[j] != prev)
{
shared_sp_count[dest] += shared_sp_count[dev_csr[j]];
// shared_sa[dest] = shared_sa[dev_csr[j]] + ;
// if(shared_sa[dest] < shared_sa[dev_csr[j]] + )
// {
// shared_sa[dest] = shared_sa[dev_csr[j]] + ;
// if(shared_sa[dest] > level)
// level = shared_sa[dest];
// }
}
}
}
__syncthreads();
}
// dev_dist_global[id] = shared_dist[dest];
// __syncthreads();
// __syncthreads();
// if(shared_sa[dest] < INF -10 && shared_sa[dest] > level)
// {
// //printf("ok\n");
// //level = 0;
// //printf("%u\n", shared_sa[dest]);
// level = shared_sa[dest];
// }
__syncthreads();
//if(dest == 0)
// printf("level = %d\n", level);
// dev_bc[id] = shared_sp_count[dest];
dev_sa_global[id] = shared_sa[dest];
dev_sp_count_global[id] = shared_sp_count[dest];
dev_dist_global[id] = shared_dist[dest];
__syncthreads();
//printf("\n");
//Step 2: bc_one
//printf("%u %d\n", root, level);
while(level>=0)
{
//printf("level = %d\n", level);
if(shared_sa[dest] == level)
{
for(index_t j=dev_beg_pos[dest]; j<dev_beg_pos[dest+1]; ++j)
{
if(shared_dist[dev_csr[j]] == shared_dist[dest] + dev_weight[j])
{
///shared_bc_tmp[dev_csr[j]] may be changed
//printf("level = %d, sa[%u] = %u, sa[%u] = %u\n", level, dest, shared_sa[dest], dev_csr[j], shared_sa[dev_csr[j]]);
if(shared_sp_count[dev_csr[j]] != 0)
shared_bc_tmp[dest] += shared_sp_count[dest]*(1.0 + shared_bc_tmp[dev_csr[j]])/(shared_sp_count[dev_csr[j]]);
}
//__syncthreads();
}
}
__syncthreads();
if(dest == 0)
{
level = level - 1;
//printf("level = %d\n", level);
}
__syncthreads();
}
__syncthreads();
if(dest == root)
shared_bc_tmp[root] = 0;
//atomic 4
dev_bc[id] = shared_bc_tmp[dest];
}
void bc_gpu_launch(graph *g, int start_vert, int end_vert)
{
index_t v = g->vert_count;
index_t e = g->edge_count;
//cout<<"v = "<<v<<", e = "<<e<<endl;
//printf("gpu %d, %d\n", start_vert, end_vert);
index_t *dev_beg_pos;
vertex_t *dev_csr;
path_t *dev_weight;
path_t *dev_bc;
index_t *dev_sa_global;
int *dev_sp_count_global;
path_t *dev_bc_global;
path_t *dev_dist_global;
cudaMalloc( (void **) &dev_beg_pos, (v+1)*sizeof(index_t));
cudaMalloc( (void **) &dev_csr, e*sizeof(vertex_t));
cudaMalloc( (void **) &dev_weight, e*sizeof(path_t));
cudaMalloc( (void **) &dev_bc, V*V*sizeof(path_t));
cudaMalloc( (void **) &dev_sa_global, V*V*sizeof(index_t));
cudaMalloc( (void **) &dev_sp_count_global, V*V*sizeof(int));
cudaMalloc( (void **) &dev_dist_global, V*V*sizeof(path_t));
cudaMalloc( (void **) &dev_bc_global, V*V*sizeof(path_t));
cudaMemcpy(dev_beg_pos, g->beg_pos, (v+1)*sizeof(index_t), cudaMemcpyHostToDevice);
cudaMemcpy(dev_csr, g->csr, e*sizeof(index_t),cudaMemcpyHostToDevice);
cudaMemcpy(dev_weight, g->weight, e*sizeof(path_t),cudaMemcpyHostToDevice);
cudaMemset(dev_bc, 0, V*V*sizeof(path_t));
cudaMemset(dev_sa_global, 0, V*V*sizeof(index_t));
cudaMemset(dev_sp_count_global, 0, V*V*sizeof(int));
cudaMemset(dev_dist_global, 0, V*V*sizeof(path_t));
cudaMemset(dev_bc_global, 0, V*V*sizeof(index_t));
bc_all<<<V, V>>>(dev_beg_pos, dev_csr, dev_weight, dev_bc_global, dev_sa_global, dev_sp_count_global, dev_dist_global, start_vert, end_vert);//, dev_flag_traverse);
cudaDeviceSynchronize();
bc_merge<<<1, V>>>(dev_bc_global);
cudaDeviceSynchronize();
cudaMemcpy(sa_global, dev_sa_global, V*V*sizeof(index_t), cudaMemcpyDeviceToHost);
cudaMemcpy(sp_count_global, dev_sp_count_global, V*V*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(dist_global, dev_dist_global, V*V*sizeof(path_t), cudaMemcpyDeviceToHost);
cudaMemcpy(bc_global, dev_bc_global, V*V*sizeof(path_t), cudaMemcpyDeviceToHost);
cudaFree(dev_sa_global);
cudaFree(dev_beg_pos);
cudaFree(dev_csr);
cudaFree(dev_weight);
cudaFree(dev_bc);
}
int main(int args, char ** argv)
{
//printf("Input: ./bfs_small_graph /path/to/beg /path/to/csr thread-count\n");
if(args != 7)
exit(-1);
const char *beg_filename = argv[1];
const char *csr_filename = argv[2];
const char *weight_filename = argv[3];
const int thd_count = atoi(argv[4]);
const int start_vert = atoi(argv[5]);
const int end_vert = atoi(argv[6]);
graph *g = new graph(beg_filename, csr_filename, weight_filename);
bc_gpu_launch(g, start_vert, end_vert);// g->vert_count, g->edge_count);
//print_result();
print_debug();
return 0;
}
|
99502ed11e7fcb7d1476fa2a006ff985ca6a1f3b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathReduce.cu"
#else
THC_API void
THCTensor_(sum)(THCState* state, THCTensor *self, THCTensor *src, long dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim(state, self, src,
thrust::identity<real>(),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<int, accreal>::to(0),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, long dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim(state, self, src,
thrust::identity<real>(),
ReduceMultiply<real, accreal>(),
ReduceMultiply<accreal, accreal>(),
ScalarConvert<int, accreal>::to(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(mean)(THCState *state, THCTensor *self, THCTensor *src, long dim, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor_(sum)(state, self, src, dim, keepdim);
THCTensor_(div)(state, self, self, ScalarConvert<long, real>::to(THCTensor_(size)(state, src, dim)));
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
THC_API void
THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, real value, long dimension, real maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor *self_;
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
ptrdiff_t size = THCTensor_(nElement)(state, data)/data->size[0];
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimension)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<real>::gt(value, ScalarConvert<int, real>::to(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimension)(state, src) > 1, 1, "need at least 2 dimensions");
dim3 grid(data->size[0]);
dim3 threads(32);
hipLaunchKernelGGL(( THCTensor_kernel_renorm<real>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, data), value, size, maxnorm);
hipError_t errcode = hipGetLastError();
if(errcode != hipSuccess)
THError(hipGetErrorString(errcode));
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
THC_API void
THCTensor_(std)(THCState *state, THCTensor *self_, THCTensor *src, long dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
THLongStorage *dim = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(dim, dimension, 1);
THCTensor_(resize)(state, self_, dim, NULL);
THLongStorage_free(dim);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimension)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, true>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, true>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API void
THCTensor_(var)(THCState *state, THCTensor *self_, THCTensor *src, long dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
THLongStorage *dim = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(dim, dimension, 1);
THCTensor_(resize)(state, self_, dim, NULL);
THLongStorage_free(dim);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimension)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, false>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, false>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API accreal
THCTensor_(stdall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCNumerics<accreal>::sqrt((THCTensor_(varall)(state, self, biased)));
}
THC_API accreal
THCTensor_(varall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal mean = THCTensor_(meanall)(state, self);
accreal val;
if (!THC_reduceAll(state, self,
SquareFunctor<accreal, real>(mean),
ReduceAdd<accreal, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<int, accreal>::to(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
val = THCNumerics<accreal>::div(
val,
ScalarConvert<ptrdiff_t, accreal>::to(THCTensor_(nElement)(state, self) - (biased ? 0 : 1))
);
THCudaCheck(hipGetLastError());
return val;
}
THC_API void
THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, real value, long dimension, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(0.0))) {
THC_reduceDim(state, self, src,
TensorNonZeroOp<real>(), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(1.0))) {
THC_reduceDim(state, self, src,
TensorNormOp<real, 1>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(2.0))) {
THC_reduceDim(state, self, src,
TensorNormOp<real, 2>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
THCTensor_(pow)(state, self, self, ScalarConvert<float, real>::to(0.5));
} else {
THC_reduceDim(state, self, src,
TensorNormOp<real, -1>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
THCTensor_(pow)(state, self, self, THCNumerics<real>::cinv(value));
}
THCudaCheck(hipGetLastError());
}
THC_API accreal
THCTensor_(normall)(THCState *state, THCTensor *self, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal result;
if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(0.0))) {
THC_reduceAll(state, self,
TensorNonZeroOp<real>(),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(1.0))) {
THC_reduceAll(state, self,
TensorNormOp<real, 1>(value),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(2.0))) {
THC_reduceAll(state, self,
TensorNormOp<real, 2>(value),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
result = THCNumerics<accreal>::sqrt(result);
} else {
THC_reduceAll(state, self,
TensorNormOp<real, -1>(value),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
result = THCNumerics<accreal>::pow(
result,
ScalarConvert<real, accreal>::to(THCNumerics<real>::cinv(value))
);
}
THCudaCheck(hipGetLastError());
return result;
}
accreal THCTensor_(dist)(THCState *state, THCTensor *self,
THCTensor *src, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
self = THCTensor_(newContiguous)(state, self);
ptrdiff_t size = THCTensor_(nElement)(state, self);
src = THCTensor_(newContiguous)(state, src);
thrust::device_ptr<real> self_data(THCTensor_(data)(state, self));
thrust::device_ptr<real> src_data(THCTensor_(data)(state, src));
THCThrustAllocator thrustAlloc(state);
accreal result = thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, ScalarConvert<int, accreal>::to(0),
thrust::plus<accreal>(),
TensorDistOp<accreal, real>(ScalarConvert<real, accreal>::to(value)));
THCTensor_(free)(state, src);
THCTensor_(free)(state, self);
return THCNumerics<accreal>::pow(result, 1.0 / ScalarConvert<real, accreal>::to(value));
}
#endif
THC_API accreal
THCTensor_(sumall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll(state, self,
thrust::identity<real>(),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<int, accreal>::to(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return val;
}
THC_API accreal
THCTensor_(prodall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll(state, self,
thrust::identity<real>(),
ReduceMultiply<real, accreal>(),
ReduceMultiply<accreal, accreal>(),
ScalarConvert<int, accreal>::to(1),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return val;
}
THC_API accreal
THCTensor_(meanall)(THCState *state, THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
THArgCheck(self->nDimension > 0, 1, "empty Tensor");
return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self);
}
THC_API real
THCTensor_(minall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
real val;
if (!THC_reduceAll(state, self,
thrust::identity<real>(),
ReduceMin<real>(),
ReduceMin<real>(),
THCNumerics<real>::max(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return val;
}
THC_API real
THCTensor_(maxall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
real val;
if (!THC_reduceAll(state, self,
thrust::identity<real>(),
ReduceMax<real>(),
ReduceMax<real>(),
THCNumerics<real>::min(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return val;
}
THC_API real
THCTensor_(medianall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
real val;
ptrdiff_t nelem, k;
nelem = THCTensor_(nElement)(state, self);
k = (nelem-1) >> 1;
THLongStorage *size = THLongStorage_newWithSize1(nelem);
THCTensor *view = THCTensor_(newView)(state, self, size);
THLongStorage_free(size);
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, indices, view, 0, 0);
val = THCTensor_(get1d)(state, sorted, k);
THCTensor_(free)(state, view);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, indices);
THCudaCheck(hipGetLastError());
return val;
}
THC_API void
THCTensor_(median)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *self,
long dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
long t_size_dim, k;
t_size_dim = THCTensor_(size)(state, self, dimension);
k = (t_size_dim-1) >> 1;
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *sorted_indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, sorted_indices, self, dimension, 0);
THCTensor_(narrow)(state, values, sorted, dimension, k, 1);
THCudaLongTensor_narrow(state, indices, sorted_indices, dimension, k, 1);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, sorted_indices);
if (!keepdim) {
THCTensor_(squeeze1d)(state, values, values, dimension);
THCudaLongTensor_squeeze1d(state, indices, indices, dimension);
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(max)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
long dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<typename TensorUtils<THCTensor>::DataType, long>
init =
thrust::make_pair<typename TensorUtils<THCTensor>::DataType, long>(
THCNumerics<typename TensorUtils<THCTensor>::DataType>::min(), 1);
return THC_reduceDimIndex(
state, values, indices, src, dimension, keepdim, init,
MaxValuePair<typename TensorUtils<THCTensor>::DataType, long>());
}
THC_API void
THCTensor_(min)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
long dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<typename TensorUtils<THCTensor>::DataType, long>
init =
thrust::make_pair<typename TensorUtils<THCTensor>::DataType, long>(
THCNumerics<typename TensorUtils<THCTensor>::DataType>::max(), 1);
return THC_reduceDimIndex(
state, values, indices, src, dimension, keepdim, init,
MinValuePair<typename TensorUtils<THCTensor>::DataType, long>());
}
#endif
| 99502ed11e7fcb7d1476fa2a006ff985ca6a1f3b.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathReduce.cu"
#else
THC_API void
THCTensor_(sum)(THCState* state, THCTensor *self, THCTensor *src, long dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim(state, self, src,
thrust::identity<real>(),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<int, accreal>::to(0),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, long dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim(state, self, src,
thrust::identity<real>(),
ReduceMultiply<real, accreal>(),
ReduceMultiply<accreal, accreal>(),
ScalarConvert<int, accreal>::to(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(mean)(THCState *state, THCTensor *self, THCTensor *src, long dim, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor_(sum)(state, self, src, dim, keepdim);
THCTensor_(div)(state, self, self, ScalarConvert<long, real>::to(THCTensor_(size)(state, src, dim)));
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
THC_API void
THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, real value, long dimension, real maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor *self_;
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
ptrdiff_t size = THCTensor_(nElement)(state, data)/data->size[0];
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimension)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<real>::gt(value, ScalarConvert<int, real>::to(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimension)(state, src) > 1, 1, "need at least 2 dimensions");
dim3 grid(data->size[0]);
dim3 threads(32);
THCTensor_kernel_renorm<real><<<grid, threads, 0, THCState_getCurrentStream(state)>>>(THCTensor_(data)(state, data), value, size, maxnorm);
cudaError errcode = cudaGetLastError();
if(errcode != cudaSuccess)
THError(cudaGetErrorString(errcode));
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
THC_API void
THCTensor_(std)(THCState *state, THCTensor *self_, THCTensor *src, long dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
THLongStorage *dim = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(dim, dimension, 1);
THCTensor_(resize)(state, self_, dim, NULL);
THLongStorage_free(dim);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimension)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, true>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, true>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API void
THCTensor_(var)(THCState *state, THCTensor *self_, THCTensor *src, long dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
THLongStorage *dim = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(dim, dimension, 1);
THCTensor_(resize)(state, self_, dim, NULL);
THLongStorage_free(dim);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimension)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, false>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, false>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API accreal
THCTensor_(stdall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCNumerics<accreal>::sqrt((THCTensor_(varall)(state, self, biased)));
}
THC_API accreal
THCTensor_(varall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal mean = THCTensor_(meanall)(state, self);
accreal val;
if (!THC_reduceAll(state, self,
SquareFunctor<accreal, real>(mean),
ReduceAdd<accreal, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<int, accreal>::to(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
val = THCNumerics<accreal>::div(
val,
ScalarConvert<ptrdiff_t, accreal>::to(THCTensor_(nElement)(state, self) - (biased ? 0 : 1))
);
THCudaCheck(cudaGetLastError());
return val;
}
THC_API void
THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, real value, long dimension, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(0.0))) {
THC_reduceDim(state, self, src,
TensorNonZeroOp<real>(), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(1.0))) {
THC_reduceDim(state, self, src,
TensorNormOp<real, 1>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(2.0))) {
THC_reduceDim(state, self, src,
TensorNormOp<real, 2>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
THCTensor_(pow)(state, self, self, ScalarConvert<float, real>::to(0.5));
} else {
THC_reduceDim(state, self, src,
TensorNormOp<real, -1>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
THCTensor_(pow)(state, self, self, THCNumerics<real>::cinv(value));
}
THCudaCheck(cudaGetLastError());
}
THC_API accreal
THCTensor_(normall)(THCState *state, THCTensor *self, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal result;
if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(0.0))) {
THC_reduceAll(state, self,
TensorNonZeroOp<real>(),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(1.0))) {
THC_reduceAll(state, self,
TensorNormOp<real, 1>(value),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(2.0))) {
THC_reduceAll(state, self,
TensorNormOp<real, 2>(value),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
result = THCNumerics<accreal>::sqrt(result);
} else {
THC_reduceAll(state, self,
TensorNormOp<real, -1>(value),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
result = THCNumerics<accreal>::pow(
result,
ScalarConvert<real, accreal>::to(THCNumerics<real>::cinv(value))
);
}
THCudaCheck(cudaGetLastError());
return result;
}
// Computes the p-distance ||self - src||_p with p == `value`, i.e.
// pow( sum_i |self_i - src_i|^p, 1/p ), accumulating in accreal.
// Both tensors are flattened via contiguous copies for the linear thrust
// traversal; the temporaries are released before returning.
accreal THCTensor_(dist)(THCState *state, THCTensor *self,
THCTensor *src, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
self = THCTensor_(newContiguous)(state, self);
ptrdiff_t size = THCTensor_(nElement)(state, self);
src = THCTensor_(newContiguous)(state, src);
thrust::device_ptr<real> self_data(THCTensor_(data)(state, self));
thrust::device_ptr<real> src_data(THCTensor_(data)(state, src));
THCThrustAllocator thrustAlloc(state);
// inner_product folds TensorDistOp (elementwise |a-b|^p) with plus<accreal>.
accreal result = thrust::inner_product(
#if CUDA_VERSION >= 7000
// Run on the current THC stream using the THC allocator for temporaries.
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, ScalarConvert<int, accreal>::to(0),
thrust::plus<accreal>(),
TensorDistOp<accreal, real>(ScalarConvert<real, accreal>::to(value)));
THCTensor_(free)(state, src);
THCTensor_(free)(state, self);
// Final 1/p root; note 1.0/... performs the division in double precision.
return THCNumerics<accreal>::pow(result, 1.0 / ScalarConvert<real, accreal>::to(value));
}
#endif
// Sums all elements of `self`, accumulating in accreal, and returns the
// total on the host. Raises CUTORCH_DIM_WARNING if the reduction cannot
// be performed (e.g. unsupported dimensionality).
THC_API accreal
THCTensor_(sumall)(THCState *state, THCTensor *self) {
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
  accreal total;
  // Identity element 0; values are promoted to accreal before adding.
  const bool reduced = THC_reduceAll(state, self,
                                     thrust::identity<real>(),
                                     ReduceAdd<real, accreal>(),
                                     ReduceAdd<accreal, accreal>(),
                                     ScalarConvert<int, accreal>::to(0),
                                     &total, 0);
  if (!reduced) {
    THArgCheck(false, 1, CUTORCH_DIM_WARNING);
  }
  THCudaCheck(cudaGetLastError());
  return total;
}
// Multiplies all elements of `self` together, accumulating in accreal,
// and returns the product on the host. Raises CUTORCH_DIM_WARNING if the
// reduction cannot be performed.
THC_API accreal
THCTensor_(prodall)(THCState *state, THCTensor *self) {
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
  accreal product;
  // Identity element 1 for the multiplicative reduction.
  const bool reduced = THC_reduceAll(state, self,
                                     thrust::identity<real>(),
                                     ReduceMultiply<real, accreal>(),
                                     ReduceMultiply<accreal, accreal>(),
                                     ScalarConvert<int, accreal>::to(1),
                                     &product, 0);
  if (!reduced) {
    THArgCheck(false, 1, CUTORCH_DIM_WARNING);
  }
  THCudaCheck(cudaGetLastError());
  return product;
}
// Arithmetic mean of all elements: sumall / element count.
// Rejects empty tensors (division by zero otherwise).
THC_API accreal
THCTensor_(meanall)(THCState *state, THCTensor *self)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
  THArgCheck(self->nDimension > 0, 1, "empty Tensor");
  const accreal total = THCTensor_(sumall)(state, self);
  return total / THCTensor_(nElement)(state, self);
}
// Minimum over all elements; the reduction identity is the largest
// representable `real` so any element can lower it. Raises
// CUTORCH_DIM_WARNING if the reduction cannot be performed.
THC_API real
THCTensor_(minall)(THCState *state, THCTensor *self) {
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
  real smallest;
  const bool reduced = THC_reduceAll(state, self,
                                     thrust::identity<real>(),
                                     ReduceMin<real>(),
                                     ReduceMin<real>(),
                                     THCNumerics<real>::max(), &smallest, 0);
  if (!reduced) {
    THArgCheck(false, 1, CUTORCH_DIM_WARNING);
  }
  THCudaCheck(cudaGetLastError());
  return smallest;
}
// Maximum over all elements; the reduction identity is the smallest
// representable `real` so any element can raise it. Raises
// CUTORCH_DIM_WARNING if the reduction cannot be performed.
THC_API real
THCTensor_(maxall)(THCState *state, THCTensor *self) {
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
  real largest;
  const bool reduced = THC_reduceAll(state, self,
                                     thrust::identity<real>(),
                                     ReduceMax<real>(),
                                     ReduceMax<real>(),
                                     THCNumerics<real>::min(), &largest, 0);
  if (!reduced) {
    THArgCheck(false, 1, CUTORCH_DIM_WARNING);
  }
  THCudaCheck(cudaGetLastError());
  return largest;
}
// Median over all elements: flattens `self` into a 1-D view, sorts it
// ascending, and returns the element at index (n-1)/2. For an even number
// of elements this picks the lower of the two middle values (no averaging).
THC_API real
THCTensor_(medianall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
real val;
ptrdiff_t nelem, k;
nelem = THCTensor_(nElement)(state, self);
// Index of the (lower) middle element after sorting.
k = (nelem-1) >> 1;
// 1-D view of all elements; the storage sizes the new shape.
THLongStorage *size = THLongStorage_newWithSize1(nelem);
THCTensor *view = THCTensor_(newView)(state, self, size);
THLongStorage_free(size);
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *indices = THCudaLongTensor_new(state);
// Ascending sort along the only dimension (dim 0, descending flag 0).
THCTensor_(sort)(state, sorted, indices, view, 0, 0);
// Device-to-host read of the k-th order statistic.
val = THCTensor_(get1d)(state, sorted, k);
THCTensor_(free)(state, view);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, indices);
THCudaCheck(cudaGetLastError());
return val;
}
// Median along `dimension`: sorts `self` ascending along that dimension
// and extracts the slice at index (size-1)/2 (the lower middle element
// for even sizes), writing both the values and their original indices.
// If `keepdim` is false the reduced dimension is squeezed away.
THC_API void
THCTensor_(median)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *self,
long dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
long t_size_dim, k;
t_size_dim = THCTensor_(size)(state, self, dimension);
// Index of the (lower) middle slice after sorting.
k = (t_size_dim-1) >> 1;
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *sorted_indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, sorted_indices, self, dimension, 0);
// `values`/`indices` are narrowed views onto the sorted temporaries,
// restricted to the single median slice.
THCTensor_(narrow)(state, values, sorted, dimension, k, 1);
THCudaLongTensor_narrow(state, indices, sorted_indices, dimension, k, 1);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, sorted_indices);
if (!keepdim) {
THCTensor_(squeeze1d)(state, values, values, dimension);
THCudaLongTensor_squeeze1d(state, indices, indices, dimension);
}
THCudaCheck(cudaGetLastError());
}
// Max along `dimension`, producing both the maximum values and argmax
// indices. The reduction starts from (lowest representable value, 1) so
// that any element beats the initial pair.
// NOTE(review): the initial index of 1 presumably reflects the 1-based
// Lua/TH indexing convention -- confirm against callers.
THC_API void
THCTensor_(max)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
long dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<typename TensorUtils<THCTensor>::DataType, long>
init =
thrust::make_pair<typename TensorUtils<THCTensor>::DataType, long>(
THCNumerics<typename TensorUtils<THCTensor>::DataType>::min(), 1);
return THC_reduceDimIndex(
state, values, indices, src, dimension, keepdim, init,
MaxValuePair<typename TensorUtils<THCTensor>::DataType, long>());
}
// Min along `dimension`, producing both the minimum values and argmin
// indices. The reduction starts from (largest representable value, 1) so
// that any element beats the initial pair.
// NOTE(review): the initial index of 1 presumably reflects the 1-based
// Lua/TH indexing convention -- confirm against callers.
THC_API void
THCTensor_(min)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
long dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<typename TensorUtils<THCTensor>::DataType, long>
init =
thrust::make_pair<typename TensorUtils<THCTensor>::DataType, long>(
THCNumerics<typename TensorUtils<THCTensor>::DataType>::max(), 1);
return THC_reduceDimIndex(
state, values, indices, src, dimension, keepdim, init,
MinValuePair<typename TensorUtils<THCTensor>::DataType, long>());
}
#endif
|
bda468cdf8ae616f029b6ac75f9964d06ea10a4f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Computes output = fltMatrix * vec for a row-major (rows x columns)
// matrix and a length-`columns` vector, one thread per output row.
// Launch with a 1-D grid sized so blockDim.x * gridDim.x >= rows; threads
// with row >= rows return without touching memory.
__global__ void matrixVectorMultKernel(float* fltMatrix, float* vec, float* output, int rows, int columns){
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if(row < rows){
        float sum = 0.0f;
        for (int col = 0; col < columns; ++col) {
            // Fix: accumulate the product. The original used '+', which
            // summed matrix entries plus vector entries instead of
            // performing the matrix-vector multiplication the name promises.
            sum += fltMatrix[row * columns + col] * vec[col];
        }
        output[row] = sum;
    }
} | bda468cdf8ae616f029b6ac75f9964d06ea10a4f.cu | #include "includes.h"
// Computes output = fltMatrix * vec for a row-major (rows x columns)
// matrix and a length-`columns` vector, one thread per output row.
// Launch with a 1-D grid sized so blockDim.x * gridDim.x >= rows; threads
// with row >= rows return without touching memory.
__global__ void matrixVectorMultKernel(float* fltMatrix, float* vec, float* output, int rows, int columns){
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if(row < rows){
        float sum = 0.0f;
        for (int col = 0; col < columns; ++col) {
            // Fix: accumulate the product. The original used '+', which
            // summed matrix entries plus vector entries instead of
            // performing the matrix-vector multiplication the name promises.
            sum += fltMatrix[row * columns + col] * vec[col];
        }
        output[row] = sum;
    }
} |
622e34a9ed544349718ecae2329b2ca069ff5111.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/NumericUtils.h>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/NumericUtils.h>
#include <c10/util/complex.h>
// Elementwise unary GPU kernels dispatched through TensorIterator.
// Each *_kernel_cuda function dispatches on the iterator's dtype via an
// AT_DISPATCH_* macro and launches a vectorized elementwise kernel with
// gpu_kernel; the stubs are registered at the bottom of the namespace.
namespace at {
namespace native {
// 2^x.
void exp2_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "exp2_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::exp2(a);
});
});
}
// Modified Bessel function of the first kind, order zero.
void i0_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "i0_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_i0(a);
});
});
}
// Exponentially scaled modified Bessel function I0e.
void i0e_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0e_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_i0e(a);
});
});
}
// Logistic sigmoid: 1 / (1 + e^-x).
void sigmoid_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "sigmoid_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return static_cast<scalar_t>(1) / (static_cast<scalar_t>(1) + ::exp(-a));
});
});
}
// Normalized sinc: sin(pi*x)/(pi*x), with sinc(0) = 1.
void sinc_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "sinc_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
if (a == scalar_t(0)) {
return scalar_t(1);
} else {
// NVCC says constexpr var is not accessible from device
scalar_t product = c10::detail::pi<scalar_t>() * a;
return std::sin(product) / product;
}
});
});
}
// logit(x) = log(x / (1-x)); with eps >= 0 the input is clamped to
// [eps, 1-eps] first, with eps < 0 it is left unclamped.
void logit_kernel_cuda(TensorIteratorBase& iter, const Scalar& eps_scalar) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.common_dtype(),
"logit_cuda",
[&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC eps = eps_scalar.to<T_ACC>();
if (eps < T_ACC(0)) {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
const T_ACC x_acc = static_cast<T_ACC>(x);
return c10::hip::compat::log(x_acc / (T_ACC(1) - x_acc));
});
} else {
const T_ACC lo = eps;
const T_ACC hi = T_ACC(1) - eps;
gpu_kernel(
iter, [lo, hi] GPU_LAMBDA(scalar_t x) -> scalar_t {
const T_ACC x_acc = static_cast<T_ACC>(x);
T_ACC z = x_acc < lo ? lo : (x_acc > hi ? hi : x_acc);
return c10::hip::compat::log(z / (T_ACC(1) - z));
});
}
});
}
// Gauss error function.
void erf_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "erf_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erf(a);
});
});
}
// Complementary error function: 1 - erf(x).
void erfc_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "erfc_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erfc(a);
});
});
}
// Inverse error function.
void erfinv_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "erfinv_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erfinv(a);
});
});
}
// Kaiser window of the given length and shape parameter beta, evaluated
// at integer positions supplied by the iterator:
// w(a) = I0(beta * sqrt(1 - (2a/(N-1) - 1)^2)) / I0(beta).
void kaiser_window_kernel_cuda(TensorIteratorBase& iter, int64_t window_length, double beta_){
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "kaiser_window_cuda", [&](){
using T_ACC = acc_type<scalar_t, true>;
const T_ACC inv_alpha = static_cast<T_ACC>(2.0 / (window_length - 1));
const T_ACC beta = static_cast<T_ACC>(beta_);
const T_ACC inv_i0_beta = 1.0 / calc_i0(beta);
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t a) -> scalar_t {
T_ACC x = static_cast<T_ACC>(a) * inv_alpha - 1;
// Clamp the radicand at 0 to guard against rounding outside [-1, 1].
T_ACC y = std::max<T_ACC>(0, 1 - x * x);
return calc_i0(beta * ::sqrt(y)) * inv_i0_beta;
});
});
}
// Elementwise entropy term: -x*log(x) for x > 0, 0 at x == 0,
// -inf for x < 0; NaN propagates.
void entr_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
iter.common_dtype(),
"entr_cuda",
[&]() {
gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t x) -> scalar_t {
if (at::_isnan(x)) {
return x;
} else if (x > 0) {
return -x * ::log(x);
} else if (x == 0) {
return 0;
}
return static_cast<scalar_t>(-INFINITY);
});
});
}
// Stub registrations: route the device-agnostic dispatch stubs to the
// CUDA implementations above.
REGISTER_DISPATCH(exp2_stub, &exp2_kernel_cuda);
REGISTER_DISPATCH(i0_stub, &i0_kernel_cuda);
REGISTER_DISPATCH(special_i0e_stub, &i0e_kernel_cuda);
REGISTER_DISPATCH(sigmoid_stub, &sigmoid_kernel_cuda);
REGISTER_DISPATCH(sinc_stub, &sinc_kernel_cuda);
REGISTER_DISPATCH(logit_stub, &logit_kernel_cuda);
REGISTER_DISPATCH(erf_stub, &erf_kernel_cuda);
REGISTER_DISPATCH(erfc_stub, &erfc_kernel_cuda);
REGISTER_DISPATCH(erfinv_stub, &erfinv_kernel_cuda);
REGISTER_DISPATCH(kaiser_window_stub, &kaiser_window_kernel_cuda);
REGISTER_DISPATCH(special_entr_stub, &entr_kernel_cuda);
} // namespace native
} // namespace at
| 622e34a9ed544349718ecae2329b2ca069ff5111.cu | #include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/NumericUtils.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/NumericUtils.h>
#include <c10/util/complex.h>
// Elementwise unary CUDA kernels dispatched through TensorIterator.
// Each *_kernel_cuda function dispatches on the iterator's dtype via an
// AT_DISPATCH_* macro and launches a vectorized elementwise kernel with
// gpu_kernel; the stubs are registered at the bottom of the namespace.
namespace at {
namespace native {
// 2^x.
void exp2_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "exp2_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::exp2(a);
});
});
}
// Modified Bessel function of the first kind, order zero.
void i0_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "i0_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_i0(a);
});
});
}
// Exponentially scaled modified Bessel function I0e.
void i0e_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0e_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_i0e(a);
});
});
}
// Logistic sigmoid: 1 / (1 + e^-x).
void sigmoid_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "sigmoid_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return static_cast<scalar_t>(1) / (static_cast<scalar_t>(1) + std::exp(-a));
});
});
}
// Normalized sinc: sin(pi*x)/(pi*x), with sinc(0) = 1.
void sinc_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "sinc_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
if (a == scalar_t(0)) {
return scalar_t(1);
} else {
// NVCC says constexpr var is not accessible from device
scalar_t product = c10::detail::pi<scalar_t>() * a;
return std::sin(product) / product;
}
});
});
}
// logit(x) = log(x / (1-x)); with eps >= 0 the input is clamped to
// [eps, 1-eps] first, with eps < 0 it is left unclamped.
void logit_kernel_cuda(TensorIteratorBase& iter, const Scalar& eps_scalar) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.common_dtype(),
"logit_cuda",
[&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC eps = eps_scalar.to<T_ACC>();
if (eps < T_ACC(0)) {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
const T_ACC x_acc = static_cast<T_ACC>(x);
return c10::cuda::compat::log(x_acc / (T_ACC(1) - x_acc));
});
} else {
const T_ACC lo = eps;
const T_ACC hi = T_ACC(1) - eps;
gpu_kernel(
iter, [lo, hi] GPU_LAMBDA(scalar_t x) -> scalar_t {
const T_ACC x_acc = static_cast<T_ACC>(x);
T_ACC z = x_acc < lo ? lo : (x_acc > hi ? hi : x_acc);
return c10::cuda::compat::log(z / (T_ACC(1) - z));
});
}
});
}
// Gauss error function.
void erf_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "erf_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erf(a);
});
});
}
// Complementary error function: 1 - erf(x).
void erfc_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "erfc_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erfc(a);
});
});
}
// Inverse error function.
void erfinv_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "erfinv_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erfinv(a);
});
});
}
// Kaiser window of the given length and shape parameter beta:
// w(a) = I0(beta * sqrt(1 - (2a/(N-1) - 1)^2)) / I0(beta).
void kaiser_window_kernel_cuda(TensorIteratorBase& iter, int64_t window_length, double beta_){
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "kaiser_window_cuda", [&](){
using T_ACC = acc_type<scalar_t, true>;
const T_ACC inv_alpha = static_cast<T_ACC>(2.0 / (window_length - 1));
const T_ACC beta = static_cast<T_ACC>(beta_);
const T_ACC inv_i0_beta = 1.0 / calc_i0(beta);
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t a) -> scalar_t {
T_ACC x = static_cast<T_ACC>(a) * inv_alpha - 1;
// Clamp the radicand at 0 to guard against rounding outside [-1, 1].
T_ACC y = std::max<T_ACC>(0, 1 - x * x);
return calc_i0(beta * ::sqrt(y)) * inv_i0_beta;
});
});
}
// Elementwise entropy term: -x*log(x) for x > 0, 0 at x == 0,
// -inf for x < 0; NaN propagates.
void entr_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
iter.common_dtype(),
"entr_cuda",
[&]() {
gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t x) -> scalar_t {
if (at::_isnan(x)) {
return x;
} else if (x > 0) {
return -x * std::log(x);
} else if (x == 0) {
return 0;
}
return static_cast<scalar_t>(-INFINITY);
});
});
}
// Stub registrations: route the device-agnostic dispatch stubs to the
// CUDA implementations above.
REGISTER_DISPATCH(exp2_stub, &exp2_kernel_cuda);
REGISTER_DISPATCH(i0_stub, &i0_kernel_cuda);
REGISTER_DISPATCH(special_i0e_stub, &i0e_kernel_cuda);
REGISTER_DISPATCH(sigmoid_stub, &sigmoid_kernel_cuda);
REGISTER_DISPATCH(sinc_stub, &sinc_kernel_cuda);
REGISTER_DISPATCH(logit_stub, &logit_kernel_cuda);
REGISTER_DISPATCH(erf_stub, &erf_kernel_cuda);
REGISTER_DISPATCH(erfc_stub, &erfc_kernel_cuda);
REGISTER_DISPATCH(erfinv_stub, &erfinv_kernel_cuda);
REGISTER_DISPATCH(kaiser_window_stub, &kaiser_window_kernel_cuda);
REGISTER_DISPATCH(special_entr_stub, &entr_kernel_cuda);
} // namespace native
} // namespace at
|
af1e058c469fad536877c63b0befdfa2d4dba67b.hip | // !!! This is a file automatically generated by hipify!!!
#include "global.h"
extern void gpu_destroy();
#include "IL/ilut.h"
ILuint nCurrImg = 1;
#include <time.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
//
// Engine..
//
Scene scene;
// One-time process initialization: brings up the DevIL image library with
// its OpenGL bindings, reserves the single image slot used by the engine
// (nCurrImg), and seeds the C RNG from the clock. Call order of the ilut*
// functions follows the DevIL initialization sequence.
void global_init()
{
// DevIL init
//
ilInit();
ilutRenderer(ILUT_OPENGL);
ilutEnable(ILUT_OPENGL_CONV);
//ilOriginFunc(IL_ORIGIN_UPPER_LEFT);
//ilEnable(IL_ORIGIN_SET);
ilGenImages(1, &nCurrImg);
ilBindImage(nCurrImg);
//
srand(clock());
}
// Releases GPU-side resources (gpu_destroy, defined elsewhere) and the
// DevIL image created in global_init().
void global_destroy()
{
gpu_destroy();
// DevIL finalization
ilDeleteImages(1, &nCurrImg);
} | af1e058c469fad536877c63b0befdfa2d4dba67b.cu | #include "global.h"
extern void gpu_destroy();
#include "IL/ilut.h"
ILuint nCurrImg = 1;
#include <time.h>
#include <stdlib.h>
#include <cuda_runtime.h>
//
// Engine..
//
Scene scene;
// One-time process initialization: brings up the DevIL image library with
// its OpenGL bindings, reserves the single image slot used by the engine
// (nCurrImg), and seeds the C RNG from the clock. Call order of the ilut*
// functions follows the DevIL initialization sequence.
void global_init()
{
// DevIL init
//
ilInit();
ilutRenderer(ILUT_OPENGL);
ilutEnable(ILUT_OPENGL_CONV);
//ilOriginFunc(IL_ORIGIN_UPPER_LEFT);
//ilEnable(IL_ORIGIN_SET);
ilGenImages(1, &nCurrImg);
ilBindImage(nCurrImg);
//
srand(clock());
}
// Releases GPU-side resources (gpu_destroy, defined elsewhere) and the
// DevIL image created in global_init().
void global_destroy()
{
gpu_destroy();
// DevIL finalization
ilDeleteImages(1, &nCurrImg);
}
7d99d749d6b787ac4359e0fb27bf525fc0a93ec7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.cuh"
#include "ln_kernel_traits.h"
#include "ATen/hip/HIPContext.h"
// Layer-norm backward, main pass.
// Grid layout: 1-D grid; each CTA owns ROWS_PER_CTA rows per iteration of
// a grid-stride loop over `rows`. Per row it reconstructs the normalized
// activations y from the saved mean (mu_) and reciprocal stddev (rs_),
// reduces mean(dy) and mean(dy*y) across the row (pure warp shuffles when
// WARPS_N == 1, via shared memory otherwise), and writes dx.
// dgamma/dbeta column partials are accumulated per CTA and written to the
// dg_/db_ workspace (one row per CTA) for ln_bwd_finalize_kernel.
// Requires Ktraits::SMEM_BYTES of dynamic shared memory (the host
// launcher opts in above 48 KB).
template<typename Ktraits>
__global__ __launch_bounds__(Ktraits::THREADS_PER_CTA) void ln_bwd_kernel(void * __restrict__ dx_,
void * __restrict__ dg_,
void * __restrict__ db_,
const void * __restrict__ dw_,
const void * __restrict__ x_,
const void * __restrict__ mu_,
const void * __restrict__ rs_,
const void * __restrict__ g_,
const int rows
){
using Vec = typename Ktraits::Vec;
enum { BYTES_PER_LDG = Ktraits::BYTES_PER_LDG };
enum { ROWS_PER_CTA = Ktraits::ROWS_PER_CTA };
enum { WARPS_M = Ktraits::WARPS_M };
enum { WARPS_N = Ktraits::WARPS_N };
enum { THREADS_PER_ROW = Ktraits::THREADS_PER_ROW };
enum { COLS = Ktraits::COLS };
enum { BYTES_PER_ROW = Ktraits::BYTES_PER_ROW };
enum { LDGS = BYTES_PER_ROW / Ktraits::BYTES_PER_ROW_PER_CTA };
static_assert(LDGS * Ktraits::BYTES_PER_ROW_PER_CTA == BYTES_PER_ROW, "");
enum { NUM_ELTS = Vec::NUM_ELTS };
using vec_t = typename Ktraits::vec_t;
using base_t = typename Ktraits::base_t;
using compute_t = typename Ktraits::compute_t;
// Thread coordinates: warp_m indexes the row owned within the CTA,
// (warp_n, lane) index the position along the row.
const int tidx = threadIdx.x;
const int bidx = blockIdx.x;
const int lane = tidx % THREADS_PER_WARP;
const int warp = tidx / THREADS_PER_WARP;
const int warp_m = warp / Ktraits::WARPS_N;
const int warp_n = warp % Ktraits::WARPS_N;
const int tid_r = warp_n * THREADS_PER_WARP + lane;
const int r = bidx * Ktraits::ROWS_PER_CTA + warp_m;
const int c = warp_n * THREADS_PER_WARP + lane;
const char *dw_ptr = static_cast<const char *>(dw_);
const char *x_ptr = static_cast<const char *>(x_);
const char *g_ptr = static_cast<const char *>(g_);
char *dx_ptr = static_cast<char *>(dx_);
const compute_t *mu_ptr = static_cast<const compute_t *>(mu_);
const compute_t *rs_ptr = static_cast<const compute_t *>(rs_);
static_assert(COLS == THREADS_PER_ROW * LDGS * NUM_ELTS, "");
// smem for final reduction
//__shared__ compute_t smem_[ROWS_PER_CTA * COLS];
extern __shared__ compute_t smem_[];
// static_assert(sizeof(smem_dw_sum) == 32*1024,"");
// Using the grid stride loop we can assign multiple rows to each thread
// by using a number of CTAs smaller than rows / ROWS_PER_CTA
// We accumulate them here, one in smem, one in registers, because the smem
// capacity is limited compute_t * dw_sum = &smem_dw_sum[warp_m * COLS + tid_r
// * LDGS * NUM_ELTS];
compute_t dwy_sum[LDGS * NUM_ELTS];
compute_t dw_sum[LDGS * NUM_ELTS];
memset(dwy_sum, 0, sizeof(compute_t) * LDGS * NUM_ELTS);
memset(dw_sum, 0, sizeof(compute_t) * LDGS * NUM_ELTS);
// Debug 8 rows, 4B, 1024 cols
__shared__ compute_t smem_mdy[ROWS_PER_CTA * WARPS_N];
__shared__ compute_t smem_mdyy[ROWS_PER_CTA * WARPS_N];
compute_t *mdy_shared = &smem_mdy[warp_m * WARPS_N];
compute_t *mdyy_shared = &smem_mdyy[warp_m * WARPS_N];
constexpr float rn = 1.f / float(COLS);
// gamma is row-invariant; load it once before the grid-stride loop.
Vec gamma[LDGS];
int col = c;
#pragma unroll
for (int it = 0; it < LDGS; it++) {
gamma[it].load_from(g_ptr + col * BYTES_PER_LDG);
col += Ktraits::THREADS_PER_ROW;
}
// TODO if ROWS_PER_CTA does not divice rows, we might get divergence in the
// last blocks with syncthreads!
// grid stride over rows
#pragma unroll 1
for (int row = r; row < rows; row += gridDim.x * ROWS_PER_CTA) {
const compute_t mu_r = mu_ptr[row];
const compute_t rs_r = rs_ptr[row];
Vec dw[LDGS], x[LDGS], dx[LDGS];
int col = c;
#pragma unroll
for (int it = 0; it < LDGS; it++) {
dw[it].load_from(dw_ptr + row * BYTES_PER_ROW + col * BYTES_PER_LDG);
x[it].load_from(x_ptr + row * BYTES_PER_ROW + col * BYTES_PER_LDG);
col += THREADS_PER_ROW;
}
// local reductions
compute_t dy[LDGS * NUM_ELTS];
compute_t y[LDGS * NUM_ELTS];
compute_t mdy_local = 0.f;
compute_t mdyy_local = 0.f;
#pragma unroll
for (int it = 0; it < LDGS; it++) {
#pragma unroll
for (int jt = 0; jt < Vec::NUM_ELTS; jt++) {
// y = (x - mu) * rsigma; dy = gamma * dw (dw is the upstream grad).
compute_t x_tmp = x[it].data.elt[jt];
compute_t y_tmp = rs_r * (x_tmp - mu_r);
compute_t dy_tmp = gamma[it].data.elt[jt] * dw[it].data.elt[jt];
compute_t dw_tmp = dw[it].data.elt[jt];
mdy_local += dy_tmp;
mdyy_local += dy_tmp * y_tmp;
dy[it * NUM_ELTS + jt] = dy_tmp;
y[it * NUM_ELTS + jt] = y_tmp;
dwy_sum[it * NUM_ELTS + jt] += dw_tmp * y_tmp;
dw_sum[it * NUM_ELTS + jt] += dw_tmp;
}
}
// reduction across row for mdy, mdyy
if (WARPS_N == 1) { // no need to go through smem!
#pragma unroll
for (int it = 1; it < THREADS_PER_WARP; it *= 2) {
mdy_local += __shfl_xor_sync(uint32_t(-1), mdy_local, it)
mdyy_local += __shfl_xor_sync(uint32_t(-1), mdyy_local, it);
}
mdy_local *= rn;
mdyy_local *= rn;
} else {
#pragma unroll
for (int it = 16; it > 0; it /= 2) {
mdy_local += __shfl_down_sync(uint32_t(-1), mdy_local, it);
mdyy_local += __shfl_down_sync(uint32_t(-1), mdyy_local, it);
} // lane 0 holds the result!
if (lane == 0) {
mdy_shared[warp_n] = mdy_local;
mdyy_shared[warp_n] = mdyy_local;
}
__syncthreads();
// One thread combines the per-warp partials; result broadcast via smem.
if (warp_n == 0 && lane == 0) {
mdy_local = 0.f;
mdyy_local = 0.f;
for (int it = 0; it < WARPS_N; it++) {
mdy_local += mdy_shared[it];
mdyy_local += mdyy_shared[it];
}
mdy_shared[0] = mdy_local;
mdyy_shared[0] = mdyy_local;
}
__syncthreads();
mdy_local = mdy_shared[0] * rn;
mdyy_local = mdyy_shared[0] * rn;
}
// dx = rsigma * (dy - mean(dy*y) * y - mean(dy)).
#pragma unroll
for (int it = 0; it < LDGS; it++) {
#pragma unroll
for (int jt = 0; jt < NUM_ELTS; jt++) {
compute_t dy_tmp = dy[it * NUM_ELTS + jt];
compute_t y_tmp = y[it * NUM_ELTS + jt];
compute_t dx_tmp =
compute_t(rs_r) * (dy_tmp - mdyy_local * y_tmp - mdy_local);
dx[it].data.elt[jt] = dx_tmp;
}
}
col = c;
#pragma unroll
for (int it = 0; it < LDGS; it++) {
dx[it].store_to(dx_ptr + row * BYTES_PER_ROW + col * BYTES_PER_LDG);
col += Ktraits::THREADS_PER_ROW;
}
} // end: grid stride loop
// Finalize reduction of part dgamma and dbeta for this CTA
// by reducing over the rows held across the WARPS_M warps
enum { NUM_RES = COLS / Ktraits::THREADS_PER_CTA };
static_assert(NUM_RES * Ktraits::THREADS_PER_CTA == COLS, "");
compute_t *smem_write;
smem_write = &smem_[warp_m * COLS + tid_r * NUM_ELTS];
#pragma unroll
for (int it = 0; it < LDGS; it++) {
#pragma unroll
for (int jt = 0; jt < NUM_ELTS; jt++) {
smem_write[jt] = dw_sum[it * NUM_ELTS + jt];
}
smem_write += THREADS_PER_ROW * NUM_ELTS;
}
__syncthreads();
compute_t cta_dw_sum[NUM_RES];
memset(cta_dw_sum, 0, sizeof(compute_t) * NUM_RES);
for (int it = 0; it < ROWS_PER_CTA; it++) {
for (int jt = 0; jt < NUM_RES; jt++) {
cta_dw_sum[jt] += smem_[it * COLS + tidx + jt * Ktraits::THREADS_PER_CTA];
}
}
__syncthreads();
smem_write = &smem_[warp_m * COLS + tid_r * NUM_ELTS];
#pragma unroll
for (int it = 0; it < LDGS; it++) {
#pragma unroll
for (int jt = 0; jt < NUM_ELTS; jt++) {
smem_write[jt] = dwy_sum[it * NUM_ELTS + jt];
}
smem_write += THREADS_PER_ROW * NUM_ELTS;
}
__syncthreads();
compute_t cta_dwy_sum[NUM_RES];
memset(cta_dwy_sum, 0, sizeof(compute_t) * NUM_RES);
for (int it = 0; it < ROWS_PER_CTA; it++) {
for (int jt = 0; jt < NUM_RES; jt++) {
cta_dwy_sum[jt] +=
smem_[it * COLS + tidx + jt * Ktraits::THREADS_PER_CTA];
}
}
// Write this CTA's column partials: row `bidx` of the workspace tensors.
compute_t *dgamma_part = static_cast<compute_t *>(dg_) + bidx * COLS + tidx;
for (int jt = 0; jt < NUM_RES; jt++) {
*dgamma_part = cta_dwy_sum[jt];
dgamma_part += Ktraits::THREADS_PER_CTA;
}
compute_t *dbeta_part = static_cast<compute_t *>(db_) + bidx * COLS + tidx;
for (int jt = 0; jt < NUM_RES; jt++) {
*dbeta_part = cta_dw_sum[jt];
dbeta_part += Ktraits::THREADS_PER_CTA;
}
}
// Layer-norm backward, finalize pass.
// Reduces the per-CTA dgamma/dbeta partials produced by ln_bwd_kernel
// (`rows` here is the number of CTAs of the main pass) down the row
// dimension and writes the final dgamma/dbeta in out_t precision.
// Each thread accumulates a strided column range; the per-warp_m partial
// sums are combined through shared memory, with warp_m == 0 writing the
// final vectorized result.
template<typename Ktraits, typename out_t>
__global__ __launch_bounds__(Ktraits::THREADS_PER_CTA) void ln_bwd_finalize_kernel(void * __restrict__ dg_,
void * __restrict__ db_,
const void * __restrict__ dg_part_,
const void * __restrict__ db_part_,
const int rows
){
using Vec = typename Ktraits::Vec;
enum { NUM_ELTS = Vec::NUM_ELTS };
using vec_t = typename Ktraits::vec_t;
using base_t = typename Ktraits::base_t;
using compute_t = typename Ktraits::compute_t;
enum { BYTES_PER_LDG = Ktraits::BYTES_PER_LDG };
enum { ROWS_PER_CTA = Ktraits::ROWS_PER_CTA };
enum { WARPS_M = Ktraits::WARPS_M };
enum { WARPS_N = Ktraits::WARPS_N };
enum { THREADS_PER_ROW = Ktraits::THREADS_PER_ROW };
enum { COLS = Ktraits::COLS };
enum { BYTES_PER_ROW = Ktraits::BYTES_PER_ROW };
enum {VEC_COLS = BYTES_PER_ROW / BYTES_PER_LDG};
//dbg
static_assert(VEC_COLS == COLS / NUM_ELTS, "");
//static_assert(VEC_COLS == 1024,"");
const int tidx = threadIdx.x;
const int bidx = blockIdx.x;
const int lane = tidx % THREADS_PER_WARP;
const int warp = tidx / THREADS_PER_WARP;
const int warp_m = warp / Ktraits::WARPS_N;
const int warp_n = warp % Ktraits::WARPS_N;
const int tid_c = warp_n * THREADS_PER_WARP + lane;
const int c =bidx * THREADS_PER_ROW + tid_c;
const int r = warp_m;
__shared__ compute_t smem_[(WARPS_M - 1) * THREADS_PER_ROW * NUM_ELTS];
//Will probably run this with WARPS_N = 1 and grid = 1024 / (32*4) = 8, or NUM_ELTS=1 and grid = 32
// and WARPS_M = 4 (or 1??)
for(int col = c; col < VEC_COLS; col += gridDim.x * THREADS_PER_ROW){
const char* dg_part_ptr = static_cast<const char*>(dg_part_) + r * BYTES_PER_ROW + col * BYTES_PER_LDG;
const char* db_part_ptr = static_cast<const char*>(db_part_) + r * BYTES_PER_ROW + col * BYTES_PER_LDG;
compute_t dg_sum[NUM_ELTS];
compute_t db_sum[NUM_ELTS];
memset(dg_sum, 0, sizeof(compute_t) * NUM_ELTS);
memset(db_sum, 0, sizeof(compute_t) * NUM_ELTS);
// Each warp_m row walks the workspace rows with stride ROWS_PER_CTA.
#pragma unroll
for(int row = r; row < rows;row += ROWS_PER_CTA){
Vec dg;
Vec db;
dg.load_from(dg_part_ptr);
db.load_from(db_part_ptr);
dg_part_ptr += ROWS_PER_CTA * BYTES_PER_ROW;
db_part_ptr += ROWS_PER_CTA * BYTES_PER_ROW;
#pragma unroll
for (int jt = 0; jt < NUM_ELTS; jt++) {
dg_sum[jt] += dg.data.elt[jt];
db_sum[jt] += db.data.elt[jt];
}
}
// Finalize the reduction across rows of the CTA
// warp_m > 0 publish dg partials; warp_m == 0 accumulates them.
compute_t * smem_write;
smem_write = smem_ + (warp_m -1) *THREADS_PER_ROW * NUM_ELTS + tid_c;
if (warp_m > 0) {
#pragma unroll
for (int jt = 0; jt < NUM_ELTS; jt++) {
*smem_write = dg_sum[jt];
smem_write+=THREADS_PER_ROW;
}
}
__syncthreads();
compute_t *smem_read ;
smem_read = smem_ + tid_c ;
if (warp_m == 0) {
#pragma unroll
for (int it = 0; it < WARPS_M - 1; it++) {
#pragma unroll
for (int jt = 0; jt < NUM_ELTS; jt++) {
dg_sum[jt] += *smem_read;
smem_read += THREADS_PER_ROW;
}
}
}
__syncthreads();
// Same two-phase smem exchange for the db partials.
smem_write = smem_ + (warp_m -1) *THREADS_PER_ROW * NUM_ELTS + tid_c;
if (warp_m > 0) {
#pragma unroll
for (int jt = 0; jt < NUM_ELTS; jt++) {
*smem_write = db_sum[jt];
smem_write+=THREADS_PER_ROW;
}
}
__syncthreads();
smem_read = smem_ + tid_c;
if (warp_m == 0) {
#pragma unroll
for (int it = 0; it < WARPS_M - 1; it++) {
#pragma unroll
for (int jt = 0; jt < NUM_ELTS; jt++) {
db_sum[jt] += *smem_read;
smem_read += THREADS_PER_ROW;
}
}
// Pack the accumulated FP32 sums into the output type and store as one
// vectorized write per column group.
using vout_t = typename Vec_type<sizeof(out_t) * NUM_ELTS>::Type;
union {
vout_t raw;
out_t elt[NUM_ELTS];
} dg_out, db_out;
// out_t dg_out[NUM_ELTS], db_out[NUM_ELTS];
#pragma unroll
for (int jt = 0; jt < NUM_ELTS; jt++) {
dg_out.elt[jt] = dg_sum[jt];
db_out.elt[jt] = db_sum[jt];
}
vout_t *dg_ptr = reinterpret_cast<vout_t *>(dg_) + col ;
vout_t *db_ptr = reinterpret_cast<vout_t *>(db_) + col ;
*dg_ptr = dg_out.raw;
*db_ptr = db_out.raw;
}
}
}
// Host-side launcher for the layer-norm backward kernels.
// Selects kernel traits for the given scalar type and hidden size (only
// cols == 1024 is implemented), launches the main backward kernel with
// `gridx` CTAs, then the finalize kernels that collapse the per-CTA
// dgamma/dbeta partials (always accumulated in float).
template<typename scalar_t>
void launch(at::Tensor &dx, at::Tensor &dgamma, at::Tensor &dbeta,
at::Tensor &dgamma_part, at::Tensor &dbeta_part,
const at::Tensor &dw, const at::Tensor &x,
const at::Tensor &mu, const at::Tensor &rsigma,
const at::Tensor &gamma, const int rows, const int cols, const int gridx, hipStream_t stream){
if (cols == 1024) {
using Ktraits = Kernel_traits<scalar_t, 1024, 4, 1>;
// Dynamic shared memory above 48 KB requires an explicit opt-in.
if (Ktraits::SMEM_BYTES >= 48 * 1024) {
AT_CUDA_CHECK(hipFuncSetAttribute(
ln_bwd_kernel<Ktraits>, hipFuncAttributeMaxDynamicSharedMemorySize,
Ktraits::SMEM_BYTES));
}
hipLaunchKernelGGL(( ln_bwd_kernel<Ktraits>)
, dim3(gridx), dim3(Ktraits::THREADS_PER_CTA), Ktraits::SMEM_BYTES, stream,
dx.data_ptr(), dgamma_part.data_ptr(), dbeta_part.data_ptr(),
dw.data_ptr(), x.data_ptr(), mu.data_ptr(), rsigma.data_ptr(),
gamma.data_ptr(), rows);
// Finalize pass: reduce the `gridx` rows of partials down to dgamma/dbeta.
using Ktraits2 = Kernel_traits<float, 1024, 16, 1, 4>;
constexpr int grid2 =
DIVUP(1024, Ktraits2::THREADS_PER_ROW * Ktraits2::Vec::NUM_ELTS);
hipLaunchKernelGGL(( ln_bwd_finalize_kernel<Ktraits2, scalar_t>)
, dim3(grid2), dim3(Ktraits2::THREADS_PER_CTA), 0, stream,
dgamma.data_ptr(), dbeta.data_ptr(), dgamma_part.data_ptr(),
dbeta_part.data_ptr(), gridx);
} else {
assert(false && "Not implemented");
}
// Surface launch-configuration errors without clearing the sticky error.
AT_CUDA_CHECK(hipPeekAtLastError());
}
// Driver for the layer-norm backward pass.
// Sizes the grid at two CTAs per multiprocessor, allocates FP32 workspace
// for the two-step dgamma/dbeta reduction, and dispatches on the dtype of
// `x`. Only fp16 and fp32 inputs are supported.
void ln_bwd_cuda(at::Tensor &dx, at::Tensor &dgamma, at::Tensor &dbeta,
                 const at::Tensor &dw, const at::Tensor &x,
                 const at::Tensor &mu, const at::Tensor &rsigma,
                 const at::Tensor &gamma, const int rows, const int cols, hipStream_t stream) {
  // Two CTAs per SM keep the main kernel's grid-stride loop saturated.
  const int num_ctas = 2 * at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
  // One workspace row per CTA; partials are always accumulated in FP32.
  const auto fp32_opts = x.options().dtype(torch::kFloat32);
  auto dbeta_part = torch::empty({num_ctas, cols}, fp32_opts);
  auto dgamma_part = torch::empty({num_ctas, cols}, fp32_opts);
  const auto scalar_type = x.scalar_type();
  if (scalar_type == torch::kFloat16) {
    launch<half>(dx, dgamma, dbeta, dgamma_part, dbeta_part, dw, x, mu, rsigma,
                 gamma, rows, cols, num_ctas, stream);
  } else if (scalar_type == torch::kFloat32) {
    launch<float>(dx, dgamma, dbeta, dgamma_part, dbeta_part, dw, x, mu, rsigma,
                  gamma, rows, cols, num_ctas, stream);
  } else {
    assert(false && "Not implemented");
  }
}
#include "ln_kernel_traits.h"
#include "ATen/cuda/CUDAContext.h"
// Layer-norm backward, stage 1.
// Computes dx in place over a grid-stride loop of rows, and accumulates
// per-CTA partial column reductions into dg_ (sum of dw*y, for dgamma) and
// db_ (sum of dw, for dbeta); stage 2 (ln_bwd_finalize_kernel) reduces those
// partials across CTAs.
// Thread layout (from Ktraits): each CTA covers ROWS_PER_CTA rows, one warp
// row-group per warp_m, WARPS_N warps spanning the columns of a row.
// NOTE(review): dynamic shared memory sized for ROWS_PER_CTA * COLS
// compute_t values appears to be required at launch (see the smem_ indexing
// below and Ktraits::SMEM_BYTES in launch()) -- confirm against Ktraits.
template<typename Ktraits>
__global__ __launch_bounds__(Ktraits::THREADS_PER_CTA) void ln_bwd_kernel(void * __restrict__ dx_,
void * __restrict__ dg_,
void * __restrict__ db_,
const void * __restrict__ dw_,
const void * __restrict__ x_,
const void * __restrict__ mu_,
const void * __restrict__ rs_,
const void * __restrict__ g_,
const int rows
){
using Vec = typename Ktraits::Vec;
enum { BYTES_PER_LDG = Ktraits::BYTES_PER_LDG };
enum { ROWS_PER_CTA = Ktraits::ROWS_PER_CTA };
enum { WARPS_M = Ktraits::WARPS_M };
enum { WARPS_N = Ktraits::WARPS_N };
enum { THREADS_PER_ROW = Ktraits::THREADS_PER_ROW };
enum { COLS = Ktraits::COLS };
enum { BYTES_PER_ROW = Ktraits::BYTES_PER_ROW };
// Number of vector loads each thread issues to cover one full row.
enum { LDGS = BYTES_PER_ROW / Ktraits::BYTES_PER_ROW_PER_CTA };
static_assert(LDGS * Ktraits::BYTES_PER_ROW_PER_CTA == BYTES_PER_ROW, "");
enum { NUM_ELTS = Vec::NUM_ELTS };
using vec_t = typename Ktraits::vec_t;
using base_t = typename Ktraits::base_t;
// All accumulation happens in compute_t (FP32 per the caller's contract).
using compute_t = typename Ktraits::compute_t;
const int tidx = threadIdx.x;
const int bidx = blockIdx.x;
const int lane = tidx % THREADS_PER_WARP;
const int warp = tidx / THREADS_PER_WARP;
// warp_m selects the row within the CTA; warp_n the column segment.
const int warp_m = warp / Ktraits::WARPS_N;
const int warp_n = warp % Ktraits::WARPS_N;
const int tid_r = warp_n * THREADS_PER_WARP + lane;
const int r = bidx * Ktraits::ROWS_PER_CTA + warp_m;
const int c = warp_n * THREADS_PER_WARP + lane;
const char *dw_ptr = static_cast<const char *>(dw_);
const char *x_ptr = static_cast<const char *>(x_);
const char *g_ptr = static_cast<const char *>(g_);
char *dx_ptr = static_cast<char *>(dx_);
const compute_t *mu_ptr = static_cast<const compute_t *>(mu_);
const compute_t *rs_ptr = static_cast<const compute_t *>(rs_);
static_assert(COLS == THREADS_PER_ROW * LDGS * NUM_ELTS, "");
// smem for final reduction
//__shared__ compute_t smem_[ROWS_PER_CTA * COLS];
extern __shared__ compute_t smem_[];
// static_assert(sizeof(smem_dw_sum) == 32*1024,"");
// Using the grid stride loop we can assign multiple rows to each thread
// by using a number of CTAs smaller than rows / ROWS_PER_CTA
// We accumulate them here, one in smem, one in registers, because the smem
// capacity is limited compute_t * dw_sum = &smem_dw_sum[warp_m * COLS + tid_r
// * LDGS * NUM_ELTS];
// Per-thread running sums across all rows this thread visits:
// dwy_sum feeds dgamma (dw * normalized-x), dw_sum feeds dbeta.
compute_t dwy_sum[LDGS * NUM_ELTS];
compute_t dw_sum[LDGS * NUM_ELTS];
memset(dwy_sum, 0, sizeof(compute_t) * LDGS * NUM_ELTS);
memset(dw_sum, 0, sizeof(compute_t) * LDGS * NUM_ELTS);
// Debug 8 rows, 4B, 1024 cols
// Scratch for the per-row means of dy and dy*y when WARPS_N > 1.
__shared__ compute_t smem_mdy[ROWS_PER_CTA * WARPS_N];
__shared__ compute_t smem_mdyy[ROWS_PER_CTA * WARPS_N];
compute_t *mdy_shared = &smem_mdy[warp_m * WARPS_N];
compute_t *mdyy_shared = &smem_mdyy[warp_m * WARPS_N];
constexpr float rn = 1.f / float(COLS);
// gamma never changes across rows; load it once before the row loop.
Vec gamma[LDGS];
int col = c;
#pragma unroll
for (int it = 0; it < LDGS; it++) {
gamma[it].load_from(g_ptr + col * BYTES_PER_LDG);
col += Ktraits::THREADS_PER_ROW;
}
// TODO if ROWS_PER_CTA does not divice rows, we might get divergence in the
// last blocks with syncthreads!
// grid stride over rows
#pragma unroll 1
for (int row = r; row < rows; row += gridDim.x * ROWS_PER_CTA) {
const compute_t mu_r = mu_ptr[row];
const compute_t rs_r = rs_ptr[row];
Vec dw[LDGS], x[LDGS], dx[LDGS];
int col = c;
#pragma unroll
for (int it = 0; it < LDGS; it++) {
dw[it].load_from(dw_ptr + row * BYTES_PER_ROW + col * BYTES_PER_LDG);
x[it].load_from(x_ptr + row * BYTES_PER_ROW + col * BYTES_PER_LDG);
col += THREADS_PER_ROW;
}
// local reductions
// y = normalized input; dy = upstream grad scaled by gamma.
compute_t dy[LDGS * NUM_ELTS];
compute_t y[LDGS * NUM_ELTS];
compute_t mdy_local = 0.f;
compute_t mdyy_local = 0.f;
#pragma unroll
for (int it = 0; it < LDGS; it++) {
#pragma unroll
for (int jt = 0; jt < Vec::NUM_ELTS; jt++) {
compute_t x_tmp = x[it].data.elt[jt];
compute_t y_tmp = rs_r * (x_tmp - mu_r);
compute_t dy_tmp = gamma[it].data.elt[jt] * dw[it].data.elt[jt];
compute_t dw_tmp = dw[it].data.elt[jt];
mdy_local += dy_tmp;
mdyy_local += dy_tmp * y_tmp;
dy[it * NUM_ELTS + jt] = dy_tmp;
y[it * NUM_ELTS + jt] = y_tmp;
dwy_sum[it * NUM_ELTS + jt] += dw_tmp * y_tmp;
dw_sum[it * NUM_ELTS + jt] += dw_tmp;
}
}
// reduction across row for mdy, mdyy
if (WARPS_N == 1) { // no need to go through smem!
// butterfly shuffle: every lane ends with the full-warp sum
#pragma unroll
for (int it = 1; it < THREADS_PER_WARP; it *= 2) {
mdy_local += __shfl_xor_sync(uint32_t(-1), mdy_local, it);
mdyy_local += __shfl_xor_sync(uint32_t(-1), mdyy_local, it);
}
mdy_local *= rn;
mdyy_local *= rn;
} else {
#pragma unroll
for (int it = 16; it > 0; it /= 2) {
mdy_local += __shfl_down_sync(uint32_t(-1), mdy_local, it);
mdyy_local += __shfl_down_sync(uint32_t(-1), mdyy_local, it);
} // lane 0 holds the result!
if (lane == 0) {
mdy_shared[warp_n] = mdy_local;
mdyy_shared[warp_n] = mdyy_local;
}
__syncthreads();
// single thread combines the per-warp partials, broadcasts via slot 0
if (warp_n == 0 && lane == 0) {
mdy_local = 0.f;
mdyy_local = 0.f;
for (int it = 0; it < WARPS_N; it++) {
mdy_local += mdy_shared[it];
mdyy_local += mdyy_shared[it];
}
mdy_shared[0] = mdy_local;
mdyy_shared[0] = mdyy_local;
}
__syncthreads();
mdy_local = mdy_shared[0] * rn;
mdyy_local = mdyy_shared[0] * rn;
}
// dx = rsigma * (dy - mean(dy*y) * y - mean(dy))
#pragma unroll
for (int it = 0; it < LDGS; it++) {
#pragma unroll
for (int jt = 0; jt < NUM_ELTS; jt++) {
compute_t dy_tmp = dy[it * NUM_ELTS + jt];
compute_t y_tmp = y[it * NUM_ELTS + jt];
compute_t dx_tmp =
compute_t(rs_r) * (dy_tmp - mdyy_local * y_tmp - mdy_local);
dx[it].data.elt[jt] = dx_tmp;
}
}
col = c;
#pragma unroll
for (int it = 0; it < LDGS; it++) {
dx[it].store_to(dx_ptr + row * BYTES_PER_ROW + col * BYTES_PER_LDG);
col += Ktraits::THREADS_PER_ROW;
}
} // end: grid stride loop
// Finalize reduction of part dgamma and dbeta for this CTA
// by reducing over the rows held across the WARPS_M warps
enum { NUM_RES = COLS / Ktraits::THREADS_PER_CTA };
static_assert(NUM_RES * Ktraits::THREADS_PER_CTA == COLS, "");
// Stage the dbeta partials (dw_sum) through dynamic smem and reduce the
// WARPS_M row-groups down to one value per column position.
compute_t *smem_write;
smem_write = &smem_[warp_m * COLS + tid_r * NUM_ELTS];
#pragma unroll
for (int it = 0; it < LDGS; it++) {
#pragma unroll
for (int jt = 0; jt < NUM_ELTS; jt++) {
smem_write[jt] = dw_sum[it * NUM_ELTS + jt];
}
smem_write += THREADS_PER_ROW * NUM_ELTS;
}
__syncthreads();
compute_t cta_dw_sum[NUM_RES];
memset(cta_dw_sum, 0, sizeof(compute_t) * NUM_RES);
for (int it = 0; it < ROWS_PER_CTA; it++) {
for (int jt = 0; jt < NUM_RES; jt++) {
cta_dw_sum[jt] += smem_[it * COLS + tidx + jt * Ktraits::THREADS_PER_CTA];
}
}
// barrier before reusing the same smem buffer for the dgamma partials
__syncthreads();
smem_write = &smem_[warp_m * COLS + tid_r * NUM_ELTS];
#pragma unroll
for (int it = 0; it < LDGS; it++) {
#pragma unroll
for (int jt = 0; jt < NUM_ELTS; jt++) {
smem_write[jt] = dwy_sum[it * NUM_ELTS + jt];
}
smem_write += THREADS_PER_ROW * NUM_ELTS;
}
__syncthreads();
compute_t cta_dwy_sum[NUM_RES];
memset(cta_dwy_sum, 0, sizeof(compute_t) * NUM_RES);
for (int it = 0; it < ROWS_PER_CTA; it++) {
for (int jt = 0; jt < NUM_RES; jt++) {
cta_dwy_sum[jt] +=
smem_[it * COLS + tidx + jt * Ktraits::THREADS_PER_CTA];
}
}
// Each CTA writes its COLS partials at row bidx of the [grid, COLS] buffers.
compute_t *dgamma_part = static_cast<compute_t *>(dg_) + bidx * COLS + tidx;
for (int jt = 0; jt < NUM_RES; jt++) {
*dgamma_part = cta_dwy_sum[jt];
dgamma_part += Ktraits::THREADS_PER_CTA;
}
compute_t *dbeta_part = static_cast<compute_t *>(db_) + bidx * COLS + tidx;
for (int jt = 0; jt < NUM_RES; jt++) {
*dbeta_part = cta_dw_sum[jt];
dbeta_part += Ktraits::THREADS_PER_CTA;
}
}
// Layer-norm backward, stage 2.
// Reduces the per-CTA partials written by ln_bwd_kernel (dg_part_/db_part_,
// "rows" entries of COLS FP32 values each, where rows == the stage-1 grid
// size) down the row dimension, and stores the final dgamma/dbeta vectors
// converted to out_t.
// Each CTA owns a slice of columns; WARPS_M warps split the rows and are
// combined through the static smem_ buffer, with warp_m == 0 producing the
// final value.
template<typename Ktraits, typename out_t>
__global__ __launch_bounds__(Ktraits::THREADS_PER_CTA) void ln_bwd_finalize_kernel(void * __restrict__ dg_,
void * __restrict__ db_,
const void * __restrict__ dg_part_,
const void * __restrict__ db_part_,
const int rows
){
using Vec = typename Ktraits::Vec;
enum { NUM_ELTS = Vec::NUM_ELTS };
using vec_t = typename Ktraits::vec_t;
using base_t = typename Ktraits::base_t;
using compute_t = typename Ktraits::compute_t;
enum { BYTES_PER_LDG = Ktraits::BYTES_PER_LDG };
enum { ROWS_PER_CTA = Ktraits::ROWS_PER_CTA };
enum { WARPS_M = Ktraits::WARPS_M };
enum { WARPS_N = Ktraits::WARPS_N };
enum { THREADS_PER_ROW = Ktraits::THREADS_PER_ROW };
enum { COLS = Ktraits::COLS };
enum { BYTES_PER_ROW = Ktraits::BYTES_PER_ROW };
// Number of vector-width column positions per row of the partial buffers.
enum {VEC_COLS = BYTES_PER_ROW / BYTES_PER_LDG};
//dbg
static_assert(VEC_COLS == COLS / NUM_ELTS, "");
//static_assert(VEC_COLS == 1024,"");
const int tidx = threadIdx.x;
const int bidx = blockIdx.x;
const int lane = tidx % THREADS_PER_WARP;
const int warp = tidx / THREADS_PER_WARP;
const int warp_m = warp / Ktraits::WARPS_N;
const int warp_n = warp % Ktraits::WARPS_N;
const int tid_c = warp_n * THREADS_PER_WARP + lane;
const int c =bidx * THREADS_PER_ROW + tid_c;
// each warp_m row-group starts at a different partial row and strides by
// ROWS_PER_CTA below
const int r = warp_m;
// One staging slot per non-zero warp_m group; warp_m == 0 accumulates.
__shared__ compute_t smem_[(WARPS_M - 1) * THREADS_PER_ROW * NUM_ELTS];
//Will probably run this with WARPS_N = 1 and grid = 1024 / (32*4) = 8, or NUM_ELTS=1 and grid = 32
// and WARPS_M = 4 (or 1??)
for(int col = c; col < VEC_COLS; col += gridDim.x * THREADS_PER_ROW){
const char* dg_part_ptr = static_cast<const char*>(dg_part_) + r * BYTES_PER_ROW + col * BYTES_PER_LDG;
const char* db_part_ptr = static_cast<const char*>(db_part_) + r * BYTES_PER_ROW + col * BYTES_PER_LDG;
compute_t dg_sum[NUM_ELTS];
compute_t db_sum[NUM_ELTS];
memset(dg_sum, 0, sizeof(compute_t) * NUM_ELTS);
memset(db_sum, 0, sizeof(compute_t) * NUM_ELTS);
// strided sweep down the partial rows assigned to this warp_m group
#pragma unroll
for(int row = r; row < rows;row += ROWS_PER_CTA){
Vec dg;
Vec db;
dg.load_from(dg_part_ptr);
db.load_from(db_part_ptr);
dg_part_ptr += ROWS_PER_CTA * BYTES_PER_ROW;
db_part_ptr += ROWS_PER_CTA * BYTES_PER_ROW;
#pragma unroll
for (int jt = 0; jt < NUM_ELTS; jt++) {
dg_sum[jt] += dg.data.elt[jt];
db_sum[jt] += db.data.elt[jt];
}
}
// Finalize the reduction across rows of the CTA
// Non-zero warp_m groups publish their dg partials; warp_m 0 folds them in.
compute_t * smem_write;
smem_write = smem_ + (warp_m -1) *THREADS_PER_ROW * NUM_ELTS + tid_c;
if (warp_m > 0) {
#pragma unroll
for (int jt = 0; jt < NUM_ELTS; jt++) {
*smem_write = dg_sum[jt];
smem_write+=THREADS_PER_ROW;
}
}
__syncthreads();
compute_t *smem_read ;
smem_read = smem_ + tid_c ;
if (warp_m == 0) {
#pragma unroll
for (int it = 0; it < WARPS_M - 1; it++) {
#pragma unroll
for (int jt = 0; jt < NUM_ELTS; jt++) {
dg_sum[jt] += *smem_read;
smem_read += THREADS_PER_ROW;
}
}
}
// barrier before reusing smem_ for the db partials
__syncthreads();
smem_write = smem_ + (warp_m -1) *THREADS_PER_ROW * NUM_ELTS + tid_c;
if (warp_m > 0) {
#pragma unroll
for (int jt = 0; jt < NUM_ELTS; jt++) {
*smem_write = db_sum[jt];
smem_write+=THREADS_PER_ROW;
}
}
__syncthreads();
smem_read = smem_ + tid_c;
if (warp_m == 0) {
#pragma unroll
for (int it = 0; it < WARPS_M - 1; it++) {
#pragma unroll
for (int jt = 0; jt < NUM_ELTS; jt++) {
db_sum[jt] += *smem_read;
smem_read += THREADS_PER_ROW;
}
}
// Only warp_m == 0 writes the final, out_t-typed vectors; the union
// repacks NUM_ELTS scalars into one vector store.
using vout_t = typename Vec_type<sizeof(out_t) * NUM_ELTS>::Type;
union {
vout_t raw;
out_t elt[NUM_ELTS];
} dg_out, db_out;
// out_t dg_out[NUM_ELTS], db_out[NUM_ELTS];
#pragma unroll
for (int jt = 0; jt < NUM_ELTS; jt++) {
dg_out.elt[jt] = dg_sum[jt];
db_out.elt[jt] = db_sum[jt];
}
vout_t *dg_ptr = reinterpret_cast<vout_t *>(dg_) + col ;
vout_t *db_ptr = reinterpret_cast<vout_t *>(db_) + col ;
*dg_ptr = dg_out.raw;
*db_ptr = db_out.raw;
}
}
}
// Host-side launcher for the two-stage layer-norm backward.
// Stage 1 (ln_bwd_kernel) runs on gridx CTAs and fills dgamma_part/dbeta_part
// ([gridx, cols] FP32 workspaces); stage 2 (ln_bwd_finalize_kernel) reduces
// them into dgamma/dbeta, cast back to scalar_t.
// Only cols == 1024 has compiled kernel traits; anything else asserts.
template<typename scalar_t>
void launch(at::Tensor &dx, at::Tensor &dgamma, at::Tensor &dbeta,
at::Tensor &dgamma_part, at::Tensor &dbeta_part,
const at::Tensor &dw, const at::Tensor &x,
const at::Tensor &mu, const at::Tensor &rsigma,
const at::Tensor &gamma, const int rows, const int cols, const int gridx, cudaStream_t stream){
if (cols == 1024) {
using Ktraits = Kernel_traits<scalar_t, 1024, 4, 1>;
// Dynamic smem above 48 KB needs an explicit opt-in (Volta+).
if (Ktraits::SMEM_BYTES >= 48 * 1024) {
AT_CUDA_CHECK(cudaFuncSetAttribute(
ln_bwd_kernel<Ktraits>, cudaFuncAttributeMaxDynamicSharedMemorySize,
Ktraits::SMEM_BYTES));
}
ln_bwd_kernel<Ktraits>
<<<gridx, Ktraits::THREADS_PER_CTA, Ktraits::SMEM_BYTES, stream>>>(
dx.data_ptr(), dgamma_part.data_ptr(), dbeta_part.data_ptr(),
dw.data_ptr(), x.data_ptr(), mu.data_ptr(), rsigma.data_ptr(),
gamma.data_ptr(), rows);
// Stage 2 reads the FP32 partials, so its traits are pinned to float;
// grid2 gives each CTA one THREADS_PER_ROW*NUM_ELTS-wide column slice.
using Ktraits2 = Kernel_traits<float, 1024, 16, 1, 4>;
constexpr int grid2 =
DIVUP(1024, Ktraits2::THREADS_PER_ROW * Ktraits2::Vec::NUM_ELTS);
// "rows" for the finalize kernel is the stage-1 grid size (gridx).
ln_bwd_finalize_kernel<Ktraits2, scalar_t>
<<<grid2, Ktraits2::THREADS_PER_CTA, 0, stream>>>(
dgamma.data_ptr(), dbeta.data_ptr(), dgamma_part.data_ptr(),
dbeta_part.data_ptr(), gridx);
} else {
assert(false && "Not implemented");
}
// Surface launch-configuration errors without forcing a sync.
AT_CUDA_CHECK(cudaPeekAtLastError());
}
// Entry point for the fused layer-norm backward pass.
// Allocates the FP32 [grid, cols] workspaces used by the two-step
// dgamma/dbeta reduction (reduction is always done in FP32, whatever the
// input dtype) and dispatches on x's scalar type. Only FP16 and FP32
// inputs are supported.
void ln_bwd_cuda(at::Tensor &dx, at::Tensor &dgamma, at::Tensor &dbeta,
const at::Tensor &dw, const at::Tensor &x,
const at::Tensor &mu, const at::Tensor &rsigma,
const at::Tensor &gamma, const int rows, const int cols, cudaStream_t stream) {
const auto device_props = at::cuda::getCurrentDeviceProperties();
// Launch two CTAs per SM for the stage-1 grid.
const int grid = device_props->multiProcessorCount * 2;
// Workspace tensors for the two-step reduction, always FP32.
const auto f32_opts = x.options().dtype(torch::kFloat32);
auto dbeta_part = torch::empty({grid, cols}, f32_opts);
auto dgamma_part = torch::empty({grid, cols}, f32_opts);
switch (x.scalar_type()) {
case torch::kFloat16:
launch<half>(dx, dgamma, dbeta, dgamma_part, dbeta_part, dw, x, mu, rsigma, gamma, rows, cols, grid, stream);
break;
case torch::kFloat32:
launch<float>(dx, dgamma, dbeta, dgamma_part, dbeta_part, dw, x, mu, rsigma, gamma, rows, cols, grid, stream);
break;
default:
assert(false && "Not implemented");
}
}
e4d63270038edb6fd1b839ca27cf5d77b47afed2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
MIT License
Copyright 2020 Jee W. Choi, Marat Dukhan, and Xing Liu
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so, subject
to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/* Libraries */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include <cutil_inline.h>
#include "cache_kernel.h"
/* ======================================================================== */
/* GPU kernel error checking function */
/* Abort the program with a diagnostic if the most recent HIP/CUDA call left
an error behind. fp receives the message; filename/line identify the call
site (pass __FILE__/__LINE__). Returns normally only when no error is
pending. */
void gpu_check_error__srcpos (FILE* fp, const char* filename, size_t line)
{
const hipError_t status = hipGetLastError ();
if (status == hipSuccess)
return;
fprintf (fp, "*** [%s:%lu] CUDA ERROR %d: %s ***\n", filename, line, status,
hipGetErrorString (status));
fflush (fp);
exit (-1); /* abort program */
}
/* ======================================================================== */
/* ======================================================================== */
/* Read program inputs */
/* Parse the three positional CLI arguments -- <words/thread>, <# threads>,
<threads/block> -- from argv[1..3] into the three output parameters.
Assumes the caller has already verified argc >= 4. */
void readConfig(int* wordsPerThread, int* nThreads, int* bSize, char** argv)
{
int* const targets[] = { wordsPerThread, nThreads, bSize };
for (int k = 0; k < 3; k++) {
*targets[k] = atoi (argv[k + 1]);
}
}
/* ======================================================================== */
/* ======================================================================== */
/* Validate the results to make sure the kernel ran correctly */
/* ======================================================================== */
/* Validate the results to make sure the kernel ran correctly.
Replays the pointer chase on the CPU: logical thread i starts at
in[i % bSize] and follows the chain in[] for wordsPerThread hops; the
final index must equal the GPU result out[i].
Returns the number of mismatching entries (0 == success).
Fix vs. original: the intermediate `test` buffer was malloc'd and never
freed; the comparison is done in place instead, removing both the leak
and the O(nThreads) allocation. */
int validateResults(int nThreads, int bSize, int wordsPerThread, int* out, int* in)
{
int i, j;
int tmp, cnt;
cnt = 0;
for(i = 0; i < nThreads; i++) {
tmp = in[i % bSize];
for(j = 0; j < wordsPerThread; j++) {
tmp = in[tmp];
}
if (tmp != out[i]) cnt++;
}
return cnt;
}
/* ======================================================================== */
/* ======================================================================== */
/* Kernel used for clearing the cache before the main test */
/* Read-modify-write every element once so that prior cache contents are
evicted before the timed run. One thread per element; the caller sizes
the grid to cover the buffer exactly. */
__global__ void clear_cache (int *in)
{
const int gid = blockDim.x * blockIdx.x + threadIdx.x;
++in[gid];
}
/* ======================================================================== */
/* ======================================================================== */
/* Run the main test */
/* ======================================================================== */
/* Run the main test: time NUM_ITER launches of the pointer-chasing kernel
matching wordsPerThread, then copy the device results into h_out.
Returns the total elapsed GPU time in ms across all iterations, or -1.0f
when wordsPerThread has no compiled kernel variant.
Fixes vs. original: the -1.0f error sentinel was assigned inside the timed
loop and then overwritten by hipEventElapsedTime (so the error was lost),
and the two timing events were never destroyed. */
float runTest (int* h_out, int* d_out, int* d_pchase, int nThreads, int bSize,
int wordsPerThread)
{
/* Validate before touching the GPU so an unsupported word count cannot be
mistaken for a measured time. */
switch (wordsPerThread) {
case 1: case 2: case 4: case 8: case 16: case 32: case 64:
case 128: case 256: case 512: case 1024: case 2048: case 4096:
break;
default:
fprintf (stderr, "Invalid wordsPerThread: %d\n", wordsPerThread);
return -1.0f;
}
/* GPU timers */
hipEvent_t start, stop;
float total_time_taken;
/* one logical thread per chase; grid = ceil(nThreads / bSize) */
int num_blocks = (nThreads + bSize - 1) / bSize;
dim3 grid (num_blocks);
dim3 threads (bSize);
fprintf (stderr, "number of iterations is %d\n", NUM_ITER);
/* Start timer */
cutilSafeCall (hipEventCreate (&start));
cutilSafeCall (hipEventCreate (&stop));
cutilSafeCall (hipEventRecord (start, 0));
for(int iter = 0; iter < NUM_ITER; iter++) {
switch (wordsPerThread) {
case 1:
hipLaunchKernelGGL(( cache_kernel_1) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 2:
hipLaunchKernelGGL(( cache_kernel_2) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 4:
hipLaunchKernelGGL(( cache_kernel_4) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 8:
hipLaunchKernelGGL(( cache_kernel_8) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 16:
hipLaunchKernelGGL(( cache_kernel_16) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 32:
hipLaunchKernelGGL(( cache_kernel_32) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 64:
hipLaunchKernelGGL(( cache_kernel_64) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 128:
hipLaunchKernelGGL(( cache_kernel_128) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 256:
hipLaunchKernelGGL(( cache_kernel_256) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 512:
hipLaunchKernelGGL(( cache_kernel_512) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 1024:
hipLaunchKernelGGL(( cache_kernel_1024) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 2048:
hipLaunchKernelGGL(( cache_kernel_2048) , dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
case 4096:
hipLaunchKernelGGL(( cache_kernel_4096), dim3(grid), dim3(threads), 0, 0, nThreads, d_out, d_pchase);
break;
default:
/* unreachable: validated above */
break;
}
}
/* End timer */
cutilSafeCall (hipEventRecord (stop, 0));
cutilSafeCall (hipDeviceSynchronize ());
cutilSafeCall (hipEventElapsedTime (&total_time_taken, start, stop));
/* Check for kernel errors */
gpu_check_error__srcpos (stderr, __FILE__, __LINE__);
/* Copy results back */
cutilSafeCall (hipMemcpy (h_out, d_out, nThreads * sizeof (int),
hipMemcpyDeviceToHost));
/* Release the timing events (the original leaked them). */
cutilSafeCall (hipEventDestroy (start));
cutilSafeCall (hipEventDestroy (stop));
return total_time_taken;
}
/* ======================================================================== */
/* Driver: parse <word/thread> <# threads> <threads/block>, warm/flush the
GPU caches, build the pointer-chase table, time the kernel via runTest,
validate on the CPU, and report effective bandwidth.
Assumes bSize divides cleanly into the chase table (no bounds checks on
the CLI values -- trusted input). */
int main(int argc, char** argv)
{
int i;
long double total_dram;
/* Timer */
float total_time_taken;
/* Execution parameters */
int wordsPerThread;
int nThreads;
int bSize;
/* Memory data structures */
int* d_out;
int* h_out;
int* h_pchase;
int* d_pchase;
if(argc < 4) {
fprintf(stderr, "usage: %s <word/thread> <# threads> <threads/block>\n",
argv[0]);
exit (0);
} else {
readConfig (&wordsPerThread, &nThreads, &bSize, argv);
}
/* Find the best GPU in the system */
hipSetDevice(cutGetMaxGflopsDeviceId ());
/* This is done to clear the cache */
/* scratch scope: touch a 4 MB buffer once on-device, then discard it */
{
int cache_clear = 4 * 1024 * 1024; // 4 MB
int* h_temp = (int*) malloc (cache_clear);
for(int i = 0; i < cache_clear / sizeof (int); i++) h_temp[i] = 1;
int* d_temp;
cutilSafeCall (hipMalloc ((void**) &d_temp, cache_clear));
cutilSafeCall (hipMemcpy (d_temp, h_temp, cache_clear,
hipMemcpyHostToDevice));
int nt = cache_clear / sizeof (int);
/* 1M ints / 512 threads per block -> exact division by construction */
int nb = nt / 512;
hipLaunchKernelGGL(( clear_cache) , dim3(nb), dim3(512), 0, 0, d_temp);
cutilSafeCall (hipDeviceSynchronize ());
free (h_temp);
cutilSafeCall (hipFree (d_temp));
}
/* Allocate host memory */
h_out = (int*) malloc (nThreads * sizeof (int));
h_pchase = (int*) malloc (bSize * sizeof (int));
cutilSafeCall (hipMalloc ((void**) &d_out, nThreads * sizeof (int)));
cutilSafeCall (hipMalloc ((void**) &d_pchase, bSize * sizeof (int)));
/* Initialize memory */
/* h_pchase[k] == k for every k: the chase table is the identity chain
(the last slot is written separately but gets the same value) */
for(i = 0; i < bSize - 1; i++) {
h_pchase[i] = i;
}
h_pchase[bSize - 1] = bSize - 1;
cutilSafeCall (hipMemcpy (d_pchase, h_pchase, bSize * sizeof (int),
hipMemcpyHostToDevice));
/* Run test */
total_time_taken = runTest (h_out, d_out, d_pchase, nThreads, bSize,
wordsPerThread);
/* Validate results */
fprintf(stderr, "Results validated: %d errors\n", validateResults (nThreads,
bSize,
wordsPerThread,
h_out, h_pchase));
/* Compute performance metrics */
/* average time per iteration; +1 word accounts for the initial load */
total_time_taken = total_time_taken / NUM_ITER;
total_dram = ((wordsPerThread + 1) * (1.0 * nThreads/1e9)) * sizeof (int);
fprintf (stderr, "Time taken to load %Lg GBs: %f (ms)\n", total_dram,
total_time_taken);
fprintf (stderr, "Effective bandwidth: %Lg (GB/s)\n",
(total_dram/total_time_taken * 1e3));
/* Free memory */
free (h_out);
cutilSafeCall (hipFree (d_out));
free (h_pchase);
cutilSafeCall (hipFree (d_pchase));
return 0;
}
| e4d63270038edb6fd1b839ca27cf5d77b47afed2.cu | /*
MIT License
Copyright 2020 Jee W. Choi, Marat Dukhan, and Xing Liu
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so, subject
to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/* Libraries */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include <cutil_inline.h>
#include "cache_kernel.h"
/* ======================================================================== */
/* GPU kernel error checking function */
/* Abort the program with a diagnostic if the most recent CUDA call left an
error behind. fp receives the message; filename/line identify the call
site (pass __FILE__/__LINE__). Returns normally only when no error is
pending. */
void gpu_check_error__srcpos (FILE* fp, const char* filename, size_t line)
{
const cudaError_t status = cudaGetLastError ();
if (status == cudaSuccess)
return;
fprintf (fp, "*** [%s:%lu] CUDA ERROR %d: %s ***\n", filename, line, status,
cudaGetErrorString (status));
fflush (fp);
exit (-1); /* abort program */
}
/* ======================================================================== */
/* ======================================================================== */
/* Read program inputs */
/* Parse the three positional CLI arguments -- <words/thread>, <# threads>,
<threads/block> -- from argv[1..3] into the three output parameters.
Assumes the caller has already verified argc >= 4. */
void readConfig(int* wordsPerThread, int* nThreads, int* bSize, char** argv)
{
int* const targets[] = { wordsPerThread, nThreads, bSize };
for (int k = 0; k < 3; k++) {
*targets[k] = atoi (argv[k + 1]);
}
}
/* ======================================================================== */
/* ======================================================================== */
/* Validate the results to make sure the kernel ran correctly */
/* ======================================================================== */
/* Validate the results to make sure the kernel ran correctly.
Replays the pointer chase on the CPU: logical thread i starts at
in[i % bSize] and follows the chain in[] for wordsPerThread hops; the
final index must equal the GPU result out[i].
Returns the number of mismatching entries (0 == success).
Fix vs. original: the intermediate `test` buffer was malloc'd and never
freed; the comparison is done in place instead, removing both the leak
and the O(nThreads) allocation. */
int validateResults(int nThreads, int bSize, int wordsPerThread, int* out, int* in)
{
int i, j;
int tmp, cnt;
cnt = 0;
for(i = 0; i < nThreads; i++) {
tmp = in[i % bSize];
for(j = 0; j < wordsPerThread; j++) {
tmp = in[tmp];
}
if (tmp != out[i]) cnt++;
}
return cnt;
}
/* ======================================================================== */
/* ======================================================================== */
/* Kernel used for clearing the cache before the main test */
/* Read-modify-write every element once so that prior cache contents are
evicted before the timed run. One thread per element; the caller sizes
the grid to cover the buffer exactly. */
__global__ void clear_cache (int *in)
{
const int gid = blockDim.x * blockIdx.x + threadIdx.x;
++in[gid];
}
/* ======================================================================== */
/* ======================================================================== */
/* Run the main test */
/* ======================================================================== */
/* Run the main test: time NUM_ITER launches of the pointer-chasing kernel
matching wordsPerThread, then copy the device results into h_out.
Returns the total elapsed GPU time in ms across all iterations, or -1.0f
when wordsPerThread has no compiled kernel variant.
Fixes vs. original: the -1.0f error sentinel was assigned inside the timed
loop and then overwritten by cudaEventElapsedTime (so the error was lost),
and the two timing events were never destroyed. */
float runTest (int* h_out, int* d_out, int* d_pchase, int nThreads, int bSize,
int wordsPerThread)
{
/* Validate before touching the GPU so an unsupported word count cannot be
mistaken for a measured time. */
switch (wordsPerThread) {
case 1: case 2: case 4: case 8: case 16: case 32: case 64:
case 128: case 256: case 512: case 1024: case 2048: case 4096:
break;
default:
fprintf (stderr, "Invalid wordsPerThread: %d\n", wordsPerThread);
return -1.0f;
}
/* CUDA timers */
cudaEvent_t start, stop;
float total_time_taken;
/* one logical thread per chase; grid = ceil(nThreads / bSize) */
int num_blocks = (nThreads + bSize - 1) / bSize;
dim3 grid (num_blocks);
dim3 threads (bSize);
fprintf (stderr, "number of iterations is %d\n", NUM_ITER);
/* Start timer */
cutilSafeCall (cudaEventCreate (&start));
cutilSafeCall (cudaEventCreate (&stop));
cutilSafeCall (cudaEventRecord (start, 0));
for(int iter = 0; iter < NUM_ITER; iter++) {
switch (wordsPerThread) {
case 1:
cache_kernel_1 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 2:
cache_kernel_2 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 4:
cache_kernel_4 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 8:
cache_kernel_8 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 16:
cache_kernel_16 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 32:
cache_kernel_32 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 64:
cache_kernel_64 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 128:
cache_kernel_128 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 256:
cache_kernel_256 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 512:
cache_kernel_512 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 1024:
cache_kernel_1024 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 2048:
cache_kernel_2048 <<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
case 4096:
cache_kernel_4096<<<grid, threads>>> (nThreads, d_out, d_pchase);
break;
default:
/* unreachable: validated above */
break;
}
}
/* End timer. cudaThreadSynchronize is deprecated in modern toolkits but
kept here for the CUTIL-era toolchain this file targets. */
cutilSafeCall (cudaEventRecord (stop, 0));
cutilSafeCall (cudaThreadSynchronize ());
cutilSafeCall (cudaEventElapsedTime (&total_time_taken, start, stop));
/* Check for kernel errors */
gpu_check_error__srcpos (stderr, __FILE__, __LINE__);
/* Copy results back */
cutilSafeCall (cudaMemcpy (h_out, d_out, nThreads * sizeof (int),
cudaMemcpyDeviceToHost));
/* Release the timing events (the original leaked them). */
cutilSafeCall (cudaEventDestroy (start));
cutilSafeCall (cudaEventDestroy (stop));
return total_time_taken;
}
/* ======================================================================== */
/* Driver: parse <word/thread> <# threads> <threads/block>, warm/flush the
GPU caches, build the pointer-chase table, time the kernel via runTest,
validate on the CPU, and report effective bandwidth.
Assumes bSize divides cleanly into the chase table (no bounds checks on
the CLI values -- trusted input). */
int main(int argc, char** argv)
{
int i;
long double total_dram;
/* Timer */
float total_time_taken;
/* Execution parameters */
int wordsPerThread;
int nThreads;
int bSize;
/* Memory data structures */
int* d_out;
int* h_out;
int* h_pchase;
int* d_pchase;
if(argc < 4) {
fprintf(stderr, "usage: %s <word/thread> <# threads> <threads/block>\n",
argv[0]);
exit (0);
} else {
readConfig (&wordsPerThread, &nThreads, &bSize, argv);
}
/* Find the best GPU in the system */
cudaSetDevice(cutGetMaxGflopsDeviceId ());
/* This is done to clear the cache */
/* scratch scope: touch a 4 MB buffer once on-device, then discard it */
{
int cache_clear = 4 * 1024 * 1024; // 4 MB
int* h_temp = (int*) malloc (cache_clear);
for(int i = 0; i < cache_clear / sizeof (int); i++) h_temp[i] = 1;
int* d_temp;
cutilSafeCall (cudaMalloc ((void**) &d_temp, cache_clear));
cutilSafeCall (cudaMemcpy (d_temp, h_temp, cache_clear,
cudaMemcpyHostToDevice));
int nt = cache_clear / sizeof (int);
/* 1M ints / 512 threads per block -> exact division by construction */
int nb = nt / 512;
clear_cache <<<nb, 512>>> (d_temp);
cutilSafeCall (cudaThreadSynchronize ());
free (h_temp);
cutilSafeCall (cudaFree (d_temp));
}
/* Allocate host memory */
h_out = (int*) malloc (nThreads * sizeof (int));
h_pchase = (int*) malloc (bSize * sizeof (int));
cutilSafeCall (cudaMalloc ((void**) &d_out, nThreads * sizeof (int)));
cutilSafeCall (cudaMalloc ((void**) &d_pchase, bSize * sizeof (int)));
/* Initialize memory */
/* h_pchase[k] == k for every k: the chase table is the identity chain
(the last slot is written separately but gets the same value) */
for(i = 0; i < bSize - 1; i++) {
h_pchase[i] = i;
}
h_pchase[bSize - 1] = bSize - 1;
cutilSafeCall (cudaMemcpy (d_pchase, h_pchase, bSize * sizeof (int),
cudaMemcpyHostToDevice));
/* Run test */
total_time_taken = runTest (h_out, d_out, d_pchase, nThreads, bSize,
wordsPerThread);
/* Validate results */
fprintf(stderr, "Results validated: %d errors\n", validateResults (nThreads,
bSize,
wordsPerThread,
h_out, h_pchase));
/* Compute performance metrics */
/* average time per iteration; +1 word accounts for the initial load */
total_time_taken = total_time_taken / NUM_ITER;
total_dram = ((wordsPerThread + 1) * (1.0 * nThreads/1e9)) * sizeof (int);
fprintf (stderr, "Time taken to load %Lg GBs: %f (ms)\n", total_dram,
total_time_taken);
fprintf (stderr, "Effective bandwidth: %Lg (GB/s)\n",
(total_dram/total_time_taken * 1e3));
/* Free memory */
free (h_out);
cutilSafeCall (cudaFree (d_out));
free (h_pchase);
cutilSafeCall (cudaFree (d_pchase));
return 0;
}
|
94fbb34a2f77b261199b405857f152cd97a6fe76.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************
File Name : iou_loss_layer.cu
File Description: GPU implement
Author : yanghongyu
Create Time : 2019-9-7 10:01:23
*******************************************************/
#include <vector>
#include "caffe/layers/iou_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Per-pixel IoU loss forward.
// d1..d4 / d1_t..d4_t are the predicted / ground-truth distances from each
// pixel to the four box sides (presumably top/right/bottom/left as in
// EAST-style text detection -- TODO confirm channel order). score_map gates
// which pixels contribute: score 0 writes loss 0, otherwise
// loss[i] = -log(IoU(pred box, gt box)).
// NOTE(review): the `-=` accumulation relies on the caller zero-filling
// `loss` first (Forward_gpu does so via caffe_gpu_set).
template <typename Dtype>
__global__ void IOULossForwardGPU(const int nthreads,
const Dtype* d1, const Dtype* d2, const Dtype* d3, const Dtype* d4,
const Dtype* d1_t, const Dtype* d2_t, const Dtype* d3_t, const Dtype* d4_t,
const Dtype* score_map, Dtype* loss) {
CUDA_KERNEL_LOOP(i, nthreads) {
const int score = static_cast<int>(score_map[i]);
if (score == 0) {
loss[i] = 0;
} else {
// intersection extent on each axis: min of pred/gt distances, both sides
Dtype w1 = (d2[i] > d2_t[i])?d2_t[i]:d2[i];
Dtype w2 = (d4[i] > d4_t[i])?d4_t[i]:d4[i];
Dtype intersection_w = w1 + w2;
Dtype h1 = (d1[i] > d1_t[i])?d1_t[i]:d1[i];
Dtype h2 = (d3[i] > d3_t[i])?d3_t[i]:d3[i];
Dtype intersection_h = h1 + h2;
Dtype intersection_area = intersection_w * intersection_h;
Dtype gt_w = d2_t[i] + d4_t[i];
Dtype gt_h = d1_t[i] + d3_t[i];
Dtype gt_area = gt_w * gt_h;
Dtype pred_w = d2[i] + d4[i];
Dtype pred_h = d1[i] + d3[i];
Dtype pred_area = pred_w * pred_h;
// guard against log(0) / division blow-up on (near-)empty intersections
if (intersection_area < 1e-8){
intersection_area = 1.;
}
Dtype iou = intersection_area/(gt_area + pred_area - intersection_area);
// per-pixel weight, currently constant
Dtype w_r = 1.;
loss[i] -= w_r * log(iou);
//printf("thread %d : d1 d2 d3 d4 : %f, %f, %f, %f ",i, d1[i], d2[i], d3[i], d4[i]);
//printf("w1 : %f ", w1);
//printf("w2 : %f ", w2);
//printf("iou : %f", iou);
//printf("loss i : %f", loss[i]);
}
}
}
// Per-pixel IoU loss backward.
// Writes the partial derivative of -log(IoU) with respect to each predicted
// distance into diff1..diff4; pixels with score 0 get zero gradient.
// For each side, the branch distinguishes whether the predicted distance is
// the binding one for the intersection (pred <= gt), which adds the
// -1/intersection term; otherwise only the union-area term remains.
template <typename Dtype>
__global__ void IOULossBackwardGPU(const int nthreads,
const Dtype* d1, const Dtype* d2, const Dtype* d3, const Dtype* d4,
const Dtype* d1_t, const Dtype* d2_t, const Dtype* d3_t, const Dtype* d4_t,
Dtype* diff1, Dtype* diff2, Dtype* diff3, Dtype* diff4,
const Dtype* score_map) {
CUDA_KERNEL_LOOP(i, nthreads) {
const int score = static_cast<int>(score_map[i]);
if (score == 0) {
diff1[i] = 0;
diff2[i] = 0;
diff3[i] = 0;
diff4[i] = 0;
} else {
// same geometry as the forward pass
Dtype w1 = (d2[i] > d2_t[i])?d2_t[i]:d2[i];
Dtype w2 = (d4[i] > d4_t[i])?d4_t[i]:d4[i];
Dtype intersection_w = w1 + w2;
Dtype h1 = (d1[i] > d1_t[i])?d1_t[i]:d1[i];
Dtype h2 = (d3[i] > d3_t[i])?d3_t[i]:d3[i];
Dtype intersection_h = h1 + h2;
Dtype intersection_area = intersection_w * intersection_h;
Dtype gt_w = d2_t[i] + d4_t[i];
Dtype gt_h = d1_t[i] + d3_t[i];
Dtype gt_area = gt_w * gt_h;
Dtype pred_w = d2[i] + d4[i];
Dtype pred_h = d1[i] + d3[i];
Dtype pred_area = pred_w * pred_h;
Dtype u_area = gt_area + pred_area - intersection_area;
// clamp degenerate extents so the 1/intersection terms stay finite
if (intersection_h < 1e-8){
intersection_h = 1.;
}
if (intersection_w < 1e-8){
intersection_w = 1.;
}
//Dtype w_r = 1 + 0.0001 * gt_area;
//Dtype h_r = 1 + 0.0001 * gt_area;
// per-axis weights, currently constant (area-scaled variants kept above)
Dtype w_r = 1.;
Dtype h_r = 1.;
if(d1[i] <= d1_t[i])
diff1[i] = h_r * ((pred_w - intersection_w)/u_area - 1./intersection_h);
else
diff1[i] = h_r * (pred_w/u_area);
if(d2[i] <= d2_t[i])
diff2[i] = w_r * ((pred_h - intersection_h)/u_area - 1./intersection_w);
else
diff2[i] = w_r * (pred_h/u_area);
if(d3[i] <= d3_t[i])
diff3[i] = h_r * ((pred_w - intersection_w)/u_area - 1./intersection_h);
else
diff3[i] = h_r * (pred_w/u_area);
if(d4[i] <= d4_t[i])
diff4[i] = w_r * ((pred_h - intersection_h)/u_area - 1./intersection_w);
else
diff4[i] = w_r * (pred_h/u_area);
}
}
}
// Forward pass of the IoU loss layer.
// bottom[0]: predicted distances, 4 channels (d1..d4); bottom[1]: targets
// with the same layout; bottom[2]: score map gating valid pixels.
// Output: top[0][0] = sum of per-pixel -log(IoU) over valid pixels,
// normalized by the number of valid pixels (asum of the score map --
// presumably scores are 0/1; confirm against the data layer).
// NOTE(review): count(2) == H*W and all channel offsets fix n == 0, so only
// the first image of the batch is processed -- presumably batch size 1.
template <typename Dtype>
void IOULossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const int count = bottom[0]->count(2);
const Dtype* input_data = bottom[0]->gpu_data();
const Dtype* target = bottom[1]->gpu_data();
const Dtype* score_map = bottom[2]->gpu_data();
// channel base pointers within image 0
const Dtype* d1 = input_data;
const Dtype* d2 = input_data + bottom[0]->offset(0, 1, 0, 0);
const Dtype* d3 = input_data + bottom[0]->offset(0, 2, 0, 0);
const Dtype* d4 = input_data + bottom[0]->offset(0, 3, 0, 0);
const Dtype* d1_t = target;
const Dtype* d2_t = target + bottom[1]->offset(0, 1, 0, 0);
const Dtype* d3_t = target + bottom[1]->offset(0, 2, 0, 0);
const Dtype* d4_t = target + bottom[1]->offset(0, 3, 0, 0);
// scratch blob for per-pixel losses; zero-filled because the kernel
// accumulates with -=
Blob<Dtype> loss;
loss.ReshapeLike(*bottom[2]);
Dtype* loss_data = loss.mutable_gpu_data();
caffe_gpu_set(loss.count(), Dtype(0), loss_data);
hipLaunchKernelGGL(( IOULossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, count, d1, d2, d3, d4, d1_t, d2_t, d3_t, d4_t, score_map,
loss_data);
// valid_count_ is reused by Backward_gpu for gradient normalization
caffe_gpu_asum(count, score_map, &valid_count_);
Dtype loss_value;
caffe_gpu_asum(count, loss_data, &loss_value);
Dtype normalization = (valid_count_ != 0)?valid_count_:1.;
top[0]->mutable_cpu_data()[0] = loss_value/normalization;
}
template <typename Dtype>
void IOULossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const int count = bottom[0]->count(2);
const Dtype* input_data = bottom[0]->gpu_data();
const Dtype* target = bottom[1]->gpu_data();
const Dtype* score_map = bottom[2]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* diff1 = bottom_diff;
Dtype* diff2 = bottom_diff + bottom[0]->offset(0, 1, 0, 0);
Dtype* diff3 = bottom_diff + bottom[0]->offset(0, 2, 0, 0);
Dtype* diff4 = bottom_diff + bottom[0]->offset(0, 3, 0, 0);
const Dtype* d1 = input_data;
const Dtype* d2 = input_data + bottom[0]->offset(0, 1, 0, 0);
const Dtype* d3 = input_data + bottom[0]->offset(0, 2, 0, 0);
const Dtype* d4 = input_data + bottom[0]->offset(0, 3, 0, 0);
const Dtype* d1_t = target;
const Dtype* d2_t = target + bottom[1]->offset(0, 1, 0, 0);
const Dtype* d3_t = target + bottom[1]->offset(0, 2, 0, 0);
const Dtype* d4_t = target + bottom[1]->offset(0, 3, 0, 0);
hipLaunchKernelGGL(( IOULossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, count, d1, d2, d3, d4, d1_t, d2_t, d3_t, d4_t,
diff1, diff2, diff3, diff4, score_map);
// Scale down gradient
Dtype normalization = (valid_count_ != 0)?valid_count_:1.;
Dtype loss_weight = top[0]->cpu_diff()[0]/normalization;
caffe_gpu_scal(bottom[0]->count(1), loss_weight, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(IOULossLayer);
} // namespace caffe
| 94fbb34a2f77b261199b405857f152cd97a6fe76.cu | /******************************************************
File Name : iou_loss_layer.cu
File Description: GPU implement
Author : yanghongyu
Create Time : 2019-9-7 10:01:23
*******************************************************/
#include <vector>
#include "caffe/layers/iou_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void IOULossForwardGPU(const int nthreads,
const Dtype* d1, const Dtype* d2, const Dtype* d3, const Dtype* d4,
const Dtype* d1_t, const Dtype* d2_t, const Dtype* d3_t, const Dtype* d4_t,
const Dtype* score_map, Dtype* loss) {
CUDA_KERNEL_LOOP(i, nthreads) {
const int score = static_cast<int>(score_map[i]);
if (score == 0) {
loss[i] = 0;
} else {
Dtype w1 = (d2[i] > d2_t[i])?d2_t[i]:d2[i];
Dtype w2 = (d4[i] > d4_t[i])?d4_t[i]:d4[i];
Dtype intersection_w = w1 + w2;
Dtype h1 = (d1[i] > d1_t[i])?d1_t[i]:d1[i];
Dtype h2 = (d3[i] > d3_t[i])?d3_t[i]:d3[i];
Dtype intersection_h = h1 + h2;
Dtype intersection_area = intersection_w * intersection_h;
Dtype gt_w = d2_t[i] + d4_t[i];
Dtype gt_h = d1_t[i] + d3_t[i];
Dtype gt_area = gt_w * gt_h;
Dtype pred_w = d2[i] + d4[i];
Dtype pred_h = d1[i] + d3[i];
Dtype pred_area = pred_w * pred_h;
if (intersection_area < 1e-8){
intersection_area = 1.;
}
Dtype iou = intersection_area/(gt_area + pred_area - intersection_area);
Dtype w_r = 1.;
loss[i] -= w_r * log(iou);
//printf("thread %d : d1 d2 d3 d4 : %f, %f, %f, %f ",i, d1[i], d2[i], d3[i], d4[i]);
//printf("w1 : %f ", w1);
//printf("w2 : %f ", w2);
//printf("iou : %f", iou);
//printf("loss i : %f", loss[i]);
}
}
}
template <typename Dtype>
__global__ void IOULossBackwardGPU(const int nthreads,
const Dtype* d1, const Dtype* d2, const Dtype* d3, const Dtype* d4,
const Dtype* d1_t, const Dtype* d2_t, const Dtype* d3_t, const Dtype* d4_t,
Dtype* diff1, Dtype* diff2, Dtype* diff3, Dtype* diff4,
const Dtype* score_map) {
CUDA_KERNEL_LOOP(i, nthreads) {
const int score = static_cast<int>(score_map[i]);
if (score == 0) {
diff1[i] = 0;
diff2[i] = 0;
diff3[i] = 0;
diff4[i] = 0;
} else {
Dtype w1 = (d2[i] > d2_t[i])?d2_t[i]:d2[i];
Dtype w2 = (d4[i] > d4_t[i])?d4_t[i]:d4[i];
Dtype intersection_w = w1 + w2;
Dtype h1 = (d1[i] > d1_t[i])?d1_t[i]:d1[i];
Dtype h2 = (d3[i] > d3_t[i])?d3_t[i]:d3[i];
Dtype intersection_h = h1 + h2;
Dtype intersection_area = intersection_w * intersection_h;
Dtype gt_w = d2_t[i] + d4_t[i];
Dtype gt_h = d1_t[i] + d3_t[i];
Dtype gt_area = gt_w * gt_h;
Dtype pred_w = d2[i] + d4[i];
Dtype pred_h = d1[i] + d3[i];
Dtype pred_area = pred_w * pred_h;
Dtype u_area = gt_area + pred_area - intersection_area;
if (intersection_h < 1e-8){
intersection_h = 1.;
}
if (intersection_w < 1e-8){
intersection_w = 1.;
}
//Dtype w_r = 1 + 0.0001 * gt_area;
//Dtype h_r = 1 + 0.0001 * gt_area;
Dtype w_r = 1.;
Dtype h_r = 1.;
if(d1[i] <= d1_t[i])
diff1[i] = h_r * ((pred_w - intersection_w)/u_area - 1./intersection_h);
else
diff1[i] = h_r * (pred_w/u_area);
if(d2[i] <= d2_t[i])
diff2[i] = w_r * ((pred_h - intersection_h)/u_area - 1./intersection_w);
else
diff2[i] = w_r * (pred_h/u_area);
if(d3[i] <= d3_t[i])
diff3[i] = h_r * ((pred_w - intersection_w)/u_area - 1./intersection_h);
else
diff3[i] = h_r * (pred_w/u_area);
if(d4[i] <= d4_t[i])
diff4[i] = w_r * ((pred_h - intersection_h)/u_area - 1./intersection_w);
else
diff4[i] = w_r * (pred_h/u_area);
}
}
}
template <typename Dtype>
void IOULossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const int count = bottom[0]->count(2);
const Dtype* input_data = bottom[0]->gpu_data();
const Dtype* target = bottom[1]->gpu_data();
const Dtype* score_map = bottom[2]->gpu_data();
const Dtype* d1 = input_data;
const Dtype* d2 = input_data + bottom[0]->offset(0, 1, 0, 0);
const Dtype* d3 = input_data + bottom[0]->offset(0, 2, 0, 0);
const Dtype* d4 = input_data + bottom[0]->offset(0, 3, 0, 0);
const Dtype* d1_t = target;
const Dtype* d2_t = target + bottom[1]->offset(0, 1, 0, 0);
const Dtype* d3_t = target + bottom[1]->offset(0, 2, 0, 0);
const Dtype* d4_t = target + bottom[1]->offset(0, 3, 0, 0);
Blob<Dtype> loss;
loss.ReshapeLike(*bottom[2]);
Dtype* loss_data = loss.mutable_gpu_data();
caffe_gpu_set(loss.count(), Dtype(0), loss_data);
IOULossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, d1, d2, d3, d4, d1_t, d2_t, d3_t, d4_t, score_map,
loss_data);
caffe_gpu_asum(count, score_map, &valid_count_);
Dtype loss_value;
caffe_gpu_asum(count, loss_data, &loss_value);
Dtype normalization = (valid_count_ != 0)?valid_count_:1.;
top[0]->mutable_cpu_data()[0] = loss_value/normalization;
}
template <typename Dtype>
void IOULossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const int count = bottom[0]->count(2);
const Dtype* input_data = bottom[0]->gpu_data();
const Dtype* target = bottom[1]->gpu_data();
const Dtype* score_map = bottom[2]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* diff1 = bottom_diff;
Dtype* diff2 = bottom_diff + bottom[0]->offset(0, 1, 0, 0);
Dtype* diff3 = bottom_diff + bottom[0]->offset(0, 2, 0, 0);
Dtype* diff4 = bottom_diff + bottom[0]->offset(0, 3, 0, 0);
const Dtype* d1 = input_data;
const Dtype* d2 = input_data + bottom[0]->offset(0, 1, 0, 0);
const Dtype* d3 = input_data + bottom[0]->offset(0, 2, 0, 0);
const Dtype* d4 = input_data + bottom[0]->offset(0, 3, 0, 0);
const Dtype* d1_t = target;
const Dtype* d2_t = target + bottom[1]->offset(0, 1, 0, 0);
const Dtype* d3_t = target + bottom[1]->offset(0, 2, 0, 0);
const Dtype* d4_t = target + bottom[1]->offset(0, 3, 0, 0);
IOULossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, d1, d2, d3, d4, d1_t, d2_t, d3_t, d4_t,
diff1, diff2, diff3, diff4, score_map);
// Scale down gradient
Dtype normalization = (valid_count_ != 0)?valid_count_:1.;
Dtype loss_weight = top[0]->cpu_diff()[0]/normalization;
caffe_gpu_scal(bottom[0]->count(1), loss_weight, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(IOULossLayer);
} // namespace caffe
|
3a5934afdcdab12360b027e22698aaac72dd4c3b.hip | // !!! This is a file automatically generated by hipify!!!
#include <thread>
#include <iostream>
#include <stdlib.h>
#include <fstream>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
/* Switch of minimal spanning tree algorithms */
/* Note: we will migrate the cuda implementaion to PyTorch in the next version */
//#define MST_PRIM
//#define MST_KRUSKAL
#define MST_BORUVKA
#ifdef MST_PRIM
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/prim_minimum_spanning_tree.hpp>
#endif
#ifdef MST_KRUSKAL
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/kruskal_min_spanning_tree.hpp>
#endif
#ifdef MST_BORUVKA
#include "boruvka.hpp"
#endif
#ifndef MST_BORUVKA
using namespace boost;
typedef adjacency_list <vecS, vecS, undirectedS, no_property,
property < edge_weight_t, float > > Graph;
typedef graph_traits < Graph >::edge_descriptor Edge;
typedef graph_traits < Graph >::vertex_descriptor Vertex;
typedef std::pair<int, int> E;
#endif
static void forward_kernel(int * edge_index, float * edge_weight, int * edge_out, int vertex_count, int edge_count){
#ifdef MST_BORUVKA
struct Graph * g = createGraph(vertex_count, edge_count);
for (int i = 0; i < edge_count; ++i){
g->edge[i].src = edge_index[i * 2];
g->edge[i].dest = edge_index[i * 2 + 1];
g->edge[i].weight = edge_weight[i];
}
#else
Graph g(vertex_count);
for (int i = 0; i < edge_count; ++i)
boost::add_edge((int)edge_index[i * 2], (int)edge_index[i * 2 + 1],
edge_weight[i], g);
#endif
#ifdef MST_PRIM
std::vector < graph_traits < Graph >::vertex_descriptor > p(num_vertices(g));
prim_minimum_spanning_tree(g, &(p[0]));
int * edge_out_ptr = edge_out;
for (std::size_t i = 0; i != p.size(); ++i)
if (p[i] != i) {
*(edge_out_ptr++) = i;
*(edge_out_ptr++) = p[i];
}
#endif
#ifdef MST_KRUSKAL
std::vector < Edge > spanning_tree;
kruskal_minimum_spanning_tree(g, std::back_inserter(spanning_tree));
float * edge_out_ptr = edge_out;
for (std::vector < Edge >::iterator ei = spanning_tree.begin();
ei != spanning_tree.end(); ++ei){
*(edge_out_ptr++) = source(*ei, g);
*(edge_out_ptr++) = target(*ei, g);
}
#endif
#ifdef MST_BORUVKA
boruvkaMST(g, edge_out);
delete[] g->edge;
delete[] g;
#endif
}
at::Tensor mst_forward(
const at::Tensor & edge_index_tensor,
const at::Tensor & edge_weight_tensor,
int vertex_count){
unsigned batch_size = edge_index_tensor.size(0);
unsigned edge_count = edge_index_tensor.size(1);
auto edge_index_cpu = edge_index_tensor.cpu();
auto edge_weight_cpu = edge_weight_tensor.cpu();
auto edge_out_cpu = at::empty({batch_size, vertex_count - 1, 2}, edge_index_cpu.options());
int * edge_out = edge_out_cpu.contiguous().data<int>();
int * edge_index = edge_index_cpu.contiguous().data<int>();
float * edge_weight = edge_weight_cpu.contiguous().data<float>();
// Loop for batch
std::thread pids[batch_size];
for (unsigned i = 0; i < batch_size; i++){
auto edge_index_iter = edge_index + i * edge_count * 2;
auto edge_weight_iter = edge_weight + i * edge_count;
auto edge_out_iter = edge_out + i * (vertex_count - 1) * 2;
pids[i] = std::thread(forward_kernel, edge_index_iter, edge_weight_iter, edge_out_iter, vertex_count, edge_count);
}
for (unsigned i = 0; i < batch_size; i++){
pids[i].join();
}
auto edge_out_tensor = edge_out_cpu.to(edge_index_tensor.device());
return edge_out_tensor;
}
| 3a5934afdcdab12360b027e22698aaac72dd4c3b.cu | #include <thread>
#include <iostream>
#include <stdlib.h>
#include <fstream>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
/* Switch of minimal spanning tree algorithms */
/* Note: we will migrate the cuda implementaion to PyTorch in the next version */
//#define MST_PRIM
//#define MST_KRUSKAL
#define MST_BORUVKA
#ifdef MST_PRIM
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/prim_minimum_spanning_tree.hpp>
#endif
#ifdef MST_KRUSKAL
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/kruskal_min_spanning_tree.hpp>
#endif
#ifdef MST_BORUVKA
#include "boruvka.hpp"
#endif
#ifndef MST_BORUVKA
using namespace boost;
typedef adjacency_list <vecS, vecS, undirectedS, no_property,
property < edge_weight_t, float > > Graph;
typedef graph_traits < Graph >::edge_descriptor Edge;
typedef graph_traits < Graph >::vertex_descriptor Vertex;
typedef std::pair<int, int> E;
#endif
static void forward_kernel(int * edge_index, float * edge_weight, int * edge_out, int vertex_count, int edge_count){
#ifdef MST_BORUVKA
struct Graph * g = createGraph(vertex_count, edge_count);
for (int i = 0; i < edge_count; ++i){
g->edge[i].src = edge_index[i * 2];
g->edge[i].dest = edge_index[i * 2 + 1];
g->edge[i].weight = edge_weight[i];
}
#else
Graph g(vertex_count);
for (int i = 0; i < edge_count; ++i)
boost::add_edge((int)edge_index[i * 2], (int)edge_index[i * 2 + 1],
edge_weight[i], g);
#endif
#ifdef MST_PRIM
std::vector < graph_traits < Graph >::vertex_descriptor > p(num_vertices(g));
prim_minimum_spanning_tree(g, &(p[0]));
int * edge_out_ptr = edge_out;
for (std::size_t i = 0; i != p.size(); ++i)
if (p[i] != i) {
*(edge_out_ptr++) = i;
*(edge_out_ptr++) = p[i];
}
#endif
#ifdef MST_KRUSKAL
std::vector < Edge > spanning_tree;
kruskal_minimum_spanning_tree(g, std::back_inserter(spanning_tree));
float * edge_out_ptr = edge_out;
for (std::vector < Edge >::iterator ei = spanning_tree.begin();
ei != spanning_tree.end(); ++ei){
*(edge_out_ptr++) = source(*ei, g);
*(edge_out_ptr++) = target(*ei, g);
}
#endif
#ifdef MST_BORUVKA
boruvkaMST(g, edge_out);
delete[] g->edge;
delete[] g;
#endif
}
at::Tensor mst_forward(
const at::Tensor & edge_index_tensor,
const at::Tensor & edge_weight_tensor,
int vertex_count){
unsigned batch_size = edge_index_tensor.size(0);
unsigned edge_count = edge_index_tensor.size(1);
auto edge_index_cpu = edge_index_tensor.cpu();
auto edge_weight_cpu = edge_weight_tensor.cpu();
auto edge_out_cpu = at::empty({batch_size, vertex_count - 1, 2}, edge_index_cpu.options());
int * edge_out = edge_out_cpu.contiguous().data<int>();
int * edge_index = edge_index_cpu.contiguous().data<int>();
float * edge_weight = edge_weight_cpu.contiguous().data<float>();
// Loop for batch
std::thread pids[batch_size];
for (unsigned i = 0; i < batch_size; i++){
auto edge_index_iter = edge_index + i * edge_count * 2;
auto edge_weight_iter = edge_weight + i * edge_count;
auto edge_out_iter = edge_out + i * (vertex_count - 1) * 2;
pids[i] = std::thread(forward_kernel, edge_index_iter, edge_weight_iter, edge_out_iter, vertex_count, edge_count);
}
for (unsigned i = 0; i < batch_size; i++){
pids[i].join();
}
auto edge_out_tensor = edge_out_cpu.to(edge_index_tensor.device());
return edge_out_tensor;
}
|
671db2c0c38fa999d74528ad9ca6f92370b50bc0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "wb.h"
#define NUM_BINS 128
#define CUDA_CHECK(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line,
bool abort = true) {
if (code != hipSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code),
file, line);
if (abort)
exit(code);
}
}
/**
* Kernel to perform the histogramming of the input data of ASCII characters
*/
__global__
void HistogramAscii(unsigned *deviceInput, unsigned *deviceBins, int inputLength) {
// calculate index at which this thread will function
unsigned index = threadIdx.x + blockDim.x * blockIdx.x;
// create a private array of bins for each thread block
__shared__ unsigned shared_bins[NUM_BINS];
// each thread will handle a bin initialization starting at its index in a block
// and then at offsets of (number of threads in a block)
unsigned i = threadIdx.x;
while (i < NUM_BINS) {
shared_bins[i] = 0;
i += blockDim.x;
}
// wait for all threads to complete initialization
__syncthreads();
// perform histogramming of the input data
i = index;
// stide block length is all the threads generated
int stride = blockDim.x * gridDim.x;
// considering the input data to be divide into divisions of stride length
// here each thread handles a input data starting from its index overall
// and then will skip over stride length and take in the next input data from next division
while (i < inputLength) {
atomicAdd(&shared_bins[deviceInput[i]], 1);
i += stride;
}
// wait for all threads to complete
__syncthreads();
// each thread will handle a private bin transfer to global memory starting at its index in a block
// and then at offsets of (number of threads in a block)
i = threadIdx.x;
while (i < NUM_BINS) {
atomicAdd(&deviceBins[i], shared_bins[i]);
i += blockDim.x;
}
}
int main(int argc, char *argv[]) {
wbArg_t args;
int inputLength;
unsigned int *hostInput;
unsigned int *hostBins;
unsigned int *deviceInput;
unsigned int *deviceBins;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
// hostInput = (unsigned int *)wbImport(wbArg_getInputFile(args, 0),
// &inputLength);
// reading data from input file as the above line was not working for spaces
FILE *handle = fopen(wbArg_getInputFile(args, 0), "r");
char c;
fscanf (handle, "%u", &inputLength);
// consume new line
c = fgetc(handle);
hostInput = (unsigned *)malloc(sizeof(unsigned)*inputLength);
int i = 0;
while ((c = fgetc(handle)) != EOF) {
hostInput[i++] = (unsigned int) c;
}
fflush(handle);
fclose(handle);
hostBins = (unsigned int *)malloc(NUM_BINS * sizeof(unsigned int));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbLog(TRACE, "The number of bins is ", NUM_BINS);
printf("First 10 input values:\n");
for (int i = 0; i < 10; i++) {
printf("(%c : %u)", hostInput[i], hostInput[i]);
}
printf("\n");
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
hipMalloc((void **)&deviceInput, inputLength * sizeof(unsigned int));
hipMalloc((void **)&deviceBins, NUM_BINS * sizeof(unsigned int));
CUDA_CHECK(hipDeviceSynchronize());
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
hipMemcpy(deviceInput, hostInput, inputLength * sizeof(unsigned int), hipMemcpyHostToDevice);
hipMemcpy(deviceBins, hostBins, NUM_BINS * sizeof (unsigned int), hipMemcpyHostToDevice);
CUDA_CHECK(hipDeviceSynchronize());
wbTime_stop(GPU, "Copying input memory to the GPU.");
// Launch kernel
// ----------------------------------------------------------
wbLog(TRACE, "Launching kernel");
wbTime_start(Compute, "Performing CUDA computation");
//@@ Perform kernel computation here
hipLaunchKernelGGL(( HistogramAscii), dim3(2), dim3(2), 0, 0, deviceInput, deviceBins, inputLength);
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostBins, deviceBins, NUM_BINS * sizeof (unsigned int), hipMemcpyDeviceToHost);
CUDA_CHECK(hipDeviceSynchronize());
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
hipFree(deviceInput);
hipFree(deviceBins);
wbTime_stop(GPU, "Freeing GPU Memory");
// Verify correctness
// -----------------------------------------------------
// wbSolution(args, hostBins, NUM_BINS);
int num;
unsigned *eOutput = (unsigned int *)wbImport(wbArg_getInputFile(args, 1),
&num);
bool diff = false;
for (int i = 0; i < NUM_BINS; i++) {
if (eOutput[i] != hostBins[i]) {
printf("%d: %u %u\n", i, hostBins[i], eOutput[i]);
diff = true;
break;
}
}
if (!diff) {
printf("Solution is correct");
}
else {
printf("Solution doesn't match");
}
free(hostBins);
free(hostInput);
return 0;
}
| 671db2c0c38fa999d74528ad9ca6f92370b50bc0.cu | #include <stdio.h>
#include <cuda.h>
#include "wb.h"
#define NUM_BINS 128
#define CUDA_CHECK(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line,
bool abort = true) {
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code),
file, line);
if (abort)
exit(code);
}
}
/**
* Kernel to perform the histogramming of the input data of ASCII characters
*/
__global__
void HistogramAscii(unsigned *deviceInput, unsigned *deviceBins, int inputLength) {
// calculate index at which this thread will function
unsigned index = threadIdx.x + blockDim.x * blockIdx.x;
// create a private array of bins for each thread block
__shared__ unsigned shared_bins[NUM_BINS];
// each thread will handle a bin initialization starting at its index in a block
// and then at offsets of (number of threads in a block)
unsigned i = threadIdx.x;
while (i < NUM_BINS) {
shared_bins[i] = 0;
i += blockDim.x;
}
// wait for all threads to complete initialization
__syncthreads();
// perform histogramming of the input data
i = index;
// stide block length is all the threads generated
int stride = blockDim.x * gridDim.x;
// considering the input data to be divide into divisions of stride length
// here each thread handles a input data starting from its index overall
// and then will skip over stride length and take in the next input data from next division
while (i < inputLength) {
atomicAdd(&shared_bins[deviceInput[i]], 1);
i += stride;
}
// wait for all threads to complete
__syncthreads();
// each thread will handle a private bin transfer to global memory starting at its index in a block
// and then at offsets of (number of threads in a block)
i = threadIdx.x;
while (i < NUM_BINS) {
atomicAdd(&deviceBins[i], shared_bins[i]);
i += blockDim.x;
}
}
int main(int argc, char *argv[]) {
wbArg_t args;
int inputLength;
unsigned int *hostInput;
unsigned int *hostBins;
unsigned int *deviceInput;
unsigned int *deviceBins;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
// hostInput = (unsigned int *)wbImport(wbArg_getInputFile(args, 0),
// &inputLength);
// reading data from input file as the above line was not working for spaces
FILE *handle = fopen(wbArg_getInputFile(args, 0), "r");
char c;
fscanf (handle, "%u", &inputLength);
// consume new line
c = fgetc(handle);
hostInput = (unsigned *)malloc(sizeof(unsigned)*inputLength);
int i = 0;
while ((c = fgetc(handle)) != EOF) {
hostInput[i++] = (unsigned int) c;
}
fflush(handle);
fclose(handle);
hostBins = (unsigned int *)malloc(NUM_BINS * sizeof(unsigned int));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbLog(TRACE, "The number of bins is ", NUM_BINS);
printf("First 10 input values:\n");
for (int i = 0; i < 10; i++) {
printf("(%c : %u)", hostInput[i], hostInput[i]);
}
printf("\n");
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
cudaMalloc((void **)&deviceInput, inputLength * sizeof(unsigned int));
cudaMalloc((void **)&deviceBins, NUM_BINS * sizeof(unsigned int));
CUDA_CHECK(cudaDeviceSynchronize());
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
cudaMemcpy(deviceInput, hostInput, inputLength * sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(deviceBins, hostBins, NUM_BINS * sizeof (unsigned int), cudaMemcpyHostToDevice);
CUDA_CHECK(cudaDeviceSynchronize());
wbTime_stop(GPU, "Copying input memory to the GPU.");
// Launch kernel
// ----------------------------------------------------------
wbLog(TRACE, "Launching kernel");
wbTime_start(Compute, "Performing CUDA computation");
//@@ Perform kernel computation here
HistogramAscii<<<2, 2>>>(deviceInput, deviceBins, inputLength);
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
cudaMemcpy(hostBins, deviceBins, NUM_BINS * sizeof (unsigned int), cudaMemcpyDeviceToHost);
CUDA_CHECK(cudaDeviceSynchronize());
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
cudaFree(deviceInput);
cudaFree(deviceBins);
wbTime_stop(GPU, "Freeing GPU Memory");
// Verify correctness
// -----------------------------------------------------
// wbSolution(args, hostBins, NUM_BINS);
int num;
unsigned *eOutput = (unsigned int *)wbImport(wbArg_getInputFile(args, 1),
&num);
bool diff = false;
for (int i = 0; i < NUM_BINS; i++) {
if (eOutput[i] != hostBins[i]) {
printf("%d: %u %u\n", i, hostBins[i], eOutput[i]);
diff = true;
break;
}
}
if (!diff) {
printf("Solution is correct");
}
else {
printf("Solution doesn't match");
}
free(hostBins);
free(hostInput);
return 0;
}
|
8dadf16d4b0a52330c066fed8d78d9c4961b3dce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/cross_entropy_op.h"
#include "caffe2/operators/operator_fallback_gpu.h"
namespace caffe2 {
namespace {
__global__ void LabelCrossEntropyKernel(
const int N, const int D, const float* Xdata, const int* labeldata,
const float log_threshold, float* Ydata) {
CUDA_1D_KERNEL_LOOP(i, N) {
CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D);
Ydata[i] = -logf(max(Xdata[i * D + labeldata[i]], log_threshold));
}
}
__global__ void LabelCrossEntropyGradientKernel(
const int N, const int D, const float* Xdata, const int* labeldata,
const float* dYdata, const float log_threshold, float* dXdata) {
CUDA_1D_KERNEL_LOOP(i, N) {
int idx = i * D + labeldata[i];
dXdata[idx] = - dYdata[i] / max(Xdata[idx], log_threshold);
}
}
} // namespace
template <>
bool LabelCrossEntropyOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& label = Input(1);
auto* Y = Output(0);
DCHECK_EQ(X.ndim(), 2);
int N = X.dim32(0);
int D = X.dim32(1);
DCHECK((label.ndim() == 1) || (label.ndim() == 2 && label.dim32(1) == 1));
DCHECK_EQ(label.dim32(0), N);
Y->Resize(vector<TIndex>(size_t(1), N));
hipLaunchKernelGGL(( LabelCrossEntropyKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
N, D, X.data<float>(), label.data<int>(), kLOG_THRESHOLD(),
Y->mutable_data<float>());
return true;
}
template <>
bool LabelCrossEntropyGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& label = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
DCHECK_EQ(X.ndim(), 2);
int N = X.dim32(0);
int D = X.dim32(1);
DCHECK((label.ndim() == 1) || (label.ndim() == 2 && label.dim32(1) == 1));
DCHECK_EQ(label.dim32(0), N);
DCHECK_EQ(dY.ndim(), 1);
DCHECK_EQ(dY.dim32(0), N);
dX->ResizeLike(X);
math::Set<float, CUDAContext>(
dX->size(), 0.f, dX->mutable_data<float>(), &context_);
hipLaunchKernelGGL(( LabelCrossEntropyGradientKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
N, D, X.data<float>(), label.data<int>(), dY.data<float>(),
kLOG_THRESHOLD(), dX->mutable_data<float>());
return true;
}
namespace {
__global__ void MakeTwoClassKernel(
const int N, const float* Xdata, float* Ydata) {
CUDA_1D_KERNEL_LOOP(i, N) {
Ydata[i * 2] = 1.0 - Xdata[i];
Ydata[i * 2 + 1] = Xdata[i];
}
}
__global__ void MakeTwoClassGradientKernel(
const int N, const float* dYdata, float* dXdata) {
CUDA_1D_KERNEL_LOOP(i, N) {
dXdata[i] = dYdata[i * 2 + 1] - dYdata[i * 2];
}
}
} // namespace
template <>
bool MakeTwoClassOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
auto shape = X.dims();
shape.push_back(2);
CHECK_LT(X.size(), std::numeric_limits<int>::max() / 2);
Y->Resize(shape);
int N = X.size();
hipLaunchKernelGGL(( MakeTwoClassKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
N, X.data<float>(), Y->mutable_data<float>());
return true;
}
template <>
bool MakeTwoClassGradientOp<float, CUDAContext>::RunOnDevice() {
auto& dY = Input(0);
auto* dX = Output(0);
auto shape = dY.dims();
CHECK_GE(shape.size(), 1);
CHECK_EQ(shape.back(), 2);
shape.pop_back();
CHECK_LT(dY.size(), std::numeric_limits<int>::max());
dX->Resize(shape);
int N = dX->size();
hipLaunchKernelGGL(( MakeTwoClassGradientKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
N, dY.data<float>(), dX->mutable_data<float>());
return true;
}
namespace {
REGISTER_CUDA_OPERATOR(LabelCrossEntropy,
LabelCrossEntropyOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(LabelCrossEntropyGradient,
LabelCrossEntropyGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(MakeTwoClass,
MakeTwoClassOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(MakeTwoClassGradient,
MakeTwoClassGradientOp<float, CUDAContext>);
//TODO(surya) Add full GPU/CUDA support for the CrossEntropyOp
REGISTER_CUDA_OPERATOR(CrossEntropy,
GPUFallbackOp<CrossEntropyOp<float, CPUContext>>);
REGISTER_CUDA_OPERATOR(CrossEntropyGradient,
GPUFallbackOp<CrossEntropyGradientOp<float, CPUContext>>);
} // namespace
} // namespace caffe2
| 8dadf16d4b0a52330c066fed8d78d9c4961b3dce.cu | #include <assert.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/cross_entropy_op.h"
#include "caffe2/operators/operator_fallback_gpu.h"
namespace caffe2 {
namespace {
__global__ void LabelCrossEntropyKernel(
const int N, const int D, const float* Xdata, const int* labeldata,
const float log_threshold, float* Ydata) {
CUDA_1D_KERNEL_LOOP(i, N) {
CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D);
Ydata[i] = -logf(max(Xdata[i * D + labeldata[i]], log_threshold));
}
}
__global__ void LabelCrossEntropyGradientKernel(
const int N, const int D, const float* Xdata, const int* labeldata,
const float* dYdata, const float log_threshold, float* dXdata) {
CUDA_1D_KERNEL_LOOP(i, N) {
int idx = i * D + labeldata[i];
dXdata[idx] = - dYdata[i] / max(Xdata[idx], log_threshold);
}
}
} // namespace
template <>
bool LabelCrossEntropyOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& label = Input(1);
auto* Y = Output(0);
DCHECK_EQ(X.ndim(), 2);
int N = X.dim32(0);
int D = X.dim32(1);
DCHECK((label.ndim() == 1) || (label.ndim() == 2 && label.dim32(1) == 1));
DCHECK_EQ(label.dim32(0), N);
Y->Resize(vector<TIndex>(size_t(1), N));
LabelCrossEntropyKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
N, D, X.data<float>(), label.data<int>(), kLOG_THRESHOLD(),
Y->mutable_data<float>());
return true;
}
template <>
bool LabelCrossEntropyGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& label = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
DCHECK_EQ(X.ndim(), 2);
int N = X.dim32(0);
int D = X.dim32(1);
DCHECK((label.ndim() == 1) || (label.ndim() == 2 && label.dim32(1) == 1));
DCHECK_EQ(label.dim32(0), N);
DCHECK_EQ(dY.ndim(), 1);
DCHECK_EQ(dY.dim32(0), N);
dX->ResizeLike(X);
math::Set<float, CUDAContext>(
dX->size(), 0.f, dX->mutable_data<float>(), &context_);
LabelCrossEntropyGradientKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
N, D, X.data<float>(), label.data<int>(), dY.data<float>(),
kLOG_THRESHOLD(), dX->mutable_data<float>());
return true;
}
namespace {
__global__ void MakeTwoClassKernel(
const int N, const float* Xdata, float* Ydata) {
CUDA_1D_KERNEL_LOOP(i, N) {
Ydata[i * 2] = 1.0 - Xdata[i];
Ydata[i * 2 + 1] = Xdata[i];
}
}
__global__ void MakeTwoClassGradientKernel(
const int N, const float* dYdata, float* dXdata) {
CUDA_1D_KERNEL_LOOP(i, N) {
dXdata[i] = dYdata[i * 2 + 1] - dYdata[i * 2];
}
}
} // namespace
// CUDA MakeTwoClass: turns a tensor of probabilities into one with an extra
// trailing dimension of size 2 holding [1 - p, p] per input element.
template <>
bool MakeTwoClassOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
// Output shape is the input shape with a trailing 2 appended.
auto shape = X.dims();
shape.push_back(2);
// Guard: Y has 2 * X.size() elements and is indexed with int below.
CHECK_LT(X.size(), std::numeric_limits<int>::max() / 2);
Y->Resize(shape);
int N = X.size();
MakeTwoClassKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
N, X.data<float>(), Y->mutable_data<float>());
return true;
}
// CUDA gradient of MakeTwoClass: collapses the trailing size-2 dimension of
// dY back to the input shape, one gradient value per original element.
template <>
bool MakeTwoClassGradientOp<float, CUDAContext>::RunOnDevice() {
auto& dY = Input(0);
auto* dX = Output(0);
auto shape = dY.dims();
// dY must end in a dimension of size 2 (added by the forward op).
CHECK_GE(shape.size(), 1);
CHECK_EQ(shape.back(), 2);
shape.pop_back();
// dX is indexed with int below.
CHECK_LT(dY.size(), std::numeric_limits<int>::max());
dX->Resize(shape);
int N = dX->size();
MakeTwoClassGradientKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
N, dY.data<float>(), dX->mutable_data<float>());
return true;
}
// CUDA operator registrations. CrossEntropy(Gradient) has no CUDA kernel
// yet, so it is registered through GPUFallbackOp, which runs the CPU
// implementation with automatic device<->host copies.
namespace {
REGISTER_CUDA_OPERATOR(LabelCrossEntropy,
LabelCrossEntropyOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(LabelCrossEntropyGradient,
LabelCrossEntropyGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(MakeTwoClass,
MakeTwoClassOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(MakeTwoClassGradient,
MakeTwoClassGradientOp<float, CUDAContext>);
//TODO(surya) Add full GPU/CUDA support for the CrossEntropyOp
REGISTER_CUDA_OPERATOR(CrossEntropy,
GPUFallbackOp<CrossEntropyOp<float, CPUContext>>);
REGISTER_CUDA_OPERATOR(CrossEntropyGradient,
GPUFallbackOp<CrossEntropyGradientOp<float, CPUContext>>);
} // namespace
} // namespace caffe2
|
356bf0105aadae32600f756ad1cad477a33d3735.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include <unistd.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <string>
#define GPUJOULE_DIR ""
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 4096
//#define GLOBAL_MEM_ELEMENTS 131072
//#define GLOBAL_MEM_ELEMENTS 196608
// Benchmark configuration, parsed from argv in main() and read by
// parametric_measure_shared() and the kernels.
int num_blocks;
int num_threads_per_block;
int num_iterations;
// Number of active threads per 32-lane warp in the timing kernel.
int divergence;
// Builds the pointer-chase chain used by shared_latency. All work is done
// by a single thread (tid == 0); the host launches this kernel <<<1, 1>>>.
// NOTE(review): the `stride` parameter and `block_id` local are unused here.
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
// Give each warp its own circular chain: slot i points 16 elements
// ahead (mod the warp's region), so dereferences never leave the region.
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 16) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
// Mirror the chain into the data array itself so the timing kernel can
// chase pointers by dereferencing array elements directly.
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
// Timing kernel: each active thread repeatedly chases the pointer chain
// prepared by init_memory and accumulates elapsed clock() ticks into
// duration[tid]. Only lanes with (threadIdx.x % 32) < divergence do the
// timed work, so `divergence` chooses how many threads per warp are active.
// NOTE(review): array_length, stride, block_id and ptr_array are unused.
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k, unsigned long long ** my_end_ptr_array) {
unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
// Each lane starts at its own offset inside its warp's chain region.
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[index1]));
tmp_ptr = (void **)(&(array[index1]));
// One dependent pointer dereference; the macros below unroll
// 3*64 + 2*4 = 200 of them per timed pass (matching the 200.0 divisor
// used by the host when converting ticks to per-access latency).
//#define ONCE tmp_ptr = *(void**)tmp_ptr;
#define ONCE tmp_ptr = (void**)(*tmp_ptr);
#define REPEAT_FOUR_TIMES ONCE ONCE ONCE ONCE
#define REPEAT_SIXTEEN_TIMES REPEAT_FOUR_TIMES REPEAT_FOUR_TIMES REPEAT_FOUR_TIMES REPEAT_FOUR_TIMES
#define REPEAT_SIXTYFOUR_TIMES REPEAT_SIXTEEN_TIMES REPEAT_SIXTEEN_TIMES REPEAT_SIXTEEN_TIMES REPEAT_SIXTEEN_TIMES
if ((threadIdx.x % 32) < divergence) {
// Runs iterations + 1 timed passes (k is inclusive of `iterations`).
for(k = 0; k <= iterations; k++) {
// tmp_ptr = (void**)(*tmp_ptr);
// Redundant re-clear on the first pass (sum_time already starts at 0).
if (k == 0) {
sum_time = 0;
}
start_time = clock();
// ONCE
REPEAT_SIXTYFOUR_TIMES;
REPEAT_SIXTYFOUR_TIMES;
REPEAT_SIXTYFOUR_TIMES;
REPEAT_FOUR_TIMES;
REPEAT_FOUR_TIMES;
end_time = clock();
sum_time += (end_time - start_time);
}
}
// Publish the final pointer so the chase cannot be optimized away.
my_end_ptr_array[tid] = (unsigned long long*)(*tmp_ptr);
duration[tid] = sum_time;
}
// Shared memory array size is N-2. Last two elements are used as dummy variables.
// Allocates host/device buffers, builds a pointer-chase chain with
// init_memory, then times shared_latency while an external power monitor
// samples the GPU. Prints stride, average/min/max per-access latency
// (ticks divided by the 200 unrolled dereferences per pass) and wall time.
// NOTE(review): the `iterations` parameter is unused; the kernel launch
// reads the global num_iterations instead.
void parametric_measure_shared(int N, int iterations, int stride) {
hipProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long ** h_end_ptr_a;
unsigned long long ** d_end_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
hipError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
h_end_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
hipMalloc ((void **) &d_end_ptr_a, sizeof(unsigned long long *) * num_threads_per_block * num_blocks);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 1 is %s\n", hipGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
// NOTE(review): `latency` and `h_end_ptr_a` are copied in before being
// initialized; both device buffers are fully overwritten by the kernel.
hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice);
hipMemcpy((void *)d_end_ptr_a, (void *)h_end_ptr_a, sizeof(unsigned long long *) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 2 is %s\n", hipGetErrorString(error_id));
}
// Single-thread launch: init_memory does all setup from tid 0.
hipLaunchKernelGGL(( init_memory) , dim3(1), dim3(1), 0, 0, d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
/* launch kernel*/
for (int i = 0; i < 1; i++) {
hipEvent_t start, stop;
float time;
// Start the external power sampler before the timed region.
std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/l1_cache/fadd_l1d_0_100_64p_asm_power.txt &";
std::system(cmd.c_str());
std::system("sleep 5");
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipProfilerStart();
hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1);
hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block, d_end_ptr_a);
hipDeviceSynchronize();
hipProfilerStop();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
// Fix: destroy the events created above; they were previously leaked.
hipEventDestroy(start);
hipEventDestroy(stop);
std::system("killall power_monitor");
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 3 is %s\n", hipGetErrorString(error_id));
}
/* copy results from GPU to CPU */
hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost);
hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost);
hipMemcpy((void *)h_end_ptr_a, (void *)d_end_ptr_a, sizeof(unsigned long long *) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost);
hipDeviceSynchronize ();
/* print results: aggregate only threads that actually measured
(latency > 0); idle lanes report 0. */
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
if (latency[i] > 0) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
}
printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * (divergence / 32.0) * num_blocks * 200.0 *num_iterations)), (double)(min_dur/(200.0 * num_iterations)), (double)(max_dur/(200.0 * num_iterations)), time);
}
/* free memory on GPU */
hipFree(d_a);
hipFree(d_ptr_a);
hipFree(d_end_ptr_a);
hipFree(duration);
hipDeviceSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(h_end_ptr_a);
free(latency);
}
// Print the command-line synopsis for this benchmark binary.
void usage() {
  static const char *kUsage =
      "Usage ./binary <num_blocks> <num_threads_per_block> <iterations> <threads active per warp> <stride>\n";
  printf("%s", kUsage);
}
// Entry point: parse <num_blocks> <num_threads_per_block> <iterations>
// <threads active per warp> <stride> into the globals and run one
// latency measurement over GLOBAL_MEM_ELEMENTS words.
int main(int argc, char **argv) {
int N, stride;
// initialize upper bounds here
// int stride_upper_bound = 1;
if(argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
stride = atoi(argv[5]);
// printf("Shared memory latency for varying stride.\n");
// printf("stride (bytes), latency (clocks)\n");
// N = SHARED_MEM_ELEMENTS;
N = GLOBAL_MEM_ELEMENTS;
// N = num_threads_per_block;
// stride_upper_bound = 1;
// for (stride = 1; stride <= stride_upper_bound; stride += 1) {
// NOTE(review): the literal 10 (iterations) is ignored by the callee,
// which launches the kernel with the global num_iterations parsed above.
parametric_measure_shared(N, 10, stride);
// }
return 0;
}
| 356bf0105aadae32600f756ad1cad477a33d3735.cu | #include <stdio.h>
#include <cuda_profiler_api.h>
#include <unistd.h>
#include <curand.h>
#include <curand_kernel.h>
#include <string>
#define GPUJOULE_DIR ""
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 4096
//#define GLOBAL_MEM_ELEMENTS 131072
//#define GLOBAL_MEM_ELEMENTS 196608
// Benchmark configuration, parsed from argv in main() and read by
// parametric_measure_shared() and the kernels.
int num_blocks;
int num_threads_per_block;
int num_iterations;
// Number of active threads per 32-lane warp in the timing kernel.
int divergence;
// Builds the pointer-chase chain used by shared_latency. All work is done
// by a single thread (tid == 0); the host launches this kernel <<<1, 1>>>.
// NOTE(review): the `stride` parameter and `block_id` local are unused here.
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
// Give each warp its own circular chain: slot i points 16 elements
// ahead (mod the warp's region), so dereferences never leave the region.
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 16) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
// Mirror the chain into the data array itself so the timing kernel can
// chase pointers by dereferencing array elements directly.
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
// Timing kernel: each active thread repeatedly chases the pointer chain
// prepared by init_memory and accumulates elapsed clock() ticks into
// duration[tid]. Only lanes with (threadIdx.x % 32) < divergence do the
// timed work, so `divergence` chooses how many threads per warp are active.
// NOTE(review): array_length, stride, block_id and ptr_array are unused.
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k, unsigned long long ** my_end_ptr_array) {
unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
// Each lane starts at its own offset inside its warp's chain region.
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[index1]));
tmp_ptr = (void **)(&(array[index1]));
// One dependent pointer dereference; the macros below unroll
// 3*64 + 2*4 = 200 of them per timed pass (matching the 200.0 divisor
// used by the host when converting ticks to per-access latency).
//#define ONCE tmp_ptr = *(void**)tmp_ptr;
#define ONCE tmp_ptr = (void**)(*tmp_ptr);
#define REPEAT_FOUR_TIMES ONCE ONCE ONCE ONCE
#define REPEAT_SIXTEEN_TIMES REPEAT_FOUR_TIMES REPEAT_FOUR_TIMES REPEAT_FOUR_TIMES REPEAT_FOUR_TIMES
#define REPEAT_SIXTYFOUR_TIMES REPEAT_SIXTEEN_TIMES REPEAT_SIXTEEN_TIMES REPEAT_SIXTEEN_TIMES REPEAT_SIXTEEN_TIMES
if ((threadIdx.x % 32) < divergence) {
// Runs iterations + 1 timed passes (k is inclusive of `iterations`).
for(k = 0; k <= iterations; k++) {
// tmp_ptr = (void**)(*tmp_ptr);
// Redundant re-clear on the first pass (sum_time already starts at 0).
if (k == 0) {
sum_time = 0;
}
start_time = clock();
// ONCE
REPEAT_SIXTYFOUR_TIMES;
REPEAT_SIXTYFOUR_TIMES;
REPEAT_SIXTYFOUR_TIMES;
REPEAT_FOUR_TIMES;
REPEAT_FOUR_TIMES;
end_time = clock();
sum_time += (end_time - start_time);
}
}
// Publish the final pointer so the chase cannot be optimized away.
my_end_ptr_array[tid] = (unsigned long long*)(*tmp_ptr);
duration[tid] = sum_time;
}
// Shared memory array size is N-2. Last two elements are used as dummy variables.
// Allocates host/device buffers, builds a pointer-chase chain with
// init_memory, then times shared_latency while an external power monitor
// samples the GPU. Prints stride, average/min/max per-access latency
// (ticks divided by the 200 unrolled dereferences per pass) and wall time.
// NOTE(review): the `iterations` parameter is unused; the kernel launch
// reads the global num_iterations instead.
void parametric_measure_shared(int N, int iterations, int stride) {
cudaProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long ** h_end_ptr_a;
unsigned long long ** d_end_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
cudaError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
h_end_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
cudaMalloc ((void **) &d_end_ptr_a, sizeof(unsigned long long *) * num_threads_per_block * num_blocks);
// Fix: cudaThreadSynchronize() is deprecated (removed in CUDA 12);
// use cudaDeviceSynchronize() throughout.
cudaDeviceSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 1 is %s\n", cudaGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
// NOTE(review): `latency` and `h_end_ptr_a` are copied in before being
// initialized; both device buffers are fully overwritten by the kernel.
cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice);
cudaMemcpy((void *)d_end_ptr_a, (void *)h_end_ptr_a, sizeof(unsigned long long *) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice);
cudaDeviceSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 2 is %s\n", cudaGetErrorString(error_id));
}
// Single-thread launch: init_memory does all setup from tid 0.
init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
/* launch kernel*/
for (int i = 0; i < 1; i++) {
cudaEvent_t start, stop;
float time;
// Start the external power sampler before the timed region.
std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/l1_cache/fadd_l1d_0_100_64p_asm_power.txt &";
std::system(cmd.c_str());
std::system("sleep 5");
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaProfilerStart();
cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1);
shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block, d_end_ptr_a);
cudaDeviceSynchronize();
cudaProfilerStop();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
// Fix: destroy the events created above; they were previously leaked.
cudaEventDestroy(start);
cudaEventDestroy(stop);
std::system("killall power_monitor");
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 3 is %s\n", cudaGetErrorString(error_id));
}
/* copy results from GPU to CPU */
cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)h_end_ptr_a, (void *)d_end_ptr_a, sizeof(unsigned long long *) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize ();
/* print results: aggregate only threads that actually measured
(latency > 0); idle lanes report 0. */
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
if (latency[i] > 0) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
}
printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * (divergence / 32.0) * num_blocks * 200.0 *num_iterations)), (double)(min_dur/(200.0 * num_iterations)), (double)(max_dur/(200.0 * num_iterations)), time);
}
/* free memory on GPU */
cudaFree(d_a);
cudaFree(d_ptr_a);
cudaFree(d_end_ptr_a);
cudaFree(duration);
cudaDeviceSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(h_end_ptr_a);
free(latency);
}
// Print the command-line synopsis for this benchmark binary.
void usage() {
  static const char *kUsage =
      "Usage ./binary <num_blocks> <num_threads_per_block> <iterations> <threads active per warp> <stride>\n";
  printf("%s", kUsage);
}
// Entry point: parse the five launch parameters into the globals and run a
// single pointer-chase latency measurement over GLOBAL_MEM_ELEMENTS words.
int main(int argc, char **argv) {
  if (argc != 6) {
    usage();
    exit(1);
  }
  // Globals consumed by parametric_measure_shared() and the kernels.
  num_blocks = atoi(argv[1]);
  num_threads_per_block = atoi(argv[2]);
  num_iterations = atoi(argv[3]);
  divergence = atoi(argv[4]);
  const int stride = atoi(argv[5]);
  const int N = GLOBAL_MEM_ELEMENTS;
  // The second argument (10) is accepted but unused by the callee, which
  // launches the kernel with the global num_iterations instead.
  parametric_measure_shared(N, 10, stride);
  return 0;
}
|
7db03023509da62eb944d49b03cf4cf27acf3a57.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Equihash CUDA solver
// Copyright (c) 2016 John Tromp
#define XINTREE
#define UNROLL
#define htole32(x) (x)
#define HAVE_DECL_HTOLE32 1
#include "../cpu_tromp/equi.h"
#include "eqcuda.hpp"
#include "blake2b.cu"
typedef uint16_t u16;
typedef uint64_t u64;
#ifndef RESTBITS
#define RESTBITS 4
#endif
// 2_log of number of buckets
#define BUCKBITS (DIGITBITS-RESTBITS)
#ifndef SAVEMEM
#if RESTBITS == 4
// can't save memory in such small buckets
#define SAVEMEM 1
#elif RESTBITS >= 8
// take advantage of law of large numbers (sum of 2^8 random numbers)
// this reduces (200,9) memory to under 144MB, with negligible discarding
#define SAVEMEM 9/14
#endif
#endif
// number of buckets
static const u32 NBUCKETS = 1 << BUCKBITS;
// bucket mask
static const u32 BUCKMASK = NBUCKETS - 1;
// 2_log of number of slots per bucket
static const u32 SLOTBITS = RESTBITS + 1 + 1;
static const u32 SLOTRANGE = 1 << SLOTBITS;
// number of slots per bucket
static const u32 NSLOTS = SLOTRANGE * SAVEMEM;
// SLOTBITS mask
static const u32 SLOTMASK = SLOTRANGE - 1;
// number of possible values of xhash (rest of n) bits
static const u32 NRESTS = 1 << RESTBITS;
// RESTBITS mask
static const u32 RESTMASK = NRESTS - 1;
// number of blocks of hashes extracted from single 512 bit blake2b output
static const u32 NBLOCKS = (NHASHES + HASHESPERBLAKE - 1) / HASHESPERBLAKE;
// nothing larger found in 100000 runs
static const u32 MAXSOLS = 8;
// tree node identifying its children as two different slots in
// a bucket on previous layer with the same rest bits (x-tra hash)
struct tree {
// Packed fields. With XINTREE defined the layout is
// [bucketid | slotid0 | slotid1 | xhash], where the low RESTBITS hold
// the extra hash bits; without it, [bucketid | slotid0 | slotid1].
u32 bid_s0_s1_x; // manual bitfields
// Leaf constructor: raw hash index plus extra-hash bits.
__device__ tree(const u32 idx, const u32 xh) {
bid_s0_s1_x = idx << RESTBITS | xh;
}
// Leaf constructor without extra-hash bits.
__device__ tree(const u32 idx) {
bid_s0_s1_x = idx;
}
// Interior-node constructor: bucket id plus the two child slot ids
// (and xhash when XINTREE is enabled).
__device__ tree(const u32 bid, const u32 s0, const u32 s1, const u32 xh) {
#ifdef XINTREE
bid_s0_s1_x = ((((bid << SLOTBITS) | s0) << SLOTBITS) | s1) << RESTBITS | xh;
#else
bid_s0_s1_x = (((bid << SLOTBITS) | s0) << SLOTBITS) | s1;
#endif
}
__device__ u32 getindex() const {
#ifdef XINTREE
return bid_s0_s1_x >> RESTBITS;
#else
return bid_s0_s1_x;
#endif
}
__device__ u32 bucketid() const {
#ifdef XINTREE
return bid_s0_s1_x >> (2 * SLOTBITS + RESTBITS);
#else
return bid_s0_s1_x >> (2 * SLOTBITS);
#endif
}
__device__ u32 slotid0() const {
#ifdef XINTREE
// '+' binds tighter than '>>': this shifts by (SLOTBITS + RESTBITS).
return (bid_s0_s1_x >> SLOTBITS + RESTBITS) & SLOTMASK;
#else
return (bid_s0_s1_x >> SLOTBITS) & SLOTMASK;
#endif
}
__device__ u32 slotid1() const {
#ifdef XINTREE
return (bid_s0_s1_x >> RESTBITS) & SLOTMASK;
#else
return bid_s0_s1_x & SLOTMASK;
#endif
}
// Extra hash bits (meaningful only when XINTREE is enabled).
__device__ u32 xhash() const {
return bid_s0_s1_x & RESTMASK;
}
};
// One 32-bit unit of hash, viewable either as a word or as 4 raw bytes.
union hashunit {
u32 word;
uchar bytes[sizeof(u32)];
};
#define WORDS(bits) ((bits + 31) / 32)
#define HASHWORDS0 WORDS(WN - DIGITBITS + RESTBITS)
#define HASHWORDS1 WORDS(WN - 2*DIGITBITS + RESTBITS)
// Slot stored in an even-round bucket: tree node plus remaining hash.
struct slot0 {
tree attr;
hashunit hash[HASHWORDS0];
};
// Slot stored in an odd-round bucket: tree node plus (shorter) remaining hash.
struct slot1 {
tree attr;
hashunit hash[HASHWORDS1];
};
// a bucket is NSLOTS treenodes
typedef slot0 bucket0[NSLOTS];
typedef slot1 bucket1[NSLOTS];
// the N-bit hash consists of K+1 n-bit "digits"
// each of which corresponds to a layer of NBUCKETS buckets
typedef bucket0 digit0[NBUCKETS];
typedef bucket1 digit1[NBUCKETS];
// size (in bytes) of hash in round 0 <= r < WK
// Host-side twin of hashsize(): bytes of hash remaining after round r
// (0 <= r < WK) has consumed its digit.
u32 hhashsize(const u32 r) {
#ifdef XINTREE
  const u32 bits = WN - (r + 1) * DIGITBITS;
#else
  const u32 bits = WN - (r + 1) * DIGITBITS + RESTBITS;
#endif
  return (bits + 7) >> 3;  // round up to whole bytes
}
// size (in bytes) of hash in round 0 <= r < WK
// Device twin of hhashsize(): bytes of hash remaining after round r.
__device__ u32 hashsize(const u32 r) {
#ifdef XINTREE
const u32 hashbits = WN - (r + 1) * DIGITBITS;
#else
const u32 hashbits = WN - (r + 1) * DIGITBITS + RESTBITS;
#endif
// Round up to whole bytes.
return (hashbits + 7) / 8;
}
// Host-side twin of hashwords(): bytes rounded up to whole 32-bit words.
u32 hhashwords(u32 bytes) {
  return (bytes + 3) >> 2;
}
// Device twin of hhashwords(): bytes rounded up to whole 32-bit words.
__device__ u32 hashwords(u32 bytes) {
return (bytes + 3) / 4;
}
// manages hash and tree data
// Pointers to the bucket arrays: trees0 holds even-round layers and
// trees1 odd-round layers; round r uses trees0[r/2] or trees1[r/2].
struct htalloc {
bucket0 *trees0[(WK + 1) / 2];
bucket1 *trees1[WK / 2];
};
typedef u32 bsizes[NBUCKETS];
// Per-solve state shared between host and device: blake2b midstate, the
// bucket/slot hash tables, per-bucket slot counters, and the solution
// buffer. 64-byte aligned; operator new/delete preserve that alignment
// on the host via _mm_malloc.
struct __align__(64) equi {
blake2b_state blake_ctx;
htalloc hta;
// Device array of per-bucket slot counters, double-buffered by round
// parity (nslots[0] for even rounds, nslots[1] for odd rounds).
bsizes *nslots;
// Device buffer of up to MAXSOLS candidate proofs.
proof *sols;
u32 nsols;
u32 nthreads;
equi(const u32 n_threads) {
nthreads = n_threads;
}
void* operator new(size_t i)
{
return _mm_malloc(i, 64);
}
void operator delete(void* p) {
_mm_free(p);
}
// Host: absorb header+nonce into the blake2b state and reset the device
// slot counters and the solution count for a fresh solve.
void setheadernonce(const char *header, const u32 len, const char* nonce, const u32 nlen) {
setheader(&blake_ctx, header, len, nonce, nlen);
checkCudaErrors(hipMemset(nslots, 0, NBUCKETS * sizeof(u32)));
nsols = 0;
}
// Read-and-clear the even-round slot count of bucket bid, clamped to
// NSLOTS (the atomic counter may have overshot the bucket capacity).
__device__ u32 getnslots0(const u32 bid) {
u32 &nslot = nslots[0][bid];
const u32 n = min(nslot, NSLOTS);
nslot = 0;
return n;
}
// Same as getnslots0 but for the odd-round counter bank.
__device__ u32 getnslots1(const u32 bid) {
u32 &nslot = nslots[1][bid];
const u32 n = min(nslot, NSLOTS);
nslot = 0;
return n;
}
// Swap the two size-element halves of indices[] if needed so the half
// with the smaller leading index comes first (canonical ordering).
__device__ void orderindices(u32 *indices, u32 size) {
if (indices[0] > indices[size]) {
for (u32 i = 0; i < size; i++) {
const u32 tmp = indices[i];
indices[i] = indices[size + i];
indices[size + i] = tmp;
}
}
}
// listindices1..9 recursively expand a round-r tree node into its 2^r
// ordered leaf indices, alternating between trees0/trees1 layers.
__device__ void listindices1(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[0][t.bucketid()];
const u32 size = 1 << 0;
indices[0] = buck[t.slotid0()].attr.getindex();
indices[size] = buck[t.slotid1()].attr.getindex();
orderindices(indices, size);
}
__device__ void listindices2(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[0][t.bucketid()];
const u32 size = 1 << 1;
listindices1(buck[t.slotid0()].attr, indices);
listindices1(buck[t.slotid1()].attr, indices + size);
orderindices(indices, size);
}
__device__ void listindices3(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[1][t.bucketid()];
const u32 size = 1 << 2;
listindices2(buck[t.slotid0()].attr, indices);
listindices2(buck[t.slotid1()].attr, indices + size);
orderindices(indices, size);
}
__device__ void listindices4(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[1][t.bucketid()];
const u32 size = 1 << 3;
listindices3(buck[t.slotid0()].attr, indices);
listindices3(buck[t.slotid1()].attr, indices + size);
orderindices(indices, size);
}
__device__ void listindices5(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[2][t.bucketid()];
const u32 size = 1 << 4;
listindices4(buck[t.slotid0()].attr, indices);
listindices4(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices6(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[2][t.bucketid()];
const u32 size = 1 << 5;
listindices5(buck[t.slotid0()].attr, indices);
listindices5(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices7(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[3][t.bucketid()];
const u32 size = 1 << 6;
listindices6(buck[t.slotid0()].attr, indices);
listindices6(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices8(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[3][t.bucketid()];
const u32 size = 1 << 7;
listindices7(buck[t.slotid0()].attr, indices);
listindices7(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices9(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[4][t.bucketid()];
const u32 size = 1 << 8;
listindices8(buck[t.slotid0()].attr, indices);
listindices8(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
// Expand a final-round tree into a full proof and record it unless it
// is a probable duplicate. nsols may exceed MAXSOLS; extra solutions
// are counted but not stored.
__device__ void candidate(const tree t) {
proof prf;
#if WK==9
listindices9(t, prf);
#elif WK==5
listindices5(t, prf);
#else
#error not implemented
#endif
if (probdupe(prf))
return;
u32 soli = atomicAdd(&nsols, 1);
if (soli < MAXSOLS)
#if WK==9
listindices9(t, sols[soli]);
#elif WK==5
listindices5(t, sols[soli]);
#else
#error not implemented
#endif
}
// Host: optional bucket-fill histogram / sparkline diagnostics after
// round r; compiled out unless HIST, SPARK or LOGSPARK is defined.
void showbsizes(u32 r) {
#if defined(HIST) || defined(SPARK) || defined(LOGSPARK)
u32 ns[NBUCKETS];
checkCudaErrors(hipMemcpy(ns, nslots[r & 1], NBUCKETS * sizeof(u32), hipMemcpyDeviceToHost));
u32 binsizes[65];
memset(binsizes, 0, 65 * sizeof(u32));
for (u32 bucketid = 0; bucketid < NBUCKETS; bucketid++) {
u32 bsize = min(ns[bucketid], NSLOTS) >> (SLOTBITS - 6);
binsizes[bsize]++;
}
for (u32 i = 0; i < 65; i++) {
#ifdef HIST
printf(" %d:%d", i, binsizes[i]);
#else
#ifdef SPARK
u32 sparks = binsizes[i] / SPARKSCALE;
#else
u32 sparks = 0;
for (u32 bs = binsizes[i]; bs; bs >>= 1) sparks++;
sparks = sparks * 7 / SPARKSCALE;
#endif
// Unicode block characters U+2581.. selected via octal escapes.
printf("\342\226%c", '\201' + sparks);
#endif
}
printf("\n");
#endif
}
// proper dupe test is a little costly on GPU, so allow false negatives
// Hashes each index into a small table keyed by its low bits; a repeat
// of (bin, high bits) flags a probable duplicate.
__device__ bool probdupe(u32 *prf) {
unsigned short susp[PROOFSIZE];
memset(susp, 0xffff, PROOFSIZE * sizeof(unsigned short));
for (u32 i=0; i<PROOFSIZE; i++) {
u32 bin = prf[i] & (PROOFSIZE-1);
unsigned short msb = prf[i]>>WK;
if (msb == susp[bin])
return true;
susp[bin] = msb;
}
return false;
}
// Per-round view of the hash tables: how many 32-bit hash units the
// previous and next layers carry, and the byte offsets at which the
// meaningful hash bytes start within a slot.
struct htlayout {
htalloc hta;
u32 prevhashunits;
u32 nexthashunits;
u32 dunits;
u32 prevbo;
u32 nextbo;
__device__ htlayout(equi *eq, u32 r) : hta(eq->hta), prevhashunits(0), dunits(0) {
u32 nexthashbytes = hashsize(r);
nexthashunits = hashwords(nexthashbytes);
prevbo = 0;
nextbo = nexthashunits * sizeof(hashunit) - nexthashbytes; // 0-3
if (r) {
u32 prevhashbytes = hashsize(r-1);
prevhashunits = hashwords(prevhashbytes);
prevbo = prevhashunits * sizeof(hashunit) - prevhashbytes; // 0-3
dunits = prevhashunits - nexthashunits;
}
}
// Rest-bits value of an even-round slot, used as the collision key.
__device__ u32 getxhash0(const slot0* pslot) const {
#ifdef XINTREE
return pslot->attr.xhash();
#elif WN == 200 && RESTBITS == 4
return pslot->hash->bytes[prevbo] >> 4;
#elif WN == 200 && RESTBITS == 8
return (pslot->hash->bytes[prevbo] & 0xf) << 4 | pslot->hash->bytes[prevbo + 1] >> 4;
#elif WN == 144 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif WN == 200 && RESTBITS == 6
return (pslot->hash->bytes[prevbo] & 0x3) << 4 | pslot->hash->bytes[prevbo+1] >> 4;
#else
#error non implemented
#endif
}
// Rest-bits value of an odd-round slot.
__device__ u32 getxhash1(const slot1* pslot) const {
#ifdef XINTREE
return pslot->attr.xhash();
#elif WN == 200 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif WN == 200 && RESTBITS == 8
return pslot->hash->bytes[prevbo];
#elif WN == 144 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif WN == 200 && RESTBITS == 6
return pslot->hash->bytes[prevbo] & 0x3f;
#else
#error non implemented
#endif
}
// True when the last hash units of two slots match (collision test).
__device__ bool equal(const hashunit *hash0, const hashunit *hash1) const {
return hash0[prevhashunits - 1].word == hash1[prevhashunits - 1].word;
}
};
// Per-bucket collision tracker: chains together slots sharing the same
// rest-bits value so colliding pairs can be enumerated. XBITMAP uses a
// 64-bit mask per rest value; the default uses linked slot chains.
struct collisiondata {
#ifdef XBITMAP
#if NSLOTS > 64
#error cant use XBITMAP with more than 64 slots
#endif
u64 xhashmap[NRESTS];
u64 xmap;
#else
#if RESTBITS <= 6
typedef uchar xslot;
#else
typedef u16 xslot;
#endif
static const xslot xnil = ~0;
// Head of the slot chain for each rest-bits value.
xslot xhashslots[NRESTS];
// Next-pointer of each slot's chain entry.
xslot nextxhashslot[NSLOTS];
xslot nextslot;
#endif
u32 s0;
__device__ void clear() {
#ifdef XBITMAP
memset(xhashmap, 0, NRESTS * sizeof(u64));
#else
memset(xhashslots, xnil, NRESTS * sizeof(xslot));
memset(nextxhashslot, xnil, NSLOTS * sizeof(xslot));
#endif
}
// Record slot s1 under rest value xh; always succeeds.
__device__ bool addslot(u32 s1, u32 xh) {
#ifdef XBITMAP
xmap = xhashmap[xh];
xhashmap[xh] |= (u64)1 << s1;
s0 = ~0;
return true;
#else
nextslot = xhashslots[xh];
nextxhashslot[s1] = nextslot;
xhashslots[xh] = s1;
return true;
#endif
}
// True while earlier slots with the same rest value remain.
__device__ bool nextcollision() const {
#ifdef XBITMAP
return xmap != 0;
#else
return nextslot != xnil;
#endif
}
// Advance to and return the next colliding slot id.
__device__ u32 slot() {
#ifdef XBITMAP
const u32 ffs = __ffsll(xmap);
s0 += ffs; xmap >>= ffs;
#else
nextslot = nextxhashslot[s0 = nextslot];
#endif
return s0;
}
};
};
// Round 0 ("digit H"): each thread blake2b-hashes its share of the NBLOCKS
// input blocks, splits every 512-bit output into HASHESPERBLAKE hashes, and
// scatters each hash into a bucket keyed by its leading BUCKBITS bits.
// Bucket overflow is handled by simply dropping the hash (slot >= NSLOTS).
__global__ void digitH(equi *eq) {
  uchar hash[HASHOUT];
  blake2b_state state;
  equi::htlayout htl(eq, 0);
  const u32 hashbytes = hashsize(0); // always 23 ?
  const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
  for (u32 block = id; block < NBLOCKS; block += eq->nthreads) {
    state = eq->blake_ctx;  // per-header midstate computed on the host
    blake2b_gpu_hash(&state, block, hash, HASHOUT);
    for (u32 i = 0; i<HASHESPERBLAKE; i++) {
      const uchar *ph = hash + i * WN / 8;
// Extract bucket id (and, for XINTREE, the rest bits) from the hash prefix;
// the bit carving depends on the (BUCKBITS, RESTBITS) compile-time split.
#if BUCKBITS == 16 && RESTBITS == 4
      const u32 bucketid = ((u32)ph[0] << 8) | ph[1];
#ifdef XINTREE
      const u32 xhash = ph[2] >> 4;
#endif
#elif BUCKBITS == 14 && RESTBITS == 6
      const u32 bucketid = ((u32)ph[0] << 6) | ph[1] >> 2;
#elif BUCKBITS == 12 && RESTBITS == 8
      const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
#elif BUCKBITS == 20 && RESTBITS == 4
      const u32 bucketid = ((((u32)ph[0] << 8) | ph[1]) << 4) | ph[2] >> 4;
#ifdef XINTREE
      const u32 xhash = ph[2] & 0xf;
#endif
#elif BUCKBITS == 12 && RESTBITS == 4
      const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
      const u32 xhash = ph[1] & 0xf;
#else
#error not implemented
#endif
      const u32 slot = atomicAdd(&eq->nslots[0][bucketid], 1);
      if (slot >= NSLOTS)
        continue;  // bucket full: discard this hash
      slot0 &s = eq->hta.trees0[0][bucketid][slot];
#ifdef XINTREE
      s.attr = tree(block*HASHESPERBLAKE+i, xhash);
#else
      s.attr = tree(block*HASHESPERBLAKE+i);
#endif
      // Store the tail of the hash right-aligned inside the slot's hashunits.
      memcpy(s.hash->bytes+htl.nextbo, ph+WN/8-hashbytes, hashbytes);
    }
  }
}
// Generic odd-round kernel (r = 1,3,5,...): reads buckets from layer
// trees0[(r-1)/2], enumerates pairs of slots whose rest bits collide, and
// writes the xor of their remaining hashes into trees1[r/2], bucketed by the
// next BUCKBITS of the xor. Used when the UNROLL specializations don't apply.
__global__ void digitO(equi *eq, const u32 r) {
  equi::htlayout htl(eq, r);
  equi::collisiondata cd;
  const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
  for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
    cd.clear();
    slot0 *buck = htl.hta.trees0[(r - 1) / 2][bucketid];
    u32 bsize = eq->getnslots0(bucketid);  // also resets the counter for reuse
    for (u32 s1 = 0; s1 < bsize; s1++) {
      const slot0 *pslot1 = buck + s1;
      if (!cd.addslot(s1, htl.getxhash0(pslot1)))
        continue;
      for (; cd.nextcollision();) {
        const u32 s0 = cd.slot();
        const slot0 *pslot0 = buck + s0;
        if (htl.equal(pslot0->hash, pslot1->hash))
          continue;  // probable duplicate pair: whole tails match
        u32 xorbucketid;
        u32 xhash;
        const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
// Carve the destination bucket id (and rest bits) out of the xor'ed bytes;
// layout depends on compile-time (WN, BUCKBITS, RESTBITS).
#if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
        xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8)
          | (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1])) << 4
          | (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
        xhash &= 0xf;
#elif WN == 144 && BUCKBITS == 20 && RESTBITS == 4
        xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8)
          | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4)
          | (xhash = bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4;
        xhash &= 0xf;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
        xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4)
          | (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
        xhash &= 0xf;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
        xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) & 0xf) << 8)
          | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 2
          | (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 6;
#else
#error not implemented
#endif
        const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
        if (xorslot >= NSLOTS)
          continue;  // destination bucket full: drop the pair
        slot1 &xs = htl.hta.trees1[r/2][xorbucketid][xorslot];
#ifdef XINTREE
        xs.attr = tree(bucketid, s0, s1, xhash);
#else
        xs.attr = tree(bucketid, s0, s1);
#endif
        // Copy only the surviving hashunits; the first dunits are cancelled.
        for (u32 i=htl.dunits; i < htl.prevhashunits; i++)
          xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
      }
    }
  }
}
// Generic even-round kernel (r = 2,4,6,...): mirror of digitO but reads from
// trees1[(r-1)/2] and writes collided xors into trees0[r/2]. Used when the
// UNROLL specializations don't apply.
__global__ void digitE(equi *eq, const u32 r) {
  equi::htlayout htl(eq, r);
  equi::collisiondata cd;
  const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
  for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
    cd.clear();
    slot1 *buck = htl.hta.trees1[(r - 1) / 2][bucketid];
    u32 bsize = eq->getnslots1(bucketid);  // also resets the counter for reuse
    for (u32 s1 = 0; s1 < bsize; s1++) {
      const slot1 *pslot1 = buck + s1;
      if (!cd.addslot(s1, htl.getxhash1(pslot1)))
        continue;
      for (; cd.nextcollision();) {
        const u32 s0 = cd.slot();
        const slot1 *pslot0 = buck + s0;
        if (htl.equal(pslot0->hash, pslot1->hash))
          continue;  // probable duplicate pair
        u32 xorbucketid;
        u32 xhash;
        const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
// Bucket/rest extraction for the even-round bit alignment.
#if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
        xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8)
          | (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]);
        xhash = (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
#elif WN == 144 && BUCKBITS == 20 && RESTBITS == 4
        xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8)
          | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4)
          | (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
        xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4)
          | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
        xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 6)
          | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 2;
#else
#error not implemented
#endif
        const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
        if (xorslot >= NSLOTS)
          continue;  // destination bucket full: drop the pair
        slot0 &xs = htl.hta.trees0[r / 2][xorbucketid][xorslot];
#ifdef XINTREE
        xs.attr = tree(bucketid, s0, s1, xhash);
#else
        xs.attr = tree(bucketid, s0, s1);
#endif
        // Copy only the surviving hashunits; the first dunits are cancelled.
        for (u32 i = htl.dunits; i < htl.prevhashunits; i++)
          xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
      }
    }
  }
}
#ifdef UNROLL
// Unrolled round-1 kernel (compiled for WN=200/WK=9 with XINTREE): same pair
// enumeration as digitO, but the bucket id and rest bits are extracted from
// the xor of the first hash word via __byte_perm, and the hashunit copies are
// fully unrolled for this round's fixed layout (6 -> 5 units).
__global__ void digit_1(equi *eq) {
  equi::htlayout htl(eq, 1);
  equi::collisiondata cd;
  const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
  for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
    cd.clear();
    slot0 *buck = htl.hta.trees0[0][bucketid];
    u32 bsize = eq->getnslots0(bucketid);
    for (u32 s1 = 0; s1 < bsize; s1++) {
      const slot0 *pslot1 = buck + s1;
      if (!cd.addslot(s1, htl.getxhash0(pslot1)))
        continue;
      for (; cd.nextcollision();) {
        const u32 s0 = cd.slot();
        const slot0 *pslot0 = buck + s0;
        if (htl.equal(pslot0->hash, pslot1->hash))
          continue;  // probable duplicate pair
        const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
        const u32 bexor = __byte_perm(xor0, 0, 0x0123);  // byte-swap xor0
        const u32 xorbucketid = bexor >> 4 & BUCKMASK;
        const u32 xhash = bexor & 0xf;
        const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
        if (xorslot >= NSLOTS)
          continue;  // destination bucket full
        slot1 &xs = htl.hta.trees1[0][xorbucketid][xorslot];
        xs.attr = tree(bucketid, s0, s1, xhash);
        xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
        xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
        xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
        xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
        xs.hash[4].word = pslot0->hash[5].word ^ pslot1->hash[5].word;
      }
    }
  }
}
// Unrolled round-2 kernel (WN=200/WK=9, XINTREE): trees1[0] -> trees0[1].
// Note xor0 itself still carries useful bits this round, so it is stored in
// xs.hash[0] (5 units kept).
__global__ void digit2(equi *eq) {
  equi::htlayout htl(eq, 2);
  equi::collisiondata cd;
  const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
  for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
    cd.clear();
    slot1 *buck = htl.hta.trees1[0][bucketid];
    u32 bsize = eq->getnslots1(bucketid);
    for (u32 s1 = 0; s1 < bsize; s1++) {
      const slot1 *pslot1 = buck + s1;
      if (!cd.addslot(s1, htl.getxhash1(pslot1)))
        continue;
      for (; cd.nextcollision();) {
        const u32 s0 = cd.slot();
        const slot1 *pslot0 = buck + s0;
        if (htl.equal(pslot0->hash, pslot1->hash))
          continue;  // probable duplicate pair
        const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
        const u32 bexor = __byte_perm(xor0, 0, 0x0123);  // byte-swap xor0
        const u32 xorbucketid = bexor >> 16;
        const u32 xhash = bexor >> 12 & 0xf;
        const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
        if (xorslot >= NSLOTS)
          continue;  // destination bucket full
        slot0 &xs = htl.hta.trees0[1][xorbucketid][xorslot];
        xs.attr = tree(bucketid, s0, s1, xhash);
        xs.hash[0].word = xor0;
        xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
        xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
        xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
        xs.hash[4].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
      }
    }
  }
}
// Unrolled round-3 kernel (WN=200/WK=9, XINTREE): trees0[1] -> trees1[1].
// Bucket bits straddle the first two xor words, gathered with __byte_perm
// over (xor0, xor1); 4 units kept starting from xor1.
__global__ void digit3(equi *eq) {
  equi::htlayout htl(eq, 3);
  equi::collisiondata cd;
  const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
  for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
    cd.clear();
    slot0 *buck = htl.hta.trees0[1][bucketid];
    u32 bsize = eq->getnslots0(bucketid);
    for (u32 s1 = 0; s1 < bsize; s1++) {
      const slot0 *pslot1 = buck + s1;
      if (!cd.addslot(s1, htl.getxhash0(pslot1)))
        continue;
      for (; cd.nextcollision();) {
        const u32 s0 = cd.slot();
        const slot0 *pslot0 = buck + s0;
        if (htl.equal(pslot0->hash, pslot1->hash))
          continue;  // probable duplicate pair
        const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
        const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
        const u32 bexor = __byte_perm(xor0, xor1, 0x1234);
        const u32 xorbucketid = bexor >> 4 & BUCKMASK;
        const u32 xhash = bexor & 0xf;
        const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
        if (xorslot >= NSLOTS)
          continue;  // destination bucket full
        slot1 &xs = htl.hta.trees1[1][xorbucketid][xorslot];
        xs.attr = tree(bucketid, s0, s1, xhash);
        xs.hash[0].word = xor1;
        xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
        xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
        xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
      }
    }
  }
}
// Unrolled round-4 kernel (WN=200/WK=9, XINTREE): trees1[1] -> trees0[2].
// xor0 still carries live bits and is kept as xs.hash[0] (4 units kept).
__global__ void digit4(equi *eq) {
  equi::htlayout htl(eq, 4);
  equi::collisiondata cd;
  const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
  for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
    cd.clear();
    slot1 *buck = htl.hta.trees1[1][bucketid];
    u32 bsize = eq->getnslots1(bucketid);
    for (u32 s1 = 0; s1 < bsize; s1++) {
      const slot1 *pslot1 = buck + s1;
      if (!cd.addslot(s1, htl.getxhash1(pslot1)))
        continue;
      for (; cd.nextcollision();) {
        const u32 s0 = cd.slot();
        const slot1 *pslot0 = buck + s0;
        if (htl.equal(pslot0->hash, pslot1->hash))
          continue;  // probable duplicate pair
        const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
        const u32 bexor = __byte_perm(xor0, 0, 0x4123);
        const u32 xorbucketid = bexor >> 8;
        const u32 xhash = bexor >> 4 & 0xf;
        const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
        if (xorslot >= NSLOTS)
          continue;  // destination bucket full
        slot0 &xs = htl.hta.trees0[2][xorbucketid][xorslot];
        xs.attr = tree(bucketid, s0, s1, xhash);
        xs.hash[0].word = xor0;
        xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
        xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
        xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
      }
    }
  }
}
// Unrolled round-5 kernel (WN=200/WK=9, XINTREE): trees0[2] -> trees1[2].
// Bucket/rest bits gathered across (xor0, xor1); 3 units kept from xor1.
__global__ void digit5(equi *eq) {
  equi::htlayout htl(eq, 5);
  equi::collisiondata cd;
  const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
  for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
    cd.clear();
    slot0 *buck = htl.hta.trees0[2][bucketid];
    u32 bsize = eq->getnslots0(bucketid);
    for (u32 s1 = 0; s1 < bsize; s1++) {
      const slot0 *pslot1 = buck + s1;
      if (!cd.addslot(s1, htl.getxhash0(pslot1)))
        continue;
      for (; cd.nextcollision();) {
        const u32 s0 = cd.slot();
        const slot0 *pslot0 = buck + s0;
        if (htl.equal(pslot0->hash, pslot1->hash))
          continue;  // probable duplicate pair
        const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
        const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
        const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
        const u32 xorbucketid = bexor >> 4 & BUCKMASK;
        const u32 xhash = bexor & 0xf;
        const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
        if (xorslot >= NSLOTS)
          continue;  // destination bucket full
        slot1 &xs = htl.hta.trees1[2][xorbucketid][xorslot];
        xs.attr = tree(bucketid, s0, s1, xhash);
        xs.hash[0].word = xor1;
        xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
        xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
      }
    }
  }
}
// Unrolled round-6 kernel (WN=200/WK=9, XINTREE): trees1[2] -> trees0[3].
// 2 units kept from xor1.
__global__ void digit6(equi *eq) {
  equi::htlayout htl(eq, 6);
  equi::collisiondata cd;
  const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
  for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
    cd.clear();
    slot1 *buck = htl.hta.trees1[2][bucketid];
    u32 bsize = eq->getnslots1(bucketid);
    for (u32 s1 = 0; s1 < bsize; s1++) {
      const slot1 *pslot1 = buck + s1;
      if (!cd.addslot(s1, htl.getxhash1(pslot1)))
        continue;
      for (; cd.nextcollision();) {
        const u32 s0 = cd.slot();
        const slot1 *pslot0 = buck + s0;
        if (htl.equal(pslot0->hash, pslot1->hash))
          continue;  // probable duplicate pair
        const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
        const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
        const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
        const u32 xorbucketid = bexor >> 16;
        const u32 xhash = bexor >> 12 & 0xf;
        const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
        if (xorslot >= NSLOTS)
          continue;  // destination bucket full
        slot0 &xs = htl.hta.trees0[3][xorbucketid][xorslot];
        xs.attr = tree(bucketid, s0, s1, xhash);
        xs.hash[0].word = xor1;
        xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
      }
    }
  }
}
// Unrolled round-7 kernel (WN=200/WK=9, XINTREE): trees0[3] -> trees1[3].
// Bucket/rest bits come from xor0 alone; 2 units kept starting at xor0.
__global__ void digit7(equi *eq) {
  equi::htlayout htl(eq, 7);
  equi::collisiondata cd;
  const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
  for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
    cd.clear();
    slot0 *buck = htl.hta.trees0[3][bucketid];
    u32 bsize = eq->getnslots0(bucketid);
    for (u32 s1 = 0; s1 < bsize; s1++) {
      const slot0 *pslot1 = buck + s1;
      if (!cd.addslot(s1, htl.getxhash0(pslot1)))
        continue;
      for (; cd.nextcollision();) {
        const u32 s0 = cd.slot();
        const slot0 *pslot0 = buck + s0;
        if (htl.equal(pslot0->hash, pslot1->hash))
          continue;  // probable duplicate pair
        const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
        const u32 bexor = __byte_perm(xor0, 0, 0x4012);
        const u32 xorbucketid = bexor >> 4 & BUCKMASK;
        const u32 xhash = bexor & 0xf;
        const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
        if (xorslot >= NSLOTS)
          continue;  // destination bucket full
        slot1 &xs = htl.hta.trees1[3][xorbucketid][xorslot];
        xs.attr = tree(bucketid, s0, s1, xhash);
        xs.hash[0].word = xor0;
        xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
      }
    }
  }
}
// Unrolled round-8 kernel (WN=200/WK=9, XINTREE): trees1[3] -> trees0[4].
// Final xor round before digitK; only one hashunit (xor1) survives.
__global__ void digit8(equi *eq) {
  equi::htlayout htl(eq, 8);
  equi::collisiondata cd;
  const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
  for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
    cd.clear();
    slot1 *buck = htl.hta.trees1[3][bucketid];
    u32 bsize = eq->getnslots1(bucketid);
    for (u32 s1 = 0; s1 < bsize; s1++) {
      const slot1 *pslot1 = buck + s1;
      if (!cd.addslot(s1, htl.getxhash1(pslot1)))
        continue;
      for (; cd.nextcollision();) {
        const u32 s0 = cd.slot();
        const slot1 *pslot0 = buck + s0;
        if (htl.equal(pslot0->hash, pslot1->hash))
          continue;  // probable duplicate pair
        const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
        const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
        const u32 bexor = __byte_perm(xor0, xor1, 0x3456);
        const u32 xorbucketid = bexor >> 16;
        const u32 xhash = bexor >> 12 & 0xf;
        const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
        if (xorslot >= NSLOTS)
          continue;  // destination bucket full
        slot0 &xs = htl.hta.trees0[4][xorbucketid][xorslot];
        xs.attr = tree(bucketid, s0, s1, xhash);
        xs.hash[0].word = xor1;
      }
    }
  }
}
#endif
// Final round: scans the last tree0 layer for slot pairs whose ENTIRE
// remaining hash matches (htl.equal on the last unit); each such pair is a
// candidate solution and is expanded/recorded via equi::candidate.
__global__ void digitK(equi *eq) {
  equi::collisiondata cd;
  equi::htlayout htl(eq, WK);
  const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
  for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
    cd.clear();
    slot0 *buck = htl.hta.trees0[(WK - 1) / 2][bucketid];
    u32 bsize = eq->getnslots0(bucketid); // assume WK odd
    for (u32 s1 = 0; s1 < bsize; s1++) {
      const slot0 *pslot1 = buck + s1;
      if (!cd.addslot(s1, htl.getxhash0(pslot1))) // assume WK odd
        continue;
      for (; cd.nextcollision();) {
        const u32 s0 = cd.slot();
        const slot0 *pslot0 = buck + s0;
        if (htl.equal(pslot0->hash, pslot1->hash)) {
#ifdef XINTREE
          eq->candidate(tree(bucketid, s0, s1, 0));
#else
          eq->candidate(tree(bucketid, s0, s1));
#endif
        }
      }
    }
  }
}
// Host-side solver context: selects device `id`, allocates the two device
// hash-table heaps, the per-bucket slot counters, the solution buffer, and a
// device copy of the equi descriptor. tpb*blocks is the total device thread
// count the kernels stride over.
eq_cuda_context::eq_cuda_context(int tpb, int blocks, int id)
: threadsperblock(tpb), totalblocks(blocks), device_id(id)
{
  eq = new equi(threadsperblock * totalblocks);
  // Over-allocate by one page and round the pointer up to a 4096-byte
  // boundary. Use size_t for the pointer<->integer round trip so the
  // arithmetic is well defined regardless of pointer width (the previous
  // `long long` cast assumed matching widths).
  sol_memory = malloc(sizeof(proof) * MAXSOLS + 4096);
  solutions = (proof*)(((size_t)sol_memory + 4095) & ~(size_t)4095);
  checkCudaErrors(hipSetDevice(device_id));
  checkCudaErrors(hipDeviceReset());
  checkCudaErrors(hipSetDeviceFlags(hipDeviceScheduleBlockingSync));
  checkCudaErrors(hipDeviceSetCacheConfig(hipFuncCachePreferL1));
  checkCudaErrors(hipMalloc((void**)&heap0, sizeof(digit0)));
  checkCudaErrors(hipMalloc((void**)&heap1, sizeof(digit1)));
  // Even rounds live in heap0 (bucket0 layers), odd rounds in heap1.
  for (u32 r = 0; r < WK; r++)
    if ((r & 1) == 0)
      eq->hta.trees0[r / 2] = (bucket0 *)(heap0 + r / 2);
    else
      eq->hta.trees1[r / 2] = (bucket1 *)(heap1 + r / 2);
  // Two rows of per-bucket counters: current-round and next-round layers.
  checkCudaErrors(hipMalloc((void**)&eq->nslots, 2 * NBUCKETS * sizeof(u32)));
  checkCudaErrors(hipMalloc((void**)&eq->sols, MAXSOLS * sizeof(proof)));
  checkCudaErrors(hipMalloc((void**)&device_eq, sizeof(equi)));
}
// Releases the context. Device allocations are reclaimed wholesale by
// hipDeviceReset() rather than freed individually (the explicit frees below
// were deliberately disabled); only host-side memory is freed explicitly.
eq_cuda_context::~eq_cuda_context()
{
  /*checkCudaErrors(hipFree(eq->nslots));
  checkCudaErrors(hipFree(eq->sols));
  checkCudaErrors(hipFree(eq->hta.trees0[0]));
  checkCudaErrors(hipFree(eq->hta.trees1[0]));*/
  checkCudaErrors(hipSetDevice(device_id));
  checkCudaErrors(hipDeviceReset());
  free(sol_memory);
  delete eq;
}
// Runs one full Equihash solve for (header, nonce): seeds blake2b state,
// launches round 0, the WK-1 intermediate rounds (specialized kernels when
// UNROLL applies, generic digitO/digitE otherwise), then digitK, copies the
// solutions back and reports each via solutionf. cancelf is polled between
// launches to allow early abort; hashdonef fires once per completed solve.
// NOTE(review): kernel launches are not followed by an error query; a launch
// failure would surface only at the next blocking hipMemcpy — TODO confirm
// this is acceptable here.
void eq_cuda_context::solve(const char *tequihash_header,
  unsigned int tequihash_header_len,
  const char* nonce,
  unsigned int nonce_len,
  std::function<bool()> cancelf,
  std::function<void(const std::vector<uint32_t>&, size_t, const unsigned char*)> solutionf,
  std::function<void(void)> hashdonef)
{
  checkCudaErrors(hipSetDevice(device_id));
  eq->setheadernonce(tequihash_header, tequihash_header_len, nonce, nonce_len);
  checkCudaErrors(hipMemcpy(device_eq, eq, sizeof(equi), hipMemcpyHostToDevice));
  hipLaunchKernelGGL(( digitH), dim3(totalblocks), dim3(threadsperblock), 0, 0, device_eq);
  if (cancelf()) return;
#if BUCKBITS == 16 && RESTBITS == 4 && defined XINTREE && defined(UNROLL)
  // Fully unrolled pipeline for the WN=200/WK=9 XINTREE configuration.
  hipLaunchKernelGGL(( digit_1), dim3(totalblocks), dim3(threadsperblock), 0, 0, device_eq);
  if (cancelf()) return;
  hipLaunchKernelGGL(( digit2), dim3(totalblocks), dim3(threadsperblock), 0, 0, device_eq);
  if (cancelf()) return;
  hipLaunchKernelGGL(( digit3), dim3(totalblocks), dim3(threadsperblock), 0, 0, device_eq);
  if (cancelf()) return;
  hipLaunchKernelGGL(( digit4), dim3(totalblocks), dim3(threadsperblock), 0, 0, device_eq);
  if (cancelf()) return;
  hipLaunchKernelGGL(( digit5), dim3(totalblocks), dim3(threadsperblock), 0, 0, device_eq);
  if (cancelf()) return;
  hipLaunchKernelGGL(( digit6), dim3(totalblocks), dim3(threadsperblock), 0, 0, device_eq);
  if (cancelf()) return;
  hipLaunchKernelGGL(( digit7), dim3(totalblocks), dim3(threadsperblock), 0, 0, device_eq);
  if (cancelf()) return;
  hipLaunchKernelGGL(( digit8), dim3(totalblocks), dim3(threadsperblock), 0, 0, device_eq);
#else
  // Generic path: alternate odd/even round kernels.
  for (u32 r = 1; r < WK; r++) {
    r & 1 ?hipLaunchKernelGGL(( digitO), dim3(totalblocks), dim3(threadsperblock), 0, 0, device_eq, r)
    :hipLaunchKernelGGL(( digitE), dim3(totalblocks), dim3(threadsperblock), 0, 0, device_eq, r);
  }
#endif
  if (cancelf())
    return;
  hipLaunchKernelGGL(( digitK), dim3(totalblocks), dim3(threadsperblock), 0, 0, device_eq);
  // Blocking copies double as synchronization with the kernel stream.
  checkCudaErrors(hipMemcpy(eq, device_eq, sizeof(equi), hipMemcpyDeviceToHost));
  checkCudaErrors(hipMemcpy(solutions, eq->sols, MAXSOLS * sizeof(proof), hipMemcpyDeviceToHost));
  for (unsigned s = 0; (s < eq->nsols) && (s < MAXSOLS); s++)
  {
    std::vector<uint32_t> index_vector(PROOFSIZE);
    for (u32 i = 0; i < PROOFSIZE; i++) {
      index_vector[i] = solutions[s][i];
    }
    solutionf(index_vector, DIGITBITS, nullptr);
    if (cancelf())
      break;
  }
  hashdonef();
} | 7db03023509da62eb944d49b03cf4cf27acf3a57.cu | // Equihash CUDA solver
// Copyright (c) 2016 John Tromp
#define XINTREE
#define UNROLL
#define htole32(x) (x)
#define HAVE_DECL_HTOLE32 1
#include "../cpu_tromp/equi.h"
#include "eqcuda.hpp"
#include "blake2b.cu"
typedef uint16_t u16;
typedef uint64_t u64;
#ifndef RESTBITS
#define RESTBITS 4
#endif
// 2_log of number of buckets
#define BUCKBITS (DIGITBITS-RESTBITS)
#ifndef SAVEMEM
#if RESTBITS == 4
// can't save memory in such small buckets
#define SAVEMEM 1
#elif RESTBITS >= 8
// take advantage of law of large numbers (sum of 2^8 random numbers)
// this reduces (200,9) memory to under 144MB, with negligible discarding
// (9/14 is substituted textually, so SLOTRANGE * SAVEMEM expands to
//  SLOTRANGE * 9 / 14 — integer math in that order)
#define SAVEMEM 9/14
#endif
#endif
// number of buckets
static const u32 NBUCKETS = 1 << BUCKBITS;
// bucket mask
static const u32 BUCKMASK = NBUCKETS - 1;
// 2_log of number of slots per bucket
static const u32 SLOTBITS = RESTBITS + 1 + 1;
static const u32 SLOTRANGE = 1 << SLOTBITS;
// number of slots per bucket
static const u32 NSLOTS = SLOTRANGE * SAVEMEM;
// SLOTBITS mask
static const u32 SLOTMASK = SLOTRANGE - 1;
// number of possible values of xhash (rest of n) bits
static const u32 NRESTS = 1 << RESTBITS;
// RESTBITS mask
static const u32 RESTMASK = NRESTS - 1;
// number of blocks of hashes extracted from single 512 bit blake2b output
static const u32 NBLOCKS = (NHASHES + HASHESPERBLAKE - 1) / HASHESPERBLAKE;
// nothing larger found in 100000 runs
static const u32 MAXSOLS = 8;
// tree node identifying its children as two different slots in
// a bucket on previous layer with the same rest bits (x-tra hash)
// Compact tree node: packs (bucketid, slot0, slot1[, xhash]) — or, at the
// leaf layer, a raw hash index — into a single u32 via manual bit fields.
// With XINTREE the RESTBITS extra-hash value is cached in the low bits.
struct tree {
  u32 bid_s0_s1_x; // manual bitfields
  // Leaf constructor with cached xhash (XINTREE layout).
  __device__ tree(const u32 idx, const u32 xh) {
    bid_s0_s1_x = idx << RESTBITS | xh;
  }
  // Leaf constructor, raw index only.
  __device__ tree(const u32 idx) {
    bid_s0_s1_x = idx;
  }
  // Interior-node constructor referencing two slots of a parent bucket.
  __device__ tree(const u32 bid, const u32 s0, const u32 s1, const u32 xh) {
#ifdef XINTREE
    bid_s0_s1_x = ((((bid << SLOTBITS) | s0) << SLOTBITS) | s1) << RESTBITS | xh;
#else
    bid_s0_s1_x = (((bid << SLOTBITS) | s0) << SLOTBITS) | s1;
#endif
  }
  // Leaf index (only meaningful for layer-0 nodes).
  __device__ u32 getindex() const {
#ifdef XINTREE
    return bid_s0_s1_x >> RESTBITS;
#else
    return bid_s0_s1_x;
#endif
  }
  __device__ u32 bucketid() const {
#ifdef XINTREE
    return bid_s0_s1_x >> (2 * SLOTBITS + RESTBITS);
#else
    return bid_s0_s1_x >> (2 * SLOTBITS);
#endif
  }
  __device__ u32 slotid0() const {
#ifdef XINTREE
    // note: + binds tighter than >>, so this shifts by SLOTBITS+RESTBITS
    return (bid_s0_s1_x >> SLOTBITS + RESTBITS) & SLOTMASK;
#else
    return (bid_s0_s1_x >> SLOTBITS) & SLOTMASK;
#endif
  }
  __device__ u32 slotid1() const {
#ifdef XINTREE
    return (bid_s0_s1_x >> RESTBITS) & SLOTMASK;
#else
    return bid_s0_s1_x & SLOTMASK;
#endif
  }
  // Cached RESTBITS extra-hash value (XINTREE layout).
  __device__ u32 xhash() const {
    return bid_s0_s1_x & RESTMASK;
  }
};
// One 32-bit unit of stored hash, addressable as a word or as 4 bytes.
union hashunit {
  u32 word;
  uchar bytes[sizeof(u32)];
};
// Number of 32-bit words needed for `bits` bits, and the per-layer word counts.
#define WORDS(bits) ((bits + 31) / 32)
#define HASHWORDS0 WORDS(WN - DIGITBITS + RESTBITS)
#define HASHWORDS1 WORDS(WN - 2*DIGITBITS + RESTBITS)
// Slot on an even (trees0) layer: tree node plus remaining hash.
struct slot0 {
  tree attr;
  hashunit hash[HASHWORDS0];
};
// Slot on an odd (trees1) layer: one round's worth fewer hash words.
struct slot1 {
  tree attr;
  hashunit hash[HASHWORDS1];
};
// a bucket is NSLOTS treenodes
typedef slot0 bucket0[NSLOTS];
typedef slot1 bucket1[NSLOTS];
// the N-bit hash consists of K+1 n-bit "digits"
// each of which corresponds to a layer of NBUCKETS buckets
typedef bucket0 digit0[NBUCKETS];
typedef bucket1 digit1[NBUCKETS];
// size (in bytes) of hash in round 0 <= r < WK
// Host-side mirror of hashsize(): number of bytes of hash that remain to be
// stored after completing round r. With XINTREE the RESTBITS extra bits live
// in the tree node instead of the hash, shortening the stored remainder.
u32 hhashsize(const u32 r) {
#ifdef XINTREE
  const u32 remaining = WN - (r + 1) * DIGITBITS;
#else
  const u32 remaining = WN - (r + 1) * DIGITBITS + RESTBITS;
#endif
  return (remaining + 7) >> 3; // round up to whole bytes
}
// size (in bytes) of hash in round 0 <= r < WK
// Device-side twin of hhashsize(): bytes of hash remaining after round r.
__device__ u32 hashsize(const u32 r) {
#ifdef XINTREE
  const u32 remaining = WN - (r + 1) * DIGITBITS;
#else
  const u32 remaining = WN - (r + 1) * DIGITBITS + RESTBITS;
#endif
  return (remaining + 7) >> 3; // round up to whole bytes
}
// Host-side: 32-bit hashunits needed to hold `bytes` bytes (ceil division).
u32 hhashwords(u32 bytes) {
  return (bytes + 3) >> 2;
}
// Device-side: 32-bit hashunits needed to hold `bytes` bytes (ceil division).
__device__ u32 hashwords(u32 bytes) {
  return (bytes + 3) >> 2;
}
// manages hash and tree data
// Pointers into the two device heaps: trees0 holds the even-round bucket
// layers, trees1 the odd-round layers (set up by the host context).
struct htalloc {
  bucket0 *trees0[(WK + 1) / 2];
  bucket1 *trees1[WK / 2];
};
// One row of per-bucket slot counters.
typedef u32 bsizes[NBUCKETS];
// Central solver state shared between host and device: blake2b midstate for
// the current header+nonce, the hash-table layer pointers, per-bucket slot
// counters, and the solution output buffer. 64-byte aligned and allocated
// with _mm_malloc so the aligned copy can be memcpy'd to the device.
struct __align__(64) equi {
  blake2b_state blake_ctx;  // midstate after absorbing header+nonce
  htalloc hta;              // device pointers to the bucket layers
  bsizes *nslots;           // device array: 2 rows of NBUCKETS counters
  proof *sols;              // device solution buffer (MAXSOLS entries)
  u32 nsols;                // number of candidate solutions found
  u32 nthreads;             // total device threads the kernels stride over
  equi(const u32 n_threads) {
    nthreads = n_threads;
  }
  // Aligned allocation so the __align__(64) requirement holds for host copies.
  void* operator new(size_t i)
  {
    return _mm_malloc(i, 64);
  }
  void operator delete(void* p) {
    _mm_free(p);
  }
  // Re-seeds the blake2b midstate for a new header+nonce and clears state.
  // NOTE(review): only NBUCKETS counters are zeroed although nslots holds
  // two rows (2*NBUCKETS) — presumably row 1 is left to the kernels' reset
  // in getnslots1; confirm row 1 is clean on the very first solve.
  void setheadernonce(const char *header, const u32 len, const char* nonce, const u32 nlen) {
    setheader(&blake_ctx, header, len, nonce, nlen);
    checkCudaErrors(cudaMemset(nslots, 0, NBUCKETS * sizeof(u32)));
    nsols = 0;
  }
  // Reads (capped) and resets the row-0 counter for bucket bid.
  __device__ u32 getnslots0(const u32 bid) {
    u32 &nslot = nslots[0][bid];
    const u32 n = min(nslot, NSLOTS);
    nslot = 0;
    return n;
  }
  // Reads (capped) and resets the row-1 counter for bucket bid.
  __device__ u32 getnslots1(const u32 bid) {
    u32 &nslot = nslots[1][bid];
    const u32 n = min(nslot, NSLOTS);
    nslot = 0;
    return n;
  }
  // Swaps the two halves of an index list so the smaller leading index comes
  // first, giving solutions a canonical order.
  __device__ void orderindices(u32 *indices, u32 size) {
    if (indices[0] > indices[size]) {
      for (u32 i = 0; i < size; i++) {
        const u32 tmp = indices[i];
        indices[i] = indices[size + i];
        indices[size + i] = tmp;
      }
    }
  }
  // listindices1..9: recursively expand a tree node at layer k into its
  // 2^k sorted leaf indices (one function per layer, no device recursion).
  __device__ void listindices1(const tree t, u32 *indices) {
    const bucket0 &buck = hta.trees0[0][t.bucketid()];
    const u32 size = 1 << 0;
    indices[0] = buck[t.slotid0()].attr.getindex();
    indices[size] = buck[t.slotid1()].attr.getindex();
    orderindices(indices, size);
  }
  __device__ void listindices2(const tree t, u32 *indices) {
    const bucket1 &buck = hta.trees1[0][t.bucketid()];
    const u32 size = 1 << 1;
    listindices1(buck[t.slotid0()].attr, indices);
    listindices1(buck[t.slotid1()].attr, indices + size);
    orderindices(indices, size);
  }
  __device__ void listindices3(const tree t, u32 *indices) {
    const bucket0 &buck = hta.trees0[1][t.bucketid()];
    const u32 size = 1 << 2;
    listindices2(buck[t.slotid0()].attr, indices);
    listindices2(buck[t.slotid1()].attr, indices + size);
    orderindices(indices, size);
  }
  __device__ void listindices4(const tree t, u32 *indices) {
    const bucket1 &buck = hta.trees1[1][t.bucketid()];
    const u32 size = 1 << 3;
    listindices3(buck[t.slotid0()].attr, indices);
    listindices3(buck[t.slotid1()].attr, indices + size);
    orderindices(indices, size);
  }
  __device__ void listindices5(const tree t, u32 *indices) {
    const bucket0 &buck = hta.trees0[2][t.bucketid()];
    const u32 size = 1 << 4;
    listindices4(buck[t.slotid0()].attr, indices);
    listindices4(buck[t.slotid1()].attr, indices+size);
    orderindices(indices, size);
  }
  __device__ void listindices6(const tree t, u32 *indices) {
    const bucket1 &buck = hta.trees1[2][t.bucketid()];
    const u32 size = 1 << 5;
    listindices5(buck[t.slotid0()].attr, indices);
    listindices5(buck[t.slotid1()].attr, indices+size);
    orderindices(indices, size);
  }
  __device__ void listindices7(const tree t, u32 *indices) {
    const bucket0 &buck = hta.trees0[3][t.bucketid()];
    const u32 size = 1 << 6;
    listindices6(buck[t.slotid0()].attr, indices);
    listindices6(buck[t.slotid1()].attr, indices+size);
    orderindices(indices, size);
  }
  __device__ void listindices8(const tree t, u32 *indices) {
    const bucket1 &buck = hta.trees1[3][t.bucketid()];
    const u32 size = 1 << 7;
    listindices7(buck[t.slotid0()].attr, indices);
    listindices7(buck[t.slotid1()].attr, indices+size);
    orderindices(indices, size);
  }
  __device__ void listindices9(const tree t, u32 *indices) {
    const bucket0 &buck = hta.trees0[4][t.bucketid()];
    const u32 size = 1 << 8;
    listindices8(buck[t.slotid0()].attr, indices);
    listindices8(buck[t.slotid1()].attr, indices+size);
    orderindices(indices, size);
  }
  // Expands a digitK match into a full proof, rejects probable duplicates,
  // and appends it to sols (bounded by MAXSOLS; nsols may exceed it).
  __device__ void candidate(const tree t) {
    proof prf;
#if WK==9
    listindices9(t, prf);
#elif WK==5
    listindices5(t, prf);
#else
#error not implemented
#endif
    if (probdupe(prf))
      return;
    u32 soli = atomicAdd(&nsols, 1);
    if (soli < MAXSOLS)
#if WK==9
      listindices9(t, sols[soli]);
#elif WK==5
      listindices5(t, sols[soli]);
#else
#error not implemented
#endif
  }
  // Host-side diagnostic: prints a histogram/sparkline of bucket fill levels
  // for round r. Compiled out unless HIST/SPARK/LOGSPARK is defined.
  void showbsizes(u32 r) {
#if defined(HIST) || defined(SPARK) || defined(LOGSPARK)
    u32 ns[NBUCKETS];
    checkCudaErrors(cudaMemcpy(ns, nslots[r & 1], NBUCKETS * sizeof(u32), cudaMemcpyDeviceToHost));
    u32 binsizes[65];
    memset(binsizes, 0, 65 * sizeof(u32));
    for (u32 bucketid = 0; bucketid < NBUCKETS; bucketid++) {
      u32 bsize = min(ns[bucketid], NSLOTS) >> (SLOTBITS - 6);
      binsizes[bsize]++;
    }
    for (u32 i = 0; i < 65; i++) {
#ifdef HIST
      printf(" %d:%d", i, binsizes[i]);
#else
#ifdef SPARK
      u32 sparks = binsizes[i] / SPARKSCALE;
#else
      u32 sparks = 0;
      for (u32 bs = binsizes[i]; bs; bs >>= 1) sparks++;
      sparks = sparks * 7 / SPARKSCALE;
#endif
      printf("\342\226%c", '\201' + sparks);
#endif
    }
    printf("\n");
#endif
  }
  // proper dupe test is a little costly on GPU, so allow false negatives
  __device__ bool probdupe(u32 *prf) {
    unsigned short susp[PROOFSIZE];
    memset(susp, 0xffff, PROOFSIZE * sizeof(unsigned short));
    for (u32 i=0; i<PROOFSIZE; i++) {
      u32 bin = prf[i] & (PROOFSIZE-1);  // hash index into PROOFSIZE bins
      unsigned short msb = prf[i]>>WK;
      if (msb == susp[bin])
        return true;  // same high bits in same bin: treat as duplicate
      susp[bin] = msb;
    }
    return false;
  }
  // Per-round layout constants (see the member comments): how many hashunits
  // slots carry before/after round r and where the hash bytes start.
  struct htlayout {
    htalloc hta;
    u32 prevhashunits;
    u32 nexthashunits;
    u32 dunits;   // hashunits cancelled by this round's xor
    u32 prevbo;   // leading pad bytes in a prev-layer slot (0-3)
    u32 nextbo;   // leading pad bytes in a next-layer slot (0-3)
    __device__ htlayout(equi *eq, u32 r) : hta(eq->hta), prevhashunits(0), dunits(0) {
      u32 nexthashbytes = hashsize(r);
      nexthashunits = hashwords(nexthashbytes);
      prevbo = 0;
      nextbo = nexthashunits * sizeof(hashunit) - nexthashbytes; // 0-3
      if (r) {
        u32 prevhashbytes = hashsize(r-1);
        prevhashunits = hashwords(prevhashbytes);
        prevbo = prevhashunits * sizeof(hashunit) - prevhashbytes; // 0-3
        dunits = prevhashunits - nexthashunits;
      }
    }
    // RESTBITS extra-hash bits of an even-layer slot (cached with XINTREE).
    __device__ u32 getxhash0(const slot0* pslot) const {
#ifdef XINTREE
      return pslot->attr.xhash();
#elif WN == 200 && RESTBITS == 4
      return pslot->hash->bytes[prevbo] >> 4;
#elif WN == 200 && RESTBITS == 8
      return (pslot->hash->bytes[prevbo] & 0xf) << 4 | pslot->hash->bytes[prevbo + 1] >> 4;
#elif WN == 144 && RESTBITS == 4
      return pslot->hash->bytes[prevbo] & 0xf;
#elif WN == 200 && RESTBITS == 6
      return (pslot->hash->bytes[prevbo] & 0x3) << 4 | pslot->hash->bytes[prevbo+1] >> 4;
#else
#error non implemented
#endif
    }
    // RESTBITS extra-hash bits of an odd-layer slot.
    __device__ u32 getxhash1(const slot1* pslot) const {
#ifdef XINTREE
      return pslot->attr.xhash();
#elif WN == 200 && RESTBITS == 4
      return pslot->hash->bytes[prevbo] & 0xf;
#elif WN == 200 && RESTBITS == 8
      return pslot->hash->bytes[prevbo];
#elif WN == 144 && RESTBITS == 4
      return pslot->hash->bytes[prevbo] & 0xf;
#elif WN == 200 && RESTBITS == 6
      return pslot->hash->bytes[prevbo] & 0x3f;
#else
#error non implemented
#endif
    }
    // Last-unit comparison used to discard probable duplicate pairs.
    __device__ bool equal(const hashunit *hash0, const hashunit *hash1) const {
      return hash0[prevhashunits - 1].word == hash1[prevhashunits - 1].word;
    }
  };
  // Per-thread grouping of one bucket's slots by rest bits; either a 64-bit
  // bitmap per rest value (XBITMAP) or linked lists threaded through slots.
  struct collisiondata {
#ifdef XBITMAP
#if NSLOTS > 64
#error cant use XBITMAP with more than 64 slots
#endif
    u64 xhashmap[NRESTS];
    u64 xmap;
#else
#if RESTBITS <= 6
    typedef uchar xslot;
#else
    typedef u16 xslot;
#endif
    static const xslot xnil = ~0;  // list terminator (every byte 0xff)
    xslot xhashslots[NRESTS];
    xslot nextxhashslot[NSLOTS];
    xslot nextslot;
#endif
    u32 s0;
    __device__ void clear() {
#ifdef XBITMAP
      memset(xhashmap, 0, NRESTS * sizeof(u64));
#else
      memset(xhashslots, xnil, NRESTS * sizeof(xslot));
      memset(nextxhashslot, xnil, NSLOTS * sizeof(xslot));
#endif
    }
    // Registers s1 under xh and primes iteration over earlier matches.
    __device__ bool addslot(u32 s1, u32 xh) {
#ifdef XBITMAP
      xmap = xhashmap[xh];
      xhashmap[xh] |= (u64)1 << s1;
      s0 = ~0;
      return true;
#else
      nextslot = xhashslots[xh];
      nextxhashslot[s1] = nextslot;
      xhashslots[xh] = s1;
      return true;
#endif
    }
    __device__ bool nextcollision() const {
#ifdef XBITMAP
      return xmap != 0;
#else
      return nextslot != xnil;
#endif
    }
    // Next colliding slot id; advances the internal cursor.
    __device__ u32 slot() {
#ifdef XBITMAP
      const u32 ffs = __ffsll(xmap);
      s0 += ffs; xmap >>= ffs;
#else
      nextslot = nextxhashslot[s0 = nextslot];
#endif
      return s0;
    }
  };
};
// Round 0 ("digit H"): each thread blake2b-hashes its share of the NBLOCKS
// input blocks, splits every 512-bit output into HASHESPERBLAKE hashes, and
// scatters each hash into a bucket keyed by its leading BUCKBITS bits.
// Bucket overflow is handled by simply dropping the hash (slot >= NSLOTS).
__global__ void digitH(equi *eq) {
  uchar hash[HASHOUT];
  blake2b_state state;
  equi::htlayout htl(eq, 0);
  const u32 hashbytes = hashsize(0); // always 23 ?
  const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
  for (u32 block = id; block < NBLOCKS; block += eq->nthreads) {
    state = eq->blake_ctx;  // per-header midstate computed on the host
    blake2b_gpu_hash(&state, block, hash, HASHOUT);
    for (u32 i = 0; i<HASHESPERBLAKE; i++) {
      const uchar *ph = hash + i * WN / 8;
// Extract bucket id (and, for XINTREE, the rest bits) from the hash prefix.
#if BUCKBITS == 16 && RESTBITS == 4
      const u32 bucketid = ((u32)ph[0] << 8) | ph[1];
#ifdef XINTREE
      const u32 xhash = ph[2] >> 4;
#endif
#elif BUCKBITS == 14 && RESTBITS == 6
      const u32 bucketid = ((u32)ph[0] << 6) | ph[1] >> 2;
#elif BUCKBITS == 12 && RESTBITS == 8
      const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
#elif BUCKBITS == 20 && RESTBITS == 4
      const u32 bucketid = ((((u32)ph[0] << 8) | ph[1]) << 4) | ph[2] >> 4;
#ifdef XINTREE
      const u32 xhash = ph[2] & 0xf;
#endif
#elif BUCKBITS == 12 && RESTBITS == 4
      const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
      const u32 xhash = ph[1] & 0xf;
#else
#error not implemented
#endif
      const u32 slot = atomicAdd(&eq->nslots[0][bucketid], 1);
      if (slot >= NSLOTS)
        continue;  // bucket full: discard this hash
      slot0 &s = eq->hta.trees0[0][bucketid][slot];
#ifdef XINTREE
      s.attr = tree(block*HASHESPERBLAKE+i, xhash);
#else
      s.attr = tree(block*HASHESPERBLAKE+i);
#endif
      // Store the tail of the hash right-aligned inside the slot's hashunits.
      memcpy(s.hash->bytes+htl.nextbo, ph+WN/8-hashbytes, hashbytes);
    }
  }
}
// Generic odd-round kernel (r = 1, 3, ...): reads the slot0 tables written by
// round r-1, pairs slots whose rest bits collide, XORs their hash tails, and
// writes the results into the slot1 tables for round r.  Used when the
// per-round unrolled kernels (UNROLL) are not compiled in.
__global__ void digitO(equi *eq, const u32 r) {
equi::htlayout htl(eq, r);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[(r - 1) / 2][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
// Skip pairs whose remaining hash words are already equal
// (they would XOR to zero and pollute later rounds).
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
u32 xorbucketid;
u32 xhash;
const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
// Extract the next BUCKBITS (+ RESTBITS) bits of the XORed hash.
#if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8)
| (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1])) << 4
| (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
xhash &= 0xf;
#elif WN == 144 && BUCKBITS == 20 && RESTBITS == 4
xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4)
| (xhash = bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4;
xhash &= 0xf;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4)
| (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
xhash &= 0xf;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) & 0xf) << 8)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 2
| (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 6;
#else
#error not implemented
#endif
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[r/2][xorbucketid][xorslot];
// Internal tree node pointing back at the paired child slots.
#ifdef XINTREE
xs.attr = tree(bucketid, s0, s1, xhash);
#else
xs.attr = tree(bucketid, s0, s1);
#endif
// XOR the remaining hash words, dropping the consumed digit units.
for (u32 i=htl.dunits; i < htl.prevhashunits; i++)
xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
}
}
}
}
// Generic even-round kernel (r = 2, 4, ...): mirror of digitO, but reads the
// slot1 tables from round r-1 and writes slot0 tables for round r.
__global__ void digitE(equi *eq, const u32 r) {
equi::htlayout htl(eq, r);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[(r - 1) / 2][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
// Skip fully-equal pairs (their XOR would be all zero).
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
u32 xorbucketid;
u32 xhash;
const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
// Extract the next BUCKBITS bits (and xhash where used) of the XOR.
#if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8)
| (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]);
xhash = (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
#elif WN == 144 && BUCKBITS == 20 && RESTBITS == 4
xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4)
| (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 6)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 2;
#else
#error not implemented
#endif
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[r / 2][xorbucketid][xorslot];
#ifdef XINTREE
xs.attr = tree(bucketid, s0, s1, xhash);
#else
xs.attr = tree(bucketid, s0, s1);
#endif
// Carry forward the XOR of the surviving hash words.
for (u32 i = htl.dunits; i < htl.prevhashunits; i++)
xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
}
}
}
}
#ifdef UNROLL
// Unrolled round 1 (WN=200, BUCKBITS=16, RESTBITS=4, XINTREE layout):
// __byte_perm reorders the first XOR word so the next 16 bucket bits and
// 4 rest bits can be sliced out directly; the remaining 5 words are XORed.
__global__ void digit_1(equi *eq) {
equi::htlayout htl(eq, 1);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[0][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
// Byte-reverse xor0, then take bits [19:4] as bucket, [3:0] as xhash.
const u32 bexor = __byte_perm(xor0, 0, 0x0123);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[0][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
xs.hash[4].word = pslot0->hash[5].word ^ pslot1->hash[5].word;
}
}
}
}
// Unrolled round 2: collides slot1 tables from round 1 into slot0 tables.
// Note xor0 itself is still partly live and is kept as hash word 0.
__global__ void digit2(equi *eq) {
equi::htlayout htl(eq, 2);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[0][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
// Byte-reverse, then top 16 bits are the bucket, next 4 the xhash.
const u32 bexor = __byte_perm(xor0, 0, 0x0123);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[1][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[4].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
}
}
}
}
// Unrolled round 3: the digit boundary straddles two 32-bit words, so
// __byte_perm combines bytes of xor0 and xor1; word 0 is fully consumed
// and only xor1 onward is carried forward.
__global__ void digit3(equi *eq) {
equi::htlayout htl(eq, 3);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[1][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x1234);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[1][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
}
}
}
}
// Unrolled round 4: bucket/rest bits come from a byte permutation of xor0
// alone; xor0 remains partly live and is kept as hash word 0.
__global__ void digit4(equi *eq) {
equi::htlayout htl(eq, 4);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[1][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x4123);
const u32 xorbucketid = bexor >> 8;
const u32 xhash = bexor >> 4 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[2][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
}
}
}
}
// Unrolled round 5: digit straddles xor0/xor1; word 0 is consumed and
// only xor1 onward survives into the round-5 slot1 tables.
__global__ void digit5(equi *eq) {
equi::htlayout htl(eq, 5);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[2][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[2][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
}
}
}
}
// Unrolled round 6: same straddling pattern as round 5 but with the bucket
// bits taken from the top of the permuted word; two hash words remain.
__global__ void digit6(equi *eq) {
equi::htlayout htl(eq, 6);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[2][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[3][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
}
}
}
}
// Unrolled round 7: bucket/rest bits sliced from a permutation of xor0,
// which remains live; two hash words are written to the round-7 tables.
__global__ void digit7(equi *eq) {
equi::htlayout htl(eq, 7);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[3][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x4012);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[3][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
}
}
}
}
// Unrolled round 8 (last before the final collision pass): only one hash
// word (xor1) survives into the round-8 slot0 tables.
__global__ void digit8(equi *eq) {
equi::htlayout htl(eq, 8);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[3][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x3456);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[4][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
}
}
}
}
#endif
// Final round: a pair whose remaining hash words are fully equal means the
// XOR of all 2^WK leaves is zero — record it as a candidate solution for
// host-side verification.  Note this is the inverse of earlier rounds,
// where equal pairs were skipped.
__global__ void digitK(equi *eq) {
equi::collisiondata cd;
equi::htlayout htl(eq, WK);
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[(WK - 1) / 2][bucketid];
u32 bsize = eq->getnslots0(bucketid); // assume WK odd
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1))) // assume WK odd
continue;
for (; cd.nextcollision();) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash)) {
#ifdef XINTREE
eq->candidate(tree(bucketid, s0, s1, 0));
#else
eq->candidate(tree(bucketid, s0, s1));
#endif
}
}
}
}
}
// Set up the solver context on GPU `id`: host-side solution buffer plus
// device-side hash-table heaps, slot counters, and solution storage.
// tpb * blocks determines the logical thread count stored in equi.
eq_cuda_context::eq_cuda_context(int tpb, int blocks, int id)
: threadsperblock(tpb), totalblocks(blocks), device_id(id)
{
eq = new equi(threadsperblock * totalblocks);
// Over-allocate by one page and round the usable pointer up to a 4 KiB
// boundary.  NOTE(review): the cast goes through (long long) rather than
// uintptr_t — verify on platforms where pointers don't fit a long long.
sol_memory = malloc(sizeof(proof) * MAXSOLS + 4096);
solutions = (proof*)(((long long)sol_memory + 4095) & -4096);
checkCudaErrors(cudaSetDevice(device_id));
checkCudaErrors(cudaDeviceReset());
checkCudaErrors(cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync));
checkCudaErrors(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1));
// Two heaps hold the alternating slot0/slot1 tables for all rounds.
checkCudaErrors(cudaMalloc((void**)&heap0, sizeof(digit0)));
checkCudaErrors(cudaMalloc((void**)&heap1, sizeof(digit1)));
// Even rounds live in heap0, odd rounds in heap1.
for (u32 r = 0; r < WK; r++)
if ((r & 1) == 0)
eq->hta.trees0[r / 2] = (bucket0 *)(heap0 + r / 2);
else
eq->hta.trees1[r / 2] = (bucket1 *)(heap1 + r / 2);
checkCudaErrors(cudaMalloc((void**)&eq->nslots, 2 * NBUCKETS * sizeof(u32)));
checkCudaErrors(cudaMalloc((void**)&eq->sols, MAXSOLS * sizeof(proof)));
checkCudaErrors(cudaMalloc((void**)&device_eq, sizeof(equi)));
}
// Tear down the context.  cudaDeviceReset() releases every device
// allocation on this device, which is why the individual cudaFree calls
// below were commented out.
eq_cuda_context::~eq_cuda_context()
{
/*checkCudaErrors(cudaFree(eq->nslots));
checkCudaErrors(cudaFree(eq->sols));
checkCudaErrors(cudaFree(eq->hta.trees0[0]));
checkCudaErrors(cudaFree(eq->hta.trees1[0]));*/
checkCudaErrors(cudaSetDevice(device_id));
checkCudaErrors(cudaDeviceReset());
free(sol_memory);
delete eq;
}
// Run one full Equihash solve for the given header+nonce.  Kernels are
// chained on the default stream; `cancelf` is polled between rounds to
// allow early abort.  Each found proof is passed to `solutionf`, then
// `hashdonef` signals completion.  Synchronous cudaMemcpy calls provide
// the host/device synchronization points.
void eq_cuda_context::solve(const char *tequihash_header,
unsigned int tequihash_header_len,
const char* nonce,
unsigned int nonce_len,
std::function<bool()> cancelf,
std::function<void(const std::vector<uint32_t>&, size_t, const unsigned char*)> solutionf,
std::function<void(void)> hashdonef)
{
checkCudaErrors(cudaSetDevice(device_id));
eq->setheadernonce(tequihash_header, tequihash_header_len, nonce, nonce_len);
// Push the (re)initialized solver state to the device.
checkCudaErrors(cudaMemcpy(device_eq, eq, sizeof(equi), cudaMemcpyHostToDevice));
digitH<<<totalblocks, threadsperblock>>>(device_eq);
if (cancelf()) return;
// Use the hand-unrolled per-round kernels when the layout supports them,
// otherwise alternate the generic odd/even kernels.
#if BUCKBITS == 16 && RESTBITS == 4 && defined XINTREE && defined(UNROLL)
digit_1<<<totalblocks, threadsperblock>>>(device_eq);
if (cancelf()) return;
digit2<<<totalblocks, threadsperblock>>>(device_eq);
if (cancelf()) return;
digit3<<<totalblocks, threadsperblock>>>(device_eq);
if (cancelf()) return;
digit4<<<totalblocks, threadsperblock>>>(device_eq);
if (cancelf()) return;
digit5<<<totalblocks, threadsperblock>>>(device_eq);
if (cancelf()) return;
digit6<<<totalblocks, threadsperblock>>>(device_eq);
if (cancelf()) return;
digit7<<<totalblocks, threadsperblock>>>(device_eq);
if (cancelf()) return;
digit8<<<totalblocks, threadsperblock>>>(device_eq);
#else
for (u32 r = 1; r < WK; r++) {
r & 1 ? digitO<<<totalblocks, threadsperblock>>>(device_eq, r)
: digitE<<<totalblocks, threadsperblock>>>(device_eq, r);
}
#endif
if (cancelf())
return;
digitK<<<totalblocks, threadsperblock>>>(device_eq);
// Synchronous copies: retrieve solution count and the proofs themselves.
checkCudaErrors(cudaMemcpy(eq, device_eq, sizeof(equi), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(solutions, eq->sols, MAXSOLS * sizeof(proof), cudaMemcpyDeviceToHost));
for (unsigned s = 0; (s < eq->nsols) && (s < MAXSOLS); s++)
{
std::vector<uint32_t> index_vector(PROOFSIZE);
for (u32 i = 0; i < PROOFSIZE; i++) {
index_vector[i] = solutions[s][i];
}
solutionf(index_vector, DIGITBITS, nullptr);
if (cancelf())
break;
}
hashdonef();
}
3dd0997b12afb3885e4ebdb45fb625b991cadb3a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2015 NVIDIA Corporation. All rights reserved
*
* Sample app to demonstrate use of CUPTI library to obtain metric values
* using callbacks for CUDA runtime APIs
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
// #include <cupti.h>
#include <math_constants.h>
// #include "../../lcutil.h"
#include <hip/hip_runtime_api.h>
#define CUDA_SAFE_CALL( call) { \
hipError_t err = call; \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, hipGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} }
#define DRIVER_API_CALL(apiFuncCall) \
do { \
hipError_t _status = apiFuncCall; \
if (_status != hipSuccess) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %d.\n", \
__FILE__, __LINE__, #apiFuncCall, _status); \
exit(-1); \
} \
} while (0)
#define RUNTIME_API_CALL(apiFuncCall) \
do { \
hipError_t _status = apiFuncCall; \
if (_status != hipSuccess) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \
__FILE__, __LINE__, #apiFuncCall, hipGetErrorString(_status));\
exit(-1); \
} \
} while (0)
#define ALIGN_SIZE (8)
#define ALIGN_BUFFER(buffer, align) \
(((uintptr_t) (buffer) & ((align)-1)) ? ((buffer) + (align) - ((uintptr_t) (buffer) & ((align)-1))) : (buffer))
// #define COMP_ITERATIONS (512)
#define THREADS (1024)
#define BLOCKS (3276)
#define N (10)
#define REGBLOCK_SIZE (4)
// #define UNROLL_ITERATIONS (32)
#define deviceNum (0)
// #define OFFSET
#define INNER_REPS 2048
#define UNROLLS 1
// __constant__ __device__ int off [16] = {0,4,8,12,9,13,1,5,2,6,10,14,11,15,3,7}; //512 threads
// __constant__ __device__ int off [16] = {0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3}; //512 threads
// __constant__ __device__ int off [16] = {0,2,4,6,8,10,12,14,11,9,15,13,3,1,7,5}; //256 threads
// Global-memory bandwidth microbenchmark: each thread repeatedly copies one
// element cdin[ite] -> cdout[ite] (N * INNER_REPS iterations).  Adjacent
// threads access adjacent elements, so warp accesses are fully coalesced.
// Expects a 1-D launch with blockDim.x == THREADS.
template <class T> __global__ void benchmark (T* cdin, T* cdout){
// const int total = THREADS*BLOCKS+THREADS;
const int ite = blockIdx.x * THREADS + threadIdx.x;
T r0;
// printf("%d - %d\n", blockIdx.x,off[blockIdx.x]);
// T r1,r2,r3;
// r0=cdin[ite];
for (int k=0; k<N;k++){
#pragma unroll 2048
for(int j=0; j<INNER_REPS; j+=UNROLLS){
r0 = cdin[ite];
cdout[ite]=r0;
}
}
// r0 is always assigned above since N and INNER_REPS are positive.
cdout[ite]=r0;
}
// Return the median of column `col` of the n-row table x.  As a side
// effect, that column of x is left sorted in ascending order.
double median(int n, double x[][4],int col) {
    // Selection sort restricted to the chosen column: after pass i,
    // x[i][col] holds the i-th smallest value.
    for (int i = 0; i + 1 < n; ++i) {
        int lo = i;
        for (int j = i + 1; j < n; ++j) {
            if (x[j][col] < x[lo][col])
                lo = j;
        }
        if (lo != i) {
            double held = x[i][col];
            x[i][col] = x[lo][col];
            x[lo][col] = held;
        }
    }
    // Even count: mean of the two central values; odd count: middle value.
    if (n % 2 == 0)
        return (x[n / 2][col] + x[n / 2 - 1][col]) / 2.0;
    return x[n / 2][col];
}
// Create a pair of HIP events and record `start` on the default stream,
// beginning a timing interval closed by finalizeEvents().
void initializeEvents(hipEvent_t *start, hipEvent_t *stop){
CUDA_SAFE_CALL( hipEventCreate(start) );
CUDA_SAFE_CALL( hipEventCreate(stop) );
CUDA_SAFE_CALL( hipEventRecord(*start, 0) );
}
// Close the timing interval opened by initializeEvents(): record `stop`,
// wait for it, return the elapsed time in milliseconds, and destroy both
// events.  The hipGetLastError check surfaces any preceding launch error.
float finalizeEvents(hipEvent_t start, hipEvent_t stop){
CUDA_SAFE_CALL( hipGetLastError() );
CUDA_SAFE_CALL( hipEventRecord(stop, 0) );
CUDA_SAFE_CALL( hipEventSynchronize(stop) );
float kernel_time;
CUDA_SAFE_CALL( hipEventElapsedTime(&kernel_time, start, stop) );
CUDA_SAFE_CALL( hipEventDestroy(start) );
CUDA_SAFE_CALL( hipEventDestroy(stop) );
return kernel_time;
}
// Launch one timed run of benchmark<float> and report the kernel time (ms)
// in *kernel_time and achieved bandwidth (GiB/s) in *bandw.  `type` selects
// the element width for the bandwidth formula (0 = 4-byte float, else
// 8-byte double), although only the float kernel is currently launched.
void runbench(int type, double* kernel_time, double* bandw,double* cdin,double* cdout){
hipEvent_t start, stop;
initializeEvents(&start, &stop);
dim3 dimBlock(THREADS, 1, 1);
dim3 dimGrid(BLOCKS, 1, 1);
// if (type==0){
hipLaunchKernelGGL(( benchmark<float>), dim3(dimGrid), dim3(dimBlock) , 0, 0, (float*)cdin,(float*)cdout);
// }else{
// benchmark<double><<< dimGrid, dimBlock >>>(cdin,cdout, inner_reps, unrolls);
// }
// Two global-memory accesses (one load + one store) per inner iteration.
long long shared_access = 2*(long long)(INNER_REPS)*N*THREADS*BLOCKS;
hipDeviceSynchronize();
double time = finalizeEvents(start, stop);
double result;
// accesses * bytes-per-element / time(ms) * 1000 -> bytes/s -> GiB/s.
if (type==0)
result = ((double)shared_access)*4/(double)time*1000./(double)(1024*1024*1024);
else
result = ((double)shared_access)*8/(double)time*1000./(double)(1024*1024*1024);
*kernel_time = time;
*bandw=result;
}
// Driver: allocate two device buffers, run the bandwidth benchmark
// `outer_reps` times (argv[1], default 1), print per-run kernel times,
// then reset the device.
int main(int argc, char *argv[]){
// CUpti_SubscriberHandle subscriber;
// NOTE(review): `context` is declared but never used below.
hipCtx_t context = 0;
hipDevice_t device = 0;
int deviceCount;
char deviceName[32];
int outer_reps;
// , vector_size, tile_dim;
if (argc>1){
outer_reps = atoi(argv[1]);
}else{
outer_reps = 1;
}
// cupti_eventData cuptiEvent;
// RuntimeApiTrace_t trace;
hipDeviceProp_t deviceProp;
printf("Usage: %s [device_num] [metric_name]\n", argv[0]);
hipSetDevice(deviceNum);
double mean[4];
// NOTE(review): variable-length arrays are a compiler extension in C++.
double time[outer_reps][2],value[outer_reps][4],sum_dev_median[4],sum_dev_mean[4],medianv[4],std_dev_mean[4],std_dev_median[4];
long SPresult[outer_reps],DPresult[outer_reps],timeresult[outer_reps][2];
int L2size;
int counters;
// StoreDeviceInfo_DRAM(stdout,&L2size);
// Buffer size in bytes: one double per thread across the whole grid.
int size = THREADS*BLOCKS*sizeof(double);
size_t freeCUDAMem, totalCUDAMem;
hipMemGetInfo(&freeCUDAMem, &totalCUDAMem);
printf("Total GPU memory %lu, free %lu\n", totalCUDAMem, freeCUDAMem);
// NOTE(review): %d with a size_t-valued expression, and `size` already
// counts bytes, so multiplying by sizeof(double) again looks double-counted
// — verify the intended units.
printf("Buffer size: %dMB\n", size*sizeof(double)/(1024*1024));
SPresult[0]=0;
DPresult[0]=0;
//Initialize Global Memory
double *cdin,L2=32;
double *cdout;
CUDA_SAFE_CALL(hipMalloc((void**)&cdin, size));
CUDA_SAFE_CALL(hipMalloc((void**)&cdout, size));
// Copy data to device memory
// NOTE(review): hipMemset fills bytes with 1, not zeros as the trailing
// comment claims; harmless for a copy benchmark but misleading.
CUDA_SAFE_CALL(hipMemset(cdin, 1, size)); // initialize to zeros
CUDA_SAFE_CALL(hipMemset(cdout, 0, size)); // initialize to zeros
// Synchronize in order to wait for memory operations to finish
CUDA_SAFE_CALL(hipDeviceSynchronize());
// make sure activity is enabled before any CUDA API
DRIVER_API_CALL(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
printf("There is no device supporting CUDA.\n");
return -2;
}
printf("CUDA Device Number: %d\n", deviceNum);
DRIVER_API_CALL(hipDeviceGet(&device, deviceNum));
CUDA_SAFE_CALL(hipGetDeviceProperties(&deviceProp, device));
DRIVER_API_CALL(hipDeviceGetName(deviceName, 32, device));
int i;
// NOTE(review): stray forward declaration; appears unused.
class type;
uint64_t L2units;
size_t sizet=sizeof(L2units);
for (i=0;i<outer_reps;i++){
uint32_t all = 1;
// Results always land in row 0; only the last run's time survives.
runbench(0,&time[0][0],&value[0][0],cdin,cdout);
printf("Registered time: %f ms\n",time[0][0]);
}
CUDA_SAFE_CALL( hipDeviceReset());
return 0;
}
| 3dd0997b12afb3885e4ebdb45fb625b991cadb3a.cu | /*
* Copyright 2011-2015 NVIDIA Corporation. All rights reserved
*
* Sample app to demonstrate use of CUPTI library to obtain metric values
* using callbacks for CUDA runtime APIs
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// #include <cupti.h>
#include <math_constants.h>
// #include "../../lcutil.h"
#include <cuda_profiler_api.h>
#define CUDA_SAFE_CALL( call) { \
cudaError err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} }
#define DRIVER_API_CALL(apiFuncCall) \
do { \
CUresult _status = apiFuncCall; \
if (_status != CUDA_SUCCESS) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %d.\n", \
__FILE__, __LINE__, #apiFuncCall, _status); \
exit(-1); \
} \
} while (0)
#define RUNTIME_API_CALL(apiFuncCall) \
do { \
cudaError_t _status = apiFuncCall; \
if (_status != cudaSuccess) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \
__FILE__, __LINE__, #apiFuncCall, cudaGetErrorString(_status));\
exit(-1); \
} \
} while (0)
#define ALIGN_SIZE (8)
#define ALIGN_BUFFER(buffer, align) \
(((uintptr_t) (buffer) & ((align)-1)) ? ((buffer) + (align) - ((uintptr_t) (buffer) & ((align)-1))) : (buffer))
// #define COMP_ITERATIONS (512)
#define THREADS (1024)
#define BLOCKS (3276)
#define N (10)
#define REGBLOCK_SIZE (4)
// #define UNROLL_ITERATIONS (32)
#define deviceNum (0)
// #define OFFSET
#define INNER_REPS 2048
#define UNROLLS 1
// __constant__ __device__ int off [16] = {0,4,8,12,9,13,1,5,2,6,10,14,11,15,3,7}; //512 threads
// __constant__ __device__ int off [16] = {0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3}; //512 threads
// __constant__ __device__ int off [16] = {0,2,4,6,8,10,12,14,11,9,15,13,3,1,7,5}; //256 threads
// Global-memory bandwidth microbenchmark (CUDA variant): each thread copies
// cdin[ite] -> cdout[ite] N * INNER_REPS times; adjacent threads touch
// adjacent elements, so warp accesses are fully coalesced.  Expects a 1-D
// launch with blockDim.x == THREADS.
template <class T> __global__ void benchmark (T* cdin, T* cdout){
// const int total = THREADS*BLOCKS+THREADS;
const int ite = blockIdx.x * THREADS + threadIdx.x;
T r0;
// printf("%d - %d\n", blockIdx.x,off[blockIdx.x]);
// T r1,r2,r3;
// r0=cdin[ite];
for (int k=0; k<N;k++){
#pragma unroll 2048
for(int j=0; j<INNER_REPS; j+=UNROLLS){
r0 = cdin[ite];
cdout[ite]=r0;
}
}
// r0 is always assigned above since N and INNER_REPS are positive.
cdout[ite]=r0;
}
// Compute the median of column `col` over the first n rows of x.
// Side effect: that column of x ends up sorted in ascending order.
double median(int n, double x[][4],int col) {
    // Insertion sort over the chosen column only.
    for (int i = 1; i < n; ++i) {
        double key = x[i][col];
        int j = i - 1;
        while (j >= 0 && x[j][col] > key) {
            x[j + 1][col] = x[j][col];
            --j;
        }
        x[j + 1][col] = key;
    }
    // Odd count: middle element; even count: mean of the central pair.
    return (n % 2 != 0)
        ? x[n / 2][col]
        : (x[n / 2][col] + x[n / 2 - 1][col]) / 2.0;
}
// Create a pair of CUDA events and record `start` on the default stream,
// beginning a timing interval closed by finalizeEvents().
void initializeEvents(cudaEvent_t *start, cudaEvent_t *stop){
CUDA_SAFE_CALL( cudaEventCreate(start) );
CUDA_SAFE_CALL( cudaEventCreate(stop) );
CUDA_SAFE_CALL( cudaEventRecord(*start, 0) );
}
// Close the timing interval opened by initializeEvents(): record `stop`,
// wait for it, return the elapsed time in milliseconds, and destroy both
// events.  The cudaGetLastError check surfaces any preceding launch error.
float finalizeEvents(cudaEvent_t start, cudaEvent_t stop){
CUDA_SAFE_CALL( cudaGetLastError() );
CUDA_SAFE_CALL( cudaEventRecord(stop, 0) );
CUDA_SAFE_CALL( cudaEventSynchronize(stop) );
float kernel_time;
CUDA_SAFE_CALL( cudaEventElapsedTime(&kernel_time, start, stop) );
CUDA_SAFE_CALL( cudaEventDestroy(start) );
CUDA_SAFE_CALL( cudaEventDestroy(stop) );
return kernel_time;
}
// Launch one timed run of benchmark<float> and report the kernel time (ms)
// in *kernel_time and achieved bandwidth (GiB/s) in *bandw.  `type` selects
// the element width used in the bandwidth formula (0 = 4-byte float, else
// 8-byte double), although only the float kernel is currently launched.
void runbench(int type, double* kernel_time, double* bandw,double* cdin,double* cdout){
cudaEvent_t start, stop;
initializeEvents(&start, &stop);
dim3 dimBlock(THREADS, 1, 1);
dim3 dimGrid(BLOCKS, 1, 1);
// if (type==0){
benchmark<float><<< dimGrid, dimBlock >>>((float*)cdin,(float*)cdout);
// }else{
// benchmark<double><<< dimGrid, dimBlock >>>(cdin,cdout, inner_reps, unrolls);
// }
// Two global-memory accesses (one load + one store) per inner iteration.
long long shared_access = 2*(long long)(INNER_REPS)*N*THREADS*BLOCKS;
cudaDeviceSynchronize();
double time = finalizeEvents(start, stop);
double result;
// accesses * bytes-per-element / time(ms) * 1000 -> bytes/s -> GiB/s.
if (type==0)
result = ((double)shared_access)*4/(double)time*1000./(double)(1024*1024*1024);
else
result = ((double)shared_access)*8/(double)time*1000./(double)(1024*1024*1024);
*kernel_time = time;
*bandw=result;
}
// Driver: allocate two device buffers, run the bandwidth benchmark
// `outer_reps` times (argv[1], default 1), print per-run kernel times,
// then reset the device.
int main(int argc, char *argv[]){
// CUpti_SubscriberHandle subscriber;
// NOTE(review): `context` is declared but never used below.
CUcontext context = 0;
CUdevice device = 0;
int deviceCount;
char deviceName[32];
int outer_reps;
// , vector_size, tile_dim;
if (argc>1){
outer_reps = atoi(argv[1]);
}else{
outer_reps = 1;
}
// cupti_eventData cuptiEvent;
// RuntimeApiTrace_t trace;
cudaDeviceProp deviceProp;
printf("Usage: %s [device_num] [metric_name]\n", argv[0]);
cudaSetDevice(deviceNum);
double mean[4];
// NOTE(review): variable-length arrays are a compiler extension in C++.
double time[outer_reps][2],value[outer_reps][4],sum_dev_median[4],sum_dev_mean[4],medianv[4],std_dev_mean[4],std_dev_median[4];
long SPresult[outer_reps],DPresult[outer_reps],timeresult[outer_reps][2];
int L2size;
int counters;
// StoreDeviceInfo_DRAM(stdout,&L2size);
// Buffer size in bytes: one double per thread across the whole grid.
int size = THREADS*BLOCKS*sizeof(double);
size_t freeCUDAMem, totalCUDAMem;
cudaMemGetInfo(&freeCUDAMem, &totalCUDAMem);
printf("Total GPU memory %lu, free %lu\n", totalCUDAMem, freeCUDAMem);
// NOTE(review): %d with a size_t-valued expression, and `size` already
// counts bytes, so multiplying by sizeof(double) again looks double-counted
// — verify the intended units.
printf("Buffer size: %dMB\n", size*sizeof(double)/(1024*1024));
SPresult[0]=0;
DPresult[0]=0;
//Initialize Global Memory
double *cdin,L2=32;
double *cdout;
CUDA_SAFE_CALL(cudaMalloc((void**)&cdin, size));
CUDA_SAFE_CALL(cudaMalloc((void**)&cdout, size));
// Copy data to device memory
// NOTE(review): cudaMemset fills bytes with 1, not zeros as the trailing
// comment claims; harmless for a copy benchmark but misleading.
CUDA_SAFE_CALL(cudaMemset(cdin, 1, size)); // initialize to zeros
CUDA_SAFE_CALL(cudaMemset(cdout, 0, size)); // initialize to zeros
// Synchronize in order to wait for memory operations to finish
CUDA_SAFE_CALL(cudaThreadSynchronize());
// make sure activity is enabled before any CUDA API
DRIVER_API_CALL(cuDeviceGetCount(&deviceCount));
if (deviceCount == 0) {
printf("There is no device supporting CUDA.\n");
return -2;
}
printf("CUDA Device Number: %d\n", deviceNum);
DRIVER_API_CALL(cuDeviceGet(&device, deviceNum));
CUDA_SAFE_CALL(cudaGetDeviceProperties(&deviceProp, device));
DRIVER_API_CALL(cuDeviceGetName(deviceName, 32, device));
int i;
// NOTE(review): stray forward declaration; appears unused.
class type;
uint64_t L2units;
size_t sizet=sizeof(L2units);
for (i=0;i<outer_reps;i++){
uint32_t all = 1;
// Results always land in row 0; only the last run's time survives.
runbench(0,&time[0][0],&value[0][0],cdin,cdout);
printf("Registered time: %f ms\n",time[0][0]);
}
CUDA_SAFE_CALL( cudaDeviceReset());
return 0;
}
|
5fb1133264739050ce2e9f88fb3edc822a53fddd.hip | // !!! This is a file automatically generated by hipify!!!
#include "minimize.h"
#include "integration_kernel.h"
#include <mirheo/core/logger.h>
#include <mirheo/core/pvs/particle_vector.h>
#include <mirheo/core/utils/config.h>
namespace mirheo
{
// Construct from explicit parameters; maxDisplacement_ caps the per-step
// particle displacement applied by execute().
IntegratorMinimize::IntegratorMinimize(const MirState *state, const std::string& name, real maxDisplacement) :
Integrator(state, name), maxDisplacement_{maxDisplacement}
{}
// Snapshot-loading constructor: delegates to the primary constructor with
// values read from the config object.  The Loader argument is unused here.
IntegratorMinimize::IntegratorMinimize(const MirState *state, Loader&, const ConfigObject& object) :
IntegratorMinimize(state, object["name"], object["maxDisplacement"])
{}
// Advance particles one energy-minimization step on `stream`:
// each particle moves by dr = dt^2 * f / m, with |dr| clamped to
// maxDisplacement_ to keep the minimization stable, then the particle
// vector is marked as modified.
void IntegratorMinimize::execute(ParticleVector *pv, hipStream_t stream)
{
    // Only dt is needed; the previous revision also fetched currentTime
    // into an unused local, which triggered -Wunused-variable.
    const auto dt = static_cast<real>(getState()->dt);

    auto st2 = [max = maxDisplacement_] __device__ (Particle& p, real3 f, real invm, real dt)
    {
        // Limit the displacement magnitude to `max`.
        real3 dr = dt * dt * invm * f;
        real dr2 = dot(dr, dr);
        if (dr2 > max * max)
            dr *= max * math::rsqrt(dr2);
        p.r += dr;
    };

    integrate(pv, dt, st2, stream);
    invalidatePV_(pv);
}
// Register this object with the saver under its concrete type name.
void IntegratorMinimize::saveSnapshotAndRegister(Saver& saver)
{
saver.registerObject(this, _saveSnapshot(saver, "IntegratorMinimize"));
}
// Serialize base-class state plus this integrator's maxDisplacement.
ConfigObject IntegratorMinimize::_saveSnapshot(Saver& saver, const std::string& typeName)
{
ConfigObject config = Integrator::_saveSnapshot(saver, typeName);
config.emplace("maxDisplacement", saver(maxDisplacement_));
return config;
}
} // namespace mirheo
| 5fb1133264739050ce2e9f88fb3edc822a53fddd.cu | #include "minimize.h"
#include "integration_kernel.h"
#include <mirheo/core/logger.h>
#include <mirheo/core/pvs/particle_vector.h>
#include <mirheo/core/utils/config.h>
namespace mirheo
{
IntegratorMinimize::IntegratorMinimize(const MirState *state, const std::string& name, real maxDisplacement) :
Integrator(state, name), maxDisplacement_{maxDisplacement}
{}
IntegratorMinimize::IntegratorMinimize(const MirState *state, Loader&, const ConfigObject& object) :
IntegratorMinimize(state, object["name"], object["maxDisplacement"])
{}
void IntegratorMinimize::execute(ParticleVector *pv, cudaStream_t stream)
{
const auto t = static_cast<real>(getState()->currentTime);
const auto dt = static_cast<real>(getState()->dt);
auto st2 = [max = maxDisplacement_] __device__ (Particle& p, real3 f, real invm, real dt)
{
// Limit the displacement magnitude to `max`.
real3 dr = dt * dt * invm * f;
real dr2 = dot(dr, dr);
if (dr2 > max * max)
dr *= max * math::rsqrt(dr2);
p.r += dr;
};
integrate(pv, dt, st2, stream);
invalidatePV_(pv);
}
void IntegratorMinimize::saveSnapshotAndRegister(Saver& saver)
{
saver.registerObject(this, _saveSnapshot(saver, "IntegratorMinimize"));
}
ConfigObject IntegratorMinimize::_saveSnapshot(Saver& saver, const std::string& typeName)
{
ConfigObject config = Integrator::_saveSnapshot(saver, typeName);
config.emplace("maxDisplacement", saver(maxDisplacement_));
return config;
}
} // namespace mirheo
|
d3fcae24c235361c4e7af53f5e238c52e83086d1.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright 2019 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <unistd.h>
#include <algorithm>
#include <complex>
#include <limits>
#include <string>
#include <custatevec.h>
#include "../lib/circuit_qsim_parser.h"
#include "../lib/formux.h"
#include "../lib/fuser_mqubit.h"
#include "../lib/gates_qsim.h"
#include "../lib/io_file.h"
#include "../lib/run_qsim.h"
#include "../lib/simulator_custatevec.h"
#include "../lib/util_custatevec.h"
struct Options {
std::string circuit_file;
unsigned maxtime = std::numeric_limits<unsigned>::max();
unsigned seed = 1;
unsigned max_fused_size = 2;
unsigned verbosity = 0;
};
Options GetOptions(int argc, char* argv[]) {
constexpr char usage[] = "usage:\n ./qsim_base -c circuit -d maxtime "
"-s seed -f max_fused_size -v verbosity\n";
Options opt;
int k;
while ((k = getopt(argc, argv, "c:d:s:f:v:")) != -1) {
switch (k) {
case 'c':
opt.circuit_file = optarg;
break;
case 'd':
opt.maxtime = std::atoi(optarg);
break;
case 's':
opt.seed = std::atoi(optarg);
break;
case 'f':
opt.max_fused_size = std::atoi(optarg);
break;
case 'v':
opt.verbosity = std::atoi(optarg);
break;
default:
qsim::IO::errorf(usage);
exit(1);
}
}
return opt;
}
bool ValidateOptions(const Options& opt) {
if (opt.circuit_file.empty()) {
qsim::IO::errorf("circuit file is not provided.\n");
return false;
}
return true;
}
template <typename StateSpace, typename State>
void PrintAmplitudes(
unsigned num_qubits, const StateSpace& state_space, const State& state) {
static constexpr char const* bits[8] = {
"000", "001", "010", "011", "100", "101", "110", "111",
};
uint64_t size = ::min(uint64_t{8}, uint64_t{1} << num_qubits);
unsigned s = 3 - ::min(unsigned{3}, num_qubits);
for (uint64_t i = 0; i < size; ++i) {
auto a = state_space.GetAmpl(state, i);
qsim::IO::messagef("%s:%16.8g%16.8g%16.8g\n",
bits[i] + s, std::real(a), std::imag(a), std::norm(a));
}
}
int main(int argc, char* argv[]) {
using namespace qsim;
auto opt = GetOptions(argc, argv);
if (!ValidateOptions(opt)) {
return 1;
}
using fp_type = float;
Circuit<GateQSim<fp_type>> circuit;
if (!CircuitQsimParser<IOFile>::FromFile(opt.maxtime, opt.circuit_file,
circuit)) {
return 1;
}
struct Factory {
using Simulator = qsim::SimulatorCuStateVec<fp_type>;
using StateSpace = Simulator::StateSpace;
Factory() {
ErrorCheck(hipblasCreate(&cublas_handle));
ErrorCheck(custatevecCreate(&custatevec_handle));
}
~Factory() {
ErrorCheck(hipblasDestroy(cublas_handle));
ErrorCheck(custatevecDestroy(custatevec_handle));
}
StateSpace CreateStateSpace() const {
return StateSpace(cublas_handle, custatevec_handle);
}
Simulator CreateSimulator() const {
return Simulator(custatevec_handle);
}
hipblasHandle_t cublas_handle;
custatevecHandle_t custatevec_handle;
};
using Simulator = Factory::Simulator;
using StateSpace = Simulator::StateSpace;
using State = StateSpace::State;
using Fuser = MultiQubitGateFuser<IO, GateQSim<fp_type>>;
using Runner = QSimRunner<IO, Fuser, Factory>;
Factory factory;
StateSpace state_space = factory.CreateStateSpace();
State state = state_space.Create(circuit.num_qubits);
if (state_space.IsNull(state)) {
IO::errorf("not enough memory: is the number of qubits too large?\n");
return 1;
}
state_space.SetStateZero(state);
Runner::Parameter param;
param.max_fused_size = opt.max_fused_size;
param.seed = opt.seed;
param.verbosity = opt.verbosity;
if (Runner::Run(param, factory, circuit, state)) {
PrintAmplitudes(circuit.num_qubits, state_space, state);
}
return 0;
}
| d3fcae24c235361c4e7af53f5e238c52e83086d1.cu | // Copyright 2019 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <unistd.h>
#include <algorithm>
#include <complex>
#include <limits>
#include <string>
#include <custatevec.h>
#include "../lib/circuit_qsim_parser.h"
#include "../lib/formux.h"
#include "../lib/fuser_mqubit.h"
#include "../lib/gates_qsim.h"
#include "../lib/io_file.h"
#include "../lib/run_qsim.h"
#include "../lib/simulator_custatevec.h"
#include "../lib/util_custatevec.h"
struct Options {
std::string circuit_file;
unsigned maxtime = std::numeric_limits<unsigned>::max();
unsigned seed = 1;
unsigned max_fused_size = 2;
unsigned verbosity = 0;
};
Options GetOptions(int argc, char* argv[]) {
constexpr char usage[] = "usage:\n ./qsim_base -c circuit -d maxtime "
"-s seed -f max_fused_size -v verbosity\n";
Options opt;
int k;
while ((k = getopt(argc, argv, "c:d:s:f:v:")) != -1) {
switch (k) {
case 'c':
opt.circuit_file = optarg;
break;
case 'd':
opt.maxtime = std::atoi(optarg);
break;
case 's':
opt.seed = std::atoi(optarg);
break;
case 'f':
opt.max_fused_size = std::atoi(optarg);
break;
case 'v':
opt.verbosity = std::atoi(optarg);
break;
default:
qsim::IO::errorf(usage);
exit(1);
}
}
return opt;
}
bool ValidateOptions(const Options& opt) {
if (opt.circuit_file.empty()) {
qsim::IO::errorf("circuit file is not provided.\n");
return false;
}
return true;
}
template <typename StateSpace, typename State>
void PrintAmplitudes(
unsigned num_qubits, const StateSpace& state_space, const State& state) {
static constexpr char const* bits[8] = {
"000", "001", "010", "011", "100", "101", "110", "111",
};
uint64_t size = std::min(uint64_t{8}, uint64_t{1} << num_qubits);
unsigned s = 3 - std::min(unsigned{3}, num_qubits);
for (uint64_t i = 0; i < size; ++i) {
auto a = state_space.GetAmpl(state, i);
qsim::IO::messagef("%s:%16.8g%16.8g%16.8g\n",
bits[i] + s, std::real(a), std::imag(a), std::norm(a));
}
}
int main(int argc, char* argv[]) {
using namespace qsim;
auto opt = GetOptions(argc, argv);
if (!ValidateOptions(opt)) {
return 1;
}
using fp_type = float;
Circuit<GateQSim<fp_type>> circuit;
if (!CircuitQsimParser<IOFile>::FromFile(opt.maxtime, opt.circuit_file,
circuit)) {
return 1;
}
struct Factory {
using Simulator = qsim::SimulatorCuStateVec<fp_type>;
using StateSpace = Simulator::StateSpace;
Factory() {
ErrorCheck(cublasCreate(&cublas_handle));
ErrorCheck(custatevecCreate(&custatevec_handle));
}
~Factory() {
ErrorCheck(cublasDestroy(cublas_handle));
ErrorCheck(custatevecDestroy(custatevec_handle));
}
StateSpace CreateStateSpace() const {
return StateSpace(cublas_handle, custatevec_handle);
}
Simulator CreateSimulator() const {
return Simulator(custatevec_handle);
}
cublasHandle_t cublas_handle;
custatevecHandle_t custatevec_handle;
};
using Simulator = Factory::Simulator;
using StateSpace = Simulator::StateSpace;
using State = StateSpace::State;
using Fuser = MultiQubitGateFuser<IO, GateQSim<fp_type>>;
using Runner = QSimRunner<IO, Fuser, Factory>;
Factory factory;
StateSpace state_space = factory.CreateStateSpace();
State state = state_space.Create(circuit.num_qubits);
if (state_space.IsNull(state)) {
IO::errorf("not enough memory: is the number of qubits too large?\n");
return 1;
}
state_space.SetStateZero(state);
Runner::Parameter param;
param.max_fused_size = opt.max_fused_size;
param.seed = opt.seed;
param.verbosity = opt.verbosity;
if (Runner::Run(param, factory, circuit, state)) {
PrintAmplitudes(circuit.num_qubits, state_space, state);
}
return 0;
}
|
a4e65996f98805343dd91a8c23435bd85849b158.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <algorithm>
#include <stdio.h>
#include <hip/hip_fp16.h>
#include "torch_flip.h"
#include "amir_cuda_util/cuda_util.h"
namespace amirstan
{
namespace plugin
{
using namespace amirstan::cuda;
template <typename T>
__global__ void torch_flip_kernel(T* output, const T* input,
const TensorSize input_size, const TensorStride input_stride,
const TensorSize flip_mask, int nb_dims, int count){
const int* in_size = &(input_size.size[0]);
const size_t* stride = &(input_stride.size[0]);
const int* mask = &(flip_mask.size[0]);
CUDA_KERNEL_LOOP(index, count){
size_t dst_index = index;
size_t src_index = 0;
for (int i = 0; i < nb_dims; ++i)
{
int dim_index = dst_index / stride[i];
dst_index = dst_index % stride[i];
if(mask[i]>0){
dim_index = in_size[i]-1-dim_index;
}
src_index += dim_index * stride[i];
}
output[index] = input[src_index];
}
}
static void create_size_stride(const int* dims, int nb_dims, TensorSize &size, TensorStride& stride){
memcpy(&size.size[0], dims, sizeof(int)*nb_dims);
stride.size[nb_dims-1] = 1;
for(int i=nb_dims-2; i>=0; --i){
stride.size[i] = stride.size[i+1] * size.size[i+1];
}
}
template <typename T>
void torch_flip(T *output, const T* input,
int* input_dims, int nb_dims,
int* flip_dims, int nb_flip_dims,
hipStream_t stream){
TensorSize ts_input_size;
TensorStride input_stride;
create_size_stride(input_dims, nb_dims, ts_input_size, input_stride);
int count = ts_input_size.size[0];
for(int i=1; i<nb_dims; ++i){
count*=ts_input_size.size[i];
}
TensorSize flip_mask;
for(int i=0;i<nb_dims;++i){
flip_mask.size[i]=0;
}
for(int i=0;i<nb_flip_dims;++i){
flip_mask.size[flip_dims[i]]=1;
}
hipLaunchKernelGGL(( torch_flip_kernel<T>), dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, stream, output, input,
ts_input_size, input_stride,
flip_mask, nb_dims, count);
}
template void torch_flip<float>(float *output, const float* input,
int* input_dims, int nb_dims,
int* flip_dims, int nb_flip_dims,
hipStream_t stream);
} // namespace plugin
} // namespace amirstan | a4e65996f98805343dd91a8c23435bd85849b158.cu | #include <cmath>
#include <algorithm>
#include <stdio.h>
#include <cuda_fp16.h>
#include "torch_flip.h"
#include "amir_cuda_util/cuda_util.h"
namespace amirstan
{
namespace plugin
{
using namespace amirstan::cuda;
template <typename T>
__global__ void torch_flip_kernel(T* output, const T* input,
const TensorSize input_size, const TensorStride input_stride,
const TensorSize flip_mask, int nb_dims, int count){
const int* in_size = &(input_size.size[0]);
const size_t* stride = &(input_stride.size[0]);
const int* mask = &(flip_mask.size[0]);
CUDA_KERNEL_LOOP(index, count){
size_t dst_index = index;
size_t src_index = 0;
for (int i = 0; i < nb_dims; ++i)
{
int dim_index = dst_index / stride[i];
dst_index = dst_index % stride[i];
if(mask[i]>0){
dim_index = in_size[i]-1-dim_index;
}
src_index += dim_index * stride[i];
}
output[index] = input[src_index];
}
}
static void create_size_stride(const int* dims, int nb_dims, TensorSize &size, TensorStride& stride){
memcpy(&size.size[0], dims, sizeof(int)*nb_dims);
stride.size[nb_dims-1] = 1;
for(int i=nb_dims-2; i>=0; --i){
stride.size[i] = stride.size[i+1] * size.size[i+1];
}
}
template <typename T>
void torch_flip(T *output, const T* input,
int* input_dims, int nb_dims,
int* flip_dims, int nb_flip_dims,
cudaStream_t stream){
TensorSize ts_input_size;
TensorStride input_stride;
create_size_stride(input_dims, nb_dims, ts_input_size, input_stride);
int count = ts_input_size.size[0];
for(int i=1; i<nb_dims; ++i){
count*=ts_input_size.size[i];
}
TensorSize flip_mask;
for(int i=0;i<nb_dims;++i){
flip_mask.size[i]=0;
}
for(int i=0;i<nb_flip_dims;++i){
flip_mask.size[flip_dims[i]]=1;
}
torch_flip_kernel<T><<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream>>>(output, input,
ts_input_size, input_stride,
flip_mask, nb_dims, count);
}
template void torch_flip<float>(float *output, const float* input,
int* input_dims, int nb_dims,
int* flip_dims, int nb_flip_dims,
cudaStream_t stream);
} // namespace plugin
} // namespace amirstan |
4421749c2b8d57160d984e23dd4013ed2aa85767.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "boost/date_time/posix_time/posix_time.hpp"
//#define DEBUG
using namespace std;
__global__ void fastReadGlobal(int * array, int size, int * forceRun) {
int idx = (blockDim.x * blockIdx.x) + threadIdx.x;
//__syncthreads();
int x = array[idx];
//__syncthreads();
forceRun[0] = x;
}
__global__ void fastReadReg(int * array, int size, int * forceRun) {
int idx = (blockDim.x * blockIdx.x) + threadIdx.x;
forceRun[0] = idx;
}
double convertToTime(boost::posix_time::ptime t1, boost::posix_time::ptime t2) {
boost::posix_time::time_duration duration = t2 - t1;
unsigned long micro = duration.total_microseconds();
return(micro / 1000000.);
}
#ifdef DEBUG
//Pre: devArray and devForceRun are ptrs to device memory.
void test(int * devArray, int size, int * devForceRun) {
cout << "======" << endl
<< " TEST " << endl
<< "======" << endl;
int random = 9827123;
int * hostForceRun = new int(random);//Some random number
hipMemcpy(hostForceRun, devForceRun, sizeof(int), hipMemcpyDeviceToHost);
int forceRun = (*hostForceRun);
if(forceRun == random) {
cout << "Debug error: devForceRun value was unchanged" << endl;
} else if(!((0 <= forceRun) && (forceRun < size))) {
cout << "Debug error: devForceRun value changed, but to an invalid idx value: "
<< forceRun << endl;
} else {
cout << "PASS: forceRun was " << forceRun << endl;
}
int * hostArray = new int[size];
hipMemcpy(hostArray, devArray, size * sizeof(int), hipMemcpyDeviceToHost);
bool pass = true;
for(int i = 0; i < size; i++) {
if(hostArray[i] != 0) {
pass = false;
}
}
if(pass) {
cout << "PASS: hostArray was correct" << endl;
} else {
cout << "Debug error" << endl;
}
}
#endif
#define NUM_THREADS 512
#define HOW_BIG_TO_MAKE_IT 419430400
#define WHAT_TO_MAKE_IT 0
void resetMemory() {
bool * wiper;
hipMalloc(&wiper, HOW_BIG_TO_MAKE_IT * sizeof(bool));
hipMemset(wiper, WHAT_TO_MAKE_IT, HOW_BIG_TO_MAKE_IT * sizeof(bool));
hipFree(wiper);
}
void timeReg(int numBlocks, int numThreads, int size) {
//Device array to be set to [0, 1, 2, ... size-1]:
int * devArray; //An array of ints
hipMalloc(&devArray, size * sizeof(int));
//Device array that forces fastReadReg to run.
int * devForceRun;//Just an int
hipMalloc(&devForceRun, sizeof(int));
boost::posix_time::ptime regT1(boost::posix_time::microsec_clock::local_time());
hipLaunchKernelGGL(( fastReadReg), dim3(numBlocks), dim3(numThreads), 0, 0, devArray, size, devForceRun);
hipDeviceSynchronize();
boost::posix_time::ptime regT2(boost::posix_time::microsec_clock::local_time());
#ifdef DEBUG
test(devArray, size, devForceRun);
#endif
cout << "RegTime: " << convertToTime(regT1, regT2) << endl;
hipFree(devForceRun);
hipFree(devArray);
resetMemory();
}
void timeGlobal(int numBlocks, int numThreads, int size) {
//Device array to be set to [0, 1, 2, ... size-1]:
int * devArray; //An array of ints
hipMalloc(&devArray, size * sizeof(int));
//Device array that forces fastReadReg to run.
int * devForceRun;//Just an int
hipMalloc(&devForceRun, sizeof(int));
boost::posix_time::ptime globalT1(boost::posix_time::microsec_clock::local_time());
hipLaunchKernelGGL(( fastReadGlobal), dim3(numBlocks), dim3(numThreads), 0, 0, devArray, size, devForceRun);
hipDeviceSynchronize();
boost::posix_time::ptime globalT2(boost::posix_time::microsec_clock::local_time());
#ifdef DEBUG
test(devArray, size, devForceRun);
#endif
cout << "GlobalTime: " << convertToTime(globalT1, globalT2) << endl;
hipFree(devForceRun);
hipFree(devArray);
resetMemory();
}
int main(int argc, char ** argv) {
if(argc != 2) {
cout << "Expected number of threads to run\n";
} else {
resetMemory();
int size = atoi(argv[1]);
if(size % NUM_THREADS != 0) {
cout << "Error: size must be divisible by " << NUM_THREADS << endl;
} else {
int numThreads = NUM_THREADS;
int numBlocks = (size / numThreads);
int max = (2 * 2 * 2 * 2 * 2 * 2 * 2 * 2 * 2 * 2 * 2 * 2 * 2 * 2 * 2 * 2);
if(numBlocks >= max) {
cout << "numBlocks was to big: " << numBlocks << endl;
} else {
timeGlobal(numBlocks, numThreads, size);
timeReg(numBlocks, numThreads, size);
timeGlobal(numBlocks, numThreads, size);
timeReg(numBlocks, numThreads, size);
}
}
}
return(0);
}
| 4421749c2b8d57160d984e23dd4013ed2aa85767.cu | #include <iostream>
#include "boost/date_time/posix_time/posix_time.hpp"
//#define DEBUG
using namespace std;
__global__ void fastReadGlobal(int * array, int size, int * forceRun) {
int idx = (blockDim.x * blockIdx.x) + threadIdx.x;
//__syncthreads();
int x = array[idx];
//__syncthreads();
forceRun[0] = x;
}
__global__ void fastReadReg(int * array, int size, int * forceRun) {
int idx = (blockDim.x * blockIdx.x) + threadIdx.x;
forceRun[0] = idx;
}
double convertToTime(boost::posix_time::ptime t1, boost::posix_time::ptime t2) {
boost::posix_time::time_duration duration = t2 - t1;
unsigned long micro = duration.total_microseconds();
return(micro / 1000000.);
}
#ifdef DEBUG
//Pre: devArray and devForceRun are ptrs to device memory.
void test(int * devArray, int size, int * devForceRun) {
cout << "======" << endl
<< " TEST " << endl
<< "======" << endl;
int random = 9827123;
int * hostForceRun = new int(random);//Some random number
cudaMemcpy(hostForceRun, devForceRun, sizeof(int), cudaMemcpyDeviceToHost);
int forceRun = (*hostForceRun);
if(forceRun == random) {
cout << "Debug error: devForceRun value was unchanged" << endl;
} else if(!((0 <= forceRun) && (forceRun < size))) {
cout << "Debug error: devForceRun value changed, but to an invalid idx value: "
<< forceRun << endl;
} else {
cout << "PASS: forceRun was " << forceRun << endl;
}
int * hostArray = new int[size];
cudaMemcpy(hostArray, devArray, size * sizeof(int), cudaMemcpyDeviceToHost);
bool pass = true;
for(int i = 0; i < size; i++) {
if(hostArray[i] != 0) {
pass = false;
}
}
if(pass) {
cout << "PASS: hostArray was correct" << endl;
} else {
cout << "Debug error" << endl;
}
}
#endif
#define NUM_THREADS 512
#define HOW_BIG_TO_MAKE_IT 419430400
#define WHAT_TO_MAKE_IT 0
void resetMemory() {
bool * wiper;
cudaMalloc(&wiper, HOW_BIG_TO_MAKE_IT * sizeof(bool));
cudaMemset(wiper, WHAT_TO_MAKE_IT, HOW_BIG_TO_MAKE_IT * sizeof(bool));
cudaFree(wiper);
}
void timeReg(int numBlocks, int numThreads, int size) {
//Device array to be set to [0, 1, 2, ... size-1]:
int * devArray; //An array of ints
cudaMalloc(&devArray, size * sizeof(int));
//Device array that forces fastReadReg to run.
int * devForceRun;//Just an int
cudaMalloc(&devForceRun, sizeof(int));
boost::posix_time::ptime regT1(boost::posix_time::microsec_clock::local_time());
fastReadReg<<<numBlocks, numThreads>>>(devArray, size, devForceRun);
cudaDeviceSynchronize();
boost::posix_time::ptime regT2(boost::posix_time::microsec_clock::local_time());
#ifdef DEBUG
test(devArray, size, devForceRun);
#endif
cout << "RegTime: " << convertToTime(regT1, regT2) << endl;
cudaFree(devForceRun);
cudaFree(devArray);
resetMemory();
}
void timeGlobal(int numBlocks, int numThreads, int size) {
//Device array to be set to [0, 1, 2, ... size-1]:
int * devArray; //An array of ints
cudaMalloc(&devArray, size * sizeof(int));
//Device array that forces fastReadReg to run.
int * devForceRun;//Just an int
cudaMalloc(&devForceRun, sizeof(int));
boost::posix_time::ptime globalT1(boost::posix_time::microsec_clock::local_time());
fastReadGlobal<<<numBlocks, numThreads>>>(devArray, size, devForceRun);
cudaDeviceSynchronize();
boost::posix_time::ptime globalT2(boost::posix_time::microsec_clock::local_time());
#ifdef DEBUG
test(devArray, size, devForceRun);
#endif
cout << "GlobalTime: " << convertToTime(globalT1, globalT2) << endl;
cudaFree(devForceRun);
cudaFree(devArray);
resetMemory();
}
int main(int argc, char ** argv) {
if(argc != 2) {
cout << "Expected number of threads to run\n";
} else {
resetMemory();
int size = atoi(argv[1]);
if(size % NUM_THREADS != 0) {
cout << "Error: size must be divisible by " << NUM_THREADS << endl;
} else {
int numThreads = NUM_THREADS;
int numBlocks = (size / numThreads);
int max = (2 * 2 * 2 * 2 * 2 * 2 * 2 * 2 * 2 * 2 * 2 * 2 * 2 * 2 * 2 * 2);
if(numBlocks >= max) {
cout << "numBlocks was to big: " << numBlocks << endl;
} else {
timeGlobal(numBlocks, numThreads, size);
timeReg(numBlocks, numThreads, size);
timeGlobal(numBlocks, numThreads, size);
timeReg(numBlocks, numThreads, size);
}
}
}
return(0);
}
|
a54526a1164b5074f17bc673a28b839eaad0a4bc.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <chrono>
#include <string>
#define N 13
//#define TRANSPOSED
typedef std::chrono::high_resolution_clock Clock;
int32_t matrixCGlobal[N][N];
int32_t matrixAGlobal[N * N] = {
14, 39, 117, 89, 111, 73, 79, 102, 52, 81, 123, 70, 39,
82, 29, 125, 85, 51, 60, 102, 39, 120, 106, 19, 15, 58,
124, 31, 32, 23, 19, 69, 60, 61, 10, 33, 72, 1, 91,
96, 112, 32, 111, 90, 12, 63, 77, 47, 105, 115, 38, 90,
13, 35, 23, 78, 57, 109, 122, 89, 21, 116, 86, 123, 113,
27, 14, 80, 69, 9, 23, 106, 26, 115, 31, 6, 73, 112,
53, 70, 64, 118, 121, 17, 6, 113, 30, 8, 5, 116, 66,
12, 113, 71, 94, 98, 116, 2, 95, 66, 107, 54, 11, 34,
90, 36, 81, 124, 73, 41, 105, 14, 127, 109, 87, 29, 2,
84, 77, 56, 81, 21, 81, 110, 110, 123, 104, 113, 39, 54,
75, 102, 44, 79, 61, 55, 90, 125, 52, 45, 4, 120, 12,
20, 20, 105, 41, 20, 44, 108, 74, 72, 62, 76, 34, 111,
38, 97, 124, 5, 97, 87, 85, 106, 12, 31, 87, 6, 77
};
int32_t matrixBGlobal[N * N] = {
69, 96, 71, 89, 127, 108, 96, 121, 64, 65, 62, 91, 73,
9, 67, 113, 48, 47, 53, 96, 66, 7, 63, 17, 9, 8,
107, 45, 112, 33, 114, 48, 102, 70, 52, 47, 34, 81, 17,
38, 15, 61, 1, 104, 82, 68, 53, 69, 110, 12, 25, 46,
111, 89, 54, 0, 107, 81, 127, 124, 36, 17, 99, 117, 75,
125, 72, 48, 67, 31, 104, 64, 98, 94, 57, 81, 15, 16,
111, 16, 127, 119, 88, 41, 75, 125, 22, 50, 120, 6, 81,
75, 7, 78, 38, 35, 115, 114, 37, 66, 106, 64, 91, 97,
75, 102, 84, 112, 65, 76, 87, 22, 45, 100, 19, 18, 89,
27, 25, 109, 18, 116, 19, 116, 33, 103, 31, 29, 78, 8,
24, 12, 86, 20, 32, 53, 31, 13, 51, 36, 100, 56, 44,
13, 8, 54, 24, 101, 73, 115, 120, 56, 23, 63, 39, 93,
77, 50, 108, 56, 106, 58, 121, 74, 70, 88, 19, 49, 83
};
int32_t matrixB_transposed[N * N];
// Calculates AB + A + B
//void naiveMatrixComputation() {
// int8_t c, d, k;
// int32_t sum;
// for (c = 0; c < N; c++) {
// for (d = 0; d < N; d++) {
// sum = 0;
// for (k = 0; k < N; ++k) {
// sum += matrixAGlobal[c][k] * matrixBGlobal[k][d];
// }
// sum += matrixAGlobal[c][d] + matrixBGlobal[c][d];
// matrixCGlobal[c][d] = sum;
// }
// }
//}
// Calculates AB + A + B
__global__ void
gpuMatrixComputation(const int32_t* matrixA, const int32_t* matrixB, int32_t* matrixC) {
int8_t c, d, k;
for (c = 0; c < N; ++c) {
for (d = 0; d < N; ++d) {
matrixC[c * N + d] = 0;
for (k = 0; k < N; ++k) {
matrixC[c * N + d] += matrixA[c * N + k] * matrixB[k * N + d];
}
matrixC[c * N + d] += matrixA[c * N + d] + matrixB[c * N + d];
}
}
}
// Calculates AB + A + B
__global__ void
gpuParallelMatrixComputation(const int32_t* matrixA, const int32_t* matrixB, int32_t* matrixC, const int size) {
int x = blockIdx.x;
int y = threadIdx.x;
int sum = 0;
#pragma unroll
for (int k = 0; k < size; ++k) {
#ifdef TRANSPOSED
sum += matrixA[x * size + k] * matrixB[y * size + k];
#else
sum += matrixA[x * size + k] * matrixB[k * size + y];
#endif
}
sum += matrixA[x * size + y] + matrixB[x * size + y];
matrixC[x * size + y] = sum;
}
int main() {
/*
* Measure the time it takes to execute 1000 times
*/
std::cout << "Malloc device mem" << std::endl;
for (int c = 0; c < N; ++c)
for (int d = 0; d < N; ++d)
matrixB_transposed[c * N + d] = matrixBGlobal[d * N + c];
int32_t* gpu_a, * gpu_b;
int32_t* gpu_c;
int32_t* c_out;
c_out = (int32_t*)malloc(N * N * sizeof(int32_t));
// We need variables accessible to the GPU,
// so hipMallocManaged provides these
if (hipMallocManaged(&gpu_a, N * N * sizeof(int32_t)) != 0) {
std::cout << "malloc failed" << std::endl;
}
if (hipMallocManaged(&gpu_b, N * N * sizeof(int32_t)) != 0) {
std::cout << "malloc failed" << std::endl;
}
if (hipMallocManaged(&gpu_c, N * N * sizeof(int32_t)) != 0) {
std::cout << "malloc failed" << std::endl;
}
std::cout << "Move to device mem" << std::endl;
if (hipMemcpy(gpu_a, matrixAGlobal, (N * N * sizeof(int32_t)), hipMemcpyHostToDevice) != 0) {
std::cout << "memcpy failed" << std::endl;
}
#ifdef TRANSPOSED
if (hipMemcpy(gpu_b, matrixB_transposed, (N * N * sizeof(int32_t)), hipMemcpyHostToDevice) != 0) {
std::cout << "memcpy failed" << std::endl;
}
#else
if (hipMemcpy(gpu_b, matrixBGlobal, (N * N * sizeof(int32_t)), hipMemcpyHostToDevice) != 0) {
std::cout << "memcpy failed" << std::endl;
}
#endif
std::cout << "Computation" << std::endl;
//Hier een goede keuze maken
dim3 threadsPerBlock = 13; // Should be a factor of 32
dim3 numBlocks = 13; // Very overkill but we now make use of all possible core
unsigned long time_taken = 0;
for (uint16_t i = 0; i < 1e4; i++) {
auto cpu_start = Clock::now();
//88593
// gpuMatrixComputation<<<1, 1>>>(gpu_a, gpu_b, gpu_c);
hipLaunchKernelGGL(( gpuParallelMatrixComputation) , dim3(numBlocks), dim3(threadsPerBlock) , 0, 0, gpu_a, gpu_b, gpu_c, N);
hipDeviceSynchronize();
auto cpu_end = Clock::now();
time_taken += std::chrono::duration_cast<std::chrono::nanoseconds>(cpu_end - cpu_start).count();
}
/*
* Print the resultating matrix
*/
int8_t c, d;
std::cout << "Copy from device to host" << std::endl;
int hipError_t = hipMemcpy(c_out, gpu_c, (N * N * sizeof(int32_t)), hipMemcpyDeviceToHost);
if (hipError_t != 0) {
std::cout << "memcpy output failed: " << hipError_t << std::endl;
}
std::cout << "Printing mem" << std::endl;
for (c = 0; c < N; c++) {
for (d = 0; d < N; d++) {
std::cout << std::to_string((int32_t)c_out[c * N + d]) + "\t";
}
std::cout << "\n";
}
std::cout << "Freeing mem" << std::endl;
hipFree(gpu_a);
hipFree(gpu_b);
hipFree(gpu_c);
std::cout << "Calculation took: " << std::to_string(time_taken / 1000) << " nanoseconds" << std::endl;
return 0;
} | a54526a1164b5074f17bc673a28b839eaad0a4bc.cu | #include <iostream>
#include <cuda_runtime.h>
#include <chrono>
#include <string>
#define N 13
//#define TRANSPOSED
typedef std::chrono::high_resolution_clock Clock;
int32_t matrixCGlobal[N][N];
int32_t matrixAGlobal[N * N] = {
14, 39, 117, 89, 111, 73, 79, 102, 52, 81, 123, 70, 39,
82, 29, 125, 85, 51, 60, 102, 39, 120, 106, 19, 15, 58,
124, 31, 32, 23, 19, 69, 60, 61, 10, 33, 72, 1, 91,
96, 112, 32, 111, 90, 12, 63, 77, 47, 105, 115, 38, 90,
13, 35, 23, 78, 57, 109, 122, 89, 21, 116, 86, 123, 113,
27, 14, 80, 69, 9, 23, 106, 26, 115, 31, 6, 73, 112,
53, 70, 64, 118, 121, 17, 6, 113, 30, 8, 5, 116, 66,
12, 113, 71, 94, 98, 116, 2, 95, 66, 107, 54, 11, 34,
90, 36, 81, 124, 73, 41, 105, 14, 127, 109, 87, 29, 2,
84, 77, 56, 81, 21, 81, 110, 110, 123, 104, 113, 39, 54,
75, 102, 44, 79, 61, 55, 90, 125, 52, 45, 4, 120, 12,
20, 20, 105, 41, 20, 44, 108, 74, 72, 62, 76, 34, 111,
38, 97, 124, 5, 97, 87, 85, 106, 12, 31, 87, 6, 77
};
int32_t matrixBGlobal[N * N] = {
69, 96, 71, 89, 127, 108, 96, 121, 64, 65, 62, 91, 73,
9, 67, 113, 48, 47, 53, 96, 66, 7, 63, 17, 9, 8,
107, 45, 112, 33, 114, 48, 102, 70, 52, 47, 34, 81, 17,
38, 15, 61, 1, 104, 82, 68, 53, 69, 110, 12, 25, 46,
111, 89, 54, 0, 107, 81, 127, 124, 36, 17, 99, 117, 75,
125, 72, 48, 67, 31, 104, 64, 98, 94, 57, 81, 15, 16,
111, 16, 127, 119, 88, 41, 75, 125, 22, 50, 120, 6, 81,
75, 7, 78, 38, 35, 115, 114, 37, 66, 106, 64, 91, 97,
75, 102, 84, 112, 65, 76, 87, 22, 45, 100, 19, 18, 89,
27, 25, 109, 18, 116, 19, 116, 33, 103, 31, 29, 78, 8,
24, 12, 86, 20, 32, 53, 31, 13, 51, 36, 100, 56, 44,
13, 8, 54, 24, 101, 73, 115, 120, 56, 23, 63, 39, 93,
77, 50, 108, 56, 106, 58, 121, 74, 70, 88, 19, 49, 83
};
int32_t matrixB_transposed[N * N];
// Calculates AB + A + B
//void naiveMatrixComputation() {
// int8_t c, d, k;
// int32_t sum;
// for (c = 0; c < N; c++) {
// for (d = 0; d < N; d++) {
// sum = 0;
// for (k = 0; k < N; ++k) {
// sum += matrixAGlobal[c][k] * matrixBGlobal[k][d];
// }
// sum += matrixAGlobal[c][d] + matrixBGlobal[c][d];
// matrixCGlobal[c][d] = sum;
// }
// }
//}
// Calculates AB + A + B
__global__ void
gpuMatrixComputation(const int32_t* matrixA, const int32_t* matrixB, int32_t* matrixC) {
int8_t c, d, k;
for (c = 0; c < N; ++c) {
for (d = 0; d < N; ++d) {
matrixC[c * N + d] = 0;
for (k = 0; k < N; ++k) {
matrixC[c * N + d] += matrixA[c * N + k] * matrixB[k * N + d];
}
matrixC[c * N + d] += matrixA[c * N + d] + matrixB[c * N + d];
}
}
}
// Calculates AB + A + B
__global__ void
gpuParallelMatrixComputation(const int32_t* matrixA, const int32_t* matrixB, int32_t* matrixC, const int size) {
int x = blockIdx.x;
int y = threadIdx.x;
int sum = 0;
#pragma unroll
for (int k = 0; k < size; ++k) {
#ifdef TRANSPOSED
sum += matrixA[x * size + k] * matrixB[y * size + k];
#else
sum += matrixA[x * size + k] * matrixB[k * size + y];
#endif
}
sum += matrixA[x * size + y] + matrixB[x * size + y];
matrixC[x * size + y] = sum;
}
int main() {
/*
* Measure the time it takes to execute 1000 times
*/
std::cout << "Malloc device mem" << std::endl;
for (int c = 0; c < N; ++c)
for (int d = 0; d < N; ++d)
matrixB_transposed[c * N + d] = matrixBGlobal[d * N + c];
int32_t* gpu_a, * gpu_b;
int32_t* gpu_c;
int32_t* c_out;
c_out = (int32_t*)malloc(N * N * sizeof(int32_t));
// We need variables accessible to the GPU,
// so cudaMallocManaged provides these
if (cudaMallocManaged(&gpu_a, N * N * sizeof(int32_t)) != 0) {
std::cout << "malloc failed" << std::endl;
}
if (cudaMallocManaged(&gpu_b, N * N * sizeof(int32_t)) != 0) {
std::cout << "malloc failed" << std::endl;
}
if (cudaMallocManaged(&gpu_c, N * N * sizeof(int32_t)) != 0) {
std::cout << "malloc failed" << std::endl;
}
std::cout << "Move to device mem" << std::endl;
if (cudaMemcpy(gpu_a, matrixAGlobal, (N * N * sizeof(int32_t)), cudaMemcpyHostToDevice) != 0) {
std::cout << "memcpy failed" << std::endl;
}
#ifdef TRANSPOSED
if (cudaMemcpy(gpu_b, matrixB_transposed, (N * N * sizeof(int32_t)), cudaMemcpyHostToDevice) != 0) {
std::cout << "memcpy failed" << std::endl;
}
#else
if (cudaMemcpy(gpu_b, matrixBGlobal, (N * N * sizeof(int32_t)), cudaMemcpyHostToDevice) != 0) {
std::cout << "memcpy failed" << std::endl;
}
#endif
std::cout << "Computation" << std::endl;
//Hier een goede keuze maken
dim3 threadsPerBlock = 13; // Should be a factor of 32
dim3 numBlocks = 13; // Very overkill but we now make use of all possible core
unsigned long time_taken = 0;
for (uint16_t i = 0; i < 1e4; i++) {
auto cpu_start = Clock::now();
//88593
// gpuMatrixComputation<<<1, 1>>>(gpu_a, gpu_b, gpu_c);
gpuParallelMatrixComputation <<<numBlocks, threadsPerBlock >>> (gpu_a, gpu_b, gpu_c, N);
cudaDeviceSynchronize();
auto cpu_end = Clock::now();
time_taken += std::chrono::duration_cast<std::chrono::nanoseconds>(cpu_end - cpu_start).count();
}
/*
* Print the resultating matrix
*/
int8_t c, d;
std::cout << "Copy from device to host" << std::endl;
int cudaError = cudaMemcpy(c_out, gpu_c, (N * N * sizeof(int32_t)), cudaMemcpyDeviceToHost);
if (cudaError != 0) {
std::cout << "memcpy output failed: " << cudaError << std::endl;
}
std::cout << "Printing mem" << std::endl;
for (c = 0; c < N; c++) {
for (d = 0; d < N; d++) {
std::cout << std::to_string((int32_t)c_out[c * N + d]) + "\t";
}
std::cout << "\n";
}
std::cout << "Freeing mem" << std::endl;
cudaFree(gpu_a);
cudaFree(gpu_b);
cudaFree(gpu_c);
std::cout << "Calculation took: " << std::to_string(time_taken / 1000) << " nanoseconds" << std::endl;
return 0;
} |
abe26ffbe61eafe5cafdb5f7d7c057f901e86a7f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _FILTER_KERNEL_H_
#define _FILTER_KERNEL_H_
__global__ void SobelFilter(unsigned char* g_DataIn, unsigned char* g_DataOut, int width, int height)
{
__shared__ unsigned char sharedMem[BLOCK_HEIGHT * BLOCK_WIDTH];
float s_SobelMatrix[9];
s_SobelMatrix[0] = -1;
s_SobelMatrix[1] = 0;
s_SobelMatrix[2] = 1;
s_SobelMatrix[3] = -2;
s_SobelMatrix[4] = 0;
s_SobelMatrix[5] = 2;
s_SobelMatrix[6] = -1;
s_SobelMatrix[7] = 0;
s_SobelMatrix[8] = 1;
// Computer the X and Y global coordinates
int x = blockIdx.x * TILE_WIDTH + threadIdx.x ;//- FILTER_RADIUS;
int y = blockIdx.y * TILE_HEIGHT + threadIdx.y ;//- FILTER_RADIUS;
// Get the Global index into the original image
int index = y * (width) + x;
// STUDENT: Check 1
// Handle the extra thread case where the image width or height
//
if (x >= width || y >= height)
return;
// STUDENT: Check 2
// Handle the border cases of the global image
if( x < FILTER_RADIUS || y < FILTER_RADIUS) {
g_DataOut[index] = g_DataIn[index];
return;
}
if ((x > width - FILTER_RADIUS - 1)&&(x <width)) {
g_DataOut[index] = g_DataIn[index];
return;
}
if ((y > height - FILTER_RADIUS - 1)&&(y < height)) {
g_DataOut[index] = g_DataIn[index];
return;
}
// Perform the first load of values into shared memory
int sharedIndex = threadIdx.y * blockDim.y + threadIdx.x;
sharedMem[sharedIndex] = g_DataIn[index];
__syncthreads();
// STUDENT: Make sure only the thread ids should write the sum of the neighbors.
if(threadIdx.x >= (blockDim.x - FILTER_RADIUS) || threadIdx.x < FILTER_RADIUS)
return;
if(threadIdx.y >= (blockDim.y - FILTER_RADIUS) || threadIdx.y < FILTER_RADIUS)
return;
float sumX = 0, sumY=0;
for(int dy= -FILTER_RADIUS; dy<=FILTER_RADIUS; dy++) {
for(int dx = -FILTER_RADIUS; dx<=FILTER_RADIUS; dx++) {
float Pixel = (float)(sharedMem[sharedIndex + (dy * blockDim.x + dx)]);
sumX += (float)(Pixel * s_SobelMatrix[(dy + FILTER_RADIUS) * FILTER_DIAMETER + (dx+FILTER_RADIUS)]);
sumY += (float)(Pixel * s_SobelMatrix[(dx + FILTER_RADIUS) * FILTER_DIAMETER + (dy+FILTER_RADIUS)]);
}
}
g_DataOut[index] = (abs(sumX) + abs(sumY)) > EDGE_VALUE_THRESHOLD ? 255 : 0;
}
__global__ void AverageFilter(unsigned char* g_DataIn, unsigned char* g_DataOut, int width, int height)
{
__shared__ unsigned char sharedMem[BLOCK_HEIGHT*BLOCK_WIDTH];
int x = blockIdx.x * TILE_WIDTH + threadIdx.x ;//- FILTER_RADIUS;
int y = blockIdx.y * TILE_HEIGHT + threadIdx.y ;//- FILTER_RADIUS;
// Get the Global index into the original image
int index = y * (width) + x;
// STUDENT: write code for Average Filter : use Sobel as base code
if (x >= width || y >= height)
return;
// STUDENT: Check 2
// Handle the border cases of the global image
if( x < FILTER_RADIUS || y < FILTER_RADIUS) {
g_DataOut[index] = g_DataIn[index];
return;
}
if ((x > width - FILTER_RADIUS - 1)&&(x <width)) {
g_DataOut[index] = g_DataIn[index];
return;
}
if ((y > height - FILTER_RADIUS - 1)&&(y < height)) {
g_DataOut[index] = g_DataIn[index];
return;
}
// Perform the first load of values into shared memory
int sharedIndex = threadIdx.y * blockDim.y + threadIdx.x;
sharedMem[sharedIndex] = g_DataIn[index];
__syncthreads();
// STUDENT: Make sure only the thread ids should write the sum of the neighbors.
if(threadIdx.x >= (BLOCK_WIDTH - FILTER_RADIUS) || threadIdx.x < FILTER_RADIUS)
return;
if(threadIdx.y >= (BLOCK_HEIGHT - FILTER_RADIUS) || threadIdx.y < FILTER_RADIUS)
return;
float sumX = 0;
for(int dy= -FILTER_RADIUS; dy<=FILTER_RADIUS; dy++) {
for(int dx = -FILTER_RADIUS; dx<=FILTER_RADIUS; dx++) {
float Pixel = (float)(sharedMem[sharedIndex + (dy * blockDim.x + dx)]);
sumX+=Pixel;
}
}
g_DataOut[index] = (unsigned char)(sumX/FILTER_AREA);
}
__global__ void HighBoostFilter(unsigned char* g_DataIn, unsigned char* g_DataOut, int width, int height)
{
__shared__ unsigned char sharedMem[BLOCK_HEIGHT*BLOCK_WIDTH];
int x = blockIdx.x * TILE_WIDTH + threadIdx.x ;//- FILTER_RADIUS;
int y = blockIdx.y * TILE_HEIGHT + threadIdx.y ;//- FILTER_RADIUS;
// Get the Global index into the original image
int index = y * (width) + x;
// STUDENT: write code for High Boost Filter : use Sobel as base code
if (x >= width || y >= height)
return;
// STUDENT: Check 2
// Handle the border cases of the global image
if( x < FILTER_RADIUS || y < FILTER_RADIUS) {
g_DataOut[index] = g_DataIn[index];
return;
}
if ((x > width - FILTER_RADIUS - 1)&&(x <width)) {
g_DataOut[index] = g_DataIn[index];
return;
}
if ((y > height - FILTER_RADIUS - 1)&&(y < height)) {
g_DataOut[index] = g_DataIn[index];
return;
}
// Perform the first load of values into shared memory
int sharedIndex = threadIdx.y * blockDim.y + threadIdx.x;
sharedMem[sharedIndex] = g_DataIn[index];
__syncthreads();
if(threadIdx.x >= (BLOCK_WIDTH - FILTER_RADIUS) || threadIdx.x < FILTER_RADIUS)
return;
if(threadIdx.y >= (BLOCK_HEIGHT - FILTER_RADIUS) || threadIdx.y < FILTER_RADIUS)
return;
float sumX,centerPixel = 0;
for(int dy= -FILTER_RADIUS; dy<=FILTER_RADIUS; dy++) {
for(int dx = -FILTER_RADIUS; dx<=FILTER_RADIUS; dx++) {
float Pixel = (float)(sharedMem[sharedIndex + (dy * blockDim.x + dx)]);
sumX+=Pixel;
}
}
centerPixel = (float) (sharedMem[sharedIndex]);
g_DataOut[index] = CLAMP_8bit(int(centerPixel + HIGH_BOOST_FACTOR * (unsigned char)(centerPixel - sumX/FILTER_AREA)));
}
#endif // _FILTER_KERNEL_H_
| abe26ffbe61eafe5cafdb5f7d7c057f901e86a7f.cu |
#ifndef _FILTER_KERNEL_H_
#define _FILTER_KERNEL_H_
__global__ void SobelFilter(unsigned char* g_DataIn, unsigned char* g_DataOut, int width, int height)
{
__shared__ unsigned char sharedMem[BLOCK_HEIGHT * BLOCK_WIDTH];
float s_SobelMatrix[9];
s_SobelMatrix[0] = -1;
s_SobelMatrix[1] = 0;
s_SobelMatrix[2] = 1;
s_SobelMatrix[3] = -2;
s_SobelMatrix[4] = 0;
s_SobelMatrix[5] = 2;
s_SobelMatrix[6] = -1;
s_SobelMatrix[7] = 0;
s_SobelMatrix[8] = 1;
// Computer the X and Y global coordinates
int x = blockIdx.x * TILE_WIDTH + threadIdx.x ;//- FILTER_RADIUS;
int y = blockIdx.y * TILE_HEIGHT + threadIdx.y ;//- FILTER_RADIUS;
// Get the Global index into the original image
int index = y * (width) + x;
// STUDENT: Check 1
// Handle the extra thread case where the image width or height
//
if (x >= width || y >= height)
return;
// STUDENT: Check 2
// Handle the border cases of the global image
if( x < FILTER_RADIUS || y < FILTER_RADIUS) {
g_DataOut[index] = g_DataIn[index];
return;
}
if ((x > width - FILTER_RADIUS - 1)&&(x <width)) {
g_DataOut[index] = g_DataIn[index];
return;
}
if ((y > height - FILTER_RADIUS - 1)&&(y < height)) {
g_DataOut[index] = g_DataIn[index];
return;
}
// Perform the first load of values into shared memory
int sharedIndex = threadIdx.y * blockDim.y + threadIdx.x;
sharedMem[sharedIndex] = g_DataIn[index];
__syncthreads();
// STUDENT: Make sure only the thread ids should write the sum of the neighbors.
if(threadIdx.x >= (blockDim.x - FILTER_RADIUS) || threadIdx.x < FILTER_RADIUS)
return;
if(threadIdx.y >= (blockDim.y - FILTER_RADIUS) || threadIdx.y < FILTER_RADIUS)
return;
float sumX = 0, sumY=0;
for(int dy= -FILTER_RADIUS; dy<=FILTER_RADIUS; dy++) {
for(int dx = -FILTER_RADIUS; dx<=FILTER_RADIUS; dx++) {
float Pixel = (float)(sharedMem[sharedIndex + (dy * blockDim.x + dx)]);
sumX += (float)(Pixel * s_SobelMatrix[(dy + FILTER_RADIUS) * FILTER_DIAMETER + (dx+FILTER_RADIUS)]);
sumY += (float)(Pixel * s_SobelMatrix[(dx + FILTER_RADIUS) * FILTER_DIAMETER + (dy+FILTER_RADIUS)]);
}
}
g_DataOut[index] = (abs(sumX) + abs(sumY)) > EDGE_VALUE_THRESHOLD ? 255 : 0;
}
__global__ void AverageFilter(unsigned char* g_DataIn, unsigned char* g_DataOut, int width, int height)
{
__shared__ unsigned char sharedMem[BLOCK_HEIGHT*BLOCK_WIDTH];
int x = blockIdx.x * TILE_WIDTH + threadIdx.x ;//- FILTER_RADIUS;
int y = blockIdx.y * TILE_HEIGHT + threadIdx.y ;//- FILTER_RADIUS;
// Get the Global index into the original image
int index = y * (width) + x;
// STUDENT: write code for Average Filter : use Sobel as base code
if (x >= width || y >= height)
return;
// STUDENT: Check 2
// Handle the border cases of the global image
if( x < FILTER_RADIUS || y < FILTER_RADIUS) {
g_DataOut[index] = g_DataIn[index];
return;
}
if ((x > width - FILTER_RADIUS - 1)&&(x <width)) {
g_DataOut[index] = g_DataIn[index];
return;
}
if ((y > height - FILTER_RADIUS - 1)&&(y < height)) {
g_DataOut[index] = g_DataIn[index];
return;
}
// Perform the first load of values into shared memory
int sharedIndex = threadIdx.y * blockDim.y + threadIdx.x;
sharedMem[sharedIndex] = g_DataIn[index];
__syncthreads();
// STUDENT: Make sure only the thread ids should write the sum of the neighbors.
if(threadIdx.x >= (BLOCK_WIDTH - FILTER_RADIUS) || threadIdx.x < FILTER_RADIUS)
return;
if(threadIdx.y >= (BLOCK_HEIGHT - FILTER_RADIUS) || threadIdx.y < FILTER_RADIUS)
return;
float sumX = 0;
for(int dy= -FILTER_RADIUS; dy<=FILTER_RADIUS; dy++) {
for(int dx = -FILTER_RADIUS; dx<=FILTER_RADIUS; dx++) {
float Pixel = (float)(sharedMem[sharedIndex + (dy * blockDim.x + dx)]);
sumX+=Pixel;
}
}
g_DataOut[index] = (unsigned char)(sumX/FILTER_AREA);
}
__global__ void HighBoostFilter(unsigned char* g_DataIn, unsigned char* g_DataOut, int width, int height)
{
__shared__ unsigned char sharedMem[BLOCK_HEIGHT*BLOCK_WIDTH];
int x = blockIdx.x * TILE_WIDTH + threadIdx.x ;//- FILTER_RADIUS;
int y = blockIdx.y * TILE_HEIGHT + threadIdx.y ;//- FILTER_RADIUS;
// Get the Global index into the original image
int index = y * (width) + x;
// STUDENT: write code for High Boost Filter : use Sobel as base code
if (x >= width || y >= height)
return;
// STUDENT: Check 2
// Handle the border cases of the global image
if( x < FILTER_RADIUS || y < FILTER_RADIUS) {
g_DataOut[index] = g_DataIn[index];
return;
}
if ((x > width - FILTER_RADIUS - 1)&&(x <width)) {
g_DataOut[index] = g_DataIn[index];
return;
}
if ((y > height - FILTER_RADIUS - 1)&&(y < height)) {
g_DataOut[index] = g_DataIn[index];
return;
}
// Perform the first load of values into shared memory
int sharedIndex = threadIdx.y * blockDim.y + threadIdx.x;
sharedMem[sharedIndex] = g_DataIn[index];
__syncthreads();
if(threadIdx.x >= (BLOCK_WIDTH - FILTER_RADIUS) || threadIdx.x < FILTER_RADIUS)
return;
if(threadIdx.y >= (BLOCK_HEIGHT - FILTER_RADIUS) || threadIdx.y < FILTER_RADIUS)
return;
float sumX,centerPixel = 0;
for(int dy= -FILTER_RADIUS; dy<=FILTER_RADIUS; dy++) {
for(int dx = -FILTER_RADIUS; dx<=FILTER_RADIUS; dx++) {
float Pixel = (float)(sharedMem[sharedIndex + (dy * blockDim.x + dx)]);
sumX+=Pixel;
}
}
centerPixel = (float) (sharedMem[sharedIndex]);
g_DataOut[index] = CLAMP_8bit(int(centerPixel + HIGH_BOOST_FACTOR * (unsigned char)(centerPixel - sumX/FILTER_AREA)));
}
#endif // _FILTER_KERNEL_H_
|
7de11b0be173a389387d0fff07aaecfd9805ab0b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string>
#include <vector>
#include <fstream>
#include <iostream>
#include <sstream>
enum ComputeMode { ADD, SUB, MUL, DIV };
hipError_t computeWithCuda(int *c, const int *a, const int *b, unsigned int size, ComputeMode mode);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
__global__ void dividKernel(float* c, const float* a, const float* b)
{
int i = threadIdx.x;
c[i] = a[i] / b[i];
}
__global__ void mulKernel(float* c, const float* a, const float* b)
{
int i = threadIdx.x;
c[i] = a[i] * b[i];
}
__global__ void subKernel(float* c, const float* a, const float* b)
{
int i = threadIdx.x;
c[i] = a[i] * b[i];
}
__global__ void compareWithOneKernel(float* b, const double* a)
{
int i = threadIdx.x;
if(a[i] == 1)
b[i] = b[i] + 1;
}
bool read_from_excel_file(std::string file, int numberOfColumn, std::vector<std::vector<double>>* cell)
{
std::string line;
std::ifstream infile(file);
if (!infile.is_open())
return false;
std::vector<double> temp_row;
std::getline(infile, line, '\n');
while (std::getline(infile, line, '\n'))
{
std::istringstream buffer(line);
std::vector<std::string> buff(numberOfColumn);
for (int i = 0; i < buff.size(); i++)
{
buffer >> buff[i];
if (i > 2)
temp_row.push_back(std::stod(buff[i]));
}
cell->push_back(temp_row);
temp_row.clear();
}
return true;
}
int main()
{
std::vector<std::vector<double>> *data;
read_from_excel_file("saeed.txt", 32, data);
std::vector<std::vector<double>> show(20);
double max[20] = { -10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10 };
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus = computeWithCuda(c, a, b, arraySize, mode);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t computeWithCuda(int *c, const int *a, const int *b, unsigned int size, ComputeMode mode)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
| 7de11b0be173a389387d0fff07aaecfd9805ab0b.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string>
#include <vector>
#include <fstream>
#include <iostream>
#include <sstream>
enum ComputeMode { ADD, SUB, MUL, DIV };
cudaError_t computeWithCuda(int *c, const int *a, const int *b, unsigned int size, ComputeMode mode);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
__global__ void dividKernel(float* c, const float* a, const float* b)
{
int i = threadIdx.x;
c[i] = a[i] / b[i];
}
__global__ void mulKernel(float* c, const float* a, const float* b)
{
int i = threadIdx.x;
c[i] = a[i] * b[i];
}
__global__ void subKernel(float* c, const float* a, const float* b)
{
int i = threadIdx.x;
c[i] = a[i] * b[i];
}
__global__ void compareWithOneKernel(float* b, const double* a)
{
int i = threadIdx.x;
if(a[i] == 1)
b[i] = b[i] + 1;
}
bool read_from_excel_file(std::string file, int numberOfColumn, std::vector<std::vector<double>>* cell)
{
std::string line;
std::ifstream infile(file);
if (!infile.is_open())
return false;
std::vector<double> temp_row;
std::getline(infile, line, '\n');
while (std::getline(infile, line, '\n'))
{
std::istringstream buffer(line);
std::vector<std::string> buff(numberOfColumn);
for (int i = 0; i < buff.size(); i++)
{
buffer >> buff[i];
if (i > 2)
temp_row.push_back(std::stod(buff[i]));
}
cell->push_back(temp_row);
temp_row.clear();
}
return true;
}
int main()
{
std::vector<std::vector<double>> *data;
read_from_excel_file("saeed.txt", 32, data);
std::vector<std::vector<double>> show(20);
double max[20] = { -10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10 };
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = computeWithCuda(c, a, b, arraySize, mode);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t computeWithCuda(int *c, const int *a, const int *b, unsigned int size, ComputeMode mode)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
a3058992cbc3a2e61aa14e3593fc75925ba91439.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (C) 2020 THL A29 Limited, a Tencent company.
// All rights reserved.
// Licensed under the BSD 3-Clause License (the "License"); you may
// not use this file except in compliance with the License. You may
// obtain a copy of the License at
// https://opensource.org/licenses/BSD-3-Clause
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" basis,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
// See the AUTHORS file for names of contributors.
#include <hip/hip_runtime.h>
#include <immintrin.h>
#include <hipcub/hipcub.hpp>
#include <numeric>
#include "turbo_transformers/layers/kernels/gpu_softmax_kernel.h"
namespace turbo_transformers {
namespace layers {
namespace kernels {
namespace {
template <typename T, int Len>
struct Array {
__device__ __forceinline__ Array() {}
__device__ __forceinline__ Array(T* inputs) {
for (int i = 0; i < Len; ++i) {
data[i] = inputs[i];
}
}
T data[Len];
};
template <typename T, int Len>
struct ArrayAddFunc {
__device__ __forceinline__ Array<T, Len> operator()(const Array<T, Len>& p1,
const Array<T, Len>& p2) {
Array<T, Len> result;
for (int i = 0; i < Len; ++i) {
result.data[i] = p1.data[i] + p2.data[i];
}
return result;
}
};
template <typename T, int Len>
struct ArrayMaxFunc {
__device__ __forceinline__ Array<T, Len> operator()(const Array<T, Len>& p1,
const Array<T, Len>& p2) {
Array<T, Len> result;
for (int i = 0; i < Len; ++i) {
result.data[i] = p1.data[i] > p2.data[i] ? p1.data[i] : p2.data[i];
}
return result;
}
};
template <int BlockDim, int K>
__global__ void cub_softmax_kernel_k(float* qk_buf_, const float* attr_mask,
const int batch_size, const int head_num,
const int from_seq_len,
const int to_seq_len, const float scaler,
bool is_2D) {
__shared__ typename hipcub::BlockReduce<Array<float, K>, BlockDim>::TempStorage
temp_storage;
__shared__ float s_sum[K], s_max[K];
float tmp[K];
int qk_offset = blockIdx.x * K * to_seq_len;
float mask_val = 0.;
for (int i = 0; i < K; ++i) {
float qk = threadIdx.x < to_seq_len
? qk_buf_[threadIdx.x + qk_offset + to_seq_len * i]
: 0.0f;
if (attr_mask != nullptr) {
int batch_id = (blockIdx.x * K + i) / (head_num * from_seq_len);
int from_seq_id = (blockIdx.x * K + i) % from_seq_len;
mask_val = attr_mask[threadIdx.x +
(is_2D ? (batch_id * to_seq_len)
: (batch_id * from_seq_len + from_seq_id) *
to_seq_len)];
} else {
mask_val = 0.0f;
}
// mask_val = (1.0f - mask_val) * -10000.0f;
tmp[i] = threadIdx.x < to_seq_len ? (qk * scaler + mask_val) : -1e20f;
}
Array<float, K> max_val =
hipcub::BlockReduce<Array<float, K>, BlockDim>(temp_storage)
.Reduce(Array<float, K>(tmp), ArrayMaxFunc<float, K>());
if (threadIdx.x == 0) {
for (int i = 0; i < K; ++i) {
s_max[i] = max_val.data[i];
}
}
__syncthreads();
float qk_tmp[K];
for (int i = 0; i < K; ++i) {
qk_tmp[i] = threadIdx.x < to_seq_len ? __expf((tmp[i] - s_max[i])) : 0.0f;
}
Array<float, K> sum_val =
hipcub::BlockReduce<Array<float, K>, BlockDim>(temp_storage)
.Reduce(Array<float, K>(qk_tmp), ArrayAddFunc<float, K>());
if (threadIdx.x == 0) {
for (int i = 0; i < K; ++i) {
s_sum[i] = sum_val.data[i] + 1e-6f;
}
}
__syncthreads();
if (threadIdx.x < to_seq_len) {
for (int i = 0; i < K; ++i) {
qk_buf_[threadIdx.x + qk_offset + to_seq_len * i] =
(qk_tmp[i] / s_sum[i]);
}
}
}
} // namespace
#define SOFTMAX_KERNEL_CASE(BlockDim, ...) \
case (BlockDim): \
if (row_per_thread_block == RowsPerThreadBlock) { \
hipLaunchKernelGGL(( cub_softmax_kernel_k<BlockDim, RowsPerThreadBlock>) \
, dim3(grid), dim3(block), 0, stream, __VA_ARGS__); \
} else { \
hipLaunchKernelGGL(( cub_softmax_kernel_k<BlockDim, OneRowPerThreadBlock>) \
, dim3(grid), dim3(block), 0, stream, __VA_ARGS__); \
} \
break
#define RUN_KERNEL(...) \
do { \
switch (block.x) { \
SOFTMAX_KERNEL_CASE(32, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(64, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(96, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(128, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(160, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(192, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(224, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(256, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(288, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(320, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(352, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(384, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(416, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(448, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(480, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(512, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(544, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(576, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(608, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(640, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(672, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(704, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(736, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(768, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(800, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(832, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(864, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(896, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(928, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(960, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(992, __VA_ARGS__); \
SOFTMAX_KERNEL_CASE(1024, __VA_ARGS__); \
default: \
throw std::runtime_error("The block.x should be 32x."); \
} \
} while (0)
template <>
void GPUSoftmaxMask(float* qk_buf, const float* attr_mask, int64_t batch_size,
int64_t head_num, int64_t from_seq_len, int64_t to_seq_len,
float scale, bool is_2D, hipStream_t stream) {
dim3 block, grid;
int high_dim_size = batch_size * head_num * from_seq_len;
const int OneRowPerThreadBlock = 1;
const int RowsPerThreadBlock = 2;
int row_per_thread_block = OneRowPerThreadBlock;
if ((head_num * from_seq_len) % RowsPerThreadBlock == 0) {
row_per_thread_block = RowsPerThreadBlock;
}
// block size must be 32x, so warp reduce can work
block.x = (to_seq_len + 31) / 32 * 32;
grid.x = high_dim_size / row_per_thread_block;
// Because there are many function templates, the compilation speed may be
// slow.
RUN_KERNEL(qk_buf, attr_mask, batch_size, head_num, from_seq_len, to_seq_len,
scale, is_2D);
}
#undef RUN_KERNEL
#undef SOFTMAX_KERNEL_CASE
} // namespace kernels
} // namespace layers
} // namespace turbo_transformers
| a3058992cbc3a2e61aa14e3593fc75925ba91439.cu | // Copyright (C) 2020 THL A29 Limited, a Tencent company.
// All rights reserved.
// Licensed under the BSD 3-Clause License (the "License"); you may
// not use this file except in compliance with the License. You may
// obtain a copy of the License at
// https://opensource.org/licenses/BSD-3-Clause
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" basis,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
// See the AUTHORS file for names of contributors.
#include <cuda_runtime.h>
#include <immintrin.h>
#include <cub/cub.cuh>
#include <numeric>
#include "turbo_transformers/layers/kernels/gpu_softmax_kernel.h"
namespace turbo_transformers {
namespace layers {
namespace kernels {
namespace {
template <typename T, int Len>
struct Array {
__device__ __forceinline__ Array() {}
__device__ __forceinline__ Array(T* inputs) {
for (int i = 0; i < Len; ++i) {
data[i] = inputs[i];
}
}
T data[Len];
};
template <typename T, int Len>
struct ArrayAddFunc {
__device__ __forceinline__ Array<T, Len> operator()(const Array<T, Len>& p1,
const Array<T, Len>& p2) {
Array<T, Len> result;
for (int i = 0; i < Len; ++i) {
result.data[i] = p1.data[i] + p2.data[i];
}
return result;
}
};
template <typename T, int Len>
struct ArrayMaxFunc {
__device__ __forceinline__ Array<T, Len> operator()(const Array<T, Len>& p1,
const Array<T, Len>& p2) {
Array<T, Len> result;
for (int i = 0; i < Len; ++i) {
result.data[i] = p1.data[i] > p2.data[i] ? p1.data[i] : p2.data[i];
}
return result;
}
};
template <int BlockDim, int K>
__global__ void cub_softmax_kernel_k(float* qk_buf_, const float* attr_mask,
const int batch_size, const int head_num,
const int from_seq_len,
const int to_seq_len, const float scaler,
bool is_2D) {
__shared__ typename cub::BlockReduce<Array<float, K>, BlockDim>::TempStorage
temp_storage;
__shared__ float s_sum[K], s_max[K];
float tmp[K];
int qk_offset = blockIdx.x * K * to_seq_len;
float mask_val = 0.;
for (int i = 0; i < K; ++i) {
float qk = threadIdx.x < to_seq_len
? qk_buf_[threadIdx.x + qk_offset + to_seq_len * i]
: 0.0f;
if (attr_mask != nullptr) {
int batch_id = (blockIdx.x * K + i) / (head_num * from_seq_len);
int from_seq_id = (blockIdx.x * K + i) % from_seq_len;
mask_val = attr_mask[threadIdx.x +
(is_2D ? (batch_id * to_seq_len)
: (batch_id * from_seq_len + from_seq_id) *
to_seq_len)];
} else {
mask_val = 0.0f;
}
// mask_val = (1.0f - mask_val) * -10000.0f;
tmp[i] = threadIdx.x < to_seq_len ? (qk * scaler + mask_val) : -1e20f;
}
Array<float, K> max_val =
cub::BlockReduce<Array<float, K>, BlockDim>(temp_storage)
.Reduce(Array<float, K>(tmp), ArrayMaxFunc<float, K>());
if (threadIdx.x == 0) {
for (int i = 0; i < K; ++i) {
s_max[i] = max_val.data[i];
}
}
__syncthreads();
float qk_tmp[K];
for (int i = 0; i < K; ++i) {
qk_tmp[i] = threadIdx.x < to_seq_len ? __expf((tmp[i] - s_max[i])) : 0.0f;
}
Array<float, K> sum_val =
cub::BlockReduce<Array<float, K>, BlockDim>(temp_storage)
.Reduce(Array<float, K>(qk_tmp), ArrayAddFunc<float, K>());
if (threadIdx.x == 0) {
for (int i = 0; i < K; ++i) {
s_sum[i] = sum_val.data[i] + 1e-6f;
}
}
__syncthreads();
if (threadIdx.x < to_seq_len) {
for (int i = 0; i < K; ++i) {
qk_buf_[threadIdx.x + qk_offset + to_seq_len * i] =
(qk_tmp[i] / s_sum[i]);
}
}
}
} // namespace
// One `case` of the RUN_KERNEL switch: launches the kernel instantiation
// whose compile-time BlockDim matches the runtime block.x, selecting the K
// template argument (rows per block) from the runtime row_per_thread_block.
// RowsPerThreadBlock and OneRowPerThreadBlock must be in scope and usable as
// constant expressions at the expansion site.
#define SOFTMAX_KERNEL_CASE(BlockDim, ...)                     \
  case (BlockDim):                                             \
    if (row_per_thread_block == RowsPerThreadBlock) {          \
      cub_softmax_kernel_k<BlockDim, RowsPerThreadBlock>       \
          <<<grid, block, 0, stream>>>(__VA_ARGS__);           \
    } else {                                                   \
      cub_softmax_kernel_k<BlockDim, OneRowPerThreadBlock>     \
          <<<grid, block, 0, stream>>>(__VA_ARGS__);           \
    }                                                          \
    break
// Dispatches block.x (must be a multiple of 32, at most 1024) to the matching
// compile-time instantiation; any other value throws std::runtime_error.
#define RUN_KERNEL(...)                          \
  do {                                           \
    switch (block.x) {                           \
      SOFTMAX_KERNEL_CASE(32, __VA_ARGS__);      \
      SOFTMAX_KERNEL_CASE(64, __VA_ARGS__);      \
      SOFTMAX_KERNEL_CASE(96, __VA_ARGS__);      \
      SOFTMAX_KERNEL_CASE(128, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(160, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(192, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(224, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(256, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(288, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(320, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(352, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(384, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(416, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(448, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(480, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(512, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(544, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(576, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(608, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(640, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(672, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(704, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(736, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(768, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(800, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(832, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(864, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(896, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(928, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(960, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(992, __VA_ARGS__);     \
      SOFTMAX_KERNEL_CASE(1024, __VA_ARGS__);    \
      default:                                   \
        throw std::runtime_error("The block.x should be 32x."); \
    }                                            \
  } while (0)
// Applies a masked, scaled softmax in place over qk_buf (layout
// [batch, head, from_seq, to_seq]).  attr_mask may be nullptr; is_2D selects
// a [batch, to_seq] mask instead of [batch, from_seq, to_seq].
// float specialisation; the generic declaration lives elsewhere.
template <>
void GPUSoftmaxMask(float* qk_buf, const float* attr_mask, int64_t batch_size,
                    int64_t head_num, int64_t from_seq_len, int64_t to_seq_len,
                    float scale, bool is_2D, cudaStream_t stream) {
  dim3 block, grid;
  // One softmax row per (batch, head, from_seq) triple.
  // NOTE(review): computed in 32-bit int -- could overflow for very large
  // batch*head*from_seq products; confirm expected problem sizes.
  int high_dim_size = batch_size * head_num * from_seq_len;
  const int OneRowPerThreadBlock = 1;
  const int RowsPerThreadBlock = 2;
  int row_per_thread_block = OneRowPerThreadBlock;
  // Process two rows per block when the per-batch row count divides evenly.
  if ((head_num * from_seq_len) % RowsPerThreadBlock == 0) {
    row_per_thread_block = RowsPerThreadBlock;
  }
  // block size must be 32x, so warp reduce can work
  block.x = (to_seq_len + 31) / 32 * 32;
  grid.x = high_dim_size / row_per_thread_block;
  // Because there are many function templates, the compilation speed may be
  // slow.
  RUN_KERNEL(qk_buf, attr_mask, batch_size, head_num, from_seq_len, to_seq_len,
             scale, is_2D);
}
#undef RUN_KERNEL
#undef SOFTMAX_KERNEL_CASE
} // namespace kernels
} // namespace layers
} // namespace turbo_transformers
|
502680062604ee27bd9ce6b1832d0aad245c2c22.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "util.hip"
#include "tabs/sbox.tab"
// 32-bit word with byte-level access: lets AES_encrypt view the state both as
// four words (for endian swaps) and as 16 bytes (for the round functions).
union u32_t {
    uint i;
    uchar c[4];
};
// STE(state): S-box lookup for one state byte.  TTABLE selects how the
// 256-entry S-box is packed (shrinking the table and its shared-memory
// footprint): 128 -> two 4-bit halves per stored byte, 64 -> four 2-bit
// planes, 32 -> eight 1-bit planes, anything else -> a plain 256-byte table.
#if TTABLE == 128
// Low/high nibble extracted from a half-size table.
#define STE_128_LH(state) (sbox[(state >> 1)      ] >> ((state & 0x1) << 2))
#define STE_128_HH(state) (sbox[(state >> 1) + 128] >> ((state & 0x1) << 2))
#define STE(state) ((STE_128_LH(state) & 0x0f) | (STE_128_HH(state) << 4))
#elif TTABLE == 64
// Four 2-bit planes, recombined into one byte.
#define STE_64_0(state) (sbox[(state >> 2)      ] >> ((state & 0x3) << 1))
#define STE_64_1(state) (sbox[(state >> 2) +  64] >> ((state & 0x3) << 1))
#define STE_64_2(state) (sbox[(state >> 2) + 128] >> ((state & 0x3) << 1))
#define STE_64_3(state) (sbox[(state >> 2) + 192] >> ((state & 0x3) << 1))
#define STE(state) ((STE_64_0(state) & 0x03)        \
        | ((STE_64_1(state) & 0x03) << 2)           \
        | ((STE_64_2(state) & 0x03) << 4)           \
        | ((STE_64_3(state) & 0x03) << 6))
#elif TTABLE == 32
// Eight 1-bit planes, recombined into one byte.
#define STE_32_0(state) (sbox[(state >> 3)      ] >> (state & 0x7))
#define STE_32_1(state) (sbox[(state >> 3) +  32] >> (state & 0x7))
#define STE_32_2(state) (sbox[(state >> 3) +  64] >> (state & 0x7))
#define STE_32_3(state) (sbox[(state >> 3) +  96] >> (state & 0x7))
#define STE_32_4(state) (sbox[(state >> 3) + 128] >> (state & 0x7))
#define STE_32_5(state) (sbox[(state >> 3) + 160] >> (state & 0x7))
#define STE_32_6(state) (sbox[(state >> 3) + 192] >> (state & 0x7))
#define STE_32_7(state) (sbox[(state >> 3) + 224] >> (state & 0x7))
#define STE(state) ((STE_32_0(state) & 0x01)        \
        | ((STE_32_1(state) & 0x01) << 1)           \
        | ((STE_32_2(state) & 0x01) << 2)           \
        | ((STE_32_3(state) & 0x01) << 3)           \
        | ((STE_32_4(state) & 0x01) << 4)           \
        | ((STE_32_5(state) & 0x01) << 5)           \
        | ((STE_32_6(state) & 0x01) << 6)           \
        | ((STE_32_7(state) & 0x01) << 7))
#else
// Direct 256-byte lookup table.
#define STE(state) (sbox[state])
#endif
#define SWAP(a, b) (a) ^= (b); (b) ^= (a); (a) ^= (b);
// Transposes the 4x4 AES state in place by swapping the six off-diagonal
// byte pairs (indices r*4+c <-> c*4+r); diagonal bytes are untouched.
__device__ void TransposeSelf(uchar *state) {
    SWAP(state[1], state[4]);
    SWAP(state[2], state[8]);
    SWAP(state[3], state[12]);
    SWAP(state[6], state[9]);
    SWAP(state[7], state[13]);
    SWAP(state[11], state[14]);
}
// Writes the 4x4 byte-matrix transpose of src into dst (dst[j][i] = src[i][j]).
// dst and src must not overlap.
__device__ void Transpose(uchar *dst, uchar *src) {
    for (int row = 0; row < 4; ++row) {
        const uchar *srcRow = src + 4 * row;
        for (int col = 0; col < 4; ++col) {
            dst[4 * col + row] = srcRow[col];
        }
    }
}
// XORs the expanded round key into the (transposed) state.
// NOTE(review): key bytes are read as rek[i*4 + 3 - j], i.e. in reversed
// order within each key word -- presumably to match the REV_ENDIAN/transpose
// layout used by AES_encrypt; confirm against the key-schedule byte order.
__device__ void AddRoundKey(uchar *state, uchar *rek) {
    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++) {
            state[j*4+i] ^= rek[i*4+3-j];
        }
    }
}
// Applies the S-box to all 16 state bytes via the STE macro, which decodes
// the TTABLE-packed table pointed to by sbox.
__device__ void SubBytes(uchar *state, uchar *sbox) {
    for (int i = 0; i < 16; i++) {
        state[i] = STE(state[i]);//sbox[state[i]];
        //state[i] = Tsbox_256[state[i]];
    }
}
#define xtime(x) ((x << 1) ^ ((x >> 7) * 0x1b))
// AES MixColumns on the transposed state: column i is
// {state[i], state[4+i], state[8+i], state[12+i]}.  Uses the classic
// xtime-based formulation: each byte gets XORed with the whole-column sum
// (Tmp) and the GF(2^8)-doubled difference of itself and its neighbour.
__device__ void MixColumns(uchar *state) {
    uchar Tmp, Tm, t;
    for(int i = 0; i < 4; i++) {
        t = state[i];                                              // saved for the wrap-around term
        Tmp = state[i] ^ state[4+i] ^ state[8+i] ^ state[12+i] ;   // XOR of the whole column
        Tm = state[i] ^ state[4+i] ;
        Tm = xtime(Tm);
        state[i] ^= Tm ^ Tmp ;
        Tm = state[4+i] ^ state[8+i] ;
        Tm = xtime(Tm);
        state[4+i] ^= Tm ^ Tmp ;
        Tm = state[8+i] ^ state[12+i] ;
        Tm = xtime(Tm);
        state[8+i] ^= Tm ^ Tmp ;
        Tm = state[12+i] ^ t ;
        Tm = xtime(Tm);
        state[12+i] ^= Tm ^ Tmp ;
    }
}
// AES ShiftRows on the transposed state: row r occupies bytes 4r..4r+3 and
// is rotated left by r positions; row 0 is untouched.
__device__ void ShiftRows(uchar *state) {
    uchar temp;
    // Row 1 (bytes 4..7): rotate left by 1.
    temp = state[4];
    state[4] = state[5];
    state[5] = state[6];
    state[6] = state[7];
    state[7] = temp;
    // Row 2 (bytes 8..11): rotate left by 2, done as two 2-cycles.
    temp = state[8];
    state[8] = state[10];
    state[10] = temp;
    temp = state[9];
    state[9] = state[11];
    state[11] = temp;
    // Row 3 (bytes 12..15): rotate left by 3 (i.e. right by 1).
    temp = state[12];
    state[12] = state[15];
    state[15] = state[14];
    state[14] = state[13];
    state[13] = temp;
}
#define REV_ENDIAN(x) (((x)>>24)&0x000000FF) | (((x)>>8)&0x0000FF00) | (((x)<<8)&0x00FF0000) | (((x)<<24)&0xFF000000)
// AES block-encryption kernel: each thread encrypts one 16-byte block per
// iteration of the goto loop -- a hand-rolled grid-stride loop over the data
// using the compile-time constants NUM_THREADS / NUM_BLOCKS.
// pt/ct are word arrays of `size` uints, rek holds the expanded round keys
// (Nr + 1 groups of 4 words).
// NOTE(review): the bounds check only guards the first of the 4 words read
// per block -- size is presumably a multiple of 4; confirm at the caller.
__global__ void AES_encrypt(const uint *pt, uint *ct, uint *rek, uint Nr, uint size) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    u32_t state[4];
    //uchar state[16];
    uchar *_rk = (uchar *)rek;
#ifdef USE_SMEM
    // Stage the (possibly packed) S-box in shared memory, once per block.
    __shared__ uchar sbox[256];
#if TTABLE == 256
    load_smem_sbox(sbox, Tsbox_256);
#elif TTABLE == 128
    load_smem_sbox(sbox, Tsbox_128);
#elif TTABLE == 64
    load_smem_sbox(sbox, Tsbox_64);
#elif TTABLE == 32
    load_smem_sbox(sbox, Tsbox_32);
#endif // TTABLE
#else
#if TTABLE == 256
    uchar *sbox = Tsbox_256;
#elif TTABLE == 128
    uchar *sbox = Tsbox_128;
#elif TTABLE == 64
    uchar *sbox = Tsbox_64;
#elif TTABLE == 32
    uchar *sbox = Tsbox_32;
#endif // TTABLE
#endif // USE_SMEM
    int iter = 0;
BEGIN:
    // Word offset of this thread's current 16-byte block.
    int offset = (iter * NUM_THREADS * NUM_BLOCKS + tid) << 2;
    if (offset >= size) return;
    // Load 4 words byte-swapped, then transpose into the layout the round
    // functions expect.
    state[0].i = REV_ENDIAN(pt[offset + 0]);
    state[1].i = REV_ENDIAN(pt[offset + 1]);
    state[2].i = REV_ENDIAN(pt[offset + 2]);
    state[3].i = REV_ENDIAN(pt[offset + 3]);
    TransposeSelf((uchar*)state);
    AddRoundKey((uchar*)state, (uchar*)_rk);
    // Nr - 1 full rounds, then the final round below (no MixColumns).
    for (int i = 1; i < Nr; i++)
    {
        SubBytes((uchar*)state, sbox);
        ShiftRows((uchar*)state);
        MixColumns((uchar*)state);
        AddRoundKey((uchar*)state, (uchar*)(rek + i*4));
    }
    SubBytes((uchar*)state, sbox);
    ShiftRows((uchar*)state);
    AddRoundKey((uchar*)state, (uchar*)(rek + Nr*4));
    // Undo the transpose and endian swap before storing the ciphertext.
    TransposeSelf((uchar*)state);
    ct[offset + 0] = REV_ENDIAN(state[0].i);
    ct[offset + 1] = REV_ENDIAN(state[1].i);
    ct[offset + 2] = REV_ENDIAN(state[2].i);
    ct[offset + 3] = REV_ENDIAN(state[3].i);
    iter++;
    goto BEGIN;
}
| 502680062604ee27bd9ce6b1832d0aad245c2c22.cu | #include "util.cu"
#include "tabs/sbox.tab"
union u32_t {
uint i;
uchar c[4];
};
#if TTABLE == 128
#define STE_128_LH(state) (sbox[(state >> 1) ] >> ((state & 0x1) << 2))
#define STE_128_HH(state) (sbox[(state >> 1) + 128] >> ((state & 0x1) << 2))
#define STE(state) ((STE_128_LH(state) & 0x0f) | (STE_128_HH(state) << 4))
#elif TTABLE == 64
#define STE_64_0(state) (sbox[(state >> 2) ] >> ((state & 0x3) << 1))
#define STE_64_1(state) (sbox[(state >> 2) + 64] >> ((state & 0x3) << 1))
#define STE_64_2(state) (sbox[(state >> 2) + 128] >> ((state & 0x3) << 1))
#define STE_64_3(state) (sbox[(state >> 2) + 192] >> ((state & 0x3) << 1))
#define STE(state) ((STE_64_0(state) & 0x03) \
| ((STE_64_1(state) & 0x03) << 2) \
| ((STE_64_2(state) & 0x03) << 4) \
| ((STE_64_3(state) & 0x03) << 6))
#elif TTABLE == 32
#define STE_32_0(state) (sbox[(state >> 3) ] >> (state & 0x7))
#define STE_32_1(state) (sbox[(state >> 3) + 32] >> (state & 0x7))
#define STE_32_2(state) (sbox[(state >> 3) + 64] >> (state & 0x7))
#define STE_32_3(state) (sbox[(state >> 3) + 96] >> (state & 0x7))
#define STE_32_4(state) (sbox[(state >> 3) + 128] >> (state & 0x7))
#define STE_32_5(state) (sbox[(state >> 3) + 160] >> (state & 0x7))
#define STE_32_6(state) (sbox[(state >> 3) + 192] >> (state & 0x7))
#define STE_32_7(state) (sbox[(state >> 3) + 224] >> (state & 0x7))
#define STE(state) ((STE_32_0(state) & 0x01) \
| ((STE_32_1(state) & 0x01) << 1) \
| ((STE_32_2(state) & 0x01) << 2) \
| ((STE_32_3(state) & 0x01) << 3) \
| ((STE_32_4(state) & 0x01) << 4) \
| ((STE_32_5(state) & 0x01) << 5) \
| ((STE_32_6(state) & 0x01) << 6) \
| ((STE_32_7(state) & 0x01) << 7))
#else
#define STE(state) (sbox[state])
#endif
#define SWAP(a, b) (a) ^= (b); (b) ^= (a); (a) ^= (b);
__device__ void TransposeSelf(uchar *state) {
SWAP(state[1], state[4]);
SWAP(state[2], state[8]);
SWAP(state[3], state[12]);
SWAP(state[6], state[9]);
SWAP(state[7], state[13]);
SWAP(state[11], state[14]);
}
__device__ void Transpose(uchar *dst, uchar *src) {
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
dst[j*4+i] = src[i*4+j];
}
}
}
__device__ void AddRoundKey(uchar *state, uchar *rek) {
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
state[j*4+i] ^= rek[i*4+3-j];
}
}
}
__device__ void SubBytes(uchar *state, uchar *sbox) {
for (int i = 0; i < 16; i++) {
state[i] = STE(state[i]);//sbox[state[i]];
//state[i] = Tsbox_256[state[i]];
}
}
#define xtime(x) ((x << 1) ^ ((x >> 7) * 0x1b))
__device__ void MixColumns(uchar *state) {
uchar Tmp, Tm, t;
for(int i = 0; i < 4; i++) {
t = state[i];
Tmp = state[i] ^ state[4+i] ^ state[8+i] ^ state[12+i] ;
Tm = state[i] ^ state[4+i] ;
Tm = xtime(Tm);
state[i] ^= Tm ^ Tmp ;
Tm = state[4+i] ^ state[8+i] ;
Tm = xtime(Tm);
state[4+i] ^= Tm ^ Tmp ;
Tm = state[8+i] ^ state[12+i] ;
Tm = xtime(Tm);
state[8+i] ^= Tm ^ Tmp ;
Tm = state[12+i] ^ t ;
Tm = xtime(Tm);
state[12+i] ^= Tm ^ Tmp ;
}
}
__device__ void ShiftRows(uchar *state) {
uchar temp;
// Rotate first row 1 columns to left
temp = state[4];
state[4] = state[5];
state[5] = state[6];
state[6] = state[7];
state[7] = temp;
// Rotate second row 2 columns to left
temp = state[8];
state[8] = state[10];
state[10] = temp;
temp = state[9];
state[9] = state[11];
state[11] = temp;
// Rotate third row 3 columns to left
temp = state[12];
state[12] = state[15];
state[15] = state[14];
state[14] = state[13];
state[13] = temp;
}
#define REV_ENDIAN(x) (((x)>>24)&0x000000FF) | (((x)>>8)&0x0000FF00) | (((x)<<8)&0x00FF0000) | (((x)<<24)&0xFF000000)
__global__ void AES_encrypt(const uint *pt, uint *ct, uint *rek, uint Nr, uint size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
u32_t state[4];
//uchar state[16];
uchar *_rk = (uchar *)rek;
#ifdef USE_SMEM
__shared__ uchar sbox[256];
#if TTABLE == 256
load_smem_sbox(sbox, Tsbox_256);
#elif TTABLE == 128
load_smem_sbox(sbox, Tsbox_128);
#elif TTABLE == 64
load_smem_sbox(sbox, Tsbox_64);
#elif TTABLE == 32
load_smem_sbox(sbox, Tsbox_32);
#endif // TTABLE
#else
#if TTABLE == 256
uchar *sbox = Tsbox_256;
#elif TTABLE == 128
uchar *sbox = Tsbox_128;
#elif TTABLE == 64
uchar *sbox = Tsbox_64;
#elif TTABLE == 32
uchar *sbox = Tsbox_32;
#endif // TTABLE
#endif // USE_SMEM
int iter = 0;
BEGIN:
int offset = (iter * NUM_THREADS * NUM_BLOCKS + tid) << 2;
if (offset >= size) return;
state[0].i = REV_ENDIAN(pt[offset + 0]);
state[1].i = REV_ENDIAN(pt[offset + 1]);
state[2].i = REV_ENDIAN(pt[offset + 2]);
state[3].i = REV_ENDIAN(pt[offset + 3]);
TransposeSelf((uchar*)state);
AddRoundKey((uchar*)state, (uchar*)_rk);
for (int i = 1; i < Nr; i++)
{
SubBytes((uchar*)state, sbox);
ShiftRows((uchar*)state);
MixColumns((uchar*)state);
AddRoundKey((uchar*)state, (uchar*)(rek + i*4));
}
SubBytes((uchar*)state, sbox);
ShiftRows((uchar*)state);
AddRoundKey((uchar*)state, (uchar*)(rek + Nr*4));
TransposeSelf((uchar*)state);
ct[offset + 0] = REV_ENDIAN(state[0].i);
ct[offset + 1] = REV_ENDIAN(state[1].i);
ct[offset + 2] = REV_ENDIAN(state[2].i);
ct[offset + 3] = REV_ENDIAN(state[3].i);
iter++;
goto BEGIN;
}
|
fdcf4928f57f8b45bb41b4dd5b90fc676ae4a118.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <thrust/count.h>
#include <thrust/execution_policy.h>
// Aborts with file/line context when the most recent HIP call (or kernel
// launch) left an error.  hipGetLastError() also clears the sticky error
// state.  `msg` names the operation being checked.
#define cudaCheckErrors(msg) \
    do { \
        hipError_t __err = hipGetLastError(); \
        if (__err != hipSuccess) { \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                msg, hipGetErrorString(__err), \
                __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            exit(1); \
        } \
    } while (0)
// Sums `length` doubles read from x with a stride of `step` elements.
__device__ double calSum(const double *x, const int length, const int step)
{
    double acc = 0.0;
    const double *p = x;
    for (int n = 0; n < length; ++n, p += step) {
        acc += *p;
    }
    return acc;
}
// Mean of `length` strided samples from x (integer `length` promoted to
// double by the division).
__device__ double calAvg(const double *x, const int length, const int step)
{
    return calSum(x, length, step) / length;
}
// Sum of squares of `length` strided samples from x.
__device__ double calSquareSum(const double *x, const int length, const int step)
{
    double sum = 0;
    for(int i = 0; i < length; i++) {
        sum += x[i*step] * x[i*step];
    }
    return sum;
}
// Dot product of two sample vectors read with the same stride.
__device__ double calMultiplySum(const double *x, const double *y, const int length, const int step)
{
    double sum = 0;
    for(int i = 0; i < length; i++) {
        sum += x[i*step] * y[i*step];
    }
    return sum;
}
// Sample standard deviation (n - 1 denominator) of `length` strided samples.
// NOTE(review): uses the E[x^2] - n*mean^2 formulation, which can lose
// precision (or go slightly negative under rounding) for large, tightly
// clustered data.
__device__ double calStd(const double *x, const int length, const int step)
{
    const double x_square_sum = calSquareSum(x, length, step);
    const double x_avg = calAvg(x, length, step);
    return sqrt((x_square_sum - length * x_avg * x_avg) / (length - 1));
}
// Pearson correlation coefficient of two sample vectors of `length` samples
// taken every `step` elements.  Undefined (division by zero) when either
// vector has zero variance.
__device__ double calCorrCoef(const double *x, const double *y, const int length, const int step)
{
    const double xy_sum = calMultiplySum(x, y, length, step);
    const double x_avg = calAvg(x, length, step);
    const double y_avg = calAvg(y, length, step);
    const double x_std = calStd(x, length, step);
    const double y_std = calStd(y, length, step);
    return (xy_sum - length * x_avg * y_avg) / ((length - 1) * x_std * y_std);
}
// Fisher z-transform of a correlation coefficient (equivalent to atanh(x)).
// Diverges as |x| -> 1.  time_size is currently unused; kept so the call
// sites' interface stays stable.
__device__ double calFisherTransform(const double x, const int time_size)
{
    // z=0.5.*log((1+rr)./(1-rr));
    return 0.5 * log((1+x) / (1-x));
}
// Inverse Fisher transform (equivalent to tanh(x)); maps a mean z-value back
// to a correlation in (-1, 1).
__device__ double calInverseFisherTransform(const double x)
{
    // zm= (exp(2.*zm)-1)./(exp(2.*zm)+1);
    return (exp(2*x) - 1) / (exp(2*x) + 1);
}
// One thread per (repeat, subject-pair): computes the Pearson correlation
// between two subjects over time and stores its Fisher z-transform in
// all_corr_coef_matrix, linearised as [repeat][upper-triangle pair index].
// all_data_matrix layout: [repeat][time][subject].
__global__ void calculateCorrelationCoefficientMatrix(double *all_corr_coef_matrix, const double *all_data_matrix, const int subject_size, const int time_size, const int repeat_times)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Number of distinct subject pairs (upper triangle, diagonal excluded).
    const int matrix_works = subject_size * (subject_size - 1) / 2;
    if (idx >= repeat_times * matrix_works)
        return;
    const int n_matrix = idx / matrix_works;
    int remain_works = idx % matrix_works;
    int x = 1, y = 1;
    // Map the linear pair index to (x, y) by scanning triangle rows; the loop
    // always breaks for a valid index, so the defaults are never used.
    for(int i = 0; i < subject_size; i ++) {
        const int row_works = subject_size - i - 1;
        if (remain_works < row_works) {
            x = i;
            y = i + 1 + remain_works;
            break;
        }
        remain_works -= row_works;
    }
    // Subjects are the innermost dimension, so subject columns start at
    // data_matrix + x / + y and advance with stride subject_size over time.
    const double *data_matrix = all_data_matrix + n_matrix * time_size * subject_size;
    const double coef = calCorrCoef(data_matrix + x, data_matrix + y, time_size, subject_size);
    const double zvalue = calFisherTransform(coef, time_size);
    all_corr_coef_matrix[idx] = zvalue;
}
// One thread per repeat: averages that repeat's Fisher z-values over all
// subject pairs and applies the inverse transform to get one ISC value.
__global__ void calculateInterSubjectCorrelation(double *isc_array, const double *all_corr_coef_matrix, const int subject_size, const int repeat_times)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= repeat_times)
        return;
    const int matrix_works = subject_size * (subject_size - 1) / 2;
    const double *corr_coef_matrix = all_corr_coef_matrix + idx * matrix_works;
    // Serial sum over the pair count (small: subject_size choose 2).
    double sum = 0;
    for (int i = 0; i < matrix_works; i++)
        sum += corr_coef_matrix[i];
    const double mean = sum / matrix_works;
    isc_array[idx] = calInverseFisherTransform(mean);
}
// Pure index permutation: one thread copies one element from
// [subject][repeat][time] layout into [repeat][time][subject] layout so the
// correlation kernel can read subject columns with a fixed stride.
__global__ void rearrangeMatrixPosition(double *data_matrix, const double *source_matrix, const int subject_size, const int time_size, const int repeat_times)
{
    // 1st subject_size, 2nd repeat_times, 3rd time_size
    // to
    // 1st repeat_times, 2nd time_size, 3rd subject_size
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= repeat_times * time_size * subject_size)
        return;
    // Decompose the source-linearised index into its three coordinates.
    const int subject_idx = idx / (repeat_times * time_size);
    const int repeat_idx = (idx % (repeat_times * time_size)) / time_size;
    const int time_idx = (idx % (repeat_times * time_size)) % time_size;
    const int data_idx = repeat_idx * time_size * subject_size + time_idx * subject_size + subject_idx;
    data_matrix[data_idx] = source_matrix[idx];
}
// Debug helper: copies a [first x second x third] device array of doubles to
// the host and prints it as MATLAB-comment-style rows.
// `data` must be a device pointer holding first*second*third doubles.
// Fixed: the host allocation is now checked before use, the copy result is
// surfaced through the file's cudaCheckErrors convention, and the index
// arithmetic is widened to size_t to avoid int overflow on large tensors.
void printMatrix(const double *data, const int first, const int second, const int third)
{
    const size_t count = (size_t)first * second * third;
    double *tmp = (double *)malloc(sizeof(double) * count);
    if (tmp == NULL) {
        // Don't dereference a failed allocation; report and bail out.
        fprintf(stderr, "printMatrix: host allocation of %zu doubles failed\n", count);
        return;
    }
    hipMemcpy(tmp, data, sizeof(double) * count, hipMemcpyDeviceToHost);
    cudaCheckErrors("hipMemcpy");
    printf("%% 1st:%d 2nd:%d 3rd:%d\n", first, second, third);
    for (int i = 0; i < first; i++ ) {
        for (int j = 0; j < second; j++ ) {
            printf("%% ");
            for (int k = 0; k < third; k++ ) {
                printf("%f ", tmp[(size_t)i*second*third + (size_t)j*third + k]);
            }
            printf("\n");
        }
        printf("%%\n");
    }
    free(tmp);
}
// Computes one inter-subject-correlation (ISC) value per repeat into
// d_isc_array (device, length repeat_times) from d_aaft_matrix (device,
// layout [subject][repeat][time]).
// Pipeline: permute to [repeat][time][subject] -> pairwise Fisher-z'd
// Pearson correlations per repeat -> mean + inverse Fisher transform.
void correlationCoefficient(double *d_isc_array, const double *d_aaft_matrix, const int subject_size, const int time_size, const int repeat_times)
{
    // std::clock_t start;
    const int blocksize = 128;
    int total_works, nblock;
    double *d_data_matrix, *d_coef_matrix;
    // Scratch: the permuted data and one upper-triangle coefficient matrix
    // (subject_size choose 2 entries) per repeat.
    hipMalloc(&d_data_matrix, sizeof(double) * repeat_times * time_size * subject_size);
    hipMalloc(&d_coef_matrix, sizeof(double) * repeat_times * subject_size * (subject_size - 1) / 2);
    cudaCheckErrors("hipMalloc");
    // start = std::clock();
    total_works = repeat_times * subject_size * time_size;
    nblock = total_works/blocksize + (total_works%blocksize==0?0:1);
    hipLaunchKernelGGL(( rearrangeMatrixPosition), dim3(nblock), dim3(blocksize), 0, 0, d_data_matrix, d_aaft_matrix, subject_size, time_size, repeat_times);
    hipDeviceSynchronize();
    cudaCheckErrors("rearrangeMatrixPosition");
    // printf("%% transposeMatrix: %fs\n", (std::clock() - start) / (double) CLOCKS_PER_SEC);
    // start = std::clock();
    total_works = repeat_times * subject_size * (subject_size - 1) / 2;
    nblock = total_works/blocksize + (total_works%blocksize==0?0:1);
    hipLaunchKernelGGL(( calculateCorrelationCoefficientMatrix), dim3(nblock), dim3(blocksize), 0, 0, d_coef_matrix, d_data_matrix, subject_size, time_size, repeat_times);
    hipDeviceSynchronize();
    cudaCheckErrors("calculateCorrelationCoefficientMatrix");
    // printf("%% calculateCorrelationCoefficientMatrix: %fs\n", (std::clock() - start) / (double) CLOCKS_PER_SEC);
    // start = std::clock();
    nblock = repeat_times/blocksize + (repeat_times%blocksize==0?0:1);
    hipLaunchKernelGGL(( calculateInterSubjectCorrelation), dim3(nblock), dim3(blocksize), 0, 0, d_isc_array, d_coef_matrix, subject_size, repeat_times);
    hipDeviceSynchronize();
    cudaCheckErrors("calculateInterSubjectCorrelation");
    // printf("%% calculateInterSubjectCorrelation: %fs\n", (std::clock() - start) / (double) CLOCKS_PER_SEC);
    // {
    //     printMatrix(d_aaft_matrix, subject_size, repeat_times, time_size);
    //     printMatrix(d_data_matrix, repeat_times, time_size, subject_size);
    //     double *h_data_matrix, *h_coef_matrix, *h_isc_array;
    //     h_data_matrix = (double *)malloc(sizeof(double) * repeat_times * subject_size * time_size);
    //     h_coef_matrix = (double *)malloc(sizeof(double) * total_works);
    //     h_isc_array = (double *)malloc(sizeof(double) * repeat_times);
    //     hipMemcpy(h_data_matrix, d_data_matrix, sizeof(double) * repeat_times * subject_size * time_size, hipMemcpyDeviceToHost);
    //     hipMemcpy(h_coef_matrix, d_coef_matrix, sizeof(double) * total_works, hipMemcpyDeviceToHost);
    //     hipMemcpy(h_isc_array, d_isc_array, sizeof(double) * repeat_times, hipMemcpyDeviceToHost);
    //     const int idx = rand() % repeat_times;
    //     printf("%% idx: %d\n", idx);
    //     printf("data = [ ");
    //     for(int i = 0; i < time_size; i++) {
    //         for(int j = 0; j < subject_size; j++) {
    //             printf("%f ", h_data_matrix[idx * time_size * subject_size + i * subject_size + j]);
    //         }
    //         printf(";");
    //     }
    //     printf("];\n");
    //     printf("tmp=tril(corrcoef(data),-1);\n");
    //     printf("rr=tmp(find(tmp));\n");
    //     printf("z=0.5.*log((1+rr)./(1-rr))./(1/sqrt(size(data,1)/2.34-3));\n");
    //     printf("zm=mean(z)\n");
    //     printf("z'\n");
    //     printf("exit;\n");
    //     printf("%% coef_matrix: ");
    //     const int matrix_works = subject_size * (subject_size - 1) / 2;
    //     for (int i = 0; i < matrix_works; i++ )
    //         printf("%f ", h_coef_matrix[idx * matrix_works + i]);
    //     printf("\n");
    //     printf("%% --------------------\n");
    //     printf("%% mean: %f\n", h_isc_array[idx]);
    // }
    hipFree(d_data_matrix);
    hipFree(d_coef_matrix);
}
// int main(int argc, char **argv)
// {
// srand(time(NULL));
// std::clock_t start;
//
// const int subject_size = 8, time_size = 440, repeat_times = 10000;
//
// double *h_data_matrix;
// double *d_data_matrix, *d_isc_array;
//
// h_data_matrix = (double *)malloc(sizeof(double) * repeat_times * time_size * subject_size);
//
// start = std::clock();
// for(int i = 0; i < repeat_times; i++)
// for(int j = 0; j < time_size; j++)
// for(int k = 0; k < subject_size; k++)
// h_data_matrix[i * time_size * subject_size + j * subject_size + k] = rand();
// printf("%% Generating data: %fs\n", (std::clock() - start) / (double) CLOCKS_PER_SEC);
//
// hipMalloc(&d_data_matrix, sizeof(double) * repeat_times * time_size * subject_size);
// hipMalloc(&d_isc_array, sizeof(double) * repeat_times);
// cudaCheckErrors("hipMalloc");
//
// hipMemcpy(d_data_matrix, h_data_matrix, sizeof(double) * repeat_times * subject_size * time_size, hipMemcpyHostToDevice);
// cudaCheckErrors("hipMemcpy");
//
// start = std::clock();
// correlationCoefficient(d_isc_array, d_data_matrix, subject_size, time_size, repeat_times);
// printf("%% correlationCoefficient: %fs\n", (std::clock() - start) / (double) CLOCKS_PER_SEC);
//
// free(h_data_matrix);
// hipFree(d_data_matrix);
// hipFree(d_isc_array);
// cudaCheckErrors("hipFree");
//
// return 0;
// }
| fdcf4928f57f8b45bb41b4dd5b90fc676ae4a118.cu | #include <cstdio>
#include <cstdlib>
#include <ctime>
#include <thrust/count.h>
#include <thrust/execution_policy.h>
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
__device__ double calSum(const double *x, const int length, const int step)
{
double sum = 0;
for(int i = 0; i < length; i++) {
sum += x[i*step];
}
return sum;
}
__device__ double calAvg(const double *x, const int length, const int step)
{
return calSum(x, length, step) / length;
}
__device__ double calSquareSum(const double *x, const int length, const int step)
{
double sum = 0;
for(int i = 0; i < length; i++) {
sum += x[i*step] * x[i*step];
}
return sum;
}
__device__ double calMultiplySum(const double *x, const double *y, const int length, const int step)
{
double sum = 0;
for(int i = 0; i < length; i++) {
sum += x[i*step] * y[i*step];
}
return sum;
}
__device__ double calStd(const double *x, const int length, const int step)
{
const double x_square_sum = calSquareSum(x, length, step);
const double x_avg = calAvg(x, length, step);
return sqrt((x_square_sum - length * x_avg * x_avg) / (length - 1));
}
__device__ double calCorrCoef(const double *x, const double *y, const int length, const int step)
{
const double xy_sum = calMultiplySum(x, y, length, step);
const double x_avg = calAvg(x, length, step);
const double y_avg = calAvg(y, length, step);
const double x_std = calStd(x, length, step);
const double y_std = calStd(y, length, step);
return (xy_sum - length * x_avg * y_avg) / ((length - 1) * x_std * y_std);
}
__device__ double calFisherTransform(const double x, const int time_size)
{
// z=0.5.*log((1+rr)./(1-rr));
return 0.5 * log((1+x) / (1-x));
}
__device__ double calInverseFisherTransform(const double x)
{
// zm= (exp(2.*zm)-1)./(exp(2.*zm)+1);
return (exp(2*x) - 1) / (exp(2*x) + 1);
}
__global__ void calculateCorrelationCoefficientMatrix(double *all_corr_coef_matrix, const double *all_data_matrix, const int subject_size, const int time_size, const int repeat_times)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int matrix_works = subject_size * (subject_size - 1) / 2;
if (idx >= repeat_times * matrix_works)
return;
const int n_matrix = idx / matrix_works;
int remain_works = idx % matrix_works;
int x = 1, y = 1;
for(int i = 0; i < subject_size; i ++) {
const int row_works = subject_size - i - 1;
if (remain_works < row_works) {
x = i;
y = i + 1 + remain_works;
break;
}
remain_works -= row_works;
}
const double *data_matrix = all_data_matrix + n_matrix * time_size * subject_size;
const double coef = calCorrCoef(data_matrix + x, data_matrix + y, time_size, subject_size);
const double zvalue = calFisherTransform(coef, time_size);
all_corr_coef_matrix[idx] = zvalue;
}
__global__ void calculateInterSubjectCorrelation(double *isc_array, const double *all_corr_coef_matrix, const int subject_size, const int repeat_times)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= repeat_times)
return;
const int matrix_works = subject_size * (subject_size - 1) / 2;
const double *corr_coef_matrix = all_corr_coef_matrix + idx * matrix_works;
double sum = 0;
for (int i = 0; i < matrix_works; i++)
sum += corr_coef_matrix[i];
const double mean = sum / matrix_works;
isc_array[idx] = calInverseFisherTransform(mean);
}
__global__ void rearrangeMatrixPosition(double *data_matrix, const double *source_matrix, const int subject_size, const int time_size, const int repeat_times)
{
// 1st subject_size, 2nd repeat_times, 3rd time_size
// to
// 1st repeat_times, 2nd time_size, 3rd subject_size
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= repeat_times * time_size * subject_size)
return;
const int subject_idx = idx / (repeat_times * time_size);
const int repeat_idx = (idx % (repeat_times * time_size)) / time_size;
const int time_idx = (idx % (repeat_times * time_size)) % time_size;
const int data_idx = repeat_idx * time_size * subject_size + time_idx * subject_size + subject_idx;
data_matrix[data_idx] = source_matrix[idx];
}
// Debug helper: copies a [first x second x third] device array of doubles to
// the host and prints it as MATLAB-comment-style rows.
// `data` must be a device pointer holding first*second*third doubles.
// Fixed: the host allocation is now checked before use, the copy result is
// surfaced through the file's cudaCheckErrors convention, and the index
// arithmetic is widened to size_t to avoid int overflow on large tensors.
void printMatrix(const double *data, const int first, const int second, const int third)
{
    const size_t count = (size_t)first * second * third;
    double *tmp = (double *)malloc(sizeof(double) * count);
    if (tmp == NULL) {
        // Don't dereference a failed allocation; report and bail out.
        fprintf(stderr, "printMatrix: host allocation of %zu doubles failed\n", count);
        return;
    }
    cudaMemcpy(tmp, data, sizeof(double) * count, cudaMemcpyDeviceToHost);
    cudaCheckErrors("cudaMemcpy");
    printf("%% 1st:%d 2nd:%d 3rd:%d\n", first, second, third);
    for (int i = 0; i < first; i++ ) {
        for (int j = 0; j < second; j++ ) {
            printf("%% ");
            for (int k = 0; k < third; k++ ) {
                printf("%f ", tmp[(size_t)i*second*third + (size_t)j*third + k]);
            }
            printf("\n");
        }
        printf("%%\n");
    }
    free(tmp);
}
void correlationCoefficient(double *d_isc_array, const double *d_aaft_matrix, const int subject_size, const int time_size, const int repeat_times)
{
// std::clock_t start;
const int blocksize = 128;
int total_works, nblock;
double *d_data_matrix, *d_coef_matrix;
cudaMalloc(&d_data_matrix, sizeof(double) * repeat_times * time_size * subject_size);
cudaMalloc(&d_coef_matrix, sizeof(double) * repeat_times * subject_size * (subject_size - 1) / 2);
cudaCheckErrors("cudaMalloc");
// start = std::clock();
total_works = repeat_times * subject_size * time_size;
nblock = total_works/blocksize + (total_works%blocksize==0?0:1);
rearrangeMatrixPosition<<<nblock, blocksize>>>(d_data_matrix, d_aaft_matrix, subject_size, time_size, repeat_times);
cudaDeviceSynchronize();
cudaCheckErrors("rearrangeMatrixPosition");
// printf("%% transposeMatrix: %fs\n", (std::clock() - start) / (double) CLOCKS_PER_SEC);
// start = std::clock();
total_works = repeat_times * subject_size * (subject_size - 1) / 2;
nblock = total_works/blocksize + (total_works%blocksize==0?0:1);
calculateCorrelationCoefficientMatrix<<<nblock, blocksize>>>(d_coef_matrix, d_data_matrix, subject_size, time_size, repeat_times);
cudaDeviceSynchronize();
cudaCheckErrors("calculateCorrelationCoefficientMatrix");
// printf("%% calculateCorrelationCoefficientMatrix: %fs\n", (std::clock() - start) / (double) CLOCKS_PER_SEC);
// start = std::clock();
nblock = repeat_times/blocksize + (repeat_times%blocksize==0?0:1);
calculateInterSubjectCorrelation<<<nblock, blocksize>>>(d_isc_array, d_coef_matrix, subject_size, repeat_times);
cudaDeviceSynchronize();
cudaCheckErrors("calculateInterSubjectCorrelation");
// printf("%% calculateInterSubjectCorrelation: %fs\n", (std::clock() - start) / (double) CLOCKS_PER_SEC);
// {
// printMatrix(d_aaft_matrix, subject_size, repeat_times, time_size);
// printMatrix(d_data_matrix, repeat_times, time_size, subject_size);
// double *h_data_matrix, *h_coef_matrix, *h_isc_array;
// h_data_matrix = (double *)malloc(sizeof(double) * repeat_times * subject_size * time_size);
// h_coef_matrix = (double *)malloc(sizeof(double) * total_works);
// h_isc_array = (double *)malloc(sizeof(double) * repeat_times);
// cudaMemcpy(h_data_matrix, d_data_matrix, sizeof(double) * repeat_times * subject_size * time_size, cudaMemcpyDeviceToHost);
// cudaMemcpy(h_coef_matrix, d_coef_matrix, sizeof(double) * total_works, cudaMemcpyDeviceToHost);
// cudaMemcpy(h_isc_array, d_isc_array, sizeof(double) * repeat_times, cudaMemcpyDeviceToHost);
// const int idx = rand() % repeat_times;
// printf("%% idx: %d\n", idx);
// printf("data = [ ");
// for(int i = 0; i < time_size; i++) {
// for(int j = 0; j < subject_size; j++) {
// printf("%f ", h_data_matrix[idx * time_size * subject_size + i * subject_size + j]);
// }
// printf(";");
// }
// printf("];\n");
// printf("tmp=tril(corrcoef(data),-1);\n");
// printf("rr=tmp(find(tmp));\n");
// printf("z=0.5.*log((1+rr)./(1-rr))./(1/sqrt(size(data,1)/2.34-3));\n");
// printf("zm=mean(z)\n");
// printf("z'\n");
// printf("exit;\n");
// printf("%% coef_matrix: ");
// const int matrix_works = subject_size * (subject_size - 1) / 2;
// for (int i = 0; i < matrix_works; i++ )
// printf("%f ", h_coef_matrix[idx * matrix_works + i]);
// printf("\n");
// printf("%% --------------------\n");
// printf("%% mean: %f\n", h_isc_array[idx]);
// }
cudaFree(d_data_matrix);
cudaFree(d_coef_matrix);
}
// int main(int argc, char **argv)
// {
// srand(time(NULL));
// std::clock_t start;
//
// const int subject_size = 8, time_size = 440, repeat_times = 10000;
//
// double *h_data_matrix;
// double *d_data_matrix, *d_isc_array;
//
// h_data_matrix = (double *)malloc(sizeof(double) * repeat_times * time_size * subject_size);
//
// start = std::clock();
// for(int i = 0; i < repeat_times; i++)
// for(int j = 0; j < time_size; j++)
// for(int k = 0; k < subject_size; k++)
// h_data_matrix[i * time_size * subject_size + j * subject_size + k] = rand();
// printf("%% Generating data: %fs\n", (std::clock() - start) / (double) CLOCKS_PER_SEC);
//
// cudaMalloc(&d_data_matrix, sizeof(double) * repeat_times * time_size * subject_size);
// cudaMalloc(&d_isc_array, sizeof(double) * repeat_times);
// cudaCheckErrors("cudaMalloc");
//
// cudaMemcpy(d_data_matrix, h_data_matrix, sizeof(double) * repeat_times * subject_size * time_size, cudaMemcpyHostToDevice);
// cudaCheckErrors("cudaMemcpy");
//
// start = std::clock();
// correlationCoefficient(d_isc_array, d_data_matrix, subject_size, time_size, repeat_times);
// printf("%% correlationCoefficient: %fs\n", (std::clock() - start) / (double) CLOCKS_PER_SEC);
//
// free(h_data_matrix);
// cudaFree(d_data_matrix);
// cudaFree(d_isc_array);
// cudaCheckErrors("cudaFree");
//
// return 0;
// }
|
7561b905385b915266dbbaaa10ae6c11f79938f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <PeriodicHeightFieldCollision_Kernel.cuh>
#include <double3.h>
/******************************************************************************************/
/******************************************************************************************/
extern "C"
{
/******************************************************************************************/
/******************************************************************************************/
/* Height of the periodic surface at (pos.x, pos.z): the sum of nbFunc planar
 * cosine waves, where A[i] is the amplitude, k[i] the wavenumber, theta[i]
 * the propagation direction in the x-z plane and phi[i] the phase offset.
 * Callable from both host and device. */
__host__ __device__ double calculateHeight_Periodic(double3 pos, uint nbFunc, double* A, double* k, double* theta, double* phi)
{
	double height = 0.0;
	uint i = 0;
	while(i < nbFunc){
		// coordinate of pos along the i-th wave's propagation direction
		double along = pos.x*cos(theta[i]) + pos.z*sin(theta[i]);
		height += A[i]*cos(k[i]*along + phi[i]);
		++i;
	}
	return height;
}
/******************************************************************************************/
/******************************************************************************************/
/* Unit surface normal of the periodic height field at pos, approximated by
 * central finite differences with step 0.01. Returns the normalized vector
 * (-dh/dx, 1, -dh/dz), which points upward (+y). */
__host__ __device__ double3 approximateNormale_Periodic(double3 pos, uint nbFunc, double* A, double* k, double* theta, double* phi)
{
	double step = 0.01;
	// slope along x: (h(x+d) - h(x-d)) / (2d)
	double3 xPlus  = make_double3(pos.x+step, pos.y, pos.z);
	double3 xMinus = make_double3(pos.x-step, pos.y, pos.z);
	double slopeX = (calculateHeight_Periodic(xPlus,nbFunc,A,k,theta,phi)
	               - calculateHeight_Periodic(xMinus,nbFunc,A,k,theta,phi))/(2*step);
	// slope along z: (h(z+d) - h(z-d)) / (2d)
	double3 zPlus  = make_double3(pos.x, pos.y, pos.z+step);
	double3 zMinus = make_double3(pos.x, pos.y, pos.z-step);
	double slopeZ = (calculateHeight_Periodic(zPlus,nbFunc,A,k,theta,phi)
	               - calculateHeight_Periodic(zMinus,nbFunc,A,k,theta,phi))/(2*step);
	// the y component of the un-normalized normal is 1.0
	double invLen = 1.0/sqrt(slopeX*slopeX + 1.0 + slopeZ*slopeZ);
	double3 normal;
	normal.x = -slopeX*invLen;
	normal.y =  invLen;
	normal.z = -slopeZ*invLen;
	return normal;
}
/******************************************************************************************/
/******************************************************************************************/
/* Resolves collisions of particles against the periodic height field.
 * For each particle whose (x,z) lies inside [min_, max_] and whose y is at or
 * below the surface, the particle is clamped back onto the surface and its
 * velocity is reflected about the local surface normal with restitution r
 * (r is scaled by penetration depth over the distance travelled in one step).
 * Launch: 1D grid covering nbBodiesP particles; radiusParticle is currently
 * unused but kept for interface compatibility.
 * Fix: the original guard tested `length(newVel[indexP])>0` twice in the same
 * condition (copy-paste duplicate) and re-read newVel/dot from global memory
 * repeatedly; the duplicate is removed and the values are hoisted, behavior
 * unchanged. */
__global__ void collisionSystem_Periodic_HeightFieldCollision_Kernel
(double3* newPos, double3 *newVel, double radiusParticle, float dt, uint nbBodiesP,
 uint nbFunc, double* A, double* k, double* theta, double* phi,
 float3 min_, float3 max_, float elast)
{
	int indexP = blockIdx.x * blockDim.x + threadIdx.x;
	if(indexP < nbBodiesP){
		double3 pos = newPos[indexP];
		if(pos.x>=min_.x && pos.z>=min_.z && pos.x<=max_.x && pos.z<=max_.z){
			double y = calculateHeight_Periodic(pos,nbFunc,A,k,theta,phi) + min_.y;
			if(y>=pos.y){
				// point on the surface directly above/below the particle
				double3 pInter = make_double3(pos.x,y,pos.z);
				double3 vel = newVel[indexP];
				double speed = length(vel);
				float r = 0;
				if(speed>0 && elast>0)
					// restitution scaled by penetration depth / step distance
					r = elast*length(pos - pInter)/(dt*speed);
				double3 nInter = approximateNormale_Periodic(pInter,nbFunc,A,k,theta,phi);
				nInter = normalize(nInter);
				// reflect the normal component of the velocity: v' = v - (1+r)(v.n)n
				double vn = dot(vel,nInter);
				double3 V;
				V.x = vel.x - (1+r)*vn*nInter.x;
				V.y = vel.y - (1+r)*vn*nInter.y;
				V.z = vel.z - (1+r)*vn*nInter.z;
				newVel[indexP] = make_double3(V.x,V.y,V.z);
				// snap the particle back onto the surface
				newPos[indexP].y = y;
			}
		}
	}
}
/******************************************************************************************/
/******************************************************************************************/
}
/******************************************************************************************/
/******************************************************************************************/
| 7561b905385b915266dbbaaa10ae6c11f79938f9.cu | #include <PeriodicHeightFieldCollision_Kernel.cuh>
#include <double3.h>
/******************************************************************************************/
/******************************************************************************************/
extern "C"
{
/******************************************************************************************/
/******************************************************************************************/
/* Height of the periodic surface at (pos.x, pos.z): the sum of nbFunc planar
 * cosine waves, where A[i] is the amplitude, k[i] the wavenumber, theta[i]
 * the propagation direction in the x-z plane and phi[i] the phase offset.
 * Callable from both host and device. */
__host__ __device__ double calculateHeight_Periodic(double3 pos, uint nbFunc, double* A, double* k, double* theta, double* phi)
{
	double height = 0.0;
	uint i = 0;
	while(i < nbFunc){
		// coordinate of pos along the i-th wave's propagation direction
		double along = pos.x*cos(theta[i]) + pos.z*sin(theta[i]);
		height += A[i]*cos(k[i]*along + phi[i]);
		++i;
	}
	return height;
}
/******************************************************************************************/
/******************************************************************************************/
/* Unit surface normal of the periodic height field at pos, approximated by
 * central finite differences with step 0.01. Returns the normalized vector
 * (-dh/dx, 1, -dh/dz), which points upward (+y). */
__host__ __device__ double3 approximateNormale_Periodic(double3 pos, uint nbFunc, double* A, double* k, double* theta, double* phi)
{
	double step = 0.01;
	// slope along x: (h(x+d) - h(x-d)) / (2d)
	double3 xPlus  = make_double3(pos.x+step, pos.y, pos.z);
	double3 xMinus = make_double3(pos.x-step, pos.y, pos.z);
	double slopeX = (calculateHeight_Periodic(xPlus,nbFunc,A,k,theta,phi)
	               - calculateHeight_Periodic(xMinus,nbFunc,A,k,theta,phi))/(2*step);
	// slope along z: (h(z+d) - h(z-d)) / (2d)
	double3 zPlus  = make_double3(pos.x, pos.y, pos.z+step);
	double3 zMinus = make_double3(pos.x, pos.y, pos.z-step);
	double slopeZ = (calculateHeight_Periodic(zPlus,nbFunc,A,k,theta,phi)
	               - calculateHeight_Periodic(zMinus,nbFunc,A,k,theta,phi))/(2*step);
	// the y component of the un-normalized normal is 1.0
	double invLen = 1.0/sqrt(slopeX*slopeX + 1.0 + slopeZ*slopeZ);
	double3 normal;
	normal.x = -slopeX*invLen;
	normal.y =  invLen;
	normal.z = -slopeZ*invLen;
	return normal;
}
/******************************************************************************************/
/******************************************************************************************/
/* Resolves collisions of particles against the periodic height field.
 * For each particle whose (x,z) lies inside [min_, max_] and whose y is at or
 * below the surface, the particle is clamped back onto the surface and its
 * velocity is reflected about the local surface normal with restitution r
 * (r is scaled by penetration depth over the distance travelled in one step).
 * Launch: 1D grid covering nbBodiesP particles; radiusParticle is currently
 * unused but kept for interface compatibility.
 * Fix: the original guard tested `length(newVel[indexP])>0` twice in the same
 * condition (copy-paste duplicate) and re-read newVel/dot from global memory
 * repeatedly; the duplicate is removed and the values are hoisted, behavior
 * unchanged. */
__global__ void collisionSystem_Periodic_HeightFieldCollision_Kernel
(double3* newPos, double3 *newVel, double radiusParticle, float dt, uint nbBodiesP,
 uint nbFunc, double* A, double* k, double* theta, double* phi,
 float3 min_, float3 max_, float elast)
{
	int indexP = blockIdx.x * blockDim.x + threadIdx.x;
	if(indexP < nbBodiesP){
		double3 pos = newPos[indexP];
		if(pos.x>=min_.x && pos.z>=min_.z && pos.x<=max_.x && pos.z<=max_.z){
			double y = calculateHeight_Periodic(pos,nbFunc,A,k,theta,phi) + min_.y;
			if(y>=pos.y){
				// point on the surface directly above/below the particle
				double3 pInter = make_double3(pos.x,y,pos.z);
				double3 vel = newVel[indexP];
				double speed = length(vel);
				float r = 0;
				if(speed>0 && elast>0)
					// restitution scaled by penetration depth / step distance
					r = elast*length(pos - pInter)/(dt*speed);
				double3 nInter = approximateNormale_Periodic(pInter,nbFunc,A,k,theta,phi);
				nInter = normalize(nInter);
				// reflect the normal component of the velocity: v' = v - (1+r)(v.n)n
				double vn = dot(vel,nInter);
				double3 V;
				V.x = vel.x - (1+r)*vn*nInter.x;
				V.y = vel.y - (1+r)*vn*nInter.y;
				V.z = vel.z - (1+r)*vn*nInter.z;
				newVel[indexP] = make_double3(V.x,V.y,V.z);
				// snap the particle back onto the surface
				newPos[indexP].y = y;
			}
		}
	}
}
/******************************************************************************************/
/******************************************************************************************/
}
/******************************************************************************************/
/******************************************************************************************/
|
085160a55e866a534980a5afc85d18f1b72034be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "wave3d.h"
// Advances the coupled stress wavefields sigmaX/sigmaZ of an elastic wave
// equation (VTI-style stiffnesses c11/c13/c33) by one leapfrog time step,
// using an 8-point-per-axis spatial stencil with coefficients C0..C4.
// Each launch processes one HALF_STENCIL-thick slab of z-slices; curSigmaZ0 /
// curSigmaZ1 / curSigmaZ2 appear to hold the slab above, the current slab and
// the slab below (NOTE(review): inferred from the register-queue priming --
// confirm against the host-side slab bookkeeping).  dt2dx2/dt2dy2/dt2dz2 are
// the precomputed factors dt^2/dx^2, dt^2/dy^2 and dt^2/dz^2.
__global__ void forwardKernel(float *nextSigmaX,float *curSigmaX1,float *prevSigmaX,float *nextSigmaZ,float *curSigmaZ0,float *curSigmaZ1,float *curSigmaZ2,float *prevSigmaZ,float *c11,float *c13,float *c33,int nx,int ny,float dt2dx2,float dt2dy2,float dt2dz2){
 // x/y stencil tile staged in shared memory, with a HALF_STENCIL halo on all
 // four sides.
 __shared__ float sSigmaX[BLOCK_DIM+2*HALF_STENCIL][BLOCK_DIM+2*HALF_STENCIL];
 int ix=threadIdx.x+blockIdx.x*blockDim.x+HALF_STENCIL;
 int iy=threadIdx.y+blockIdx.y*blockDim.y+HALF_STENCIL;
 // NOTE(review): the __syncthreads() calls below sit inside this guard; the
 // barriers are only safe if every thread of a block takes the same branch --
 // confirm the launch configuration guarantees this.
 if(ix<nx-HALF_STENCIL && iy<ny-HALF_STENCIL){
  int i=ix+iy*nx;
  int six=threadIdx.x+HALF_STENCIL;
  int siy=threadIdx.y+HALF_STENCIL;
  // Per-thread register queue holding the 2*HALF_STENCIL+1 z-neighbours.
  float zSigmaZ[2*HALF_STENCIL+1];
  // Prime the queue with slices from the slab above (Z0) and the current
  // slab (Z1); slot 0 is filled by the first rotation below.
  for(int iz=0;iz<HALF_STENCIL;++iz){
   int j=i+iz*nx*ny;
   zSigmaZ[iz+1]=curSigmaZ0[j];
   zSigmaZ[iz+1+HALF_STENCIL]=curSigmaZ1[j];
  }
  for(int iz=0;iz<HALF_STENCIL;++iz){
   int j=i+iz*nx*ny;
   // Shift the queue down by one slice (hard-codes HALF_STENCIL==4, slots 0..8)
   zSigmaZ[0]=zSigmaZ[1];
   zSigmaZ[1]=zSigmaZ[2];
   zSigmaZ[2]=zSigmaZ[3];
   zSigmaZ[3]=zSigmaZ[4];
   zSigmaZ[4]=zSigmaZ[5];
   zSigmaZ[5]=zSigmaZ[6];
   zSigmaZ[6]=zSigmaZ[7];
   zSigmaZ[7]=zSigmaZ[8];
   // ... and pull the next slice in from the slab below.
   zSigmaZ[8]=curSigmaZ2[j];
   __syncthreads(); // previous iteration's tile no longer read past this point
   sSigmaX[six][siy]=curSigmaX1[j];
   // Edge threads also load the x halo columns; k clamps the effective tile
   // width at the right edge of the grid.
   if(threadIdx.x<HALF_STENCIL){
    int k=min(blockDim.x,nx-2*HALF_STENCIL-blockIdx.x*blockDim.x);
    sSigmaX[threadIdx.x][siy]=curSigmaX1[j-HALF_STENCIL];
    sSigmaX[six+k][siy]=curSigmaX1[j+k];
   }
   // Same for the y halo rows.
   if(threadIdx.y<HALF_STENCIL){
    int k=min(blockDim.y,ny-2*HALF_STENCIL-blockIdx.y*blockDim.y);
    sSigmaX[six][threadIdx.y]=curSigmaX1[j-HALF_STENCIL*nx];
    sSigmaX[six][siy+k]=curSigmaX1[j+k*nx];
   }
   __syncthreads(); // tile fully populated before any thread reads it
   // Second x-derivative of sigmaX, pre-scaled by dt^2/dx^2.
   float tx=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six-1][siy]+sSigmaX[six+1][siy])
                                 +C2*(sSigmaX[six-2][siy]+sSigmaX[six+2][siy])
                                 +C3*(sSigmaX[six-3][siy]+sSigmaX[six+3][siy])
                                 +C4*(sSigmaX[six-4][siy]+sSigmaX[six+4][siy]))*dt2dx2;
   // Second y-derivative, pre-scaled by dt^2/dy^2.
   float ty=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six][siy-1]+sSigmaX[six][siy+1])
                                 +C2*(sSigmaX[six][siy-2]+sSigmaX[six][siy+2])
                                 +C3*(sSigmaX[six][siy-3]+sSigmaX[six][siy+3])
                                 +C4*(sSigmaX[six][siy-4]+sSigmaX[six][siy+4]))*dt2dy2;
   ty+=tx; // ty now carries the full horizontal (x+y) contribution
   // Second z-derivative from the register queue, pre-scaled by dt^2/dz^2.
   float tz=(C0*zSigmaZ[HALF_STENCIL]+C1*(zSigmaZ[HALF_STENCIL-1]+zSigmaZ[HALF_STENCIL+1])
                                     +C2*(zSigmaZ[HALF_STENCIL-2]+zSigmaZ[HALF_STENCIL+2])
                                     +C3*(zSigmaZ[HALF_STENCIL-3]+zSigmaZ[HALF_STENCIL+3])
                                     +C4*(zSigmaZ[HALF_STENCIL-4]+zSigmaZ[HALF_STENCIL+4]))*dt2dz2;
   // Leapfrog update: sigma(t+dt) = weighted derivatives + 2*sigma(t) - sigma(t-dt).
   nextSigmaX[j]=c11[j]*ty+c13[j]*tz+2.*sSigmaX[six][siy]-prevSigmaX[j];
   nextSigmaZ[j]=c13[j]*ty+c33[j]*tz+2.*zSigmaZ[HALF_STENCIL]-prevSigmaZ[j];
  }
 }
 return;
}
// Same leapfrog stencil update as forwardKernel, specialized for the topmost
// z-slab: the slices above the model top are taken as zero (the register
// queue is primed with 0 instead of reading curSigmaZ0).  See forwardKernel
// for the full description of the scheme and parameters.
__global__ void forwardKernelTopBlock(float *nextSigmaX,float *curSigmaX1,float *prevSigmaX,float *nextSigmaZ,float *curSigmaZ0,float *curSigmaZ1,float *curSigmaZ2,float *prevSigmaZ,float *c11,float *c13,float *c33,int nx,int ny,float dt2dx2,float dt2dy2,float dt2dz2){
 // x/y stencil tile with a HALF_STENCIL halo on all four sides.
 __shared__ float sSigmaX[BLOCK_DIM+2*HALF_STENCIL][BLOCK_DIM+2*HALF_STENCIL];
 int ix=threadIdx.x+blockIdx.x*blockDim.x+HALF_STENCIL;
 int iy=threadIdx.y+blockIdx.y*blockDim.y+HALF_STENCIL;
 // NOTE(review): __syncthreads() below sits inside this guard; safe only if
 // all threads of a block take the same branch -- confirm launch config.
 if(ix<nx-HALF_STENCIL && iy<ny-HALF_STENCIL){
  int i=ix+iy*nx;
  int six=threadIdx.x+HALF_STENCIL;
  int siy=threadIdx.y+HALF_STENCIL;
  // Per-thread register queue holding the 2*HALF_STENCIL+1 z-neighbours.
  float zSigmaZ[2*HALF_STENCIL+1];
  // Top boundary: the slab above is all zeros (curSigmaZ0 is not read).
  for(int iz=0;iz<HALF_STENCIL;++iz){
   int j=i+iz*nx*ny;
   zSigmaZ[iz+1]=0.;
   zSigmaZ[iz+1+HALF_STENCIL]=curSigmaZ1[j];
  }
  for(int iz=0;iz<HALF_STENCIL;++iz){
   int j=i+iz*nx*ny;
   // Shift the queue down by one slice (hard-codes HALF_STENCIL==4, slots 0..8)
   zSigmaZ[0]=zSigmaZ[1];
   zSigmaZ[1]=zSigmaZ[2];
   zSigmaZ[2]=zSigmaZ[3];
   zSigmaZ[3]=zSigmaZ[4];
   zSigmaZ[4]=zSigmaZ[5];
   zSigmaZ[5]=zSigmaZ[6];
   zSigmaZ[6]=zSigmaZ[7];
   zSigmaZ[7]=zSigmaZ[8];
   // ... and pull the next slice in from the slab below.
   zSigmaZ[8]=curSigmaZ2[j];
   __syncthreads(); // previous iteration's tile no longer read past this point
   sSigmaX[six][siy]=curSigmaX1[j];
   // Edge threads also load the x halo columns; k clamps the tile width.
   if(threadIdx.x<HALF_STENCIL){
    int k=min(blockDim.x,nx-2*HALF_STENCIL-blockIdx.x*blockDim.x);
    sSigmaX[threadIdx.x][siy]=curSigmaX1[j-HALF_STENCIL];
    sSigmaX[six+k][siy]=curSigmaX1[j+k];
   }
   // Same for the y halo rows.
   if(threadIdx.y<HALF_STENCIL){
    int k=min(blockDim.y,ny-2*HALF_STENCIL-blockIdx.y*blockDim.y);
    sSigmaX[six][threadIdx.y]=curSigmaX1[j-HALF_STENCIL*nx];
    sSigmaX[six][siy+k]=curSigmaX1[j+k*nx];
   }
   __syncthreads(); // tile fully populated before any thread reads it
   // Second derivatives in x, y and z, each pre-scaled by dt^2/h^2.
   float tx=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six-1][siy]+sSigmaX[six+1][siy])
                                 +C2*(sSigmaX[six-2][siy]+sSigmaX[six+2][siy])
                                 +C3*(sSigmaX[six-3][siy]+sSigmaX[six+3][siy])
                                 +C4*(sSigmaX[six-4][siy]+sSigmaX[six+4][siy]))*dt2dx2;
   float ty=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six][siy-1]+sSigmaX[six][siy+1])
                                 +C2*(sSigmaX[six][siy-2]+sSigmaX[six][siy+2])
                                 +C3*(sSigmaX[six][siy-3]+sSigmaX[six][siy+3])
                                 +C4*(sSigmaX[six][siy-4]+sSigmaX[six][siy+4]))*dt2dy2;
   ty+=tx; // ty now carries the full horizontal (x+y) contribution
   float tz=(C0*zSigmaZ[HALF_STENCIL]+C1*(zSigmaZ[HALF_STENCIL-1]+zSigmaZ[HALF_STENCIL+1])
                                     +C2*(zSigmaZ[HALF_STENCIL-2]+zSigmaZ[HALF_STENCIL+2])
                                     +C3*(zSigmaZ[HALF_STENCIL-3]+zSigmaZ[HALF_STENCIL+3])
                                     +C4*(zSigmaZ[HALF_STENCIL-4]+zSigmaZ[HALF_STENCIL+4]))*dt2dz2;
   // Leapfrog update: sigma(t+dt) = weighted derivatives + 2*sigma(t) - sigma(t-dt).
   nextSigmaX[j]=c11[j]*ty+c13[j]*tz+2.*sSigmaX[six][siy]-prevSigmaX[j];
   nextSigmaZ[j]=c13[j]*ty+c33[j]*tz+2.*zSigmaZ[HALF_STENCIL]-prevSigmaZ[j];
  }
 }
 return;
}
// Same leapfrog stencil update as forwardKernel, specialized for the bottom
// z-slab: the slices below the model bottom are taken as zero (the queue's
// incoming slot is filled with 0 instead of reading curSigmaZ2).  See
// forwardKernel for the full description of the scheme and parameters.
__global__ void forwardKernelBottomBlock(float *nextSigmaX,float *curSigmaX1,float *prevSigmaX,float *nextSigmaZ,float *curSigmaZ0,float *curSigmaZ1,float *curSigmaZ2,float *prevSigmaZ,float *c11,float *c13,float *c33,int nx,int ny,float dt2dx2,float dt2dy2,float dt2dz2){
 // x/y stencil tile with a HALF_STENCIL halo on all four sides.
 __shared__ float sSigmaX[BLOCK_DIM+2*HALF_STENCIL][BLOCK_DIM+2*HALF_STENCIL];
 int ix=threadIdx.x+blockIdx.x*blockDim.x+HALF_STENCIL;
 int iy=threadIdx.y+blockIdx.y*blockDim.y+HALF_STENCIL;
 // NOTE(review): __syncthreads() below sits inside this guard; safe only if
 // all threads of a block take the same branch -- confirm launch config.
 if(ix<nx-HALF_STENCIL && iy<ny-HALF_STENCIL){
  int i=ix+iy*nx;
  int six=threadIdx.x+HALF_STENCIL;
  int siy=threadIdx.y+HALF_STENCIL;
  // Per-thread register queue holding the 2*HALF_STENCIL+1 z-neighbours.
  float zSigmaZ[2*HALF_STENCIL+1];
  // Prime the queue from the slab above (Z0) and the current slab (Z1).
  for(int iz=0;iz<HALF_STENCIL;++iz){
   int j=i+iz*nx*ny;
   zSigmaZ[iz+1]=curSigmaZ0[j];
   zSigmaZ[iz+1+HALF_STENCIL]=curSigmaZ1[j];
  }
  for(int iz=0;iz<HALF_STENCIL;++iz){
   int j=i+iz*nx*ny;
   // Shift the queue down by one slice (hard-codes HALF_STENCIL==4, slots 0..8)
   zSigmaZ[0]=zSigmaZ[1];
   zSigmaZ[1]=zSigmaZ[2];
   zSigmaZ[2]=zSigmaZ[3];
   zSigmaZ[3]=zSigmaZ[4];
   zSigmaZ[4]=zSigmaZ[5];
   zSigmaZ[5]=zSigmaZ[6];
   zSigmaZ[6]=zSigmaZ[7];
   zSigmaZ[7]=zSigmaZ[8];
   // Bottom boundary: slices below the model are all zeros.
   zSigmaZ[8]=0.;
   __syncthreads(); // previous iteration's tile no longer read past this point
   sSigmaX[six][siy]=curSigmaX1[j];
   // Edge threads also load the x halo columns; k clamps the tile width.
   if(threadIdx.x<HALF_STENCIL){
    int k=min(blockDim.x,nx-2*HALF_STENCIL-blockIdx.x*blockDim.x);
    sSigmaX[threadIdx.x][siy]=curSigmaX1[j-HALF_STENCIL];
    sSigmaX[six+k][siy]=curSigmaX1[j+k];
   }
   // Same for the y halo rows.
   if(threadIdx.y<HALF_STENCIL){
    int k=min(blockDim.y,ny-2*HALF_STENCIL-blockIdx.y*blockDim.y);
    sSigmaX[six][threadIdx.y]=curSigmaX1[j-HALF_STENCIL*nx];
    sSigmaX[six][siy+k]=curSigmaX1[j+k*nx];
   }
   __syncthreads(); // tile fully populated before any thread reads it
   // Second derivatives in x, y and z, each pre-scaled by dt^2/h^2.
   float tx=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six-1][siy]+sSigmaX[six+1][siy])
                                 +C2*(sSigmaX[six-2][siy]+sSigmaX[six+2][siy])
                                 +C3*(sSigmaX[six-3][siy]+sSigmaX[six+3][siy])
                                 +C4*(sSigmaX[six-4][siy]+sSigmaX[six+4][siy]))*dt2dx2;
   float ty=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six][siy-1]+sSigmaX[six][siy+1])
                                 +C2*(sSigmaX[six][siy-2]+sSigmaX[six][siy+2])
                                 +C3*(sSigmaX[six][siy-3]+sSigmaX[six][siy+3])
                                 +C4*(sSigmaX[six][siy-4]+sSigmaX[six][siy+4]))*dt2dy2;
   ty+=tx; // ty now carries the full horizontal (x+y) contribution
   float tz=(C0*zSigmaZ[HALF_STENCIL]+C1*(zSigmaZ[HALF_STENCIL-1]+zSigmaZ[HALF_STENCIL+1])
                                     +C2*(zSigmaZ[HALF_STENCIL-2]+zSigmaZ[HALF_STENCIL+2])
                                     +C3*(zSigmaZ[HALF_STENCIL-3]+zSigmaZ[HALF_STENCIL+3])
                                     +C4*(zSigmaZ[HALF_STENCIL-4]+zSigmaZ[HALF_STENCIL+4]))*dt2dz2;
   // Leapfrog update: sigma(t+dt) = weighted derivatives + 2*sigma(t) - sigma(t-dt).
   nextSigmaX[j]=c11[j]*ty+c13[j]*tz+2.*sSigmaX[six][siy]-prevSigmaX[j];
   nextSigmaZ[j]=c13[j]*ty+c33[j]*tz+2.*zSigmaZ[HALF_STENCIL]-prevSigmaZ[j];
  }
 }
 return;
}
// Backward/adjoint pass: re-propagates the source wavefield one step (same
// stencil scheme as forwardKernel, but with raw second derivatives divided by
// dx2/dy2/dz2 -- presumably the squared grid spacings -- and the time factor
// dt2 = dt^2 applied at the update) and simultaneously accumulates the
// gradient of the objective w.r.t. the stiffnesses c11/c13/c33 into
// gc11/gc13/gc33 by zero-lag correlation with the adjoint wavefields
// curLambdaX/curLambdaZ.
__global__ void gradientKernel(float *gc11,float *gc13,float *gc33,float *nextSigmaX,float *curSigmaX1,float *prevSigmaX,float *nextSigmaZ,float *curSigmaZ0,float *curSigmaZ1,float *curSigmaZ2,float *prevSigmaZ,float *curLambdaX,float *curLambdaZ,float *c11,float *c13,float *c33,int nx,int ny,float dx2,float dy2,float dz2,float dt2){
 // x/y stencil tile with a HALF_STENCIL halo on all four sides.
 __shared__ float sSigmaX[BLOCK_DIM+2*HALF_STENCIL][BLOCK_DIM+2*HALF_STENCIL];
 int ix=threadIdx.x+blockIdx.x*blockDim.x+HALF_STENCIL;
 int iy=threadIdx.y+blockIdx.y*blockDim.y+HALF_STENCIL;
 // NOTE(review): __syncthreads() below sits inside this guard; safe only if
 // all threads of a block take the same branch -- confirm launch config.
 if(ix<nx-HALF_STENCIL && iy<ny-HALF_STENCIL){
  int i=ix+iy*nx;
  int six=threadIdx.x+HALF_STENCIL;
  int siy=threadIdx.y+HALF_STENCIL;
  // Per-thread register queue holding the 2*HALF_STENCIL+1 z-neighbours.
  float zSigmaZ[2*HALF_STENCIL+1];
  // Prime the queue from the slab above (Z0) and the current slab (Z1).
  for(int iz=0;iz<HALF_STENCIL;++iz){
   int j=i+iz*nx*ny;
   zSigmaZ[iz+1]=curSigmaZ0[j];
   zSigmaZ[iz+1+HALF_STENCIL]=curSigmaZ1[j];
  }
  for(int iz=0;iz<HALF_STENCIL;++iz){
   int j=i+iz*nx*ny;
   // Shift the queue down by one slice (hard-codes HALF_STENCIL==4, slots 0..8)
   zSigmaZ[0]=zSigmaZ[1];
   zSigmaZ[1]=zSigmaZ[2];
   zSigmaZ[2]=zSigmaZ[3];
   zSigmaZ[3]=zSigmaZ[4];
   zSigmaZ[4]=zSigmaZ[5];
   zSigmaZ[5]=zSigmaZ[6];
   zSigmaZ[6]=zSigmaZ[7];
   zSigmaZ[7]=zSigmaZ[8];
   // ... and pull the next slice in from the slab below.
   zSigmaZ[8]=curSigmaZ2[j];
   __syncthreads(); // previous iteration's tile no longer read past this point
   sSigmaX[six][siy]=curSigmaX1[j];
   // Edge threads also load the x halo columns; k clamps the tile width.
   if(threadIdx.x<HALF_STENCIL){
    int k=min(blockDim.x,nx-2*HALF_STENCIL-blockIdx.x*blockDim.x);
    sSigmaX[threadIdx.x][siy]=curSigmaX1[j-HALF_STENCIL];
    sSigmaX[six+k][siy]=curSigmaX1[j+k];
   }
   // Same for the y halo rows.
   if(threadIdx.y<HALF_STENCIL){
    int k=min(blockDim.y,ny-2*HALF_STENCIL-blockIdx.y*blockDim.y);
    sSigmaX[six][threadIdx.y]=curSigmaX1[j-HALF_STENCIL*nx];
    sSigmaX[six][siy+k]=curSigmaX1[j+k*nx];
   }
   __syncthreads(); // tile fully populated before any thread reads it
   // Raw second derivatives in x, y and z (divided by squared spacings only;
   // dt^2 is applied in the update so ty/tz can be reused for the gradient).
   float tx=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six-1][siy]+sSigmaX[six+1][siy])
                                 +C2*(sSigmaX[six-2][siy]+sSigmaX[six+2][siy])
                                 +C3*(sSigmaX[six-3][siy]+sSigmaX[six+3][siy])
                                 +C4*(sSigmaX[six-4][siy]+sSigmaX[six+4][siy]))/dx2;
   float ty=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six][siy-1]+sSigmaX[six][siy+1])
                                 +C2*(sSigmaX[six][siy-2]+sSigmaX[six][siy+2])
                                 +C3*(sSigmaX[six][siy-3]+sSigmaX[six][siy+3])
                                 +C4*(sSigmaX[six][siy-4]+sSigmaX[six][siy+4]))/dy2;
   ty+=tx; // ty now carries the full horizontal (x+y) contribution
   float tz=(C0*zSigmaZ[HALF_STENCIL]+C1*(zSigmaZ[HALF_STENCIL-1]+zSigmaZ[HALF_STENCIL+1])
                                     +C2*(zSigmaZ[HALF_STENCIL-2]+zSigmaZ[HALF_STENCIL+2])
                                     +C3*(zSigmaZ[HALF_STENCIL-3]+zSigmaZ[HALF_STENCIL+3])
                                     +C4*(zSigmaZ[HALF_STENCIL-4]+zSigmaZ[HALF_STENCIL+4]))/dz2;
   // Leapfrog update: sigma(t+dt) = dt^2 * weighted derivatives + 2*sigma(t) - sigma(t-dt).
   nextSigmaX[j]=dt2*(c11[j]*ty+c13[j]*tz)+2.*sSigmaX[six][siy]-prevSigmaX[j];
   nextSigmaZ[j]=dt2*(c13[j]*ty+c33[j]*tz)+2.*zSigmaZ[HALF_STENCIL]-prevSigmaZ[j];
   //imaging conditions to compute gradient
   // (zero-lag correlation of the adjoint fields with the source-field
   // derivatives; accumulated across time steps)
   gc11[j]+=curLambdaX[j]*ty;
   gc33[j]+=curLambdaZ[j]*tz;
   gc13[j]+=curLambdaX[j]*tz+curLambdaZ[j]*ty;
  }
 }
 return;
}
// Same adjoint/gradient update as gradientKernel, specialized for the topmost
// z-slab: the slices above the model top are taken as zero (the register
// queue is primed with 0 instead of reading curSigmaZ0).  See gradientKernel
// for the full description of the scheme and parameters.
__global__ void gradientKernelTopBlock(float *gc11,float *gc13,float *gc33,float *nextSigmaX,float *curSigmaX1,float *prevSigmaX,float *nextSigmaZ,float *curSigmaZ0,float *curSigmaZ1,float *curSigmaZ2,float *prevSigmaZ,float *curLambdaX,float *curLambdaZ,float *c11,float *c13,float *c33,int nx,int ny,float dx2,float dy2,float dz2,float dt2){
 // x/y stencil tile with a HALF_STENCIL halo on all four sides.
 __shared__ float sSigmaX[BLOCK_DIM+2*HALF_STENCIL][BLOCK_DIM+2*HALF_STENCIL];
 int ix=threadIdx.x+blockIdx.x*blockDim.x+HALF_STENCIL;
 int iy=threadIdx.y+blockIdx.y*blockDim.y+HALF_STENCIL;
 // NOTE(review): __syncthreads() below sits inside this guard; safe only if
 // all threads of a block take the same branch -- confirm launch config.
 if(ix<nx-HALF_STENCIL && iy<ny-HALF_STENCIL){
  int i=ix+iy*nx;
  int six=threadIdx.x+HALF_STENCIL;
  int siy=threadIdx.y+HALF_STENCIL;
  // Per-thread register queue holding the 2*HALF_STENCIL+1 z-neighbours.
  float zSigmaZ[2*HALF_STENCIL+1];
  // Top boundary: the slab above is all zeros (curSigmaZ0 is not read).
  for(int iz=0;iz<HALF_STENCIL;++iz){
   int j=i+iz*nx*ny;
   zSigmaZ[iz+1]=0.;
   zSigmaZ[iz+1+HALF_STENCIL]=curSigmaZ1[j];
  }
  for(int iz=0;iz<HALF_STENCIL;++iz){
   int j=i+iz*nx*ny;
   // Shift the queue down by one slice (hard-codes HALF_STENCIL==4, slots 0..8)
   zSigmaZ[0]=zSigmaZ[1];
   zSigmaZ[1]=zSigmaZ[2];
   zSigmaZ[2]=zSigmaZ[3];
   zSigmaZ[3]=zSigmaZ[4];
   zSigmaZ[4]=zSigmaZ[5];
   zSigmaZ[5]=zSigmaZ[6];
   zSigmaZ[6]=zSigmaZ[7];
   zSigmaZ[7]=zSigmaZ[8];
   // ... and pull the next slice in from the slab below.
   zSigmaZ[8]=curSigmaZ2[j];
   __syncthreads(); // previous iteration's tile no longer read past this point
   sSigmaX[six][siy]=curSigmaX1[j];
   // Edge threads also load the x halo columns; k clamps the tile width.
   if(threadIdx.x<HALF_STENCIL){
    int k=min(blockDim.x,nx-2*HALF_STENCIL-blockIdx.x*blockDim.x);
    sSigmaX[threadIdx.x][siy]=curSigmaX1[j-HALF_STENCIL];
    sSigmaX[six+k][siy]=curSigmaX1[j+k];
   }
   // Same for the y halo rows.
   if(threadIdx.y<HALF_STENCIL){
    int k=min(blockDim.y,ny-2*HALF_STENCIL-blockIdx.y*blockDim.y);
    sSigmaX[six][threadIdx.y]=curSigmaX1[j-HALF_STENCIL*nx];
    sSigmaX[six][siy+k]=curSigmaX1[j+k*nx];
   }
   __syncthreads(); // tile fully populated before any thread reads it
   // Raw second derivatives in x, y and z (divided by squared spacings only).
   float tx=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six-1][siy]+sSigmaX[six+1][siy])
                                 +C2*(sSigmaX[six-2][siy]+sSigmaX[six+2][siy])
                                 +C3*(sSigmaX[six-3][siy]+sSigmaX[six+3][siy])
                                 +C4*(sSigmaX[six-4][siy]+sSigmaX[six+4][siy]))/dx2;
   float ty=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six][siy-1]+sSigmaX[six][siy+1])
                                 +C2*(sSigmaX[six][siy-2]+sSigmaX[six][siy+2])
                                 +C3*(sSigmaX[six][siy-3]+sSigmaX[six][siy+3])
                                 +C4*(sSigmaX[six][siy-4]+sSigmaX[six][siy+4]))/dy2;
   ty+=tx; // ty now carries the full horizontal (x+y) contribution
   float tz=(C0*zSigmaZ[HALF_STENCIL]+C1*(zSigmaZ[HALF_STENCIL-1]+zSigmaZ[HALF_STENCIL+1])
                                     +C2*(zSigmaZ[HALF_STENCIL-2]+zSigmaZ[HALF_STENCIL+2])
                                     +C3*(zSigmaZ[HALF_STENCIL-3]+zSigmaZ[HALF_STENCIL+3])
                                     +C4*(zSigmaZ[HALF_STENCIL-4]+zSigmaZ[HALF_STENCIL+4]))/dz2;
   // Leapfrog update: sigma(t+dt) = dt^2 * weighted derivatives + 2*sigma(t) - sigma(t-dt).
   nextSigmaX[j]=dt2*(c11[j]*ty+c13[j]*tz)+2.*sSigmaX[six][siy]-prevSigmaX[j];
   nextSigmaZ[j]=dt2*(c13[j]*ty+c33[j]*tz)+2.*zSigmaZ[HALF_STENCIL]-prevSigmaZ[j];
   //imaging conditions to compute gradient
   // (zero-lag correlation of the adjoint fields with the source-field
   // derivatives; accumulated across time steps)
   gc11[j]+=curLambdaX[j]*ty;
   gc33[j]+=curLambdaZ[j]*tz;
   gc13[j]+=curLambdaX[j]*tz+curLambdaZ[j]*ty;
  }
 }
 return;
}
// Same adjoint/gradient update as gradientKernel, specialized for the bottom
// z-slab: the slices below the model bottom are taken as zero (the queue's
// incoming slot is filled with 0 instead of reading curSigmaZ2).  See
// gradientKernel for the full description of the scheme and parameters.
__global__ void gradientKernelBottomBlock(float *gc11,float *gc13,float *gc33,float *nextSigmaX,float *curSigmaX1,float *prevSigmaX,float *nextSigmaZ,float *curSigmaZ0,float *curSigmaZ1,float *curSigmaZ2,float *prevSigmaZ,float *curLambdaX,float *curLambdaZ,float *c11,float *c13,float *c33,int nx,int ny,float dx2,float dy2,float dz2,float dt2){
 // x/y stencil tile with a HALF_STENCIL halo on all four sides.
 __shared__ float sSigmaX[BLOCK_DIM+2*HALF_STENCIL][BLOCK_DIM+2*HALF_STENCIL];
 int ix=threadIdx.x+blockIdx.x*blockDim.x+HALF_STENCIL;
 int iy=threadIdx.y+blockIdx.y*blockDim.y+HALF_STENCIL;
 // NOTE(review): __syncthreads() below sits inside this guard; safe only if
 // all threads of a block take the same branch -- confirm launch config.
 if(ix<nx-HALF_STENCIL && iy<ny-HALF_STENCIL){
  int i=ix+iy*nx;
  int six=threadIdx.x+HALF_STENCIL;
  int siy=threadIdx.y+HALF_STENCIL;
  // Per-thread register queue holding the 2*HALF_STENCIL+1 z-neighbours.
  float zSigmaZ[2*HALF_STENCIL+1];
  // Prime the queue from the slab above (Z0) and the current slab (Z1).
  for(int iz=0;iz<HALF_STENCIL;++iz){
   int j=i+iz*nx*ny;
   zSigmaZ[iz+1]=curSigmaZ0[j];
   zSigmaZ[iz+1+HALF_STENCIL]=curSigmaZ1[j];
  }
  for(int iz=0;iz<HALF_STENCIL;++iz){
   int j=i+iz*nx*ny;
   // Shift the queue down by one slice (hard-codes HALF_STENCIL==4, slots 0..8)
   zSigmaZ[0]=zSigmaZ[1];
   zSigmaZ[1]=zSigmaZ[2];
   zSigmaZ[2]=zSigmaZ[3];
   zSigmaZ[3]=zSigmaZ[4];
   zSigmaZ[4]=zSigmaZ[5];
   zSigmaZ[5]=zSigmaZ[6];
   zSigmaZ[6]=zSigmaZ[7];
   zSigmaZ[7]=zSigmaZ[8];
   // Bottom boundary: slices below the model are all zeros.
   zSigmaZ[8]=0.;
   __syncthreads(); // previous iteration's tile no longer read past this point
   sSigmaX[six][siy]=curSigmaX1[j];
   // Edge threads also load the x halo columns; k clamps the tile width.
   if(threadIdx.x<HALF_STENCIL){
    int k=min(blockDim.x,nx-2*HALF_STENCIL-blockIdx.x*blockDim.x);
    sSigmaX[threadIdx.x][siy]=curSigmaX1[j-HALF_STENCIL];
    sSigmaX[six+k][siy]=curSigmaX1[j+k];
   }
   // Same for the y halo rows.
   if(threadIdx.y<HALF_STENCIL){
    int k=min(blockDim.y,ny-2*HALF_STENCIL-blockIdx.y*blockDim.y);
    sSigmaX[six][threadIdx.y]=curSigmaX1[j-HALF_STENCIL*nx];
    sSigmaX[six][siy+k]=curSigmaX1[j+k*nx];
   }
   __syncthreads(); // tile fully populated before any thread reads it
   // Raw second derivatives in x, y and z (divided by squared spacings only).
   float tx=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six-1][siy]+sSigmaX[six+1][siy])
                                 +C2*(sSigmaX[six-2][siy]+sSigmaX[six+2][siy])
                                 +C3*(sSigmaX[six-3][siy]+sSigmaX[six+3][siy])
                                 +C4*(sSigmaX[six-4][siy]+sSigmaX[six+4][siy]))/dx2;
   float ty=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six][siy-1]+sSigmaX[six][siy+1])
                                 +C2*(sSigmaX[six][siy-2]+sSigmaX[six][siy+2])
                                 +C3*(sSigmaX[six][siy-3]+sSigmaX[six][siy+3])
                                 +C4*(sSigmaX[six][siy-4]+sSigmaX[six][siy+4]))/dy2;
   ty+=tx; // ty now carries the full horizontal (x+y) contribution
   float tz=(C0*zSigmaZ[HALF_STENCIL]+C1*(zSigmaZ[HALF_STENCIL-1]+zSigmaZ[HALF_STENCIL+1])
                                     +C2*(zSigmaZ[HALF_STENCIL-2]+zSigmaZ[HALF_STENCIL+2])
                                     +C3*(zSigmaZ[HALF_STENCIL-3]+zSigmaZ[HALF_STENCIL+3])
                                     +C4*(zSigmaZ[HALF_STENCIL-4]+zSigmaZ[HALF_STENCIL+4]))/dz2;
   // Leapfrog update: sigma(t+dt) = dt^2 * weighted derivatives + 2*sigma(t) - sigma(t-dt).
   nextSigmaX[j]=dt2*(c11[j]*ty+c13[j]*tz)+2.*sSigmaX[six][siy]-prevSigmaX[j];
   nextSigmaZ[j]=dt2*(c13[j]*ty+c33[j]*tz)+2.*zSigmaZ[HALF_STENCIL]-prevSigmaZ[j];
   //imaging conditions to compute gradient
   // (zero-lag correlation of the adjoint fields with the source-field
   // derivatives; accumulated across time steps)
   gc11[j]+=curLambdaX[j]*ty;
   gc33[j]+=curLambdaZ[j]*tz;
   gc13[j]+=curLambdaX[j]*tz+curLambdaZ[j]*ty;
  }
 }
 return;
}
// Cross-correlation imaging condition: accumulates into image[] the product of
// the (2/3 sigmaX + 1/3 sigmaZ)-weighted source field and the identically
// weighted adjoint (lambda) field, for the HALF_STENCIL z-slices this launch
// covers.  Same 2D interior launch layout as the stencil kernels.
__global__ void imagingKernel(float *image,float *curSigmaX,float *curSigmaZ,float *curLambdaX,float *curLambdaZ,int nx,int ny){
 int gx=threadIdx.x+blockIdx.x*blockDim.x+HALF_STENCIL;
 int gy=threadIdx.y+blockIdx.y*blockDim.y+HALF_STENCIL;
 if(gx>=nx-HALF_STENCIL || gy>=ny-HALF_STENCIL) return;
 int base=gx+gy*nx;
 for(int slice=0;slice<HALF_STENCIL;++slice){
  int idx=base+slice*nx*ny;
  float src=TWOTHIRD*curSigmaX[idx]+ONETHIRD*curSigmaZ[idx];
  float adj=TWOTHIRD*curLambdaX[idx]+ONETHIRD*curLambdaZ[idx];
  image[idx]+=src*adj;
 }
 return;
}
| 085160a55e866a534980a5afc85d18f1b72034be.cu | #include "wave3d.h"
// Advances the coupled stress wavefields sigmaX/sigmaZ of an elastic wave
// equation (VTI-style stiffnesses c11/c13/c33) by one leapfrog time step,
// using an 8-point-per-axis spatial stencil with coefficients C0..C4.
// Each launch processes one HALF_STENCIL-thick slab of z-slices; curSigmaZ0 /
// curSigmaZ1 / curSigmaZ2 appear to hold the slab above, the current slab and
// the slab below (NOTE(review): inferred from the register-queue priming --
// confirm against the host-side slab bookkeeping).  dt2dx2/dt2dy2/dt2dz2 are
// the precomputed factors dt^2/dx^2, dt^2/dy^2 and dt^2/dz^2.
__global__ void forwardKernel(float *nextSigmaX,float *curSigmaX1,float *prevSigmaX,float *nextSigmaZ,float *curSigmaZ0,float *curSigmaZ1,float *curSigmaZ2,float *prevSigmaZ,float *c11,float *c13,float *c33,int nx,int ny,float dt2dx2,float dt2dy2,float dt2dz2){
 // x/y stencil tile staged in shared memory, with a HALF_STENCIL halo on all
 // four sides.
 __shared__ float sSigmaX[BLOCK_DIM+2*HALF_STENCIL][BLOCK_DIM+2*HALF_STENCIL];
 int ix=threadIdx.x+blockIdx.x*blockDim.x+HALF_STENCIL;
 int iy=threadIdx.y+blockIdx.y*blockDim.y+HALF_STENCIL;
 // NOTE(review): the __syncthreads() calls below sit inside this guard; the
 // barriers are only safe if every thread of a block takes the same branch --
 // confirm the launch configuration guarantees this.
 if(ix<nx-HALF_STENCIL && iy<ny-HALF_STENCIL){
  int i=ix+iy*nx;
  int six=threadIdx.x+HALF_STENCIL;
  int siy=threadIdx.y+HALF_STENCIL;
  // Per-thread register queue holding the 2*HALF_STENCIL+1 z-neighbours.
  float zSigmaZ[2*HALF_STENCIL+1];
  // Prime the queue with slices from the slab above (Z0) and the current
  // slab (Z1); slot 0 is filled by the first rotation below.
  for(int iz=0;iz<HALF_STENCIL;++iz){
   int j=i+iz*nx*ny;
   zSigmaZ[iz+1]=curSigmaZ0[j];
   zSigmaZ[iz+1+HALF_STENCIL]=curSigmaZ1[j];
  }
  for(int iz=0;iz<HALF_STENCIL;++iz){
   int j=i+iz*nx*ny;
   // Shift the queue down by one slice (hard-codes HALF_STENCIL==4, slots 0..8)
   zSigmaZ[0]=zSigmaZ[1];
   zSigmaZ[1]=zSigmaZ[2];
   zSigmaZ[2]=zSigmaZ[3];
   zSigmaZ[3]=zSigmaZ[4];
   zSigmaZ[4]=zSigmaZ[5];
   zSigmaZ[5]=zSigmaZ[6];
   zSigmaZ[6]=zSigmaZ[7];
   zSigmaZ[7]=zSigmaZ[8];
   // ... and pull the next slice in from the slab below.
   zSigmaZ[8]=curSigmaZ2[j];
   __syncthreads(); // previous iteration's tile no longer read past this point
   sSigmaX[six][siy]=curSigmaX1[j];
   // Edge threads also load the x halo columns; k clamps the effective tile
   // width at the right edge of the grid.
   if(threadIdx.x<HALF_STENCIL){
    int k=min(blockDim.x,nx-2*HALF_STENCIL-blockIdx.x*blockDim.x);
    sSigmaX[threadIdx.x][siy]=curSigmaX1[j-HALF_STENCIL];
    sSigmaX[six+k][siy]=curSigmaX1[j+k];
   }
   // Same for the y halo rows.
   if(threadIdx.y<HALF_STENCIL){
    int k=min(blockDim.y,ny-2*HALF_STENCIL-blockIdx.y*blockDim.y);
    sSigmaX[six][threadIdx.y]=curSigmaX1[j-HALF_STENCIL*nx];
    sSigmaX[six][siy+k]=curSigmaX1[j+k*nx];
   }
   __syncthreads(); // tile fully populated before any thread reads it
   // Second x-derivative of sigmaX, pre-scaled by dt^2/dx^2.
   float tx=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six-1][siy]+sSigmaX[six+1][siy])
                                 +C2*(sSigmaX[six-2][siy]+sSigmaX[six+2][siy])
                                 +C3*(sSigmaX[six-3][siy]+sSigmaX[six+3][siy])
                                 +C4*(sSigmaX[six-4][siy]+sSigmaX[six+4][siy]))*dt2dx2;
   // Second y-derivative, pre-scaled by dt^2/dy^2.
   float ty=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six][siy-1]+sSigmaX[six][siy+1])
                                 +C2*(sSigmaX[six][siy-2]+sSigmaX[six][siy+2])
                                 +C3*(sSigmaX[six][siy-3]+sSigmaX[six][siy+3])
                                 +C4*(sSigmaX[six][siy-4]+sSigmaX[six][siy+4]))*dt2dy2;
   ty+=tx; // ty now carries the full horizontal (x+y) contribution
   // Second z-derivative from the register queue, pre-scaled by dt^2/dz^2.
   float tz=(C0*zSigmaZ[HALF_STENCIL]+C1*(zSigmaZ[HALF_STENCIL-1]+zSigmaZ[HALF_STENCIL+1])
                                     +C2*(zSigmaZ[HALF_STENCIL-2]+zSigmaZ[HALF_STENCIL+2])
                                     +C3*(zSigmaZ[HALF_STENCIL-3]+zSigmaZ[HALF_STENCIL+3])
                                     +C4*(zSigmaZ[HALF_STENCIL-4]+zSigmaZ[HALF_STENCIL+4]))*dt2dz2;
   // Leapfrog update: sigma(t+dt) = weighted derivatives + 2*sigma(t) - sigma(t-dt).
   nextSigmaX[j]=c11[j]*ty+c13[j]*tz+2.*sSigmaX[six][siy]-prevSigmaX[j];
   nextSigmaZ[j]=c13[j]*ty+c33[j]*tz+2.*zSigmaZ[HALF_STENCIL]-prevSigmaZ[j];
  }
 }
 return;
}
// Same leapfrog stencil update as forwardKernel, specialized for the topmost
// z-slab: the slices above the model top are taken as zero (the register
// queue is primed with 0 instead of reading curSigmaZ0).  See forwardKernel
// for the full description of the scheme and parameters.
__global__ void forwardKernelTopBlock(float *nextSigmaX,float *curSigmaX1,float *prevSigmaX,float *nextSigmaZ,float *curSigmaZ0,float *curSigmaZ1,float *curSigmaZ2,float *prevSigmaZ,float *c11,float *c13,float *c33,int nx,int ny,float dt2dx2,float dt2dy2,float dt2dz2){
 // x/y stencil tile with a HALF_STENCIL halo on all four sides.
 __shared__ float sSigmaX[BLOCK_DIM+2*HALF_STENCIL][BLOCK_DIM+2*HALF_STENCIL];
 int ix=threadIdx.x+blockIdx.x*blockDim.x+HALF_STENCIL;
 int iy=threadIdx.y+blockIdx.y*blockDim.y+HALF_STENCIL;
 // NOTE(review): __syncthreads() below sits inside this guard; safe only if
 // all threads of a block take the same branch -- confirm launch config.
 if(ix<nx-HALF_STENCIL && iy<ny-HALF_STENCIL){
  int i=ix+iy*nx;
  int six=threadIdx.x+HALF_STENCIL;
  int siy=threadIdx.y+HALF_STENCIL;
  // Per-thread register queue holding the 2*HALF_STENCIL+1 z-neighbours.
  float zSigmaZ[2*HALF_STENCIL+1];
  // Top boundary: the slab above is all zeros (curSigmaZ0 is not read).
  for(int iz=0;iz<HALF_STENCIL;++iz){
   int j=i+iz*nx*ny;
   zSigmaZ[iz+1]=0.;
   zSigmaZ[iz+1+HALF_STENCIL]=curSigmaZ1[j];
  }
  for(int iz=0;iz<HALF_STENCIL;++iz){
   int j=i+iz*nx*ny;
   // Shift the queue down by one slice (hard-codes HALF_STENCIL==4, slots 0..8)
   zSigmaZ[0]=zSigmaZ[1];
   zSigmaZ[1]=zSigmaZ[2];
   zSigmaZ[2]=zSigmaZ[3];
   zSigmaZ[3]=zSigmaZ[4];
   zSigmaZ[4]=zSigmaZ[5];
   zSigmaZ[5]=zSigmaZ[6];
   zSigmaZ[6]=zSigmaZ[7];
   zSigmaZ[7]=zSigmaZ[8];
   // ... and pull the next slice in from the slab below.
   zSigmaZ[8]=curSigmaZ2[j];
   __syncthreads(); // previous iteration's tile no longer read past this point
   sSigmaX[six][siy]=curSigmaX1[j];
   // Edge threads also load the x halo columns; k clamps the tile width.
   if(threadIdx.x<HALF_STENCIL){
    int k=min(blockDim.x,nx-2*HALF_STENCIL-blockIdx.x*blockDim.x);
    sSigmaX[threadIdx.x][siy]=curSigmaX1[j-HALF_STENCIL];
    sSigmaX[six+k][siy]=curSigmaX1[j+k];
   }
   // Same for the y halo rows.
   if(threadIdx.y<HALF_STENCIL){
    int k=min(blockDim.y,ny-2*HALF_STENCIL-blockIdx.y*blockDim.y);
    sSigmaX[six][threadIdx.y]=curSigmaX1[j-HALF_STENCIL*nx];
    sSigmaX[six][siy+k]=curSigmaX1[j+k*nx];
   }
   __syncthreads(); // tile fully populated before any thread reads it
   // Second derivatives in x, y and z, each pre-scaled by dt^2/h^2.
   float tx=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six-1][siy]+sSigmaX[six+1][siy])
                                 +C2*(sSigmaX[six-2][siy]+sSigmaX[six+2][siy])
                                 +C3*(sSigmaX[six-3][siy]+sSigmaX[six+3][siy])
                                 +C4*(sSigmaX[six-4][siy]+sSigmaX[six+4][siy]))*dt2dx2;
   float ty=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six][siy-1]+sSigmaX[six][siy+1])
                                 +C2*(sSigmaX[six][siy-2]+sSigmaX[six][siy+2])
                                 +C3*(sSigmaX[six][siy-3]+sSigmaX[six][siy+3])
                                 +C4*(sSigmaX[six][siy-4]+sSigmaX[six][siy+4]))*dt2dy2;
   ty+=tx; // ty now carries the full horizontal (x+y) contribution
   float tz=(C0*zSigmaZ[HALF_STENCIL]+C1*(zSigmaZ[HALF_STENCIL-1]+zSigmaZ[HALF_STENCIL+1])
                                     +C2*(zSigmaZ[HALF_STENCIL-2]+zSigmaZ[HALF_STENCIL+2])
                                     +C3*(zSigmaZ[HALF_STENCIL-3]+zSigmaZ[HALF_STENCIL+3])
                                     +C4*(zSigmaZ[HALF_STENCIL-4]+zSigmaZ[HALF_STENCIL+4]))*dt2dz2;
   // Leapfrog update: sigma(t+dt) = weighted derivatives + 2*sigma(t) - sigma(t-dt).
   nextSigmaX[j]=c11[j]*ty+c13[j]*tz+2.*sSigmaX[six][siy]-prevSigmaX[j];
   nextSigmaZ[j]=c13[j]*ty+c33[j]*tz+2.*zSigmaZ[HALF_STENCIL]-prevSigmaZ[j];
  }
 }
 return;
}
// Same leapfrog stencil update as forwardKernel, specialized for the bottom
// z-slab: the slices below the model bottom are taken as zero (the queue's
// incoming slot is filled with 0 instead of reading curSigmaZ2).  See
// forwardKernel for the full description of the scheme and parameters.
__global__ void forwardKernelBottomBlock(float *nextSigmaX,float *curSigmaX1,float *prevSigmaX,float *nextSigmaZ,float *curSigmaZ0,float *curSigmaZ1,float *curSigmaZ2,float *prevSigmaZ,float *c11,float *c13,float *c33,int nx,int ny,float dt2dx2,float dt2dy2,float dt2dz2){
 // x/y stencil tile with a HALF_STENCIL halo on all four sides.
 __shared__ float sSigmaX[BLOCK_DIM+2*HALF_STENCIL][BLOCK_DIM+2*HALF_STENCIL];
 int ix=threadIdx.x+blockIdx.x*blockDim.x+HALF_STENCIL;
 int iy=threadIdx.y+blockIdx.y*blockDim.y+HALF_STENCIL;
 // NOTE(review): __syncthreads() below sits inside this guard; safe only if
 // all threads of a block take the same branch -- confirm launch config.
 if(ix<nx-HALF_STENCIL && iy<ny-HALF_STENCIL){
  int i=ix+iy*nx;
  int six=threadIdx.x+HALF_STENCIL;
  int siy=threadIdx.y+HALF_STENCIL;
  // Per-thread register queue holding the 2*HALF_STENCIL+1 z-neighbours.
  float zSigmaZ[2*HALF_STENCIL+1];
  // Prime the queue from the slab above (Z0) and the current slab (Z1).
  for(int iz=0;iz<HALF_STENCIL;++iz){
   int j=i+iz*nx*ny;
   zSigmaZ[iz+1]=curSigmaZ0[j];
   zSigmaZ[iz+1+HALF_STENCIL]=curSigmaZ1[j];
  }
  for(int iz=0;iz<HALF_STENCIL;++iz){
   int j=i+iz*nx*ny;
   // Shift the queue down by one slice (hard-codes HALF_STENCIL==4, slots 0..8)
   zSigmaZ[0]=zSigmaZ[1];
   zSigmaZ[1]=zSigmaZ[2];
   zSigmaZ[2]=zSigmaZ[3];
   zSigmaZ[3]=zSigmaZ[4];
   zSigmaZ[4]=zSigmaZ[5];
   zSigmaZ[5]=zSigmaZ[6];
   zSigmaZ[6]=zSigmaZ[7];
   zSigmaZ[7]=zSigmaZ[8];
   // Bottom boundary: slices below the model are all zeros.
   zSigmaZ[8]=0.;
   __syncthreads(); // previous iteration's tile no longer read past this point
   sSigmaX[six][siy]=curSigmaX1[j];
   // Edge threads also load the x halo columns; k clamps the tile width.
   if(threadIdx.x<HALF_STENCIL){
    int k=min(blockDim.x,nx-2*HALF_STENCIL-blockIdx.x*blockDim.x);
    sSigmaX[threadIdx.x][siy]=curSigmaX1[j-HALF_STENCIL];
    sSigmaX[six+k][siy]=curSigmaX1[j+k];
   }
   // Same for the y halo rows.
   if(threadIdx.y<HALF_STENCIL){
    int k=min(blockDim.y,ny-2*HALF_STENCIL-blockIdx.y*blockDim.y);
    sSigmaX[six][threadIdx.y]=curSigmaX1[j-HALF_STENCIL*nx];
    sSigmaX[six][siy+k]=curSigmaX1[j+k*nx];
   }
   __syncthreads(); // tile fully populated before any thread reads it
   // Second derivatives in x, y and z, each pre-scaled by dt^2/h^2.
   float tx=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six-1][siy]+sSigmaX[six+1][siy])
                                 +C2*(sSigmaX[six-2][siy]+sSigmaX[six+2][siy])
                                 +C3*(sSigmaX[six-3][siy]+sSigmaX[six+3][siy])
                                 +C4*(sSigmaX[six-4][siy]+sSigmaX[six+4][siy]))*dt2dx2;
   float ty=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six][siy-1]+sSigmaX[six][siy+1])
                                 +C2*(sSigmaX[six][siy-2]+sSigmaX[six][siy+2])
                                 +C3*(sSigmaX[six][siy-3]+sSigmaX[six][siy+3])
                                 +C4*(sSigmaX[six][siy-4]+sSigmaX[six][siy+4]))*dt2dy2;
   ty+=tx; // ty now carries the full horizontal (x+y) contribution
   float tz=(C0*zSigmaZ[HALF_STENCIL]+C1*(zSigmaZ[HALF_STENCIL-1]+zSigmaZ[HALF_STENCIL+1])
                                     +C2*(zSigmaZ[HALF_STENCIL-2]+zSigmaZ[HALF_STENCIL+2])
                                     +C3*(zSigmaZ[HALF_STENCIL-3]+zSigmaZ[HALF_STENCIL+3])
                                     +C4*(zSigmaZ[HALF_STENCIL-4]+zSigmaZ[HALF_STENCIL+4]))*dt2dz2;
   // Leapfrog update: sigma(t+dt) = weighted derivatives + 2*sigma(t) - sigma(t-dt).
   nextSigmaX[j]=c11[j]*ty+c13[j]*tz+2.*sSigmaX[six][siy]-prevSigmaX[j];
   nextSigmaZ[j]=c13[j]*ty+c33[j]*tz+2.*zSigmaZ[HALF_STENCIL]-prevSigmaZ[j];
  }
 }
 return;
}
// Backward/adjoint pass: re-propagates the source wavefield one step (same
// stencil scheme as forwardKernel, but with raw second derivatives divided by
// dx2/dy2/dz2 -- presumably the squared grid spacings -- and the time factor
// dt2 = dt^2 applied at the update) and simultaneously accumulates the
// gradient of the objective w.r.t. the stiffnesses c11/c13/c33 into
// gc11/gc13/gc33 by zero-lag correlation with the adjoint wavefields
// curLambdaX/curLambdaZ.
__global__ void gradientKernel(float *gc11,float *gc13,float *gc33,float *nextSigmaX,float *curSigmaX1,float *prevSigmaX,float *nextSigmaZ,float *curSigmaZ0,float *curSigmaZ1,float *curSigmaZ2,float *prevSigmaZ,float *curLambdaX,float *curLambdaZ,float *c11,float *c13,float *c33,int nx,int ny,float dx2,float dy2,float dz2,float dt2){
 // x/y stencil tile with a HALF_STENCIL halo on all four sides.
 __shared__ float sSigmaX[BLOCK_DIM+2*HALF_STENCIL][BLOCK_DIM+2*HALF_STENCIL];
 int ix=threadIdx.x+blockIdx.x*blockDim.x+HALF_STENCIL;
 int iy=threadIdx.y+blockIdx.y*blockDim.y+HALF_STENCIL;
 // NOTE(review): __syncthreads() below sits inside this guard; safe only if
 // all threads of a block take the same branch -- confirm launch config.
 if(ix<nx-HALF_STENCIL && iy<ny-HALF_STENCIL){
  int i=ix+iy*nx;
  int six=threadIdx.x+HALF_STENCIL;
  int siy=threadIdx.y+HALF_STENCIL;
  // Per-thread register queue holding the 2*HALF_STENCIL+1 z-neighbours.
  float zSigmaZ[2*HALF_STENCIL+1];
  // Prime the queue from the slab above (Z0) and the current slab (Z1).
  for(int iz=0;iz<HALF_STENCIL;++iz){
   int j=i+iz*nx*ny;
   zSigmaZ[iz+1]=curSigmaZ0[j];
   zSigmaZ[iz+1+HALF_STENCIL]=curSigmaZ1[j];
  }
  for(int iz=0;iz<HALF_STENCIL;++iz){
   int j=i+iz*nx*ny;
   // Shift the queue down by one slice (hard-codes HALF_STENCIL==4, slots 0..8)
   zSigmaZ[0]=zSigmaZ[1];
   zSigmaZ[1]=zSigmaZ[2];
   zSigmaZ[2]=zSigmaZ[3];
   zSigmaZ[3]=zSigmaZ[4];
   zSigmaZ[4]=zSigmaZ[5];
   zSigmaZ[5]=zSigmaZ[6];
   zSigmaZ[6]=zSigmaZ[7];
   zSigmaZ[7]=zSigmaZ[8];
   // ... and pull the next slice in from the slab below.
   zSigmaZ[8]=curSigmaZ2[j];
   __syncthreads(); // previous iteration's tile no longer read past this point
   sSigmaX[six][siy]=curSigmaX1[j];
   // Edge threads also load the x halo columns; k clamps the tile width.
   if(threadIdx.x<HALF_STENCIL){
    int k=min(blockDim.x,nx-2*HALF_STENCIL-blockIdx.x*blockDim.x);
    sSigmaX[threadIdx.x][siy]=curSigmaX1[j-HALF_STENCIL];
    sSigmaX[six+k][siy]=curSigmaX1[j+k];
   }
   // Same for the y halo rows.
   if(threadIdx.y<HALF_STENCIL){
    int k=min(blockDim.y,ny-2*HALF_STENCIL-blockIdx.y*blockDim.y);
    sSigmaX[six][threadIdx.y]=curSigmaX1[j-HALF_STENCIL*nx];
    sSigmaX[six][siy+k]=curSigmaX1[j+k*nx];
   }
   __syncthreads(); // tile fully populated before any thread reads it
   // Raw second derivatives in x, y and z (divided by squared spacings only;
   // dt^2 is applied in the update so ty/tz can be reused for the gradient).
   float tx=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six-1][siy]+sSigmaX[six+1][siy])
                                 +C2*(sSigmaX[six-2][siy]+sSigmaX[six+2][siy])
                                 +C3*(sSigmaX[six-3][siy]+sSigmaX[six+3][siy])
                                 +C4*(sSigmaX[six-4][siy]+sSigmaX[six+4][siy]))/dx2;
   float ty=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six][siy-1]+sSigmaX[six][siy+1])
                                 +C2*(sSigmaX[six][siy-2]+sSigmaX[six][siy+2])
                                 +C3*(sSigmaX[six][siy-3]+sSigmaX[six][siy+3])
                                 +C4*(sSigmaX[six][siy-4]+sSigmaX[six][siy+4]))/dy2;
   ty+=tx; // ty now carries the full horizontal (x+y) contribution
   float tz=(C0*zSigmaZ[HALF_STENCIL]+C1*(zSigmaZ[HALF_STENCIL-1]+zSigmaZ[HALF_STENCIL+1])
                                     +C2*(zSigmaZ[HALF_STENCIL-2]+zSigmaZ[HALF_STENCIL+2])
                                     +C3*(zSigmaZ[HALF_STENCIL-3]+zSigmaZ[HALF_STENCIL+3])
                                     +C4*(zSigmaZ[HALF_STENCIL-4]+zSigmaZ[HALF_STENCIL+4]))/dz2;
   // Leapfrog update: sigma(t+dt) = dt^2 * weighted derivatives + 2*sigma(t) - sigma(t-dt).
   nextSigmaX[j]=dt2*(c11[j]*ty+c13[j]*tz)+2.*sSigmaX[six][siy]-prevSigmaX[j];
   nextSigmaZ[j]=dt2*(c13[j]*ty+c33[j]*tz)+2.*zSigmaZ[HALF_STENCIL]-prevSigmaZ[j];
   //imaging conditions to compute gradient
   // (zero-lag correlation of the adjoint fields with the source-field
   // derivatives; accumulated across time steps)
   gc11[j]+=curLambdaX[j]*ty;
   gc33[j]+=curLambdaZ[j]*tz;
   gc13[j]+=curLambdaX[j]*tz+curLambdaZ[j]*ty;
  }
 }
 return;
}
// Same adjoint/gradient update as gradientKernel, specialized for the topmost
// z-slab: the slices above the model top are taken as zero (the register
// queue is primed with 0 instead of reading curSigmaZ0).  See gradientKernel
// for the full description of the scheme and parameters.
__global__ void gradientKernelTopBlock(float *gc11,float *gc13,float *gc33,float *nextSigmaX,float *curSigmaX1,float *prevSigmaX,float *nextSigmaZ,float *curSigmaZ0,float *curSigmaZ1,float *curSigmaZ2,float *prevSigmaZ,float *curLambdaX,float *curLambdaZ,float *c11,float *c13,float *c33,int nx,int ny,float dx2,float dy2,float dz2,float dt2){
 // x/y stencil tile with a HALF_STENCIL halo on all four sides.
 __shared__ float sSigmaX[BLOCK_DIM+2*HALF_STENCIL][BLOCK_DIM+2*HALF_STENCIL];
 int ix=threadIdx.x+blockIdx.x*blockDim.x+HALF_STENCIL;
 int iy=threadIdx.y+blockIdx.y*blockDim.y+HALF_STENCIL;
 // NOTE(review): __syncthreads() below sits inside this guard; safe only if
 // all threads of a block take the same branch -- confirm launch config.
 if(ix<nx-HALF_STENCIL && iy<ny-HALF_STENCIL){
  int i=ix+iy*nx;
  int six=threadIdx.x+HALF_STENCIL;
  int siy=threadIdx.y+HALF_STENCIL;
  // Per-thread register queue holding the 2*HALF_STENCIL+1 z-neighbours.
  float zSigmaZ[2*HALF_STENCIL+1];
  // Top boundary: the slab above is all zeros (curSigmaZ0 is not read).
  for(int iz=0;iz<HALF_STENCIL;++iz){
   int j=i+iz*nx*ny;
   zSigmaZ[iz+1]=0.;
   zSigmaZ[iz+1+HALF_STENCIL]=curSigmaZ1[j];
  }
  for(int iz=0;iz<HALF_STENCIL;++iz){
   int j=i+iz*nx*ny;
   // Shift the queue down by one slice (hard-codes HALF_STENCIL==4, slots 0..8)
   zSigmaZ[0]=zSigmaZ[1];
   zSigmaZ[1]=zSigmaZ[2];
   zSigmaZ[2]=zSigmaZ[3];
   zSigmaZ[3]=zSigmaZ[4];
   zSigmaZ[4]=zSigmaZ[5];
   zSigmaZ[5]=zSigmaZ[6];
   zSigmaZ[6]=zSigmaZ[7];
   zSigmaZ[7]=zSigmaZ[8];
   // ... and pull the next slice in from the slab below.
   zSigmaZ[8]=curSigmaZ2[j];
   __syncthreads(); // previous iteration's tile no longer read past this point
   sSigmaX[six][siy]=curSigmaX1[j];
   // Edge threads also load the x halo columns; k clamps the tile width.
   if(threadIdx.x<HALF_STENCIL){
    int k=min(blockDim.x,nx-2*HALF_STENCIL-blockIdx.x*blockDim.x);
    sSigmaX[threadIdx.x][siy]=curSigmaX1[j-HALF_STENCIL];
    sSigmaX[six+k][siy]=curSigmaX1[j+k];
   }
   // Same for the y halo rows.
   if(threadIdx.y<HALF_STENCIL){
    int k=min(blockDim.y,ny-2*HALF_STENCIL-blockIdx.y*blockDim.y);
    sSigmaX[six][threadIdx.y]=curSigmaX1[j-HALF_STENCIL*nx];
    sSigmaX[six][siy+k]=curSigmaX1[j+k*nx];
   }
   __syncthreads(); // tile fully populated before any thread reads it
   // Raw second derivatives in x, y and z (divided by squared spacings only).
   float tx=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six-1][siy]+sSigmaX[six+1][siy])
                                 +C2*(sSigmaX[six-2][siy]+sSigmaX[six+2][siy])
                                 +C3*(sSigmaX[six-3][siy]+sSigmaX[six+3][siy])
                                 +C4*(sSigmaX[six-4][siy]+sSigmaX[six+4][siy]))/dx2;
   float ty=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six][siy-1]+sSigmaX[six][siy+1])
                                 +C2*(sSigmaX[six][siy-2]+sSigmaX[six][siy+2])
                                 +C3*(sSigmaX[six][siy-3]+sSigmaX[six][siy+3])
                                 +C4*(sSigmaX[six][siy-4]+sSigmaX[six][siy+4]))/dy2;
   ty+=tx; // ty now carries the full horizontal (x+y) contribution
   float tz=(C0*zSigmaZ[HALF_STENCIL]+C1*(zSigmaZ[HALF_STENCIL-1]+zSigmaZ[HALF_STENCIL+1])
                                     +C2*(zSigmaZ[HALF_STENCIL-2]+zSigmaZ[HALF_STENCIL+2])
                                     +C3*(zSigmaZ[HALF_STENCIL-3]+zSigmaZ[HALF_STENCIL+3])
                                     +C4*(zSigmaZ[HALF_STENCIL-4]+zSigmaZ[HALF_STENCIL+4]))/dz2;
   // Leapfrog update: sigma(t+dt) = dt^2 * weighted derivatives + 2*sigma(t) - sigma(t-dt).
   nextSigmaX[j]=dt2*(c11[j]*ty+c13[j]*tz)+2.*sSigmaX[six][siy]-prevSigmaX[j];
   nextSigmaZ[j]=dt2*(c13[j]*ty+c33[j]*tz)+2.*zSigmaZ[HALF_STENCIL]-prevSigmaZ[j];
   //imaging conditions to compute gradient
   // (zero-lag correlation of the adjoint fields with the source-field
   // derivatives; accumulated across time steps)
   gc11[j]+=curLambdaX[j]*ty;
   gc33[j]+=curLambdaZ[j]*tz;
   gc13[j]+=curLambdaX[j]*tz+curLambdaZ[j]*ty;
  }
 }
 return;
}
// Back-propagates the stress wavefields (sigmaX, sigmaZ) one time step for the
// BOTTOM z-block of a z-decomposed grid and accumulates gradient imaging
// conditions for c11, c13, c33. Mirror image of gradientKernelTopBlock: the
// z-stencil is zero-padded BELOW the last depth slice, so only curSigmaZ0
// (the block above) and curSigmaZ1 (this block) are read; curSigmaZ2 is
// unused by this kernel.
// Launch layout: 2D blocks of up to BLOCK_DIM x BLOCK_DIM threads tiling the
// interior x/y points; each thread marches over this block's HALF_STENCIL
// z-slices. Shared memory holds one x/y slice of sigmaX with halos.
__global__ void gradientKernelBottomBlock(float *gc11,float *gc13,float *gc33,float *nextSigmaX,float *curSigmaX1,float *prevSigmaX,float *nextSigmaZ,float *curSigmaZ0,float *curSigmaZ1,float *curSigmaZ2,float *prevSigmaZ,float *curLambdaX,float *curLambdaZ,float *c11,float *c13,float *c33,int nx,int ny,float dx2,float dy2,float dz2,float dt2){
// Shared tile of the current sigmaX slice with a HALF_STENCIL halo on all sides.
__shared__ float sSigmaX[BLOCK_DIM+2*HALF_STENCIL][BLOCK_DIM+2*HALF_STENCIL];
// Global x/y indices, offset past the HALF_STENCIL boundary layer.
int ix=threadIdx.x+blockIdx.x*blockDim.x+HALF_STENCIL;
int iy=threadIdx.y+blockIdx.y*blockDim.y+HALF_STENCIL;
// NOTE(review): the __syncthreads() calls below execute inside this guard; a
// partially-covered block would make the barrier divergent (undefined
// behavior). Confirm the launch configuration avoids partial tiles.
if(ix<nx-HALF_STENCIL && iy<ny-HALF_STENCIL){
int i=ix+iy*nx;
// This thread's cell inside the shared tile (past the halo).
int six=threadIdx.x+HALF_STENCIL;
int siy=threadIdx.y+HALF_STENCIL;
// Per-thread sliding window of sigmaZ along z (2*HALF_STENCIL+1 points).
float zSigmaZ[2*HALF_STENCIL+1];
// Prime the window: trailing slices of the block above, then this block's
// slices. (Index 0 is left unset; the shift below overwrites it first.)
for(int iz=0;iz<HALF_STENCIL;++iz){
int j=i+iz*nx*ny;
zSigmaZ[iz+1]=curSigmaZ0[j];
zSigmaZ[iz+1+HALF_STENCIL]=curSigmaZ1[j];
}
// March through this block's HALF_STENCIL z-slices.
for(int iz=0;iz<HALF_STENCIL;++iz){
int j=i+iz*nx*ny;
// Shift the z-window down by one slice (indices assume HALF_STENCIL == 4)...
zSigmaZ[0]=zSigmaZ[1];
zSigmaZ[1]=zSigmaZ[2];
zSigmaZ[2]=zSigmaZ[3];
zSigmaZ[3]=zSigmaZ[4];
zSigmaZ[4]=zSigmaZ[5];
zSigmaZ[5]=zSigmaZ[6];
zSigmaZ[6]=zSigmaZ[7];
zSigmaZ[7]=zSigmaZ[8];
// ...and zero-pad below the bottom boundary.
zSigmaZ[8]=0.;
__syncthreads();
// Stage the current sigmaX slice and its x/y halos into shared memory.
sSigmaX[six][siy]=curSigmaX1[j];
if(threadIdx.x<HALF_STENCIL){
// k clamps the far-halo copy when the tile is cut short at the x edge.
int k=min(blockDim.x,nx-2*HALF_STENCIL-blockIdx.x*blockDim.x);
sSigmaX[threadIdx.x][siy]=curSigmaX1[j-HALF_STENCIL];
sSigmaX[six+k][siy]=curSigmaX1[j+k];
}
if(threadIdx.y<HALF_STENCIL){
// Same clamp for the y edge.
int k=min(blockDim.y,ny-2*HALF_STENCIL-blockIdx.y*blockDim.y);
sSigmaX[six][threadIdx.y]=curSigmaX1[j-HALF_STENCIL*nx];
sSigmaX[six][siy+k]=curSigmaX1[j+k*nx];
}
__syncthreads();
// 8th-order centered second derivative of sigmaX in x.
float tx=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six-1][siy]+sSigmaX[six+1][siy])
+C2*(sSigmaX[six-2][siy]+sSigmaX[six+2][siy])
+C3*(sSigmaX[six-3][siy]+sSigmaX[six+3][siy])
+C4*(sSigmaX[six-4][siy]+sSigmaX[six+4][siy]))/dx2;
// 8th-order centered second derivative of sigmaX in y.
float ty=(C0*sSigmaX[six][siy]+C1*(sSigmaX[six][siy-1]+sSigmaX[six][siy+1])
+C2*(sSigmaX[six][siy-2]+sSigmaX[six][siy+2])
+C3*(sSigmaX[six][siy-3]+sSigmaX[six][siy+3])
+C4*(sSigmaX[six][siy-4]+sSigmaX[six][siy+4]))/dy2;
// ty now carries the combined x+y contribution.
ty+=tx;
// 8th-order second derivative of sigmaZ in z from the register window.
float tz=(C0*zSigmaZ[HALF_STENCIL]+C1*(zSigmaZ[HALF_STENCIL-1]+zSigmaZ[HALF_STENCIL+1])
+C2*(zSigmaZ[HALF_STENCIL-2]+zSigmaZ[HALF_STENCIL+2])
+C3*(zSigmaZ[HALF_STENCIL-3]+zSigmaZ[HALF_STENCIL+3])
+C4*(zSigmaZ[HALF_STENCIL-4]+zSigmaZ[HALF_STENCIL+4]))/dz2;
// Second-order leapfrog time update of both stress fields (coupled via c13).
// NOTE(review): the bare `2.` literal promotes these expressions to double.
nextSigmaX[j]=dt2*(c11[j]*ty+c13[j]*tz)+2.*sSigmaX[six][siy]-prevSigmaX[j];
nextSigmaZ[j]=dt2*(c13[j]*ty+c33[j]*tz)+2.*zSigmaZ[HALF_STENCIL]-prevSigmaZ[j];
//imaging conditions to compute gradient
// Cross-correlate the adjoint (lambda) fields with the spatial derivatives.
gc11[j]+=curLambdaX[j]*ty;
gc33[j]+=curLambdaZ[j]*tz;
gc13[j]+=curLambdaX[j]*tz+curLambdaZ[j]*ty;
}
}
return;
}
// Cross-correlation imaging condition: for each interior (x,y) point and each
// of this block's HALF_STENCIL z-slices, accumulate the product of the
// weighted stress combinations of the forward (sigma) and adjoint (lambda)
// wavefields into the image volume.
// Launch layout mirrors the gradient kernels: 2D blocks tile the interior
// x/y points; one thread handles one (x,y) column of HALF_STENCIL slices.
__global__ void imagingKernel(float *image,float *curSigmaX,float *curSigmaZ,float *curLambdaX,float *curLambdaZ,int nx,int ny){
int gx=blockIdx.x*blockDim.x+threadIdx.x+HALF_STENCIL;
int gy=blockIdx.y*blockDim.y+threadIdx.y+HALF_STENCIL;
// Skip threads that fall outside the interior region (grid tail).
if(gx>=nx-HALF_STENCIL || gy>=ny-HALF_STENCIL) return;
const int slice=nx*ny; // elements per z-slice
int idx=gx+gy*nx;      // flat index into the first z-slice of this block
for(int iz=0;iz<HALF_STENCIL;++iz,idx+=slice){
// Weighted combination of the two stress components for each wavefield,
// multiplied and accumulated (expression kept in one statement so the
// floating-point result matches the original bit-for-bit).
image[idx]+=(TWOTHIRD*curSigmaX[idx]+ONETHIRD*curSigmaZ[idx])*(TWOTHIRD*curLambdaX[idx]+ONETHIRD*curLambdaZ[idx]);
}
}
|
e885a3a5039f645401a5ff30cd03ae16c95a3cde.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2020 Christopher Khan
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the license at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Description of ADMIRE_GPU_curvilinear_probe_reshaped_data_type.cu:
// This file contains the MEX-interface that calls the C/CUDA code for
// performing ADMIRE on a GPU. It is used when params.data_type = 'Reshaped'
// and params.probe_type = 'Curvilinear'.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <hipfft.h>
#include "definitions.h"
#include "GPU_processing_kernels.cu"
#include "mex.h"
// ---- Persistent MEX-file state (survives across calls via `static`) ----
// Parameters
static int initialized = 0; // Specifies whether everything has been initialized or not
static int t0; // Verasonics t0 index (any depth samples before this will be removed), and one is subtracted from it in the GPU code to obtain zero-based indexing
static int num_depths; // Number of depth samples
static int num_elements; // Number of receive elements used to obtain one beam
static int num_beams; // Number of beams
static int start_depth_offset; // Index of the first depth sample to which ADMIRE is applied, and one is subtracted from it in the GPU code to obtain zero-based indexing
static int stft_num_zeros; // Number of zeros to use for the FFT zero-padding when performing the STFT
static int stft_num_windows; // Number of STFT windows for one beam
static int stft_window_shift; // Number of depth samples to shift by when moving to the next STFT window
static int stft_length; // STFT window length without zero-padding
static int max_windows_per_set; // Number of windows to group together for STFT calculation (largest value for this is the number of threads per block divided by the padded STFT length)
static int num_selected_freqs; // Number of frequencies within one STFT window to perform ADMIRE on
static int num_corresponding_negative_freqs; // The number of selected frequencies in one STFT window that have corresponding negative frequencies
static int num_fits; // Total number of model fits that are performed
static int total_num_cropped_y_observations; // Total number of values in the cropped_y_d array
static int total_num_X_matrix_values; // Total number of values in the X_matrix_d array
static int total_num_B_values; // Total number of values in the B_d array
static float alpha; // The alpha to use for elastic-net regularization
static float tolerance; // Maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values between iterations of cyclic coordinate descent (convergence criterion)
static int max_iterations; // Maximum number of cyclic coordinate descent iterations to perform (convergence criterion)
static float lambda_scaling_factor; // Scaling factor used for the calculation of lambda, which is used in elastic-net regularization
static int scan_conversion_parameters_length; // Length of each scan conversion parameter vector
static int scan_converted_num_axial_positions; // Number of depths in the scan-converted image
static int scan_converted_num_lateral_positions; // Number of lateral positions in the scan-converted image
static int channel_data_output_flag; // Flag that specifies whether or not to output the processed channel data in addition to the envelope data (0 means don't output, and 1 means output)
// GPU device arrays (allocated once on first call, freed in cleanup())
static float * reshaped_d; // Stores the reshaped channel data
static hipArray * cuArray; // CUDA array for texture memory binding
static float * delays_d; // Stores the calculated delays in samples
static float * delayed_data_d; // Stores the delayed channel data
static float * stft_window_d; // Stores the windowing function coefficients that are used in calculating the STFT of the delayed channel data
static hipfftComplex * stft_d; // Stores the STFT data
static float * selected_freq_inds_d; // Stores the indices of the selected frequencies within one STFT window (these indices use zero-based indexing)
static float * negative_freq_inds_d; // Stores the indices of the negative frequencies that correspond to the positive frequencies being fitted (these indices use zero-based indexing)
static float * negative_freq_include_d; // Stores the binary flag (0 or 1) that indicates whether a frequency being fitted has a corresponding negative frequency to store the conjugate for
static float * y_d; // Stores the standardized STFT data for the selected frequencies (the real components are stacked on top of the imaginary components)
static float * cropped_y_d; // Stores the cropped y data that results from applying aperture growth
static float * residual_y_d; // Stores the residual values that are obtained during each fit
static float * y_include_mask_d; // Stores the binary flag (0 or 1) that indicates whether to crop a y value or not
static double * start_ind_d; // Stores the index for the first location where there is a 1 in each aperture growth binary mask
static float * y_std_d; // Stores the standard deviations for each portion of the y_d array (one portion corresponds to one elastic net regression fit)
static float * standardized_lambda_d; // Stores the standardized lambda values for each portion of the y_d array (one portion corresponds to one elastic net regression fit)
static double * num_observations_d; // Stores the number of observations for each fit after cropping the y data
static double * observation_thread_stride_d; // Stores the indices corresponding to where each fit starts in the cropped_y_d array (these indices use zero-based indexing)
static double * num_predictors_d; // Stores the number of predictors for each fit after cropping the y data
static float * X_matrix_d; // Stores all of the ADMIRE models matrices
static double * X_matrix_thread_stride_d; // Stores the indices corresponding to where each model begins in the X_matrix_d array (these indices use zero-based indexing)
static float * B_d; // Stores the predictor coefficient values that are obtained from each fit
static double * B_thread_stride_d; // Stores the indices corresponding to where each set of predictor coefficients begins in the B_d array (these indices use zero-based indexing)
static float * model_fit_flag_d; // Stores the flag that determines whether to perform a model fit or not for each model
static hipfftComplex * summed_data_d; // Stores the summed channel data
static float * envelope_d; // Stores the envelope data
static float * envelope_max_value_d; // Stores the maximum value for the envelope data
static float * normalized_log_compressed_envelope_d; // Stores the normalized and log compressed envelope data
static float * dr_d; // Scan conversion parameter array
static float * dth_d; // Scan conversion parameter array
static float * i00_d; // Scan conversion parameter array (these indices use zero-based indexing)
static float * i01_d; // Scan conversion parameter array (these indices use zero-based indexing)
static float * i10_d; // Scan conversion parameter array (these indices use zero-based indexing)
static float * i11_d; // Scan conversion parameter array (these indices use zero-based indexing)
static float * idx_d; // Scan conversion parameter array (these indices use zero-based indexing)
static float * envelope_min_value_d; // Stores the minimum value for the normalized and log compressed envelope data (need this when doing scan conversion)
static float * row_column_replicated_envelope_d; // Same as normalized_log_compressed_envelope_d but has the last row and column replicated once (need this when doing scan conversion)
static float * scan_converted_envelope_d; // Stores the scan-converted envelope data
// Channel format description for hipArray
static hipChannelFormatDesc channelDescFLOAT;
// Define a texture
// NOTE(review): the texture *reference* API is deprecated in HIP and removed
// in CUDA 12 — consider migrating to texture objects (hipTextureObject_t).
static texture<float, 2, hipReadModeElementType> texRef;
// cufft plans
static hipfftHandle FFTplan1; // Handle to the cufft plan that is used to perform the Fourier Transform of each column of each STFT window
static hipfftHandle FFTplan2; // Handle to the cufft plan that is used to perform the Fourier Transform of each column of the summed channel data
// Kernel that applies the per-sample receive delays to the channel data using
// hardware linear interpolation from the 2D texture (texRef) bound to the raw
// reshaped data. Launch layout: grid = (num_depths, num_beams), one block of
// num_elements threads — one thread per output sample.
__global__ void delay_data(float * delayed_data_d, float * delays_d, int t0, int num_depths, int num_elements) {
	// Decompose this thread's (depth, element, beam) coordinates.
	int depth = blockIdx.x;
	int element = threadIdx.x;
	int beam = blockIdx.y;
	// The delay array and the output array share the same layout, so one flat
	// index serves both (depth fastest, then element, then beam).
	int idx = (beam * num_elements + element) * num_depths + depth;
	// Texture row holding this element/beam column of depth samples (rows,
	// because the data was copied into the texture in row-major order).
	int row = beam * num_elements + element;
	// Shift the fractional delay by t0 - 1 to account for the one-based
	// Verasonics t0 index (samples before t0 are discarded).
	float sample_pos = delays_d[idx] + (float)(t0 - 1);
	// The +0.5f offsets center the fetch on the texel so the linear filter
	// interpolates between the two neighboring depth samples.
	delayed_data_d[idx] = tex2D(texRef, sample_pos + 0.5f, (float)row + 0.5f);
}
// Define the function that frees allocated memory on the GPU when the MEX interface is exited.
// Registered with mexAtExit() during initialization; MATLAB invokes it when the
// MEX-file is cleared or MATLAB exits.
// NOTE(review): return codes of the hipFree/hipfftDestroy calls are ignored —
// acceptable during teardown, but errors here will be silent.
void cleanup() {
mexPrintf("MEX-file is terminating, destroying the arrays\n");
// Free the GPU device arrays
hipFree(reshaped_d);
hipFree(delays_d);
hipFree(delayed_data_d);
hipFree(stft_window_d);
hipFree(stft_d);
hipFree(selected_freq_inds_d);
hipFree(negative_freq_inds_d);
hipFree(negative_freq_include_d);
hipFree(y_d);
hipFree(cropped_y_d);
hipFree(residual_y_d);
hipFree(y_include_mask_d);
hipFree(start_ind_d);
hipFree(y_std_d);
hipFree(standardized_lambda_d);
hipFree(num_observations_d);
hipFree(observation_thread_stride_d);
hipFree(num_predictors_d);
hipFree(X_matrix_thread_stride_d);
hipFree(X_matrix_d);
hipFree(B_d);
hipFree(B_thread_stride_d);
hipFree(model_fit_flag_d);
hipFree(summed_data_d);
hipFree(envelope_d);
hipFree(envelope_max_value_d);
hipFree(normalized_log_compressed_envelope_d);
hipFree(dr_d);
hipFree(dth_d);
hipFree(i00_d);
hipFree(i01_d);
hipFree(i10_d);
hipFree(i11_d);
hipFree(idx_d);
hipFree(envelope_min_value_d);
hipFree(row_column_replicated_envelope_d);
hipFree(scan_converted_envelope_d);
// The texture-backed CUDA array uses its own free routine.
hipFreeArray(cuArray);
// Free the cufft plans
hipfftDestroy(FFTplan1);
hipfftDestroy(FFTplan2);
// Reset the GPU device (need this for profiling the MEX file using the Nvidia Visual Profiler)
hipDeviceReset();
}
// Define the MEX gateway function
void mexFunction(int nlhs, mxArray * plhs[], int nrhs, const mxArray * prhs[]) {
// Initialize everything if it is the first call to the MEX-file
if (!initialized) {
// Print to the console
mexPrintf("MEX-file initializing\n");
// Define the host arrays
double * GPU_fixed_params_h;
float * delays_h;
float * stft_window_h;
float * selected_freq_inds_h;
float * negative_freq_inds_h;
float * negative_freq_include_h;
float * y_include_mask_h;
double * num_observations_h;
double * observation_thread_stride_h;
double * num_predictors_h;
double * X_matrix_thread_stride_h;
float * X_matrix_h;
double * B_thread_stride_h;
float * dr_h;
float * dth_h;
float * i00_h;
float * i01_h;
float * i10_h;
float * i11_h;
float * idx_h;
// Obtain the array that contains the GPU parameters that are fixed
GPU_fixed_params_h = (double*)mxGetData(prhs[0]);
t0 = (int)GPU_fixed_params_h[0];
num_depths = (int)GPU_fixed_params_h[1];
num_elements = (int)GPU_fixed_params_h[2];
num_beams = (int)GPU_fixed_params_h[3];
start_depth_offset = (int)GPU_fixed_params_h[4];
stft_num_zeros = (int)GPU_fixed_params_h[5];
stft_num_windows = (int)GPU_fixed_params_h[6];
stft_window_shift = (int)GPU_fixed_params_h[7];
stft_length = (int)GPU_fixed_params_h[8];
max_windows_per_set = (int)GPU_fixed_params_h[9];
num_selected_freqs = (int)GPU_fixed_params_h[10];
num_corresponding_negative_freqs = (int)GPU_fixed_params_h[11];
num_fits = (int)GPU_fixed_params_h[12];
total_num_cropped_y_observations = (int)GPU_fixed_params_h[13];
total_num_X_matrix_values = (int)GPU_fixed_params_h[14];
total_num_B_values = (int)GPU_fixed_params_h[15];
scan_conversion_parameters_length = (int)GPU_fixed_params_h[16];
scan_converted_num_axial_positions = (int)GPU_fixed_params_h[17];
scan_converted_num_lateral_positions = (int)GPU_fixed_params_h[18];
channel_data_output_flag = (int)GPU_fixed_params_h[19];
// Obtain the other input arrays
delays_h = (float*)mxGetData(prhs[1]);
stft_window_h = (float*)mxGetData(prhs[2]);
selected_freq_inds_h = (float*)mxGetData(prhs[3]);
negative_freq_inds_h = (float*)mxGetData(prhs[4]);
negative_freq_include_h = (float*)mxGetData(prhs[5]);
y_include_mask_h = (float*)mxGetData(prhs[6]);
num_observations_h = (double*)mxGetData(prhs[7]);
observation_thread_stride_h = (double*)mxGetData(prhs[8]);
num_predictors_h = (double*)mxGetData(prhs[9]);
X_matrix_thread_stride_h = (double*)mxGetData(prhs[10]);
X_matrix_h = (float*)mxGetData(prhs[11]);
B_thread_stride_h = (double*)mxGetData(prhs[12]);
dr_h = (float*)mxGetData(prhs[13]);
dth_h = (float*)mxGetData(prhs[14]);
i00_h = (float*)mxGetData(prhs[15]);
i01_h = (float*)mxGetData(prhs[16]);
i10_h = (float*)mxGetData(prhs[17]);
i11_h = (float*)mxGetData(prhs[18]);
idx_h = (float*)mxGetData(prhs[19]);
// Allocate the GPU device arrays
hipMalloc(&reshaped_d, (num_depths + t0 - 1) * num_elements * num_beams * sizeof(float));
hipMalloc(&delays_d, num_depths * num_elements * num_beams * sizeof(float));
hipMalloc(&delayed_data_d, num_depths * num_elements * num_beams * sizeof(float));
hipMalloc(&stft_window_d, stft_length * sizeof(float));
hipMalloc(&stft_d, stft_num_windows * (stft_length + stft_num_zeros) * num_elements * num_beams * sizeof(hipfftComplex));
hipMalloc(&selected_freq_inds_d, num_selected_freqs * sizeof(float));
hipMalloc(&negative_freq_inds_d, num_corresponding_negative_freqs * sizeof(float));
hipMalloc(&negative_freq_include_d, num_selected_freqs * sizeof(float));
hipMalloc(&y_d, 2 * stft_num_windows * num_selected_freqs * num_elements * num_beams * sizeof(float));
hipMalloc(&cropped_y_d, total_num_cropped_y_observations * sizeof(float));
hipMalloc(&residual_y_d, total_num_cropped_y_observations * sizeof(float));
hipMalloc(&y_include_mask_d, 2 * num_elements * num_selected_freqs * stft_num_windows * num_beams * sizeof(float));
hipMalloc(&start_ind_d, num_fits * sizeof(double));
hipMalloc(&y_std_d, num_fits * sizeof(float));
hipMalloc(&standardized_lambda_d, num_fits * sizeof(float));
hipMalloc(&num_observations_d, num_fits * sizeof(double));
hipMalloc(&observation_thread_stride_d, num_fits * sizeof(double));
hipMalloc(&num_predictors_d, num_fits * sizeof(double));
hipMalloc(&X_matrix_thread_stride_d, num_fits * sizeof(double));
hipMalloc(&X_matrix_d, total_num_X_matrix_values * sizeof(float));
hipMalloc(&B_d, total_num_B_values * sizeof(float));
hipMalloc(&B_thread_stride_d, num_fits * sizeof(double));
hipMalloc(&model_fit_flag_d, num_fits * sizeof(float));
hipMalloc(&summed_data_d, num_depths * num_beams * sizeof(hipfftComplex));
hipMalloc(&envelope_d, num_depths * num_beams * sizeof(float));
hipMalloc(&envelope_max_value_d, 1 * sizeof(float));
hipMalloc(&normalized_log_compressed_envelope_d, num_depths * num_beams * sizeof(float));
hipMalloc(&dr_d, scan_conversion_parameters_length * sizeof(float));
hipMalloc(&dth_d, scan_conversion_parameters_length * sizeof(float));
hipMalloc(&i00_d, scan_conversion_parameters_length * sizeof(float));
hipMalloc(&i01_d, scan_conversion_parameters_length * sizeof(float));
hipMalloc(&i10_d, scan_conversion_parameters_length * sizeof(float));
hipMalloc(&i11_d, scan_conversion_parameters_length * sizeof(float));
hipMalloc(&idx_d, scan_conversion_parameters_length * sizeof(float));
hipMalloc(&envelope_min_value_d, 1 * sizeof(float));
hipMalloc(&row_column_replicated_envelope_d, (num_depths + 1) * (num_beams + 1) * sizeof(float));
hipMalloc(&scan_converted_envelope_d, scan_converted_num_axial_positions * scan_converted_num_lateral_positions * sizeof(float));
// Transfer the data from the host arrays to the GPU device arrays
hipMemcpy(delays_d, delays_h, num_depths * num_elements * num_beams * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(stft_window_d, stft_window_h, stft_length * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(selected_freq_inds_d, selected_freq_inds_h, num_selected_freqs * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(negative_freq_inds_d, negative_freq_inds_h, num_corresponding_negative_freqs * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(negative_freq_include_d, negative_freq_include_h, num_selected_freqs * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(y_include_mask_d, y_include_mask_h, 2 * num_elements * num_selected_freqs * stft_num_windows * num_beams * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(num_observations_d, num_observations_h, num_fits * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(observation_thread_stride_d, observation_thread_stride_h, num_fits * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(num_predictors_d, num_predictors_h, num_fits * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(X_matrix_thread_stride_d, X_matrix_thread_stride_h, num_fits * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(X_matrix_d, X_matrix_h, total_num_X_matrix_values * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(B_thread_stride_d, B_thread_stride_h, num_fits * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dr_d, dr_h, scan_conversion_parameters_length * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dth_d, dth_h, scan_conversion_parameters_length * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(i00_d, i00_h, scan_conversion_parameters_length * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(i01_d, i01_h, scan_conversion_parameters_length * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(i10_d, i10_h, scan_conversion_parameters_length * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(i11_d, i11_h, scan_conversion_parameters_length * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(idx_d, idx_h, scan_conversion_parameters_length * sizeof(float), hipMemcpyHostToDevice);
// Allocate the CUDA array for texture memory
channelDescFLOAT = hipCreateChannelDesc<float>();
hipMallocArray(&cuArray, &channelDescFLOAT, num_depths + t0 - 1, num_elements * num_beams);
// Create a cufft plan to take the fast Fourier transform of each column of each STFT window
hipfftPlan1d(&FFTplan1, stft_length + stft_num_zeros, HIPFFT_C2C, stft_num_windows * num_elements * num_beams);
// Create a cufft plan to take the fast Fourier transform of each column of the summed channel data
hipfftPlan1d(&FFTplan2, num_depths, HIPFFT_C2C, num_beams);
// Run the cleanup function when exiting the MEX interface
mexAtExit(cleanup);
// Set initialization variable to 1 because everything has been initialized
initialized = 1;
}
//// THIS SECTION DELAYS THE CHANNEL DATA ////
// Obtain the array that contains the adjustable GPU parameters
float * GPU_adjustable_params_h;
GPU_adjustable_params_h = (float*)mxGetData(prhs[20]);
alpha = GPU_adjustable_params_h[0];
tolerance = GPU_adjustable_params_h[1];
max_iterations = (int)GPU_adjustable_params_h[2];
lambda_scaling_factor = GPU_adjustable_params_h[3];
// Obtain the input data
float * data_h;
data_h = (float*)mxGetData(prhs[21]);
// Set the predictor coefficient values to 0
hipMemset(B_d, 0, total_num_B_values * sizeof(float));
// Set the model fit flag values to 0
hipMemset(model_fit_flag_d, 0, num_fits * sizeof(float));
// Transfer the channel data from the host array to the GPU device array
hipMemcpy(reshaped_d, data_h, (num_depths + t0 - 1) * num_elements * num_beams * sizeof(float), hipMemcpyHostToDevice);
// Set up texture memory for performing linear interpolation in order to delay the channel data
hipMemcpyToArray(cuArray, 0, 0, reshaped_d, (num_depths + t0 - 1) * num_elements * num_beams * sizeof(float), hipMemcpyDeviceToDevice);
texRef.addressMode[0] = hipAddressModeBorder;
texRef.addressMode[1] = hipAddressModeBorder;
texRef.filterMode = hipFilterModeLinear;
texRef.normalized = false;
hipBindTextureToArray(texRef, cuArray, channelDescFLOAT);
// Define the grid and block dimensions for the delay_data GPU kernel
dim3 DELAY_GRID_SIZE;
DELAY_GRID_SIZE = dim3(num_depths, num_beams, 1);
dim3 DELAY_BLOCK_SIZE;
DELAY_BLOCK_SIZE = dim3(num_elements, 1, 1);
// Call the delay_data GPU kernel in order to delay the channel data by performing linear interpolationhipLaunchKernelGGL((
delay_data), dim3(DELAY_GRID_SIZE), dim3(DELAY_BLOCK_SIZE), 0, 0, delayed_data_d, delays_d, t0, num_depths, num_elements);
//// END OF DELAY SECTION ////
//// THIS SECTION CALCULATES THE SHORT-TIME FOURIER TRANSFORM OF THE DELAYED CHANNEL DATA ////
// Set max_windows_per_set to the number of STFT windows if the the number of STFT windows is less than max_windows_per_set
int num_windows_per_set;
if (stft_num_windows < max_windows_per_set) {
num_windows_per_set = 1;
} else {
num_windows_per_set = max_windows_per_set;
}
// Calculate the number of STFT window groupings for the STFT calculation
int num_sets = (int)(ceilf((float)stft_num_windows / (float)num_windows_per_set));
// Calculate the number of STFT windows in the last grouping
int num_windows_per_set_last = stft_num_windows - (num_windows_per_set * (num_sets - 1));
// Obtain the index that corresponds to the last STFT window grouping set
int last_set_ind = num_sets - 1;
// Calculate the zero-padded STFT window length
int stft_padded_length = stft_length + stft_num_zeros;
// Define the grid and block dimensions for the the stft_preparation GPU kernel
dim3 STFT_PREPARATION_GRID_SIZE;
STFT_PREPARATION_GRID_SIZE = dim3(num_sets, num_elements, num_beams);
dim3 STFT_PREPARATION_BLOCK_SIZE;
STFT_PREPARATION_BLOCK_SIZE = dim3(stft_padded_length, num_windows_per_set, 1);
// Call the stft_preparation GPU kernel in order to arrange the data for all of the STFT windows and to apply the STFT windowing function coefficientshipLaunchKernelGGL((
stft_preparation), dim3(STFT_PREPARATION_GRID_SIZE), dim3(STFT_PREPARATION_BLOCK_SIZE), stft_length * sizeof(float), 0, stft_d, delayed_data_d, stft_window_d, stft_num_zeros, stft_num_windows, stft_window_shift, stft_length, num_windows_per_set, num_windows_per_set_last, last_set_ind, num_depths, num_elements, start_depth_offset);
// Calculate the short-time Fourier transform of the data by taking the fast Fourier transform of each column within each STFT window
hipfftExecC2C(FFTplan1, stft_d, stft_d, HIPFFT_FORWARD);
//// END OF SHORT TIME FOURIER TRANSFORM SECTION ////
//// THIS SECTION OBTAINS THE FREQUENCY DATA THAT CORRESPONDS TO THE SELECTED FREQUENCIES AND PROCESSES IT USING ADMIRE ////
// Define the grid and block dimensions for the frequency_selection GPU kernel
dim3 FREQUENCY_SELECTION_GRID_SIZE;
FREQUENCY_SELECTION_GRID_SIZE = dim3(num_selected_freqs, stft_num_windows, num_beams);
dim3 FREQUENCY_SELECTION_BLOCK_SIZE;
FREQUENCY_SELECTION_BLOCK_SIZE = dim3(num_elements, 1, 1);
// Call the frequency_selection GPU kernel in order to obtain the frequency data that corresponds to the selected frequencies for ADMIRE hipLaunchKernelGGL((
frequency_selection), dim3(FREQUENCY_SELECTION_GRID_SIZE), dim3(FREQUENCY_SELECTION_BLOCK_SIZE), 0, 0, y_d, selected_freq_inds_d, stft_d, stft_length, stft_num_zeros, stft_num_windows, num_selected_freqs, num_elements);
// Define the number of model fits to perform within one GPU block
int num_threads_per_block = 32;
// Set num_threads_per_block to num_fits if the total number of model fits is less than the number of model fits per GPU block
if (num_fits < num_threads_per_block) {
num_threads_per_block = num_fits;
}
// Calculate the number of GPU blocks that are required to perform all of the model fits
int num_blocks = (int)ceilf((float)num_fits / (float)num_threads_per_block);
// Calculate the number of model fits that are performed within the last GPU block
int num_threads_last_block = num_fits - ((num_blocks - 1) * num_threads_per_block);
// Define the grid and block dimensions for the model_fit_preparation GPU kernel
dim3 MODEL_FIT_PREPARATION_GRID_SIZE;
MODEL_FIT_PREPARATION_GRID_SIZE = dim3(num_blocks, 1, 1);
dim3 MODEL_FIT_PREPARATION_BLOCK_SIZE;
MODEL_FIT_PREPARATION_BLOCK_SIZE = dim3(num_threads_per_block, 1, 1);hipLaunchKernelGGL((
model_fit_preparation), dim3(MODEL_FIT_PREPARATION_GRID_SIZE), dim3(MODEL_FIT_PREPARATION_BLOCK_SIZE), num_threads_per_block * 2 * num_elements * sizeof(float), 0, cropped_y_d, model_fit_flag_d, y_d, residual_y_d, y_include_mask_d, start_ind_d, y_std_d, standardized_lambda_d, num_observations_d, observation_thread_stride_d, lambda_scaling_factor, num_elements, num_threads_per_block, num_blocks, num_threads_last_block);
// Calculate the number of blocks that are required to perform the model fits for all of the beams for one frequency and one STFT window depth range
int num_beam_blocks = (int)ceilf((float)num_beams / (float)num_threads_per_block);
// Define the number of blocks that correspond to the selected frequencies
int num_freq_blocks = num_selected_freqs;
// Define the number of blocks that correspond to the STFT windows
int num_window_blocks = stft_num_windows;
// Define the grid and block dimensions for the model_fit_reconstruction GPU kernel
dim3 MODEL_FIT_RECONSTRUCTION_GRID_SIZE;
MODEL_FIT_RECONSTRUCTION_GRID_SIZE = dim3(num_beam_blocks, num_window_blocks, num_freq_blocks);
dim3 MODEL_FIT_RECONSTRUCTION_BLOCK_SIZE;
MODEL_FIT_RECONSTRUCTION_BLOCK_SIZE = dim3(num_threads_per_block, 1, 1);
// Call the model_fit_reconstruction GPU kernel in order to fit the ADMIRE models to the frequency data and calculate the reconstructed frequency datahipLaunchKernelGGL((
model_fit_reconstruction), dim3(MODEL_FIT_RECONSTRUCTION_GRID_SIZE), dim3(MODEL_FIT_RECONSTRUCTION_BLOCK_SIZE), num_threads_per_block * 2 * num_elements * sizeof(float), 0, B_d, B_thread_stride_d, X_matrix_d, X_matrix_thread_stride_d, cropped_y_d, model_fit_flag_d, observation_thread_stride_d, residual_y_d, y_std_d, standardized_lambda_d, num_observations_d, num_predictors_d, alpha, tolerance, max_iterations, num_elements, num_threads_per_block, num_beams, num_selected_freqs, stft_num_windows);
//// END OF DATA SELECTION AND MODEL FIT SECTION ////
//// THIS SECTION CALCULATES THE INVERSE SHORT-TIME FOURIER TRANSFORM OF THE RECONSTRUCTED SHORT-TIME FOURIER TRANSFORM DATA ////
// Set the stft_d array values to 0 (this is to zero out all of the frequencies that were not reconstructed with ADMIRE)
hipMemset(stft_d, 0, stft_num_windows * num_elements * num_beams * (stft_length + stft_num_zeros) * sizeof(hipfftComplex));
// Define the grid and block dimensions for the inverse_stft_preparation GPU kernel
dim3 ISTFT_PREPARATION_GRID_SIZE;
ISTFT_PREPARATION_GRID_SIZE = dim3(num_selected_freqs, stft_num_windows, num_beams);
dim3 ISTFT_PREPARATION_BLOCK_SIZE;
ISTFT_PREPARATION_BLOCK_SIZE = dim3(num_elements, 1, 1);
// Call the inverse_stft_preparation GPU kernel in order to place the reconstructed STFT data back into the stft_d arrayhipLaunchKernelGGL((
inverse_stft_preparation), dim3(ISTFT_PREPARATION_GRID_SIZE), dim3(ISTFT_PREPARATION_BLOCK_SIZE), 0, 0, cropped_y_d, selected_freq_inds_d, negative_freq_inds_d, negative_freq_include_d, stft_d, observation_thread_stride_d, y_include_mask_d, start_ind_d, num_observations_d, stft_length, stft_num_zeros, stft_num_windows, num_selected_freqs, num_elements);
// Calculates the inverse short-time Fourier transform (the window overlap is assumed to be 0 for GPU execution, so the inverse fast fourier transform along each column of each STFT window just needs to be calculated in order to obtain the inverse short-time Fourier transform)
hipfftExecC2C(FFTplan1, stft_d, stft_d, HIPFFT_BACKWARD);
//// END OF INVERSE SHORT-TIME FOURIER TRANSFORM SECTION ////
//// THIS SECTION REMOVES THE ZERO-PADDING THAT WAS ADDED FOR THE SHORT-TIME FOURIER TRANSFORM ////
// Define the grid and block dimensions for the stft_data_array_to_delayed_data_array GPU kernel
dim3 TRANSFER_GRID_SIZE;
TRANSFER_GRID_SIZE = dim3(stft_length, stft_num_windows, num_beams);
dim3 TRANSFER_BLOCK_SIZE;
TRANSFER_BLOCK_SIZE = dim3(num_elements, 1, 1);
// Call the stft_data_array_to_delayed_data_array GPU kernel in order to remove the STFT zero-padding and to store the reconstructed channel data back into the delayed_data_d arrayhipLaunchKernelGGL((
stft_data_array_to_delayed_data_array), dim3(TRANSFER_GRID_SIZE), dim3(TRANSFER_BLOCK_SIZE), 0, 0, delayed_data_d, stft_d, stft_num_windows, stft_length, stft_num_zeros, num_depths, num_elements, start_depth_offset);
//// END OF ZERO-PADDING REMOVAL SECTION ////
//// THIS SECTION SUMS THE CHANNEL DATA AND CALCULATES THE ENVELOPE DATA ////
// Define the grid and block dimensions for the sum_channel_data GPU kernel
dim3 SUM_GRID_SIZE;
SUM_GRID_SIZE = dim3(num_depths, 1, 1);
dim3 SUM_BLOCK_SIZE;
SUM_BLOCK_SIZE = dim3(num_beams, 1, 1);
// Define the grid and block dimensions for the optimized summing GPU kernels
dim3 SUM_OPTIMIZED_GRID_SIZE;
SUM_OPTIMIZED_GRID_SIZE = dim3(num_depths, num_beams, 1);
dim3 SUM_OPTIMIZED_BLOCK_SIZE;
SUM_OPTIMIZED_BLOCK_SIZE = dim3(num_elements, 1);
// Define a variable that stores the number of elements as an unsigned integer
unsigned int num_elements_uint = (unsigned int)num_elements;
// Define a variable that stores the number of elements minus one as an unsigned integer
unsigned int num_elements_minus_one_uint = (unsigned int)(num_elements - 1);
// Determine if the number of elements is a power of two
bool case_1 = num_elements_uint && !(num_elements_uint & (num_elements_uint - ((unsigned int)1)));
// Determine if the number of elements minus one is a power two
bool case_2 = num_elements_minus_one_uint && !(num_elements_minus_one_uint & (num_elements_minus_one_uint - ((unsigned int)1)));
// Sum the delayed channel data (the conditional statements determine which summing GPU kernel to use)
if (case_1 || case_2) {
if (num_elements % 2 == 0) {
// Call the sum_channel_data_optimized_even GPU kernel in order to sum the delayed channel data if the number of elements is a power of two and an even number
hipLaunchKernelGGL(( sum_channel_data_optimized_even), dim3(SUM_OPTIMIZED_GRID_SIZE), dim3(SUM_OPTIMIZED_BLOCK_SIZE), num_elements * sizeof(float), 0, summed_data_d, delayed_data_d, num_depths, num_elements);
} else {
// Call the sum_channel_data_optimized_odd GPU kernel in order to sum the delayed channel data if the number of elements minus one is a power of two and if the number of elements is odd
hipLaunchKernelGGL(( sum_channel_data_optimized_odd), dim3(SUM_OPTIMIZED_GRID_SIZE), dim3(SUM_OPTIMIZED_BLOCK_SIZE), num_elements * sizeof(float), 0, summed_data_d, delayed_data_d, num_depths, num_elements);
}
} else {
// Call the sum_channel_data GPU kernel in order to sum the delayed channel data for all other cases (this kernel does not use the optimized summing algorithm that is used by the other two kernels)
hipLaunchKernelGGL(( sum_channel_data), dim3(SUM_GRID_SIZE), dim3(SUM_BLOCK_SIZE), 0, 0, summed_data_d, delayed_data_d, num_depths, num_elements);
}
// Calculate the fast Fourier transform for each column of the summed channel data that has been beamformed
hipfftExecC2C(FFTplan2, summed_data_d, summed_data_d, HIPFFT_FORWARD);
// Define the grid and block sizes for the hilbert_weighting GPU kernel
dim3 HILBERT_WEIGHTING_GRID_SIZE;
HILBERT_WEIGHTING_GRID_SIZE = dim3(num_depths, 1, 1);
dim3 HILBERT_WEIGHTING_BLOCK_SIZE;
HILBERT_WEIGHTING_BLOCK_SIZE = dim3(num_beams, 1, 1);
// Call the hilbert_weighting GPU kernel in order to apply weighting to the data according to the Hilbert Transform algorithm that MATLAB useshipLaunchKernelGGL((
hilbert_weighting), dim3(HILBERT_WEIGHTING_GRID_SIZE), dim3(HILBERT_WEIGHTING_BLOCK_SIZE), 0, 0, summed_data_d, num_depths);
// Calculate the inverse fast Fourier transform of each column of the weighted data
hipfftExecC2C(FFTplan2, summed_data_d, summed_data_d, HIPFFT_BACKWARD);
// Define the grid and block sizes for the envelope GPU kernel
dim3 ENVELOPE_GRID_SIZE;
ENVELOPE_GRID_SIZE = dim3(num_depths, 1, 1);
dim3 ENVELOPE_BLOCK_SIZE;
ENVELOPE_BLOCK_SIZE = dim3(num_beams, 1, 1);
// Call the envelope GPU kernel in order to obtain the envelope datahipLaunchKernelGGL((
envelope), dim3(ENVELOPE_GRID_SIZE), dim3(ENVELOPE_BLOCK_SIZE), 0, 0, envelope_d, summed_data_d, num_depths);
//// END OF SUM AND ENVELOPE CALCULATION SECTION ////
//// THIS SECTION NORMALIZES AND LOG COMPRESSES THE ENVELOPE DATA ////
// Define the grid and block dimensions for the max_envelope_value GPU kernel
dim3 MAX_ENVELOPE_VALUE_GRID_SIZE;
MAX_ENVELOPE_VALUE_GRID_SIZE = dim3(1, 1, 1);
dim3 MAX_ENVELOPE_VALUE_BLOCK_SIZE;
MAX_ENVELOPE_VALUE_BLOCK_SIZE = dim3(num_beams, 1, 1);
// Call the maximum_envelope_value GPU kernel in order to obtain the maximum value of the envelope datahipLaunchKernelGGL((
maximum_envelope_value), dim3(MAX_ENVELOPE_VALUE_GRID_SIZE), dim3(MAX_ENVELOPE_VALUE_BLOCK_SIZE), num_beams * sizeof(float), 0, envelope_max_value_d, envelope_d, num_depths, num_beams);
// Define the grid and block sizes for the envelope_normalization_and_log_compression GPU kernel
dim3 NORMALIZE_LOG_COMPRESS_GRID_SIZE;
NORMALIZE_LOG_COMPRESS_GRID_SIZE = dim3(num_depths, 1, 1);
dim3 NORMALIZE_LOG_COMPRESS_BLOCK_SIZE;
NORMALIZE_LOG_COMPRESS_BLOCK_SIZE = dim3(num_beams, 1, 1);
// Call the envelope_normalization_and_log_compression GPU kernel in order to normalize and apply log compression to the envelope datahipLaunchKernelGGL((
envelope_normalization_and_log_compression), dim3(NORMALIZE_LOG_COMPRESS_GRID_SIZE), dim3(NORMALIZE_LOG_COMPRESS_BLOCK_SIZE), 0, 0, normalized_log_compressed_envelope_d, envelope_d, envelope_max_value_d, num_depths);
//// END OF ENVELOPE NORMALIZATION AND LOG COMPRESSION SECTION ////
//// THIS SECTION SCAN CONVERTS THE NORMALIZED AND LOG-COMPRESSED ENVELOPE ////
// Define the grid and block sizes for the minimum_envelope_value GPU kernel
dim3 MIN_ENVELOPE_VALUE_GRID_SIZE;
MIN_ENVELOPE_VALUE_GRID_SIZE = dim3(1, 1, 1);
dim3 MIN_ENVELOPE_VALUE_BLOCK_SIZE;
MIN_ENVELOPE_VALUE_BLOCK_SIZE = dim3(num_beams, 1, 1);
// Call the minimum_envelope_value GPU kernel in order to obtain the minimum value of the normalized and log-compressed envelope datahipLaunchKernelGGL((
minimum_envelope_value), dim3(MIN_ENVELOPE_VALUE_GRID_SIZE), dim3(MIN_ENVELOPE_VALUE_BLOCK_SIZE), num_beams * sizeof(float), 0, envelope_min_value_d, normalized_log_compressed_envelope_d, num_depths, num_beams);
// Define the grid and block sizes of the row_column_replicate GPU kernel
dim3 REPLICATION_GRID_SIZE;
REPLICATION_GRID_SIZE = dim3(num_depths + 1, 1, 1);
dim3 REPLICATION_BLOCK_SIZE;
REPLICATION_BLOCK_SIZE = dim3(num_beams + 1, 1, 1);
// Call the row_column_replicate GPU kernel in order to replicate the last row and the last column of the normlaized and log-compressed envelope datahipLaunchKernelGGL((
row_column_replicate), dim3(REPLICATION_GRID_SIZE), dim3(REPLICATION_BLOCK_SIZE), 0, 0, row_column_replicated_envelope_d, normalized_log_compressed_envelope_d, num_depths, num_beams);
// Calculate the total number of pixels that are in the scan-converted image
int total_num_pixels = scan_converted_num_axial_positions * scan_converted_num_lateral_positions;
// Define the number of threads to use within one block
int num_threads_per_block_scan_convert = 512;
// Set the number of threads per block to the total number of pixels in the scan-converted image if the total number of pixels is less than the number of threads per block
if (total_num_pixels < num_threads_per_block_scan_convert) {
num_threads_per_block_scan_convert = total_num_pixels;
}
// Calculate the number of blocks that are required to perform the initialization of the scan-converted image
int num_blocks_scan_convert = ceilf((float)total_num_pixels / (float)num_threads_per_block_scan_convert);
// Calculate the number of threads that are used within the last block
int num_threads_last_block_scan_convert = total_num_pixels - ((num_blocks_scan_convert - 1) * num_threads_per_block_scan_convert);
// Define the grid and block dimensions for the scan_converted_envelope_initialization GPU kernel
dim3 SCAN_CONVERT_INIT_GRID_SIZE;
SCAN_CONVERT_INIT_GRID_SIZE = dim3(num_blocks_scan_convert, 1, 1);
dim3 SCAN_CONVERT_INIT_BLOCK_SIZE;
SCAN_CONVERT_INIT_BLOCK_SIZE = dim3(num_threads_per_block_scan_convert, 1, 1);
// Call the scan_converted_envelope_initialization GPU kernel in order to initialize every pixel of the scan-converted envelope to the minimum value of the normalized and log-compressed envelope datahipLaunchKernelGGL((
scan_converted_envelope_initialization), dim3(SCAN_CONVERT_INIT_GRID_SIZE), dim3(SCAN_CONVERT_INIT_BLOCK_SIZE), 0, 0, scan_converted_envelope_d, envelope_min_value_d, num_threads_per_block_scan_convert, num_threads_last_block_scan_convert, num_blocks_scan_convert);
// Define the number of threads to use within one block
int num_threads_per_block_scan_convert_2 = 512;
// Set the number of threads per block to the scan_conversion_parameters_length if scan_conversion_parameters_length is less than the number of threads per block
if (scan_conversion_parameters_length < num_threads_per_block_scan_convert_2) {
num_threads_per_block_scan_convert_2 = scan_conversion_parameters_length;
}
// Calculate the number of blocks that are required to perform scan conversion
int num_blocks_scan_convert_2 = ceilf((float)scan_conversion_parameters_length / (float)num_threads_per_block_scan_convert_2);
// Calculate the number of threads that are used within the last block
int num_threads_last_block_scan_convert_2 = scan_conversion_parameters_length - ((num_blocks_scan_convert_2 - 1) * num_threads_per_block_scan_convert_2);
// Define the grid and block dimensions for the scan_conversion GPU kernel
dim3 SCAN_CONVERSION_GRID_SIZE;
SCAN_CONVERSION_GRID_SIZE = dim3(num_blocks_scan_convert_2, 1, 1);
dim3 SCAN_CONVERSION_BLOCK_SIZE;
SCAN_CONVERSION_BLOCK_SIZE = dim3(num_threads_per_block_scan_convert_2, 1, 1);
// Call the scan_conversion GPU kernle in order to perform scan conversion of the normalized and log-compressed envelope datahipLaunchKernelGGL((
scan_conversion), dim3(SCAN_CONVERSION_GRID_SIZE), dim3(SCAN_CONVERSION_BLOCK_SIZE), 0, 0, scan_converted_envelope_d, row_column_replicated_envelope_d, dr_d, dth_d, idx_d, i00_d, i01_d, i10_d, i11_d, num_threads_per_block_scan_convert_2, num_threads_last_block_scan_convert_2, num_blocks_scan_convert_2);
//// END OF SCAN CONVERSION SECTION ////
//// THIS SECTION OBTAINS THE MEX-FILE OUTPUTS AND UNBINDS THE TEXTURE MEMORY ////
// Declare the pointers to the MEX-file outputs
float * scan_converted_envelope_h;
float * delayed_data_h;
// Allocate the scan_converted_envelope_h array (this is the output array for the scan-converted envelope data)
plhs[0] = mxCreateNumericMatrix(scan_converted_num_axial_positions, scan_converted_num_lateral_positions, mxSINGLE_CLASS, mxREAL);
scan_converted_envelope_h = (float*)mxGetData(plhs[0]);
// Allocate the delayed_data_d array if the channel_data_output_flag parameter is set to 1 (this outputs the reconstructed channel data as a column vector that can be reshaped in MATLAB)
if (channel_data_output_flag == 1) {
plhs[1] = mxCreateNumericMatrix(num_depths * num_elements * num_beams, 1, mxSINGLE_CLASS, mxREAL);
delayed_data_h = (float*)mxGetData(plhs[1]);
}
// Transfer the scan-converted envelope data
hipMemcpy(scan_converted_envelope_h, scan_converted_envelope_d, scan_converted_num_axial_positions * scan_converted_num_lateral_positions * sizeof(float), hipMemcpyDeviceToHost);
// Transfer the reconstructed channel data if the channel_data_output_flag parameter is set to 1
if (channel_data_output_flag == 1) {
hipMemcpy(delayed_data_h, delayed_data_d, num_depths * num_elements * num_beams * sizeof(float), hipMemcpyDeviceToHost);
}
// Unbind the texture memory
hipUnbindTexture(texRef);
//// END OF OUTPUT AND UNBIND SECTION ////
}
| e885a3a5039f645401a5ff30cd03ae16c95a3cde.cu | // Copyright 2020 Christopher Khan
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the license at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Description of ADMIRE_GPU_curvilinear_probe_reshaped_data_type.cu:
// This file contains the MEX-interface that calls the C/CUDA code for
// performing ADMIRE on a GPU. It is used when params.data_type = 'Reshaped'
// and params.probe_type = 'Curvilinear'.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <cufft.h>
#include "definitions.h"
#include "GPU_processing_kernels.cu"
#include "mex.h"
// Parameters (set once during the first call to the MEX-file and reused on subsequent calls)
static int initialized = 0; // Specifies whether everything has been initialized or not
static int t0; // Verasonics t0 index (any depth samples before this will be removed), and one is subtracted from it in the GPU code to obtain zero-based indexing
static int num_depths; // Number of depth samples
static int num_elements; // Number of receive elements used to obtain one beam
static int num_beams; // Number of beams
static int start_depth_offset; // Index of the first depth sample to which ADMIRE is applied, and one is subtracted from it in the GPU code to obtain zero-based indexing
static int stft_num_zeros; // Number of zeros to use for the FFT zero-padding when performing the STFT
static int stft_num_windows; // Number of STFT windows for one beam
static int stft_window_shift; // Number of depth samples to shift by when moving to the next STFT window
static int stft_length; // STFT window length without zero-padding
static int max_windows_per_set; // Number of windows to group together for STFT calculation (largest value for this is the number of threads per block divided by the padded STFT length)
static int num_selected_freqs; // Number of frequencies within one STFT window to perform ADMIRE on
static int num_corresponding_negative_freqs; // The number of selected frequencies in one STFT window that have corresponding negative frequencies
static int num_fits; // Total number of model fits that are performed
static int total_num_cropped_y_observations; // Total number of values in the cropped_y_d array
static int total_num_X_matrix_values; // Total number of values in the X_matrix_d array
static int total_num_B_values; // Total number of values in the B_d array
static float alpha; // The alpha to use for elastic-net regularization
static float tolerance; // Maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values between iterations of cyclic coordinate descent (convergence criterion)
static int max_iterations; // Maximum number of cyclic coordinate descent iterations to perform (convergence criterion)
static float lambda_scaling_factor; // Scaling factor used for the calculation of lambda, which is used in elastic-net regularization
static int scan_conversion_parameters_length; // Length of each scan conversion parameter vector
static int scan_converted_num_axial_positions; // Number of depths in the scan-converted image
static int scan_converted_num_lateral_positions; // Number of lateral positions in the scan-converted image
static int channel_data_output_flag; // Flag that specifies whether or not to output the processed channel data in addition to the envelope data (0 means don't output, and 1 means output)
// GPU device arrays (allocated once at initialization and freed by cleanup())
static float * reshaped_d; // Stores the reshaped channel data
static cudaArray * cuArray; // CUDA array for texture memory binding
static float * delays_d; // Stores the calculated delays in samples
static float * delayed_data_d; // Stores the delayed channel data
static float * stft_window_d; // Stores the windowing function coefficients that are used in calculating the STFT of the delayed channel data
static cufftComplex * stft_d; // Stores the STFT data
static float * selected_freq_inds_d; // Stores the indices of the selected frequencies within one STFT window (these indices use zero-based indexing)
static float * negative_freq_inds_d; // Stores the indices of the negative frequencies that correspond to the positive frequencies being fitted (these indices use zero-based indexing)
static float * negative_freq_include_d; // Stores the binary flag (0 or 1) that indicates whether a frequency being fitted has a corresponding negative frequency to store the conjugate for
static float * y_d; // Stores the standardized STFT data for the selected frequencies (the real components are stacked on top of the imaginary components)
static float * cropped_y_d; // Stores the cropped y data that results from applying aperture growth
static float * residual_y_d; // Stores the residual values that are obtained during each fit
static float * y_include_mask_d; // Stores the binary flag (0 or 1) that indicates whether to crop a y value or not
static double * start_ind_d; // Stores the index for the first location where there is a 1 in each aperture growth binary mask
static float * y_std_d; // Stores the standard deviations for each portion of the y_d array (one portion corresponds to one elastic net regression fit)
static float * standardized_lambda_d; // Stores the standardized lambda values for each portion of the y_d array (one portion corresponds to one elastic net regression fit)
static double * num_observations_d; // Stores the number of observations for each fit after cropping the y data
static double * observation_thread_stride_d; // Stores the indices corresponding to where each fit starts in the cropped_y_d array (these indices use zero-based indexing)
static double * num_predictors_d; // Stores the number of predictors for each fit after cropping the y data
static float * X_matrix_d; // Stores all of the ADMIRE models matrices
static double * X_matrix_thread_stride_d; // Stores the indices corresponding to where each model begins in the X_matrix_d array (these indices use zero-based indexing)
static float * B_d; // Stores the predictor coefficient values that are obtained from each fit
static double * B_thread_stride_d; // Stores the indices corresponding to where each set of predictor coefficients begins in the B_d array (these indices use zero-based indexing)
static float * model_fit_flag_d; // Stores the flag that determines whether to perform a model fit or not for each model
static cufftComplex * summed_data_d; // Stores the summed channel data
static float * envelope_d; // Stores the envelope data
static float * envelope_max_value_d; // Stores the maximum value for the envelope data
static float * normalized_log_compressed_envelope_d; // Stores the normalized and log compressed envelope data
static float * dr_d; // Scan conversion parameter array
static float * dth_d; // Scan conversion parameter array
static float * i00_d; // Scan conversion parameter array (these indices use zero-based indexing)
static float * i01_d; // Scan conversion parameter array (these indices use zero-based indexing)
static float * i10_d; // Scan conversion parameter array (these indices use zero-based indexing)
static float * i11_d; // Scan conversion parameter array (these indices use zero-based indexing)
static float * idx_d; // Scan conversion parameter array (these indices use zero-based indexing)
static float * envelope_min_value_d; // Stores the minimum value for the normalized and log compressed envelope data (need this when doing scan conversion)
static float * row_column_replicated_envelope_d; // Same as normalized_log_compressed_envelope_d but has the last row and column replicated once (need this when doing scan conversion)
static float * scan_converted_envelope_d; // Stores the scan-converted envelope data
// Channel format description for the cudaArray (single-channel float)
static cudaChannelFormatDesc channelDescFLOAT;
// Define a 2D float texture (legacy texture-reference API) used for the linear-interpolation delays
static texture<float, 2, cudaReadModeElementType> texRef;
// cufft plans
static cufftHandle FFTplan1; // Handle to the cufft plan that is used to perform the Fourier Transform of each column of each STFT window
static cufftHandle FFTplan2; // Handle to the cufft plan that is used to perform the Fourier Transform of each column of the summed channel data
// Kernel that applies the per-sample delays to the channel data using texture-based linear interpolation.
// Expected launch layout: grid = (num_depths, num_beams, 1), block = (num_elements, 1, 1).
// Reads from the 2D texture texRef, which is bound to the raw channel data stored in row-major order
// (one texture row per (beam, element) pair, width = number of depth samples including the t0 region).
__global__ void delay_data(float * delayed_data_d, float * delays_d, int t0, int num_depths, int num_elements) {
    // One thread handles one (depth, element, beam) sample
    const int depth = blockIdx.x;
    const int element = threadIdx.x;
    const int beam = blockIdx.y;
    // The delays array and the output array share the same linear layout, so one flat index serves both
    const int sample_idx = ((beam * num_elements) + element) * num_depths + depth;
    // Each (beam, element) pair occupies one row of the 2D texture (the data is stored in row-major order)
    const int tex_row = (beam * num_elements) + element;
    // Total fractional sample shift; the (t0 - 1) term accounts for the one-based Verasonics t0 index
    const float shift = delays_d[sample_idx] + (float)(t0 - 1);
    // Let the texture hardware perform the linear interpolation; the 0.5f offsets address texel
    // centers because unnormalized texture coordinates are used
    delayed_data_d[sample_idx] = tex2D(texRef, shift + 0.5f, (float)tex_row + 0.5f);
}
// Releases every GPU resource owned by this MEX-file; registered with mexAtExit so it runs
// when the MEX interface is unloaded. Frees all device buffers, the texture-backing CUDA
// array, and both cufft plans, then resets the device.
void cleanup() {
    mexPrintf("MEX-file is terminating, destroying the arrays\n");
    // Every linear device buffer can be released uniformly through cudaFree(void*),
    // regardless of its element type (float, double, or cufftComplex)
    void * device_buffers[] = {
        reshaped_d, delays_d, delayed_data_d, stft_window_d, stft_d,
        selected_freq_inds_d, negative_freq_inds_d, negative_freq_include_d,
        y_d, cropped_y_d, residual_y_d, y_include_mask_d, start_ind_d,
        y_std_d, standardized_lambda_d, num_observations_d,
        observation_thread_stride_d, num_predictors_d,
        X_matrix_thread_stride_d, X_matrix_d, B_d, B_thread_stride_d,
        model_fit_flag_d, summed_data_d, envelope_d, envelope_max_value_d,
        normalized_log_compressed_envelope_d, dr_d, dth_d, i00_d, i01_d,
        i10_d, i11_d, idx_d, envelope_min_value_d,
        row_column_replicated_envelope_d, scan_converted_envelope_d};
    const int num_buffers = (int)(sizeof(device_buffers) / sizeof(device_buffers[0]));
    for (int i = 0; i < num_buffers; i++) {
        cudaFree(device_buffers[i]);
    }
    // The CUDA array that backs the texture requires its own free routine
    cudaFreeArray(cuArray);
    // Destroy the cufft plans
    cufftDestroy(FFTplan1);
    cufftDestroy(FFTplan2);
    // Reset the GPU device (need this for profiling the MEX file using the Nvidia Visual Profiler)
    cudaDeviceReset();
}
// Define the MEX gateway function
void mexFunction(int nlhs, mxArray * plhs[], int nrhs, const mxArray * prhs[]) {
// Initialize everything if it is the first call to the MEX-file
if (!initialized) {
// Print to the console
mexPrintf("MEX-file initializing\n");
// Define the host arrays
double * GPU_fixed_params_h;
float * delays_h;
float * stft_window_h;
float * selected_freq_inds_h;
float * negative_freq_inds_h;
float * negative_freq_include_h;
float * y_include_mask_h;
double * num_observations_h;
double * observation_thread_stride_h;
double * num_predictors_h;
double * X_matrix_thread_stride_h;
float * X_matrix_h;
double * B_thread_stride_h;
float * dr_h;
float * dth_h;
float * i00_h;
float * i01_h;
float * i10_h;
float * i11_h;
float * idx_h;
// Obtain the array that contains the GPU parameters that are fixed
GPU_fixed_params_h = (double*)mxGetData(prhs[0]);
t0 = (int)GPU_fixed_params_h[0];
num_depths = (int)GPU_fixed_params_h[1];
num_elements = (int)GPU_fixed_params_h[2];
num_beams = (int)GPU_fixed_params_h[3];
start_depth_offset = (int)GPU_fixed_params_h[4];
stft_num_zeros = (int)GPU_fixed_params_h[5];
stft_num_windows = (int)GPU_fixed_params_h[6];
stft_window_shift = (int)GPU_fixed_params_h[7];
stft_length = (int)GPU_fixed_params_h[8];
max_windows_per_set = (int)GPU_fixed_params_h[9];
num_selected_freqs = (int)GPU_fixed_params_h[10];
num_corresponding_negative_freqs = (int)GPU_fixed_params_h[11];
num_fits = (int)GPU_fixed_params_h[12];
total_num_cropped_y_observations = (int)GPU_fixed_params_h[13];
total_num_X_matrix_values = (int)GPU_fixed_params_h[14];
total_num_B_values = (int)GPU_fixed_params_h[15];
scan_conversion_parameters_length = (int)GPU_fixed_params_h[16];
scan_converted_num_axial_positions = (int)GPU_fixed_params_h[17];
scan_converted_num_lateral_positions = (int)GPU_fixed_params_h[18];
channel_data_output_flag = (int)GPU_fixed_params_h[19];
// Obtain the other input arrays
delays_h = (float*)mxGetData(prhs[1]);
stft_window_h = (float*)mxGetData(prhs[2]);
selected_freq_inds_h = (float*)mxGetData(prhs[3]);
negative_freq_inds_h = (float*)mxGetData(prhs[4]);
negative_freq_include_h = (float*)mxGetData(prhs[5]);
y_include_mask_h = (float*)mxGetData(prhs[6]);
num_observations_h = (double*)mxGetData(prhs[7]);
observation_thread_stride_h = (double*)mxGetData(prhs[8]);
num_predictors_h = (double*)mxGetData(prhs[9]);
X_matrix_thread_stride_h = (double*)mxGetData(prhs[10]);
X_matrix_h = (float*)mxGetData(prhs[11]);
B_thread_stride_h = (double*)mxGetData(prhs[12]);
dr_h = (float*)mxGetData(prhs[13]);
dth_h = (float*)mxGetData(prhs[14]);
i00_h = (float*)mxGetData(prhs[15]);
i01_h = (float*)mxGetData(prhs[16]);
i10_h = (float*)mxGetData(prhs[17]);
i11_h = (float*)mxGetData(prhs[18]);
idx_h = (float*)mxGetData(prhs[19]);
// Allocate the GPU device arrays
cudaMalloc(&reshaped_d, (num_depths + t0 - 1) * num_elements * num_beams * sizeof(float));
cudaMalloc(&delays_d, num_depths * num_elements * num_beams * sizeof(float));
cudaMalloc(&delayed_data_d, num_depths * num_elements * num_beams * sizeof(float));
cudaMalloc(&stft_window_d, stft_length * sizeof(float));
cudaMalloc(&stft_d, stft_num_windows * (stft_length + stft_num_zeros) * num_elements * num_beams * sizeof(cufftComplex));
cudaMalloc(&selected_freq_inds_d, num_selected_freqs * sizeof(float));
cudaMalloc(&negative_freq_inds_d, num_corresponding_negative_freqs * sizeof(float));
cudaMalloc(&negative_freq_include_d, num_selected_freqs * sizeof(float));
cudaMalloc(&y_d, 2 * stft_num_windows * num_selected_freqs * num_elements * num_beams * sizeof(float));
cudaMalloc(&cropped_y_d, total_num_cropped_y_observations * sizeof(float));
cudaMalloc(&residual_y_d, total_num_cropped_y_observations * sizeof(float));
cudaMalloc(&y_include_mask_d, 2 * num_elements * num_selected_freqs * stft_num_windows * num_beams * sizeof(float));
cudaMalloc(&start_ind_d, num_fits * sizeof(double));
cudaMalloc(&y_std_d, num_fits * sizeof(float));
cudaMalloc(&standardized_lambda_d, num_fits * sizeof(float));
cudaMalloc(&num_observations_d, num_fits * sizeof(double));
cudaMalloc(&observation_thread_stride_d, num_fits * sizeof(double));
cudaMalloc(&num_predictors_d, num_fits * sizeof(double));
cudaMalloc(&X_matrix_thread_stride_d, num_fits * sizeof(double));
cudaMalloc(&X_matrix_d, total_num_X_matrix_values * sizeof(float));
cudaMalloc(&B_d, total_num_B_values * sizeof(float));
cudaMalloc(&B_thread_stride_d, num_fits * sizeof(double));
cudaMalloc(&model_fit_flag_d, num_fits * sizeof(float));
cudaMalloc(&summed_data_d, num_depths * num_beams * sizeof(cufftComplex));
cudaMalloc(&envelope_d, num_depths * num_beams * sizeof(float));
cudaMalloc(&envelope_max_value_d, 1 * sizeof(float));
cudaMalloc(&normalized_log_compressed_envelope_d, num_depths * num_beams * sizeof(float));
cudaMalloc(&dr_d, scan_conversion_parameters_length * sizeof(float));
cudaMalloc(&dth_d, scan_conversion_parameters_length * sizeof(float));
cudaMalloc(&i00_d, scan_conversion_parameters_length * sizeof(float));
cudaMalloc(&i01_d, scan_conversion_parameters_length * sizeof(float));
cudaMalloc(&i10_d, scan_conversion_parameters_length * sizeof(float));
cudaMalloc(&i11_d, scan_conversion_parameters_length * sizeof(float));
cudaMalloc(&idx_d, scan_conversion_parameters_length * sizeof(float));
cudaMalloc(&envelope_min_value_d, 1 * sizeof(float));
cudaMalloc(&row_column_replicated_envelope_d, (num_depths + 1) * (num_beams + 1) * sizeof(float));
cudaMalloc(&scan_converted_envelope_d, scan_converted_num_axial_positions * scan_converted_num_lateral_positions * sizeof(float));
// Transfer the data from the host arrays to the GPU device arrays
cudaMemcpy(delays_d, delays_h, num_depths * num_elements * num_beams * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(stft_window_d, stft_window_h, stft_length * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(selected_freq_inds_d, selected_freq_inds_h, num_selected_freqs * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(negative_freq_inds_d, negative_freq_inds_h, num_corresponding_negative_freqs * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(negative_freq_include_d, negative_freq_include_h, num_selected_freqs * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(y_include_mask_d, y_include_mask_h, 2 * num_elements * num_selected_freqs * stft_num_windows * num_beams * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(num_observations_d, num_observations_h, num_fits * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(observation_thread_stride_d, observation_thread_stride_h, num_fits * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(num_predictors_d, num_predictors_h, num_fits * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(X_matrix_thread_stride_d, X_matrix_thread_stride_h, num_fits * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(X_matrix_d, X_matrix_h, total_num_X_matrix_values * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(B_thread_stride_d, B_thread_stride_h, num_fits * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dr_d, dr_h, scan_conversion_parameters_length * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dth_d, dth_h, scan_conversion_parameters_length * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(i00_d, i00_h, scan_conversion_parameters_length * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(i01_d, i01_h, scan_conversion_parameters_length * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(i10_d, i10_h, scan_conversion_parameters_length * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(i11_d, i11_h, scan_conversion_parameters_length * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(idx_d, idx_h, scan_conversion_parameters_length * sizeof(float), cudaMemcpyHostToDevice);
// Allocate the CUDA array for texture memory
channelDescFLOAT = cudaCreateChannelDesc<float>();
cudaMallocArray(&cuArray, &channelDescFLOAT, num_depths + t0 - 1, num_elements * num_beams);
// Create a cufft plan to take the fast Fourier transform of each column of each STFT window
cufftPlan1d(&FFTplan1, stft_length + stft_num_zeros, CUFFT_C2C, stft_num_windows * num_elements * num_beams);
// Create a cufft plan to take the fast Fourier transform of each column of the summed channel data
cufftPlan1d(&FFTplan2, num_depths, CUFFT_C2C, num_beams);
// Run the cleanup function when exiting the MEX interface
mexAtExit(cleanup);
// Set initialization variable to 1 because everything has been initialized
initialized = 1;
}
//// THIS SECTION DELAYS THE CHANNEL DATA ////
// Obtain the array that contains the adjustable GPU parameters
float * GPU_adjustable_params_h;
GPU_adjustable_params_h = (float*)mxGetData(prhs[20]);
alpha = GPU_adjustable_params_h[0];
tolerance = GPU_adjustable_params_h[1];
max_iterations = (int)GPU_adjustable_params_h[2];
lambda_scaling_factor = GPU_adjustable_params_h[3];
// Obtain the input data
float * data_h;
data_h = (float*)mxGetData(prhs[21]);
// Set the predictor coefficient values to 0
cudaMemset(B_d, 0, total_num_B_values * sizeof(float));
// Set the model fit flag values to 0
cudaMemset(model_fit_flag_d, 0, num_fits * sizeof(float));
// Transfer the channel data from the host array to the GPU device array
cudaMemcpy(reshaped_d, data_h, (num_depths + t0 - 1) * num_elements * num_beams * sizeof(float), cudaMemcpyHostToDevice);
// Set up texture memory for performing linear interpolation in order to delay the channel data
cudaMemcpyToArray(cuArray, 0, 0, reshaped_d, (num_depths + t0 - 1) * num_elements * num_beams * sizeof(float), cudaMemcpyDeviceToDevice);
texRef.addressMode[0] = cudaAddressModeBorder;
texRef.addressMode[1] = cudaAddressModeBorder;
texRef.filterMode = cudaFilterModeLinear;
texRef.normalized = false;
cudaBindTextureToArray(texRef, cuArray, channelDescFLOAT);
// Define the grid and block dimensions for the delay_data GPU kernel
dim3 DELAY_GRID_SIZE;
DELAY_GRID_SIZE = dim3(num_depths, num_beams, 1);
dim3 DELAY_BLOCK_SIZE;
DELAY_BLOCK_SIZE = dim3(num_elements, 1, 1);
// Call the delay_data GPU kernel in order to delay the channel data by performing linear interpolation
delay_data<<<DELAY_GRID_SIZE, DELAY_BLOCK_SIZE>>>(delayed_data_d, delays_d, t0, num_depths, num_elements);
//// END OF DELAY SECTION ////
//// THIS SECTION CALCULATES THE SHORT-TIME FOURIER TRANSFORM OF THE DELAYED CHANNEL DATA ////
// Use one STFT window per set if the number of STFT windows is less than max_windows_per_set; otherwise, use max_windows_per_set windows per set
int num_windows_per_set;
if (stft_num_windows < max_windows_per_set) {
num_windows_per_set = 1;
} else {
num_windows_per_set = max_windows_per_set;
}
// Calculate the number of STFT window groupings for the STFT calculation
int num_sets = (int)(ceilf((float)stft_num_windows / (float)num_windows_per_set));
// Calculate the number of STFT windows in the last grouping
int num_windows_per_set_last = stft_num_windows - (num_windows_per_set * (num_sets - 1));
// Obtain the index that corresponds to the last STFT window grouping set
int last_set_ind = num_sets - 1;
// Calculate the zero-padded STFT window length
int stft_padded_length = stft_length + stft_num_zeros;
// Define the grid and block dimensions for the stft_preparation GPU kernel
dim3 STFT_PREPARATION_GRID_SIZE;
STFT_PREPARATION_GRID_SIZE = dim3(num_sets, num_elements, num_beams);
dim3 STFT_PREPARATION_BLOCK_SIZE;
STFT_PREPARATION_BLOCK_SIZE = dim3(stft_padded_length, num_windows_per_set, 1);
// Call the stft_preparation GPU kernel in order to arrange the data for all of the STFT windows and to apply the STFT windowing function coefficients
stft_preparation<<<STFT_PREPARATION_GRID_SIZE, STFT_PREPARATION_BLOCK_SIZE, stft_length * sizeof(float)>>>(stft_d, delayed_data_d, stft_window_d, stft_num_zeros, stft_num_windows, stft_window_shift, stft_length, num_windows_per_set, num_windows_per_set_last, last_set_ind, num_depths, num_elements, start_depth_offset);
// Calculate the short-time Fourier transform of the data by taking the fast Fourier transform of each column within each STFT window
cufftExecC2C(FFTplan1, stft_d, stft_d, CUFFT_FORWARD);
//// END OF SHORT TIME FOURIER TRANSFORM SECTION ////
//// THIS SECTION OBTAINS THE FREQUENCY DATA THAT CORRESPONDS TO THE SELECTED FREQUENCIES AND PROCESSES IT USING ADMIRE ////
// Define the grid and block dimensions for the frequency_selection GPU kernel
dim3 FREQUENCY_SELECTION_GRID_SIZE;
FREQUENCY_SELECTION_GRID_SIZE = dim3(num_selected_freqs, stft_num_windows, num_beams);
dim3 FREQUENCY_SELECTION_BLOCK_SIZE;
FREQUENCY_SELECTION_BLOCK_SIZE = dim3(num_elements, 1, 1);
// Call the frequency_selection GPU kernel in order to obtain the frequency data that corresponds to the selected frequencies for ADMIRE
frequency_selection<<<FREQUENCY_SELECTION_GRID_SIZE, FREQUENCY_SELECTION_BLOCK_SIZE>>>(y_d, selected_freq_inds_d, stft_d, stft_length, stft_num_zeros, stft_num_windows, num_selected_freqs, num_elements);
// Define the number of model fits to perform within one GPU block
int num_threads_per_block = 32;
// Set num_threads_per_block to num_fits if the total number of model fits is less than the number of model fits per GPU block
if (num_fits < num_threads_per_block) {
num_threads_per_block = num_fits;
}
// Calculate the number of GPU blocks that are required to perform all of the model fits
int num_blocks = (int)ceilf((float)num_fits / (float)num_threads_per_block);
// Calculate the number of model fits that are performed within the last GPU block
int num_threads_last_block = num_fits - ((num_blocks - 1) * num_threads_per_block);
// Define the grid and block dimensions for the model_fit_preparation GPU kernel
dim3 MODEL_FIT_PREPARATION_GRID_SIZE;
MODEL_FIT_PREPARATION_GRID_SIZE = dim3(num_blocks, 1, 1);
dim3 MODEL_FIT_PREPARATION_BLOCK_SIZE;
MODEL_FIT_PREPARATION_BLOCK_SIZE = dim3(num_threads_per_block, 1, 1);
model_fit_preparation<<<MODEL_FIT_PREPARATION_GRID_SIZE, MODEL_FIT_PREPARATION_BLOCK_SIZE, num_threads_per_block * 2 * num_elements * sizeof(float)>>>(cropped_y_d, model_fit_flag_d, y_d, residual_y_d, y_include_mask_d, start_ind_d, y_std_d, standardized_lambda_d, num_observations_d, observation_thread_stride_d, lambda_scaling_factor, num_elements, num_threads_per_block, num_blocks, num_threads_last_block);
// Calculate the number of blocks that are required to perform the model fits for all of the beams for one frequency and one STFT window depth range
int num_beam_blocks = (int)ceilf((float)num_beams / (float)num_threads_per_block);
// Define the number of blocks that correspond to the selected frequencies
int num_freq_blocks = num_selected_freqs;
// Define the number of blocks that correspond to the STFT windows
int num_window_blocks = stft_num_windows;
// Define the grid and block dimensions for the model_fit_reconstruction GPU kernel
dim3 MODEL_FIT_RECONSTRUCTION_GRID_SIZE;
MODEL_FIT_RECONSTRUCTION_GRID_SIZE = dim3(num_beam_blocks, num_window_blocks, num_freq_blocks);
dim3 MODEL_FIT_RECONSTRUCTION_BLOCK_SIZE;
MODEL_FIT_RECONSTRUCTION_BLOCK_SIZE = dim3(num_threads_per_block, 1, 1);
// Call the model_fit_reconstruction GPU kernel in order to fit the ADMIRE models to the frequency data and calculate the reconstructed frequency data
model_fit_reconstruction<<<MODEL_FIT_RECONSTRUCTION_GRID_SIZE, MODEL_FIT_RECONSTRUCTION_BLOCK_SIZE, num_threads_per_block * 2 * num_elements * sizeof(float)>>>(B_d, B_thread_stride_d, X_matrix_d, X_matrix_thread_stride_d, cropped_y_d, model_fit_flag_d, observation_thread_stride_d, residual_y_d, y_std_d, standardized_lambda_d, num_observations_d, num_predictors_d, alpha, tolerance, max_iterations, num_elements, num_threads_per_block, num_beams, num_selected_freqs, stft_num_windows);
//// END OF DATA SELECTION AND MODEL FIT SECTION ////
//// THIS SECTION CALCULATES THE INVERSE SHORT-TIME FOURIER TRANSFORM OF THE RECONSTRUCTED SHORT-TIME FOURIER TRANSFORM DATA ////
// Set the stft_d array values to 0 (this is to zero out all of the frequencies that were not reconstructed with ADMIRE)
cudaMemset(stft_d, 0, stft_num_windows * num_elements * num_beams * (stft_length + stft_num_zeros) * sizeof(cufftComplex));
// Define the grid and block dimensions for the inverse_stft_preparation GPU kernel
dim3 ISTFT_PREPARATION_GRID_SIZE;
ISTFT_PREPARATION_GRID_SIZE = dim3(num_selected_freqs, stft_num_windows, num_beams);
dim3 ISTFT_PREPARATION_BLOCK_SIZE;
ISTFT_PREPARATION_BLOCK_SIZE = dim3(num_elements, 1, 1);
// Call the inverse_stft_preparation GPU kernel in order to place the reconstructed STFT data back into the stft_d array
inverse_stft_preparation<<<ISTFT_PREPARATION_GRID_SIZE, ISTFT_PREPARATION_BLOCK_SIZE>>>(cropped_y_d, selected_freq_inds_d, negative_freq_inds_d, negative_freq_include_d, stft_d, observation_thread_stride_d, y_include_mask_d, start_ind_d, num_observations_d, stft_length, stft_num_zeros, stft_num_windows, num_selected_freqs, num_elements);
// Calculates the inverse short-time Fourier transform (the window overlap is assumed to be 0 for GPU execution, so the inverse fast fourier transform along each column of each STFT window just needs to be calculated in order to obtain the inverse short-time Fourier transform)
cufftExecC2C(FFTplan1, stft_d, stft_d, CUFFT_INVERSE);
//// END OF INVERSE SHORT-TIME FOURIER TRANSFORM SECTION ////
//// THIS SECTION REMOVES THE ZERO-PADDING THAT WAS ADDED FOR THE SHORT-TIME FOURIER TRANSFORM ////
// Define the grid and block dimensions for the stft_data_array_to_delayed_data_array GPU kernel
dim3 TRANSFER_GRID_SIZE;
TRANSFER_GRID_SIZE = dim3(stft_length, stft_num_windows, num_beams);
dim3 TRANSFER_BLOCK_SIZE;
TRANSFER_BLOCK_SIZE = dim3(num_elements, 1, 1);
// Call the stft_data_array_to_delayed_data_array GPU kernel in order to remove the STFT zero-padding and to store the reconstructed channel data back into the delayed_data_d array
stft_data_array_to_delayed_data_array<<<TRANSFER_GRID_SIZE, TRANSFER_BLOCK_SIZE>>>(delayed_data_d, stft_d, stft_num_windows, stft_length, stft_num_zeros, num_depths, num_elements, start_depth_offset);
//// END OF ZERO-PADDING REMOVAL SECTION ////
//// THIS SECTION SUMS THE CHANNEL DATA AND CALCULATES THE ENVELOPE DATA ////
// Define the grid and block dimensions for the sum_channel_data GPU kernel
dim3 SUM_GRID_SIZE;
SUM_GRID_SIZE = dim3(num_depths, 1, 1);
dim3 SUM_BLOCK_SIZE;
SUM_BLOCK_SIZE = dim3(num_beams, 1, 1);
// Define the grid and block dimensions for the optimized summing GPU kernels
dim3 SUM_OPTIMIZED_GRID_SIZE;
SUM_OPTIMIZED_GRID_SIZE = dim3(num_depths, num_beams, 1);
dim3 SUM_OPTIMIZED_BLOCK_SIZE;
SUM_OPTIMIZED_BLOCK_SIZE = dim3(num_elements, 1);
// Define a variable that stores the number of elements as an unsigned integer
unsigned int num_elements_uint = (unsigned int)num_elements;
// Define a variable that stores the number of elements minus one as an unsigned integer
unsigned int num_elements_minus_one_uint = (unsigned int)(num_elements - 1);
// Determine if the number of elements is a power of two
bool case_1 = num_elements_uint && !(num_elements_uint & (num_elements_uint - ((unsigned int)1)));
// Determine if the number of elements minus one is a power two
bool case_2 = num_elements_minus_one_uint && !(num_elements_minus_one_uint & (num_elements_minus_one_uint - ((unsigned int)1)));
// Sum the delayed channel data (the conditional statements determine which summing GPU kernel to use)
if (case_1 || case_2) {
if (num_elements % 2 == 0) {
// Call the sum_channel_data_optimized_even GPU kernel in order to sum the delayed channel data if the number of elements is a power of two and an even number
sum_channel_data_optimized_even<<<SUM_OPTIMIZED_GRID_SIZE, SUM_OPTIMIZED_BLOCK_SIZE, num_elements * sizeof(float)>>>(summed_data_d, delayed_data_d, num_depths, num_elements);
} else {
// Call the sum_channel_data_optimized_odd GPU kernel in order to sum the delayed channel data if the number of elements minus one is a power of two and if the number of elements is odd
sum_channel_data_optimized_odd<<<SUM_OPTIMIZED_GRID_SIZE, SUM_OPTIMIZED_BLOCK_SIZE, num_elements * sizeof(float)>>>(summed_data_d, delayed_data_d, num_depths, num_elements);
}
} else {
// Call the sum_channel_data GPU kernel in order to sum the delayed channel data for all other cases (this kernel does not use the optimized summing algorithm that is used by the other two kernels)
sum_channel_data<<<SUM_GRID_SIZE, SUM_BLOCK_SIZE>>>(summed_data_d, delayed_data_d, num_depths, num_elements);
}
// Calculate the fast Fourier transform for each column of the summed channel data that has been beamformed
cufftExecC2C(FFTplan2, summed_data_d, summed_data_d, CUFFT_FORWARD);
// Define the grid and block sizes for the hilbert_weighting GPU kernel
dim3 HILBERT_WEIGHTING_GRID_SIZE;
HILBERT_WEIGHTING_GRID_SIZE = dim3(num_depths, 1, 1);
dim3 HILBERT_WEIGHTING_BLOCK_SIZE;
HILBERT_WEIGHTING_BLOCK_SIZE = dim3(num_beams, 1, 1);
// Call the hilbert_weighting GPU kernel in order to apply weighting to the data according to the Hilbert Transform algorithm that MATLAB uses
hilbert_weighting<<<HILBERT_WEIGHTING_GRID_SIZE, HILBERT_WEIGHTING_BLOCK_SIZE>>>(summed_data_d, num_depths);
// Calculate the inverse fast Fourier transform of each column of the weighted data
cufftExecC2C(FFTplan2, summed_data_d, summed_data_d, CUFFT_INVERSE);
// Define the grid and block sizes for the envelope GPU kernel
dim3 ENVELOPE_GRID_SIZE;
ENVELOPE_GRID_SIZE = dim3(num_depths, 1, 1);
dim3 ENVELOPE_BLOCK_SIZE;
ENVELOPE_BLOCK_SIZE = dim3(num_beams, 1, 1);
// Call the envelope GPU kernel in order to obtain the envelope data
envelope<<<ENVELOPE_GRID_SIZE, ENVELOPE_BLOCK_SIZE>>>(envelope_d, summed_data_d, num_depths);
//// END OF SUM AND ENVELOPE CALCULATION SECTION ////
//// THIS SECTION NORMALIZES AND LOG COMPRESSES THE ENVELOPE DATA ////
// Define the grid and block dimensions for the max_envelope_value GPU kernel
dim3 MAX_ENVELOPE_VALUE_GRID_SIZE;
MAX_ENVELOPE_VALUE_GRID_SIZE = dim3(1, 1, 1);
dim3 MAX_ENVELOPE_VALUE_BLOCK_SIZE;
MAX_ENVELOPE_VALUE_BLOCK_SIZE = dim3(num_beams, 1, 1);
// Call the maximum_envelope_value GPU kernel in order to obtain the maximum value of the envelope data
maximum_envelope_value<<<MAX_ENVELOPE_VALUE_GRID_SIZE, MAX_ENVELOPE_VALUE_BLOCK_SIZE, num_beams * sizeof(float)>>>(envelope_max_value_d, envelope_d, num_depths, num_beams);
// Define the grid and block sizes for the envelope_normalization_and_log_compression GPU kernel
dim3 NORMALIZE_LOG_COMPRESS_GRID_SIZE;
NORMALIZE_LOG_COMPRESS_GRID_SIZE = dim3(num_depths, 1, 1);
dim3 NORMALIZE_LOG_COMPRESS_BLOCK_SIZE;
NORMALIZE_LOG_COMPRESS_BLOCK_SIZE = dim3(num_beams, 1, 1);
// Call the envelope_normalization_and_log_compression GPU kernel in order to normalize and apply log compression to the envelope data
envelope_normalization_and_log_compression<<<NORMALIZE_LOG_COMPRESS_GRID_SIZE, NORMALIZE_LOG_COMPRESS_BLOCK_SIZE>>>(normalized_log_compressed_envelope_d, envelope_d, envelope_max_value_d, num_depths);
//// END OF ENVELOPE NORMALIZATION AND LOG COMPRESSION SECTION ////
//// THIS SECTION SCAN CONVERTS THE NORMALIZED AND LOG-COMPRESSED ENVELOPE ////
// Define the grid and block sizes for the minimum_envelope_value GPU kernel
dim3 MIN_ENVELOPE_VALUE_GRID_SIZE;
MIN_ENVELOPE_VALUE_GRID_SIZE = dim3(1, 1, 1);
dim3 MIN_ENVELOPE_VALUE_BLOCK_SIZE;
MIN_ENVELOPE_VALUE_BLOCK_SIZE = dim3(num_beams, 1, 1);
// Call the minimum_envelope_value GPU kernel in order to obtain the minimum value of the normalized and log-compressed envelope data
minimum_envelope_value<<<MIN_ENVELOPE_VALUE_GRID_SIZE, MIN_ENVELOPE_VALUE_BLOCK_SIZE, num_beams * sizeof(float)>>>(envelope_min_value_d, normalized_log_compressed_envelope_d, num_depths, num_beams);
// Define the grid and block sizes of the row_column_replicate GPU kernel
dim3 REPLICATION_GRID_SIZE;
REPLICATION_GRID_SIZE = dim3(num_depths + 1, 1, 1);
dim3 REPLICATION_BLOCK_SIZE;
REPLICATION_BLOCK_SIZE = dim3(num_beams + 1, 1, 1);
// Call the row_column_replicate GPU kernel in order to replicate the last row and the last column of the normalized and log-compressed envelope data
row_column_replicate<<<REPLICATION_GRID_SIZE, REPLICATION_BLOCK_SIZE>>>(row_column_replicated_envelope_d, normalized_log_compressed_envelope_d, num_depths, num_beams);
// Calculate the total number of pixels that are in the scan-converted image
int total_num_pixels = scan_converted_num_axial_positions * scan_converted_num_lateral_positions;
// Define the number of threads to use within one block
int num_threads_per_block_scan_convert = 512;
// Set the number of threads per block to the total number of pixels in the scan-converted image if the total number of pixels is less than the number of threads per block
if (total_num_pixels < num_threads_per_block_scan_convert) {
num_threads_per_block_scan_convert = total_num_pixels;
}
// Calculate the number of blocks that are required to perform the initialization of the scan-converted image
int num_blocks_scan_convert = ceilf((float)total_num_pixels / (float)num_threads_per_block_scan_convert);
// Calculate the number of threads that are used within the last block
int num_threads_last_block_scan_convert = total_num_pixels - ((num_blocks_scan_convert - 1) * num_threads_per_block_scan_convert);
// Define the grid and block dimensions for the scan_converted_envelope_initialization GPU kernel
dim3 SCAN_CONVERT_INIT_GRID_SIZE;
SCAN_CONVERT_INIT_GRID_SIZE = dim3(num_blocks_scan_convert, 1, 1);
dim3 SCAN_CONVERT_INIT_BLOCK_SIZE;
SCAN_CONVERT_INIT_BLOCK_SIZE = dim3(num_threads_per_block_scan_convert, 1, 1);
// Call the scan_converted_envelope_initialization GPU kernel in order to initialize every pixel of the scan-converted envelope to the minimum value of the normalized and log-compressed envelope data
scan_converted_envelope_initialization<<<SCAN_CONVERT_INIT_GRID_SIZE, SCAN_CONVERT_INIT_BLOCK_SIZE>>>(scan_converted_envelope_d, envelope_min_value_d, num_threads_per_block_scan_convert, num_threads_last_block_scan_convert, num_blocks_scan_convert);
// Define the number of threads to use within one block
int num_threads_per_block_scan_convert_2 = 512;
// Set the number of threads per block to the scan_conversion_parameters_length if scan_conversion_parameters_length is less than the number of threads per block
if (scan_conversion_parameters_length < num_threads_per_block_scan_convert_2) {
num_threads_per_block_scan_convert_2 = scan_conversion_parameters_length;
}
// Calculate the number of blocks that are required to perform scan conversion
int num_blocks_scan_convert_2 = ceilf((float)scan_conversion_parameters_length / (float)num_threads_per_block_scan_convert_2);
// Calculate the number of threads that are used within the last block
int num_threads_last_block_scan_convert_2 = scan_conversion_parameters_length - ((num_blocks_scan_convert_2 - 1) * num_threads_per_block_scan_convert_2);
// Define the grid and block dimensions for the scan_conversion GPU kernel
dim3 SCAN_CONVERSION_GRID_SIZE;
SCAN_CONVERSION_GRID_SIZE = dim3(num_blocks_scan_convert_2, 1, 1);
dim3 SCAN_CONVERSION_BLOCK_SIZE;
SCAN_CONVERSION_BLOCK_SIZE = dim3(num_threads_per_block_scan_convert_2, 1, 1);
// Call the scan_conversion GPU kernel in order to perform scan conversion of the normalized and log-compressed envelope data
scan_conversion<<<SCAN_CONVERSION_GRID_SIZE, SCAN_CONVERSION_BLOCK_SIZE>>>(scan_converted_envelope_d, row_column_replicated_envelope_d, dr_d, dth_d, idx_d, i00_d, i01_d, i10_d, i11_d, num_threads_per_block_scan_convert_2, num_threads_last_block_scan_convert_2, num_blocks_scan_convert_2);
//// END OF SCAN CONVERSION SECTION ////
//// THIS SECTION OBTAINS THE MEX-FILE OUTPUTS AND UNBINDS THE TEXTURE MEMORY ////
// Declare the pointers to the MEX-file outputs
float * scan_converted_envelope_h;
float * delayed_data_h;
// Allocate the scan_converted_envelope_h array (this is the output array for the scan-converted envelope data)
plhs[0] = mxCreateNumericMatrix(scan_converted_num_axial_positions, scan_converted_num_lateral_positions, mxSINGLE_CLASS, mxREAL);
scan_converted_envelope_h = (float*)mxGetData(plhs[0]);
// Allocate the delayed_data_d array if the channel_data_output_flag parameter is set to 1 (this outputs the reconstructed channel data as a column vector that can be reshaped in MATLAB)
if (channel_data_output_flag == 1) {
plhs[1] = mxCreateNumericMatrix(num_depths * num_elements * num_beams, 1, mxSINGLE_CLASS, mxREAL);
delayed_data_h = (float*)mxGetData(plhs[1]);
}
// Transfer the scan-converted envelope data
cudaMemcpy(scan_converted_envelope_h, scan_converted_envelope_d, scan_converted_num_axial_positions * scan_converted_num_lateral_positions * sizeof(float), cudaMemcpyDeviceToHost);
// Transfer the reconstructed channel data if the channel_data_output_flag parameter is set to 1
if (channel_data_output_flag == 1) {
cudaMemcpy(delayed_data_h, delayed_data_d, num_depths * num_elements * num_beams * sizeof(float), cudaMemcpyDeviceToHost);
}
// Unbind the texture memory
cudaUnbindTexture(texRef);
//// END OF OUTPUT AND UNBIND SECTION ////
}
|
9ced2b1f71f3289d1bbdd309f270cad7e2fb1339.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2019-2020 by Contributors
* \file survival_metric.cu
* \brief Metrics for survival analysis
* \author Avinash Barnwal, Hyunsu Cho and Toby Hocking
*/
#include <rabit/rabit.h>
#include <dmlc/registry.h>
#include <memory>
#include <vector>
#include "xgboost/json.h"
#include "xgboost/metric.h"
#include "xgboost/host_device_vector.h"
#include "metric_common.h"
#include "../common/math.h"
#include "../common/survival_util.h"
#if defined(XGBOOST_USE_CUDA)
#include <thrust/execution_policy.h> // thrust::hip::par
#include "../common/device_helpers.cuh"
#endif // XGBOOST_USE_CUDA
using AFTParam = xgboost::common::AFTParam;
using ProbabilityDistributionType = xgboost::common::ProbabilityDistributionType;
template <typename Distribution>
using AFTLoss = xgboost::common::AFTLoss<Distribution>;
namespace xgboost {
namespace metric {
// tag the this file, used by force static link later.
DMLC_REGISTRY_FILE_TAG(survival_metric);
// Reduces an element-wise survival metric over all rows, on either the CPU
// (OpenMP) or the GPU (thrust), producing the pair (weighted residue sum,
// weight sum) packed into a PackedReduceResult.
template <typename EvalRow>
class ElementWiseSurvivalMetricsReduction {
public:
ElementWiseSurvivalMetricsReduction() = default;
// Stores the per-row evaluation policy used by both reduction paths.
void Configure(EvalRow policy) {
policy_ = policy;
}
// CPU path: accumulates policy_.EvalRow(lower, upper, pred) * weight over
// all rows with an OpenMP sum reduction. An empty weight vector means
// every row has unit weight.
PackedReduceResult CpuReduceMetrics(
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels_lower_bound,
const HostDeviceVector<bst_float>& labels_upper_bound,
const HostDeviceVector<bst_float>& preds) const {
size_t ndata = labels_lower_bound.Size();
// Lower and upper label bounds must pair up one-to-one.
CHECK_EQ(ndata, labels_upper_bound.Size());
const auto& h_labels_lower_bound = labels_lower_bound.HostVector();
const auto& h_labels_upper_bound = labels_upper_bound.HostVector();
const auto& h_weights = weights.HostVector();
const auto& h_preds = preds.HostVector();
double residue_sum = 0;
double weights_sum = 0;
#pragma omp parallel for reduction(+: residue_sum, weights_sum) schedule(static)
for (omp_ulong i = 0; i < ndata; ++i) {
// Missing weights default to 1.0 per row.
const double wt = h_weights.empty() ? 1.0 : static_cast<double>(h_weights[i]);
residue_sum += policy_.EvalRow(
static_cast<double>(h_labels_lower_bound[i]),
static_cast<double>(h_labels_upper_bound[i]),
static_cast<double>(h_preds[i])) * wt;
weights_sum += wt;
}
PackedReduceResult res{residue_sum, weights_sum};
return res;
}
#if defined(XGBOOST_USE_CUDA)
// GPU path: same accumulation as CpuReduceMetrics, expressed as a
// thrust::transform_reduce over the row indices [0, ndata) with a device
// lambda. Inputs must already reside on the active device (see Reduce()).
PackedReduceResult DeviceReduceMetrics(
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels_lower_bound,
const HostDeviceVector<bst_float>& labels_upper_bound,
const HostDeviceVector<bst_float>& preds) {
size_t ndata = labels_lower_bound.Size();
CHECK_EQ(ndata, labels_upper_bound.Size());
thrust::counting_iterator<size_t> begin(0);
thrust::counting_iterator<size_t> end = begin + ndata;
auto s_label_lower_bound = labels_lower_bound.DeviceSpan();
auto s_label_upper_bound = labels_upper_bound.DeviceSpan();
auto s_preds = preds.DeviceSpan();
auto s_weights = weights.DeviceSpan();
const bool is_null_weight = (weights.Size() == 0);
// Local copy so the [=] device lambda captures the policy by value.
auto d_policy = policy_;
// Caching allocator avoids repeated device allocations for thrust temporaries.
dh::XGBCachingDeviceAllocator<char> alloc;
PackedReduceResult result = thrust::transform_reduce(
thrust::hip::par(alloc),
begin, end,
[=] XGBOOST_DEVICE(size_t idx) {
double weight = is_null_weight ? 1.0 : static_cast<double>(s_weights[idx]);
double residue = d_policy.EvalRow(
static_cast<double>(s_label_lower_bound[idx]),
static_cast<double>(s_label_upper_bound[idx]),
static_cast<double>(s_preds[idx]));
residue *= weight;
return PackedReduceResult{residue, weight};
},
PackedReduceResult(),
thrust::plus<PackedReduceResult>());
return result;
}
#endif  // XGBOOST_USE_CUDA
// Dispatches to the CPU or GPU reduction depending on `device`
// (negative => CPU). The GPU path first moves all inputs to `device`.
PackedReduceResult Reduce(
int device,
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels_lower_bound,
const HostDeviceVector<bst_float>& labels_upper_bound,
const HostDeviceVector<bst_float>& preds) {
PackedReduceResult result;
if (device < 0) {
result = CpuReduceMetrics(weights, labels_lower_bound, labels_upper_bound, preds);
}
#if defined(XGBOOST_USE_CUDA)
else {  // NOLINT
device_ = device;
preds.SetDevice(device_);
labels_lower_bound.SetDevice(device_);
labels_upper_bound.SetDevice(device_);
weights.SetDevice(device_);
dh::safe_cuda(hipSetDevice(device_));
result = DeviceReduceMetrics(weights, labels_lower_bound, labels_upper_bound, preds);
}
#endif  // defined(XGBOOST_USE_CUDA)
return result;
}
private:
EvalRow policy_;
#if defined(XGBOOST_USE_CUDA)
int device_{-1};
#endif  // defined(XGBOOST_USE_CUDA)
};
/*! \brief Fraction of predictions that land inside the label interval. */
struct EvalIntervalRegressionAccuracy {
// This metric has no tunable parameters.
void Configure(const Args& args) {}
// Registry / display name of the metric.
const char* Name() const {
return "interval-regression-accuracy";
}
// Scores one example: 1.0 when exp(log_pred) lies within
// [label_lower_bound, label_upper_bound], otherwise 0.0.
XGBOOST_DEVICE double EvalRow(
double label_lower_bound, double label_upper_bound, double log_pred) const {
const double pred = exp(log_pred);
const bool inside = (label_lower_bound <= pred) && (pred <= label_upper_bound);
return inside ? 1.0 : 0.0;
}
// Weighted mean of the per-row scores; when the total weight is zero the
// raw sum is returned unchanged.
static double GetFinal(double esum, double wsum) {
if (wsum == 0) {
return esum;
}
return esum / wsum;
}
};
/*! \brief Negative log likelihood of the Accelerated Failure Time model,
 *  specialized on the assumed error distribution. */
template <typename Distribution>
struct EvalAFTNLogLik {
// Picks up the AFT loss parameters (distribution scale etc.) from args.
void Configure(const Args& args) {
param_.UpdateAllowUnknown(args);
}
// Registry / display name of the metric.
const char* Name() const {
return "aft-nloglik";
}
// Per-row negative log likelihood under the configured distribution scale.
XGBOOST_DEVICE double EvalRow(
double label_lower_bound, double label_upper_bound, double pred) const {
const double scale = param_.aft_loss_distribution_scale;
return AFTLoss<Distribution>::Loss(label_lower_bound, label_upper_bound, pred, scale);
}
// Weighted mean of the per-row losses; when the total weight is zero the
// raw sum is returned unchanged.
static double GetFinal(double esum, double wsum) {
if (wsum == 0) {
return esum;
}
return esum / wsum;
}
private:
AFTParam param_;
};
/*! \brief Element-wise survival metric: pairs a per-row Policy with the
 *  CPU/GPU reduction helper and distributed (rabit) aggregation. */
template<typename Policy>
struct EvalEWiseSurvivalBase : public Metric {
EvalEWiseSurvivalBase() = default;
// Forwards configuration to the policy, extracts an optional "gpu_id"
// argument to choose the reduction device, and hands the configured
// policy to the reducer.
void Configure(const Args& args) override {
policy_.Configure(args);
for (const auto& kv : args) {
if (kv.first == "gpu_id") {
device_ = dmlc::ParseSignedInt<int>(kv.second.c_str(), nullptr, 10);
}
}
reducer_.Configure(policy_);
}
// Evaluates the metric over all rows. When `distributed` is true the
// residue/weight sums are all-reduced across workers before finalizing.
bst_float Eval(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
bool distributed) override {
CHECK_NE(info.labels_lower_bound_.Size(), 0U)
<< "labels_lower_bound cannot be empty";
CHECK_NE(info.labels_upper_bound_.Size(), 0U)
<< "labels_upper_bound cannot be empty";
CHECK_EQ(preds.Size(), info.labels_lower_bound_.Size());
CHECK_EQ(preds.Size(), info.labels_upper_bound_.Size());
auto reduced = reducer_.Reduce(
device_, info.weights_, info.labels_lower_bound_, info.labels_upper_bound_, preds);
double sums[2] {reduced.Residue(), reduced.Weights()};
if (distributed) {
rabit::Allreduce<rabit::op::Sum>(sums, 2);
}
return static_cast<bst_float>(Policy::GetFinal(sums[0], sums[1]));
}
const char* Name() const override {
return policy_.Name();
}
private:
Policy policy_;
ElementWiseSurvivalMetricsReduction<Policy> reducer_;
int device_{-1};  // used only for GPU metric
};
// This class exists because we want to perform dispatch according to the distribution type at
// configuration time, not at prediction time.
struct AFTNLogLikDispatcher : public Metric {
const char* Name() const override {
return "aft-nloglik";
}
// Delegates evaluation to the distribution-specialized inner metric.
// Configure() must have been called first to create it.
bst_float Eval(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
bool distributed) override {
CHECK(metric_) << "AFT metric must be configured first, with distribution type and scale";
return metric_->Eval(preds, info, distributed);
}
// Instantiates the inner metric specialized for the configured probability
// distribution, then forwards the (augmented) configuration to it.
void Configure(const Args& args) override {
param_.UpdateAllowUnknown(args);
switch (param_.aft_loss_distribution) {
case common::ProbabilityDistributionType::kNormal:
metric_.reset(new EvalEWiseSurvivalBase<EvalAFTNLogLik<common::NormalDistribution>>());
break;
case common::ProbabilityDistributionType::kLogistic:
metric_.reset(new EvalEWiseSurvivalBase<EvalAFTNLogLik<common::LogisticDistribution>>());
break;
case common::ProbabilityDistributionType::kExtreme:
metric_.reset(new EvalEWiseSurvivalBase<EvalAFTNLogLik<common::ExtremeDistribution>>());
break;
default:
LOG(FATAL) << "Unknown probability distribution";
}
Args new_args{args};
// tparam_ doesn't get propagated to the inner metric object because we didn't use
// Metric::Create(). I don't think it's a good idea to pollute the metric registry with
// specialized versions of the AFT metric, so as a work-around, manually pass the GPU ID
// into the inner metric via configuration.
new_args.emplace_back("gpu_id", std::to_string(tparam_->gpu_id));
metric_->Configure(new_args);
}
// Serializes the metric name and AFT loss parameters to JSON.
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String(this->Name());
out["aft_loss_param"] = ToJson(param_);
}
// Restores the AFT loss parameters saved by SaveConfig().
// (Fixed: the address-of expression `&param_` had been corrupted into a
// mojibake character, which does not compile.)
void LoadConfig(const Json& in) override {
FromJson(in["aft_loss_param"], &param_);
}
private:
AFTParam param_;
std::unique_ptr<Metric> metric_;
};
// Register the AFT negative log likelihood metric; dispatch on distribution
// type happens inside AFTNLogLikDispatcher::Configure().
XGBOOST_REGISTER_METRIC(AFTNLogLik, "aft-nloglik")
.describe("Negative log likelihood of Accelerated Failure Time model.")
.set_body([](const char* param) {
return new AFTNLogLikDispatcher();
});
// Register the interval regression accuracy metric. The description was
// previously left empty; filled in for registry/help output.
XGBOOST_REGISTER_METRIC(IntervalRegressionAccuracy, "interval-regression-accuracy")
.describe("Fraction of predictions that fall inside the label interval.")
.set_body([](const char* param) {
return new EvalEWiseSurvivalBase<EvalIntervalRegressionAccuracy>();
});
} // namespace metric
} // namespace xgboost
| 9ced2b1f71f3289d1bbdd309f270cad7e2fb1339.cu | /*!
* Copyright 2019-2020 by Contributors
* \file survival_metric.cu
* \brief Metrics for survival analysis
* \author Avinash Barnwal, Hyunsu Cho and Toby Hocking
*/
#include <rabit/rabit.h>
#include <dmlc/registry.h>
#include <memory>
#include <vector>
#include "xgboost/json.h"
#include "xgboost/metric.h"
#include "xgboost/host_device_vector.h"
#include "metric_common.h"
#include "../common/math.h"
#include "../common/survival_util.h"
#if defined(XGBOOST_USE_CUDA)
#include <thrust/execution_policy.h> // thrust::cuda::par
#include "../common/device_helpers.cuh"
#endif // XGBOOST_USE_CUDA
using AFTParam = xgboost::common::AFTParam;
using ProbabilityDistributionType = xgboost::common::ProbabilityDistributionType;
template <typename Distribution>
using AFTLoss = xgboost::common::AFTLoss<Distribution>;
namespace xgboost {
namespace metric {
// tag the this file, used by force static link later.
DMLC_REGISTRY_FILE_TAG(survival_metric);
template <typename EvalRow>
class ElementWiseSurvivalMetricsReduction {
public:
ElementWiseSurvivalMetricsReduction() = default;
// Stores the per-row evaluation policy used by both reduction paths.
void Configure(EvalRow policy) {
policy_ = policy;
}
// CPU path: accumulates policy_.EvalRow(lower, upper, pred) * weight over
// all rows with an OpenMP sum reduction, returning (residue sum, weight sum).
PackedReduceResult CpuReduceMetrics(
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels_lower_bound,
const HostDeviceVector<bst_float>& labels_upper_bound,
const HostDeviceVector<bst_float>& preds) const {
size_t ndata = labels_lower_bound.Size();
// Lower and upper label bounds must pair up one-to-one.
CHECK_EQ(ndata, labels_upper_bound.Size());
const auto& h_labels_lower_bound = labels_lower_bound.HostVector();
const auto& h_labels_upper_bound = labels_upper_bound.HostVector();
const auto& h_weights = weights.HostVector();
const auto& h_preds = preds.HostVector();
double residue_sum = 0;
double weights_sum = 0;
#pragma omp parallel for reduction(+: residue_sum, weights_sum) schedule(static)
for (omp_ulong i = 0; i < ndata; ++i) {
// An empty weight vector means every row has unit weight.
const double wt = h_weights.empty() ? 1.0 : static_cast<double>(h_weights[i]);
residue_sum += policy_.EvalRow(
static_cast<double>(h_labels_lower_bound[i]),
static_cast<double>(h_labels_upper_bound[i]),
static_cast<double>(h_preds[i])) * wt;
weights_sum += wt;
}
PackedReduceResult res{residue_sum, weights_sum};
return res;
}
#if defined(XGBOOST_USE_CUDA)
PackedReduceResult DeviceReduceMetrics(
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels_lower_bound,
const HostDeviceVector<bst_float>& labels_upper_bound,
const HostDeviceVector<bst_float>& preds) {
size_t ndata = labels_lower_bound.Size();
CHECK_EQ(ndata, labels_upper_bound.Size());
thrust::counting_iterator<size_t> begin(0);
thrust::counting_iterator<size_t> end = begin + ndata;
auto s_label_lower_bound = labels_lower_bound.DeviceSpan();
auto s_label_upper_bound = labels_upper_bound.DeviceSpan();
auto s_preds = preds.DeviceSpan();
auto s_weights = weights.DeviceSpan();
const bool is_null_weight = (weights.Size() == 0);
auto d_policy = policy_;
dh::XGBCachingDeviceAllocator<char> alloc;
PackedReduceResult result = thrust::transform_reduce(
thrust::cuda::par(alloc),
begin, end,
[=] XGBOOST_DEVICE(size_t idx) {
double weight = is_null_weight ? 1.0 : static_cast<double>(s_weights[idx]);
double residue = d_policy.EvalRow(
static_cast<double>(s_label_lower_bound[idx]),
static_cast<double>(s_label_upper_bound[idx]),
static_cast<double>(s_preds[idx]));
residue *= weight;
return PackedReduceResult{residue, weight};
},
PackedReduceResult(),
thrust::plus<PackedReduceResult>());
return result;
}
#endif // XGBOOST_USE_CUDA
PackedReduceResult Reduce(
int device,
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels_lower_bound,
const HostDeviceVector<bst_float>& labels_upper_bound,
const HostDeviceVector<bst_float>& preds) {
PackedReduceResult result;
if (device < 0) {
result = CpuReduceMetrics(weights, labels_lower_bound, labels_upper_bound, preds);
}
#if defined(XGBOOST_USE_CUDA)
else { // NOLINT
device_ = device;
preds.SetDevice(device_);
labels_lower_bound.SetDevice(device_);
labels_upper_bound.SetDevice(device_);
weights.SetDevice(device_);
dh::safe_cuda(cudaSetDevice(device_));
result = DeviceReduceMetrics(weights, labels_lower_bound, labels_upper_bound, preds);
}
#endif // defined(XGBOOST_USE_CUDA)
return result;
}
private:
EvalRow policy_;
#if defined(XGBOOST_USE_CUDA)
int device_{-1};
#endif // defined(XGBOOST_USE_CUDA)
};
// Interval-regression accuracy: the fraction of rows whose predicted
// survival time exp(log_pred) falls inside [label_lower_bound,
// label_upper_bound].
struct EvalIntervalRegressionAccuracy {
  // No tunable parameters for this metric.
  void Configure(const Args& args) {}

  const char* Name() const {
    return "interval-regression-accuracy";
  }

  // Score one row: 1.0 when the prediction lands inside the label
  // interval, 0.0 otherwise. Predictions arrive in log scale.
  XGBOOST_DEVICE double EvalRow(
      double label_lower_bound, double label_upper_bound, double log_pred) const {
    const double predicted_time = exp(log_pred);
    const bool inside_interval =
        (predicted_time >= label_lower_bound) && (predicted_time <= label_upper_bound);
    return inside_interval ? 1.0 : 0.0;
  }

  // Weighted mean; falls back to the raw sum when total weight is zero.
  static double GetFinal(double esum, double wsum) {
    if (wsum == 0) {
      return esum;
    }
    return esum / wsum;
  }
};
/*! \brief Negative log likelihood of Accelerated Failure Time model */
template <typename Distribution>
struct EvalAFTNLogLik {
  // Parse AFT-specific parameters (e.g. the distribution scale) from args.
  void Configure(const Args& args) {
    param_.UpdateAllowUnknown(args);
  }
  const char* Name() const {
    return "aft-nloglik";
  }
  // Per-row negative log likelihood under the chosen distribution; the
  // [label_lower_bound, label_upper_bound] interval encodes censoring.
  XGBOOST_DEVICE double EvalRow(
      double label_lower_bound, double label_upper_bound, double pred) const {
    return AFTLoss<Distribution>::Loss(
        label_lower_bound, label_upper_bound, pred, param_.aft_loss_distribution_scale);
  }
  // Weighted mean; falls back to the raw sum when total weight is zero.
  static double GetFinal(double esum, double wsum) {
    return wsum == 0 ? esum : esum / wsum;
  }

 private:
  AFTParam param_;
};
// Generic element-wise survival metric: delegates per-row scoring to
// Policy and aggregation to ElementWiseSurvivalMetricsReduction.
template<typename Policy>
struct EvalEWiseSurvivalBase : public Metric {
  EvalEWiseSurvivalBase() = default;

  void Configure(const Args& args) override {
    policy_.Configure(args);
    // The GPU ordinal is passed in explicitly via args (see
    // AFTNLogLikDispatcher); tparam_ is not propagated to manually
    // constructed metric objects.
    for (const auto& e : args) {
      if (e.first == "gpu_id") {
        device_ = dmlc::ParseSignedInt<int>(e.second.c_str(), nullptr, 10);
      }
    }
    reducer_.Configure(policy_);
  }

  // Evaluate the metric over all rows; in distributed mode the partial
  // sums are combined across workers before the final ratio is taken.
  bst_float Eval(const HostDeviceVector<bst_float>& preds,
                 const MetaInfo& info,
                 bool distributed) override {
    CHECK_NE(info.labels_lower_bound_.Size(), 0U)
      << "labels_lower_bound cannot be empty";
    CHECK_NE(info.labels_upper_bound_.Size(), 0U)
      << "labels_upper_bound cannot be empty";
    CHECK_EQ(preds.Size(), info.labels_lower_bound_.Size());
    CHECK_EQ(preds.Size(), info.labels_upper_bound_.Size());

    auto result = reducer_.Reduce(
        device_, info.weights_, info.labels_lower_bound_, info.labels_upper_bound_, preds);

    // dat[0] = weighted metric sum, dat[1] = weight sum.
    double dat[2] {result.Residue(), result.Weights()};
    if (distributed) {
      rabit::Allreduce<rabit::op::Sum>(dat, 2);
    }
    return static_cast<bst_float>(Policy::GetFinal(dat[0], dat[1]));
  }

  const char* Name() const override {
    return policy_.Name();
  }

 private:
  Policy policy_;
  ElementWiseSurvivalMetricsReduction<Policy> reducer_;
  int device_{-1};  // used only for GPU metric
};
// This class exists because we want to perform dispatch according to the distribution type at
// configuration time, not at prediction time.
struct AFTNLogLikDispatcher : public Metric {
  const char* Name() const override {
    return "aft-nloglik";
  }

  // Forward evaluation to the concrete metric selected in Configure().
  bst_float Eval(const HostDeviceVector<bst_float>& preds,
                 const MetaInfo& info,
                 bool distributed) override {
    CHECK(metric_) << "AFT metric must be configured first, with distribution type and scale";
    return metric_->Eval(preds, info, distributed);
  }

  // Select the EvalAFTNLogLik specialization matching the configured
  // probability distribution, then configure it.
  void Configure(const Args& args) override {
    param_.UpdateAllowUnknown(args);
    switch (param_.aft_loss_distribution) {
    case common::ProbabilityDistributionType::kNormal:
      metric_.reset(new EvalEWiseSurvivalBase<EvalAFTNLogLik<common::NormalDistribution>>());
      break;
    case common::ProbabilityDistributionType::kLogistic:
      metric_.reset(new EvalEWiseSurvivalBase<EvalAFTNLogLik<common::LogisticDistribution>>());
      break;
    case common::ProbabilityDistributionType::kExtreme:
      metric_.reset(new EvalEWiseSurvivalBase<EvalAFTNLogLik<common::ExtremeDistribution>>());
      break;
    default:
      LOG(FATAL) << "Unknown probability distribution";
    }
    Args new_args{args};
    // tparam_ doesn't get propagated to the inner metric object because we didn't use
    // Metric::Create(). I don't think it's a good idea to pollute the metric registry with
    // specialized versions of the AFT metric, so as a work-around, manually pass the GPU ID
    // into the inner metric via configuration.
    new_args.emplace_back("gpu_id", std::to_string(tparam_->gpu_id));
    metric_->Configure(new_args);
  }

  void SaveConfig(Json* p_out) const override {
    auto& out = *p_out;
    out["name"] = String(this->Name());
    out["aft_loss_param"] = ToJson(param_);
  }

  void LoadConfig(const Json& in) override {
    // BUG FIX: "&param_" had been corrupted into the mojibake "¶m_"
    // (HTML entity "&para;"), which does not compile.
    FromJson(in["aft_loss_param"], &param_);
  }

 private:
  AFTParam param_;
  std::unique_ptr<Metric> metric_;
};
// Registry entries. The dispatcher handles distribution selection for
// "aft-nloglik"; interval accuracy is a plain element-wise metric.
XGBOOST_REGISTER_METRIC(AFTNLogLik, "aft-nloglik")
.describe("Negative log likelihood of Accelerated Failure Time model.")
.set_body([](const char* param) {
  return new AFTNLogLikDispatcher();
});

// NOTE(review): the description string is empty for this metric.
XGBOOST_REGISTER_METRIC(IntervalRegressionAccuracy, "interval-regression-accuracy")
.describe("")
.set_body([](const char* param) {
  return new EvalEWiseSurvivalBase<EvalIntervalRegressionAccuracy>();
});
} // namespace metric
} // namespace xgboost
|
080fca6d268914c11aefe3a94fd4acc8eb8120eb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "stencilConst1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Sweeps every (matrix size, block shape) pair and times 1000 launches of
// the stencilConst1 kernel for each configuration.
// argv[1]: number of matrix sizes (rows of matrices_) to benchmark.
int main(int argc, char **argv) {
    hipSetDevice(0);
    // Guard against a missing command-line argument (the original passed
    // NULL to strtol and crashed).
    if (argc < 2) {
        cout << "usage: " << argv[0] << " <matrix_count>" << endl;
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // BUG FIX: the buffers hold XSIZE*YSIZE floats, but only
            // XSIZE*YSIZE *bytes* were allocated (4x too small), so the
            // kernel read/wrote out of bounds.
            float *src = NULL;
            hipMalloc(&src, XSIZE * YSIZE * sizeof(float));
            float *dst = NULL;
            hipMalloc(&dst, XSIZE * YSIZE * sizeof(float));
            int size = XSIZE * YSIZE;
            int raio = 1;
            // Round the grid up so it covers the matrix even when the
            // dimensions are not multiples of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);  // establish the context before timing
            // One cold launch plus 10 warm-up launches before the timed loop.
            hipLaunchKernelGGL((stencilConst1), dim3(gridBlock), dim3(threadBlock), 0, 0, src, dst, size, raio);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL((stencilConst1), dim3(gridBlock), dim3(threadBlock), 0, 0, src, dst, size, raio);
            }
            auto start = steady_clock::now();
            // NOTE(review): launches are asynchronous and no sync precedes
            // `end`, so this measures launch overhead, not kernel runtime.
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((stencilConst1), dim3(gridBlock), dim3(threadBlock), 0, 0, src, dst, size, raio);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUG FIX: both buffers were leaked on every configuration.
            hipFree(src);
            hipFree(dst);
        }
    }
} | 080fca6d268914c11aefe3a94fd4acc8eb8120eb.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "stencilConst1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// CUDA twin of the HIP benchmark: sweeps (matrix size, block shape) pairs
// and times 1000 launches of stencilConst1 per configuration.
// argv[1]: number of matrix sizes (rows of matrices_) to benchmark.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    // Guard against a missing command-line argument (the original passed
    // NULL to strtol and crashed).
    if (argc < 2) {
        cout << "usage: " << argv[0] << " <matrix_count>" << endl;
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // BUG FIX: the buffers hold XSIZE*YSIZE floats, but only
            // XSIZE*YSIZE *bytes* were allocated (4x too small), so the
            // kernel read/wrote out of bounds.
            float *src = NULL;
            cudaMalloc(&src, XSIZE * YSIZE * sizeof(float));
            float *dst = NULL;
            cudaMalloc(&dst, XSIZE * YSIZE * sizeof(float));
            int size = XSIZE * YSIZE;
            int raio = 1;
            // Round the grid up so it covers the matrix even when the
            // dimensions are not multiples of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // establish the context before timing
            // One cold launch plus 10 warm-up launches before the timed loop.
            stencilConst1<<<gridBlock, threadBlock>>>(src, dst, size, raio);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                stencilConst1<<<gridBlock, threadBlock>>>(src, dst, size, raio);
            }
            auto start = steady_clock::now();
            // NOTE(review): launches are asynchronous and no sync precedes
            // `end`, so this measures launch overhead, not kernel runtime.
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                stencilConst1<<<gridBlock, threadBlock>>>(src, dst, size, raio);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUG FIX: both buffers were leaked on every configuration.
            cudaFree(src);
            cudaFree(dst);
        }
    }
} |
29b0cb9ea889a9b02fa6d586cac0ef9eb102d02e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// TanH neuron activation function layer.
// Adapted from ReLU layer code written by Yangqing Jia
#include <vector>
#include "caffe/layers/tanh_layer.hpp"
namespace caffe {
// Element-wise forward kernel: out[i] = tanh(in[i]) for i in [0, n).
template <typename Dtype>
__global__ void TanHForward(const int n, const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(i, n) {
    const Dtype x = in[i];
    out[i] = tanh(x);
  }
}
// GPU forward pass: applies tanh element-wise to bottom[0], writing the
// result into top[0]. preforward_flag is accepted for interface
// compatibility; it is not read here.
template <typename Dtype>
void TanHLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top,
    const bool preforward_flag) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( TanHForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count, bottom_data, top_data);
  CUDA_POST_KERNEL_CHECK;
}
// Element-wise backward kernel: d tanh(x)/dx = 1 - tanh(x)^2, where
// out_data already holds tanh(x) cached from the forward pass.
template <typename Dtype>
__global__ void TanHBackward(const int n, const Dtype* in_diff,
    const Dtype* out_data, Dtype* out_diff) {
  CUDA_KERNEL_LOOP(i, n) {
    const Dtype y = out_data[i];
    out_diff[i] = in_diff[i] * (1 - y * y);
  }
}
// GPU backward pass: chain rule through tanh using the cached forward
// output (top data) instead of recomputing tanh(bottom).
// prebackward_flag is accepted for interface compatibility; it is not
// read here.
template <typename Dtype>
void TanHLayer<Dtype>::Backward_gpu(
    const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom,
    const bool prebackward_flag) {
  if (propagate_down[0]) {
    const Dtype* top_data = top[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const int count = bottom[0]->count();
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( TanHBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, top_diff, top_data, bottom_diff);
    CUDA_POST_KERNEL_CHECK;
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(TanHLayer);
} // namespace caffe
| 29b0cb9ea889a9b02fa6d586cac0ef9eb102d02e.cu | // TanH neuron activation function layer.
// Adapted from ReLU layer code written by Yangqing Jia
#include <vector>
#include "caffe/layers/tanh_layer.hpp"
namespace caffe {
// Element-wise forward kernel: out[index] = tanh(in[index]) for n elements.
template <typename Dtype>
__global__ void TanHForward(const int n, const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = tanh(in[index]);
  }
}
// GPU forward pass: applies tanh element-wise to bottom[0], writing the
// result into top[0]. preforward_flag is accepted for interface
// compatibility; it is not read here.
template <typename Dtype>
void TanHLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top,
    const bool preforward_flag) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  TanHForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, bottom_data, top_data);
  CUDA_POST_KERNEL_CHECK;
}
// Element-wise backward kernel: out_diff = in_diff * (1 - tanh(x)^2),
// where out_data already holds tanh(x) cached from the forward pass.
template <typename Dtype>
__global__ void TanHBackward(const int n, const Dtype* in_diff,
    const Dtype* out_data, Dtype* out_diff) {
  CUDA_KERNEL_LOOP(index, n) {
    Dtype tanhx = out_data[index];
    out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);
  }
}
// GPU backward pass: chain rule through tanh using the cached forward
// output (top data) instead of recomputing tanh(bottom).
// prebackward_flag is accepted for interface compatibility; it is not
// read here.
template <typename Dtype>
void TanHLayer<Dtype>::Backward_gpu(
    const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom,
    const bool prebackward_flag) {
  if (propagate_down[0]) {
    const Dtype* top_data = top[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const int count = bottom[0]->count();
    // NOLINT_NEXT_LINE(whitespace/operators)
    TanHBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, top_diff, top_data, bottom_diff);
    CUDA_POST_KERNEL_CHECK;
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(TanHLayer);
} // namespace caffe
|
6a1d7bcc10956aa839fdb31f9ee8b737c6282969.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cuda/std/barrier>
using barrier = cuda::std::barrier<>;
__managed__ cuda::std::aligned_storage<sizeof(barrier), alignof(barrier)>::type b_;
// Kernel: each thread synchronizes on the managed barrier 1024 times.
// With the 32x8 launch in main(), 256 threads participate -- matching
// the barrier's expected arrival count.
__global__ void test()
{
    barrier& sync_point = reinterpret_cast<barrier&>(b_);
    int round = 0;
    while (round < 1024) {
        sync_point.arrive_and_wait();
        ++round;
    }
}
int main()
{
    // Placement-construct the barrier in managed memory with an expected
    // arrival count of 256 -- exactly the 32*8 threads launched below.
    new (&b_) cuda::std::barrier<>(256);
    hipLaunchKernelGGL(( test), dim3(32), dim3(8), 0, 0, );
    // NOTE(review): the synchronize result is not checked; a barrier
    // deadlock or device fault would go unreported.
    hipDeviceSynchronize();
    return 0;
}
| 6a1d7bcc10956aa839fdb31f9ee8b737c6282969.cu | #include <cuda/std/barrier>
using barrier = cuda::std::barrier<>;
__managed__ cuda::std::aligned_storage<sizeof(barrier), alignof(barrier)>::type b_;
// Kernel: each thread synchronizes on the managed barrier 1024 times.
// With the 32x8 launch in main(), 256 threads participate -- matching
// the barrier's expected arrival count.
__global__ void test()
{
    auto& b = reinterpret_cast<barrier&>(b_);
    for(int i = 0;i < 1024; ++i)
        b.arrive_and_wait();
}
int main()
{
    // Placement-construct the barrier in managed memory with an expected
    // arrival count of 256 -- exactly the 32*8 threads launched below.
    new (&b_) cuda::std::barrier<>(256);
    test<<<32, 8>>>();
    // NOTE(review): the synchronize result is not checked; a barrier
    // deadlock or device fault would go unreported.
    cudaDeviceSynchronize();
    return 0;
}
|
91e94285a4e463b4170ffcd780c172d7d5fca385.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Packs a 3-byte-per-pixel image into one int per pixel:
//   img2[p] = img[3p]*65536 + img[3p+1]*256 + img[3p+2]
// (channel 0 ends up in the high byte). TILE_DIM and BLOCK_ROWS come
// from includes.h; each thread covers TILE_DIM/BLOCK_ROWS rows of its
// tile. Assumes the grid exactly tiles the image width
// (width = gridDim.x * TILE_DIM) -- TODO confirm at the call site.
__global__ void char_to_int(int * img2, unsigned char * img)
{
    int x = blockIdx.x * TILE_DIM + threadIdx.x;
    int y = blockIdx.y * TILE_DIM + threadIdx.y;
    int width = gridDim.x * TILE_DIM;
    for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS)
        img2[(y+j)*width + x] = img[3*((y+j)*width + x) + 0] * 256 * 256 + img[3*((y+j)*width + x) + 1] * 256 + img[3*((y+j)*width + x) + 2];
} | 91e94285a4e463b4170ffcd780c172d7d5fca385.cu | #include "includes.h"
// Packs a 3-byte-per-pixel image into one int per pixel:
//   img2[p] = img[3p]*65536 + img[3p+1]*256 + img[3p+2]
// (channel 0 ends up in the high byte). TILE_DIM and BLOCK_ROWS come
// from includes.h; each thread covers TILE_DIM/BLOCK_ROWS rows of its
// tile. Assumes the grid exactly tiles the image width
// (width = gridDim.x * TILE_DIM) -- TODO confirm at the call site.
__global__ void char_to_int(int * img2, unsigned char * img)
{
    int x = blockIdx.x * TILE_DIM + threadIdx.x;
    int y = blockIdx.y * TILE_DIM + threadIdx.y;
    int width = gridDim.x * TILE_DIM;
    for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS)
        img2[(y+j)*width + x] = img[3*((y+j)*width + x) + 0] * 256 * 256 + img[3*((y+j)*width + x) + 1] * 256 + img[3*((y+j)*width + x) + 2];
} |
b80f4d6fa41121c1c6355611a18a47ccef0c6644.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Inverse Discrete Sine Transform in row wise (DST three)
* DST_III_Row_Inverse
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DST_III_Row_Inverse(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#define DELTA(i, j) ((i==j)?1:0)
#define DEFAULT_DIM 32
const double PI_d = 3.141592653589793238462643383279502884; //pi
// Device matrix multiply C = A * B used for the mxGPUArray input path.
// One thread computes one C element; the k/n double loop walks the inner
// dimension in DEFAULT_DIM-sized chunks. Despite the chunking, no
// shared-memory tiling is performed -- all operands are read directly
// from global memory.
__global__ void DSTIII_Row__InverseKernel_GPUA(double const * const A, double const * const B, double * const C,
    int numARows, int numAColumns,
    int numBRows, int numBColumns,
    int numCRows, int numCColumns)
{
    double CValue = 0.0;
    int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y;
    int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x;
    for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) {
        for (int n = 0; n < DEFAULT_DIM; ++n)
            // Guard keeps out-of-range threads and padded inner indices inert.
            if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns))
                CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col];
    }
    // NOTE(review): the store index uses blockDim while the read index uses
    // DEFAULT_DIM; they agree only because every launch in this file uses
    // DEFAULT_DIM x DEFAULT_DIM blocks.
    if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
// Device matrix multiply C = A * B (standard mxArray input path).
// Identical math to DSTIII_Row__InverseKernel_GPUA: one thread per C
// element, inner dimension padded up to a whole number of DEFAULT_DIM
// chunks, with a guard keeping padded indices and out-of-range threads
// inert.
__global__ void DSTIII_Row__InverseKernel(double *A, double *B, double *C,
    int numARows, int numAColumns,
    int numBRows, int numBColumns,
    int numCRows, int numCColumns)
{
    const int row = blockIdx.y * DEFAULT_DIM + threadIdx.y;
    const int col = blockIdx.x * DEFAULT_DIM + threadIdx.x;
    // Inner-dimension trip count, rounded up to a multiple of DEFAULT_DIM
    // (flattened form of the original nested k/n loops; same accumulation
    // order).
    const int padded = ((DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM) * DEFAULT_DIM;
    double acc = 0.0;
    for (int m = 0; m < padded; ++m) {
        if (m < numAColumns && row < numARows && m < numBRows && col < numBColumns)
            acc += A[row * numAColumns + m] * B[m * numBColumns + col];
    }
    if (row < numCRows && col < numCColumns)
        C[(blockIdx.y * blockDim.y + threadIdx.y) * numCColumns + blockIdx.x * blockDim.x + threadIdx.x] = acc;
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
// Host-side driver for the standard-mxArray path: copies A (numARows x
// numAColumns) and B (numBRows x numBColumns) to the device, runs the
// matrix-multiply kernel and copies C = A*B back into the caller's buffer.
//
// Improvements over the original: HIP API results are now checked
// (failures previously went unnoticed and produced silent garbage), and
// the dead commented-out allocations plus the no-op `C = hostC;`
// assignment are gone.
void CalculateTransform(double * A, double * B, double * C, int numARows,
    int numAColumns, int numBRows, int numBColumns,
    int numCRows, int numCColumns)
{
    double *deviceA = 0;
    double *deviceB = 0;
    double *deviceC = 0;

    // Output dimensions follow the multiplication shape.
    numCRows = numARows;
    numCColumns = numBColumns;

    hipError_t err = hipMalloc((void **)&deviceA, sizeof(double )*numARows*numAColumns);
    if (err == hipSuccess) err = hipMalloc((void **)&deviceB, sizeof(double )*numBRows*numBColumns);
    if (err == hipSuccess) err = hipMalloc((void **)&deviceC, sizeof(double )*numCRows*numCColumns);
    if (err == hipSuccess) err = hipMemcpy(deviceA, A, sizeof(double )*numARows*numAColumns, hipMemcpyHostToDevice);
    if (err == hipSuccess) err = hipMemcpy(deviceB, B, sizeof(double )*numBRows*numBColumns, hipMemcpyHostToDevice);

    if (err == hipSuccess) {
        dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
        dim3 dimGrid((numCColumns + dimBlock.x - 1) / dimBlock.x,
                     (numCRows + dimBlock.y - 1) / dimBlock.y);
        DSTIII_Row__InverseKernel << <dimGrid, dimBlock >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
        err = hipGetLastError();  // catch launch-configuration errors
        if (err == hipSuccess) err = hipDeviceSynchronize();  // wait + catch execution errors
        if (err == hipSuccess) err = hipMemcpy(C, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost);
    }
    if (err != hipSuccess) {
        printf("CalculateTransform: HIP error: %s \n", hipGetErrorString(err));
    }

    hipFree(deviceA);
    hipFree(deviceB);
    hipFree(deviceC);
}
// MEX entry point: B = DST_III_Row_Inverse(A).
// prhs[0] may be a gpuArray (first branch) or a standard CPU mxArray
// (second branch). Either way an N x N inverse DST-III basis matrix is
// built on the host (N = number of columns of A) and A is
// right-multiplied by it on the GPU, so the transform acts row-wise.
void mexFunction(int nlhs, mxArray *plhs[],
    int nrhs, mxArray const *prhs[])
{
    // Bail out early when no CUDA-capable device is present.
    int nDevices;
    hipError_t errCode =hipGetDeviceCount(&nDevices);
    if (errCode != hipSuccess){
        printf("Error! No CUDA devices found! \n");
        return;
    }
    /// input standard GPUarray
    if (mxIsGPUArray(prhs[0])) {
        /* Declare all variables.*/
        mxGPUArray const *A;
        mxGPUArray const *DCOS;  // device copy of the basis matrix
        mxGPUArray *B;           // output array
        double const *d_A, *d_DCOS;
        double *d_B;
        double *pointer;         // host view of the basis matrix
        int numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns;
        char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
        char const * const errMsg = "Invalid input to MEX file.";
        /* Initialize the MathWorks GPU API. */
        mxInitGPU();
        /* Throw an error if the input is not a GPU array. */
        if ((nrhs!=1) || !(mxIsGPUArray(prhs[0]))) {
            mexErrMsgIdAndTxt(errId, errMsg);
        }
        A = mxGPUCreateFromMxArray(prhs[0]);
        const mwSize *dims;
        dims=mxGPUGetDimensions(A);
        numARows = (int)dims[0]; /* gets number of rows of A */
        numAColumns = (int)dims[1]; /* gets number of columns of A */
        // The basis matrix is square with side = number of columns of A.
        numDCOSRows=numDCOSColumns=numAColumns;
        numCRows = numARows;
        numCColumns = numDCOSColumns;
        if (numAColumns==1)
        {
            printf("Attention, this is a column vector, please try Inverse Discrete Sine Transform in column wise \n");
            return;
        }
        mxArray *COS= mxCreateNumericMatrix(numDCOSRows, numDCOSColumns, mxDOUBLE_CLASS, mxREAL);
        pointer = mxGetPr(COS);
        // Fill the inverse DST-III basis (row-wise transform):
        //   M(i,j) = sin((j + 0.5) * pi * (i + 1) / N)
        //            * sqrt((2 - delta(i + 1, N)) / N)
        for (int i = 0; i < numDCOSRows; i++){
            for (int j = 0; j < numDCOSColumns; j++){
                pointer[i + j* numDCOSColumns] = sin(((j + 0.5)*PI_d*(i + 1)) / (numDCOSColumns))*sqrt((2.0 - DELTA(i + 1, numDCOSRows)) / (numDCOSColumns));
            }
        }
        // (stale commented-out DST/DCT variants of this loop removed)
        DCOS=mxGPUCreateFromMxArray(COS);
        if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) {
            mexErrMsgIdAndTxt(errId, errMsg);
        }
        d_A = (double const *)(mxGPUGetDataReadOnly(A));
        d_DCOS=(double const *)(mxGPUGetDataReadOnly(DCOS));
        // Output has the same shape/class/complexity as the input.
        B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A),
            mxGPUGetDimensions(A),
            mxGPUGetClassID(A),
            mxGPUGetComplexity(A),
            MX_GPU_DO_NOT_INITIALIZE);
        d_B = (double *)(mxGPUGetData(B));
        dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
        dim3 dimGrid;
        dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
        dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
        DSTIII_Row__InverseKernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_DCOS, d_B, numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns);
        // NOTE(review): the kernel launch result is never checked here.
        plhs[0] = mxGPUCreateMxArrayOnGPU(B);
        mxGPUDestroyGPUArray(A);
        mxGPUDestroyGPUArray(DCOS);
        mxGPUDestroyGPUArray(B);
    }
    /// input standard array
    else if (!(mxIsGPUArray(prhs[0]))){
        int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A
        int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A
        int numBRows; // number of rows in the matrix B
        int numBColumns; // number of columns in the matrix B
        int numCRows; // number of rows in the matrix C (you have to set this)
        int numCColumns; // number of columns in the matrix C (you have to set this)
        numBRows = numBColumns = numAColumns;
        numCRows = numARows;
        numCColumns = numBColumns;
        if (numAColumns==1)
        {
            printf("Attention, this is a column vector, please try Inverse Discrete Sine Transform in column wise \n");
            return;
        }
        double * hostA ; // The A matrix
        double * hostB ; // The B matrix (inverse DST-III basis, built below)
        hostB = (double *)malloc(sizeof(double)*numBRows*numBColumns);
        hostA = (double *)mxGetData(prhs[0]);
        // Same basis construction as in the gpuArray branch above.
        for (int i = 0; i < numBRows; i++){
            for (int j = 0; j < numBColumns; j++){
                hostB[i + j* numBColumns] = sin(((j + 0.5)*PI_d*(i + 1)) / (numBColumns))*sqrt((2.0 - DELTA(i + 1, numBRows)) / (numBColumns));
            }
        }
        // (stale commented-out DST/DCT variants of this loop removed)
        plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL);
        double *pointer = mxGetPr(plhs[0]);
        CalculateTransform(hostA, hostB, pointer, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
        free(hostB);
    }
}
| b80f4d6fa41121c1c6355611a18a47ccef0c6644.cu | /*
* Inverse Discrete Sine Transform in row wise (DST three)
* DST_III_Row_Inverse
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DST_III_Row_Inverse(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#define DELTA(i, j) ((i==j)?1:0)
#define DEFAULT_DIM 32
const double PI_d = 3.141592653589793238462643383279502884; //pi
// Device matrix multiply C = A * B used for the mxGPUArray input path.
// One thread computes one C element; the k/n double loop walks the inner
// dimension in DEFAULT_DIM-sized chunks. Despite the chunking, no
// shared-memory tiling is performed -- all operands are read directly
// from global memory.
__global__ void DSTIII_Row__InverseKernel_GPUA(double const * const A, double const * const B, double * const C,
    int numARows, int numAColumns,
    int numBRows, int numBColumns,
    int numCRows, int numCColumns)
{
    double CValue = 0.0;
    int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y;
    int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x;
    for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) {
        for (int n = 0; n < DEFAULT_DIM; ++n)
            // Guard keeps out-of-range threads and padded inner indices inert.
            if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns))
                CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col];
    }
    // NOTE(review): the store index uses blockDim while the read index uses
    // DEFAULT_DIM; they agree only because every launch in this file uses
    // DEFAULT_DIM x DEFAULT_DIM blocks.
    if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
// Device matrix multiply C = A * B (standard mxArray input path).
// Same math as DSTIII_Row__InverseKernel_GPUA above: one thread per C
// element, inner dimension walked in DEFAULT_DIM-sized chunks with a
// guard keeping padded indices and out-of-range threads inert.
__global__ void DSTIII_Row__InverseKernel(double *A, double *B, double *C,
    int numARows, int numAColumns,
    int numBRows, int numBColumns,
    int numCRows, int numCColumns)
{
    double CValue = 0.0;
    int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y;
    int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x;
    for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) {
        for (int n = 0; n < DEFAULT_DIM; ++n)
            if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns))
                CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col];
    }
    // NOTE(review): store index uses blockDim, read index DEFAULT_DIM; they
    // agree only because launches use DEFAULT_DIM x DEFAULT_DIM blocks.
    if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
// Host-side driver for the standard-mxArray path: copies A (numARows x
// numAColumns) and B (numBRows x numBColumns) to the device, runs the
// matrix-multiply kernel and copies C = A*B back into the caller's buffer.
//
// Improvements over the original: CUDA API results are now checked
// (failures previously went unnoticed and produced silent garbage), and
// the dead commented-out allocations plus the no-op `C = hostC;`
// assignment are gone.
void CalculateTransform(double * A, double * B, double * C, int numARows,
    int numAColumns, int numBRows, int numBColumns,
    int numCRows, int numCColumns)
{
    double *deviceA = 0;
    double *deviceB = 0;
    double *deviceC = 0;

    // Output dimensions follow the multiplication shape.
    numCRows = numARows;
    numCColumns = numBColumns;

    cudaError_t err = cudaMalloc((void **)&deviceA, sizeof(double )*numARows*numAColumns);
    if (err == cudaSuccess) err = cudaMalloc((void **)&deviceB, sizeof(double )*numBRows*numBColumns);
    if (err == cudaSuccess) err = cudaMalloc((void **)&deviceC, sizeof(double )*numCRows*numCColumns);
    if (err == cudaSuccess) err = cudaMemcpy(deviceA, A, sizeof(double )*numARows*numAColumns, cudaMemcpyHostToDevice);
    if (err == cudaSuccess) err = cudaMemcpy(deviceB, B, sizeof(double )*numBRows*numBColumns, cudaMemcpyHostToDevice);

    if (err == cudaSuccess) {
        dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
        dim3 dimGrid((numCColumns + dimBlock.x - 1) / dimBlock.x,
                     (numCRows + dimBlock.y - 1) / dimBlock.y);
        DSTIII_Row__InverseKernel << <dimGrid, dimBlock >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
        err = cudaGetLastError();  // catch launch-configuration errors
        if (err == cudaSuccess) err = cudaDeviceSynchronize();  // wait + catch execution errors
        if (err == cudaSuccess) err = cudaMemcpy(C, deviceC, sizeof(double)*numCRows*numCColumns, cudaMemcpyDeviceToHost);
    }
    if (err != cudaSuccess) {
        printf("CalculateTransform: CUDA error: %s \n", cudaGetErrorString(err));
    }

    cudaFree(deviceA);
    cudaFree(deviceB);
    cudaFree(deviceC);
}
// MEX gateway: row-wise Inverse Discrete Sine Transform (DST-III based).
// Accepts one real double matrix — either a gpuArray or a plain host
// matrix — and returns A * S, where S is the square numAColumns-sided
// inverse-DST matrix built below. Column vectors are rejected because a
// row-wise transform of a single column is meaningless.
// Fixes vs. previous version: frees the buffer returned by
// mxGPUGetDimensions (documented as caller-freed), checks the host malloc,
// and removes the large mass of dead commented-out experiments. All
// computations, indexing and user-visible messages are unchanged.
void mexFunction(int nlhs, mxArray *plhs[],
    int nrhs, mxArray const *prhs[])
{
    // Refuse to run when no CUDA device is present.
    int nDevices;
    cudaError_t errCode = cudaGetDeviceCount(&nDevices);
    if (errCode != cudaSuccess){
        printf("Error! No CUDA devices found! \n");
        return;
    }
    if (mxIsGPUArray(prhs[0])) {
        // ---------------- gpuArray input path ----------------
        mxGPUArray const *A;
        mxGPUArray const *DCOS;
        mxGPUArray *B;
        double const *d_A, *d_DCOS;
        double *d_B;
        double *pointer;
        int numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns;
        char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
        char const * const errMsg = "Invalid input to MEX file.";
        /* Initialize the MathWorks GPU API. */
        mxInitGPU();
        if ((nrhs!=1) || !(mxIsGPUArray(prhs[0]))) {
            mexErrMsgIdAndTxt(errId, errMsg);
        }
        A = mxGPUCreateFromMxArray(prhs[0]);
        const mwSize *dims = mxGPUGetDimensions(A);
        numARows = (int)dims[0];       /* number of rows of A */
        numAColumns = (int)dims[1];    /* number of columns of A */
        mxFree((void *)dims);          /* mxGPUGetDimensions allocates; caller frees */
        // The transform matrix is square with side numAColumns.
        numDCOSRows = numDCOSColumns = numAColumns;
        numCRows = numARows;
        numCColumns = numDCOSColumns;
        if (numAColumns==1)
        {
            printf("Attention, this is a column vector, please try Inverse Discrete Sine Transform in column wise \n");
            return;
        }
        // Build the inverse DST-III transform matrix on the host, then push
        // it to the device as a gpuArray.
        mxArray *COS = mxCreateNumericMatrix(numDCOSRows, numDCOSColumns, mxDOUBLE_CLASS, mxREAL);
        pointer = mxGetPr(COS);
        for (int i = 0; i < numDCOSRows; i++){
            for (int j = 0; j < numDCOSColumns; j++){
                pointer[i + j* numDCOSColumns] = sin(((j + 0.5)*PI_d*(i + 1)) / (numDCOSColumns))*sqrt((2.0 - DELTA(i + 1, numDCOSRows)) / (numDCOSColumns));
            }
        }
        DCOS = mxGPUCreateFromMxArray(COS);
        if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) {
            mexErrMsgIdAndTxt(errId, errMsg);
        }
        d_A = (double const *)(mxGPUGetDataReadOnly(A));
        d_DCOS = (double const *)(mxGPUGetDataReadOnly(DCOS));
        // Output gpuArray has the same shape/class as the input.
        B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A),
            mxGPUGetDimensions(A),
            mxGPUGetClassID(A),
            mxGPUGetComplexity(A),
            MX_GPU_DO_NOT_INITIALIZE);
        d_B = (double *)(mxGPUGetData(B));
        dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
        dim3 dimGrid;
        dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
        dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
        DSTIII_Row__InverseKernel_GPUA << <dimGrid, dimBlock >> >(d_A, d_DCOS, d_B, numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns);
        plhs[0] = mxGPUCreateMxArrayOnGPU(B);
        mxGPUDestroyGPUArray(A);
        mxGPUDestroyGPUArray(DCOS);
        mxGPUDestroyGPUArray(B);
    }
    else if (!(mxIsGPUArray(prhs[0]))){
        // ---------------- host-array input path ----------------
        int numARows = (int)mxGetM(prhs[0]);    // rows of A
        int numAColumns = (int)mxGetN(prhs[0]); // columns of A
        int numBRows, numBColumns, numCRows, numCColumns;
        numBRows = numBColumns = numAColumns;   // transform matrix is square
        numCRows = numARows;
        numCColumns = numBColumns;
        if (numAColumns==1)
        {
            printf("Attention, this is a column vector, please try Inverse Discrete Sine Transform in column wise \n");
            return;
        }
        double * hostA; // input matrix (borrowed from MATLAB)
        double * hostB = (double *)malloc(sizeof(double)*numBRows*numBColumns);
        if (hostB == NULL){
            // malloc can fail for very large transforms; previously this crashed.
            printf("Error: out of memory allocating transform matrix\n");
            return;
        }
        hostA = (double *)mxGetData(prhs[0]);
        // Build the inverse DST-III transform matrix (row-wise).
        for (int i = 0; i < numBRows; i++){
            for (int j = 0; j < numBColumns; j++){
                hostB[i + j* numBColumns] = sin(((j + 0.5)*PI_d*(i + 1)) / (numBColumns))*sqrt((2.0 - DELTA(i + 1, numBRows)) / (numBColumns));
            }
        }
        plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL);
        double *pointer = mxGetPr(plhs[0]);
        // GPU multiply: result written straight into plhs[0]'s buffer.
        CalculateTransform(hostA, hostB, pointer, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
        free(hostB);
    }
}
|
c2cbfb118ab30d9cc083ce18913dc48163a913fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Element-wise truncating cast: B[i] = (int)A[i] for i in [0, N).
// Grid-stride loop over 1D blocks in a (possibly 2D) grid, so any launch
// configuration covers all N elements.
__global__ void __toInt(double *A, int *B, int N) {
// Linear thread id across the x/y grid.
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (int)(A[i]);
}
} | c2cbfb118ab30d9cc083ce18913dc48163a913fe.cu | #include "includes.h"
// Element-wise truncating cast of N doubles into ints.
// Grid-stride loop: correct for any 1D-block / 2D-grid launch shape.
__global__ void __toInt(double *A, int *B, int N) {
    int first = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
    int stride = blockDim.x * gridDim.x * gridDim.y;
    for (int idx = first; idx < N; idx += stride) {
        B[idx] = (int)(A[idx]);
    }
}
537f3f1a7982a8128bbb63dcdfd6dd9737068d23.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <dirent.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <sys/stat.h>
// input and outpt files
#define IMAGE_INPUT_DIR "../dataset/Imacx01Animal2/csv"
#define IMAGE_OUTPUT_DIR "../dataset/Imacx01Animal2/region"
// region growing parameters
#define SEED_X 193
#define SEED_Y 197
#define SEED_Z 147
#define NUM_FEATURES 5
#define LIMIAR 0.3
// image properties
#define WIDTH 512
#define MAX_NUMBER_CORTES 400
// lung area thresholds
#define HU_PULMAO_MIN -700
#define HU_PULMAO_MAX -600
// MIN/MAX das tomografias dos ratos
#define MIN_HU -1024
#define MAX_HU 100
// file constants
#define MAX_LINE_SIZE 3072 // se pixel value 16bit: valores -32768 a +32767: 6 caracteres * 512 elementos por linha = 3.072
#define MAX_TOKEN_SIZE 6 // tamanho de cada valor do csv
#define MAX_FILENAME 1024
// *********************************************************************
// funcao que informa a quantidade de cortes no diretrio
// *********************************************************************
// Counts the regular files in IMAGE_INPUT_DIR (one CSV per CT slice).
// Returns the slice count, or -1 if the directory cannot be read.
int countSlices(){
    DIR *dirp = opendir(IMAGE_INPUT_DIR);
    if (!dirp){
        printf("countSlices: no conseguiu ler o diretrio\n");
        return(-1);
    }
    int num_slices = 0;
    struct dirent *entry;
    while ((entry = readdir(dirp)) != NULL){
        if (entry->d_type == DT_REG)
            num_slices++;
    }
    closedir(dirp);
    return num_slices;
}
// *********************************************************************
// funcao para carregar os cortes do filesystem para a memria principal
// *********************************************************************
// Loads every CSV slice found in IMAGE_INPUT_DIR into `imagem`, in
// lexicographic filename order. `imagem` must hold WIDTH*WIDTH ints per
// slice. Returns 0 on success, -1 if the directory cannot be read,
// -2 if a file cannot be opened.
// Fixes vs. previous version: bounds the file list at MAX_NUMBER_CORTES
// (a larger directory overflowed the stack array), clamps tokens to
// MAX_TOKEN_SIZE-1 characters (6-char values like "-32768" overflowed
// token[]), and drops the unused `nlines` counter.
int loadCT(int *imagem){
    // Collect the names of the regular files in the input directory.
    DIR *d = opendir(IMAGE_INPUT_DIR);
    char files[MAX_NUMBER_CORTES][MAX_FILENAME];
    int num_files = 0;
    if (d){
        struct dirent *dir;
        while ((dir = readdir(d)) != NULL)
        {
            // Bound check: extra files beyond the array capacity are ignored.
            if (dir->d_type == DT_REG && num_files < MAX_NUMBER_CORTES){
                char filename[MAX_FILENAME] = IMAGE_INPUT_DIR "/";
                strcpy(files[num_files++], strcat(filename, dir->d_name));
            }
        }
        closedir(d);
    } else {
        printf("loadCT: no conseguiu ler o diretrio\n");
        return(-1);
    }
    // Sort the file list lexicographically (simple exchange sort).
    for (int i = 0; i < num_files; i++){
        for (int j = 0; j < num_files; j++){
            if (strcmp(files[i], files[j]) < 0){
                char temp[MAX_FILENAME] = {};
                strcpy(temp, files[i]);
                strcpy(files[i], files[j]);
                strcpy(files[j], temp);
            }
        }
    }
    // Parse each CSV file: every comma- or newline-terminated token is one
    // pixel value, appended sequentially to `imagem`.
    int ntoken = 0;
    for (int i = 0; i < num_files; i++){
        FILE *file = fopen(files[i], "r");
        if (!file){
            printf("loadCT: no conseguiu abrir arquivo\n");
            return(-2);
        }
        char buf[MAX_LINE_SIZE] = {};
        char *pbuf;
        while ((pbuf = fgets(buf, sizeof(buf), file)) != NULL){ // one CSV row
            char *p;
            while ((p = strchr(pbuf, ',')) != NULL || (p = strchr(pbuf, '\n')) != NULL){
                int len = p - pbuf;
                if (len >= MAX_TOKEN_SIZE) len = MAX_TOKEN_SIZE - 1; // clamp malformed/long tokens
                char token[MAX_TOKEN_SIZE];
                int k = 0;
                for (; k < len; k++){
                    token[k] = pbuf[k];
                }
                token[k] = '\0';
                pbuf = p + 1;
                imagem[ntoken++] = atoi(token);
            }
        }
        fclose(file);
    }
    return(0);
}
// *********************************************************************
// funcao para salvar os arquivos em disco
// *********************************************************************
// Writes `imagem` back to disk as one CSV file per slice under
// IMAGE_OUTPUT_DIR (created on first use). Returns 0 on success, -1 if an
// output file cannot be opened.
int saveCT(int *imagem, int num_slices){
    // Create the output directory if it does not exist yet.
    struct stat st = {0};
    if (stat(IMAGE_OUTPUT_DIR, &st) == -1) {
        mkdir(IMAGE_OUTPUT_DIR, 0700);
    }
    const int pixels_por_slice = WIDTH * WIDTH;
    char filename[MAX_FILENAME];
    char filepath[MAX_FILENAME];
    int cursor = 0;
    for (int slice = 0; slice < num_slices; slice++){
        snprintf(filename, 16, "/SLICE-%04d.txt", slice + 1);
        strcpy(filepath, IMAGE_OUTPUT_DIR);
        strcat(filepath, filename);
        FILE *fp = fopen(filepath, "w");
        if (fp == NULL){
            return -1;
        }
        for (int j = cursor; j < cursor + pixels_por_slice; j++){
            fprintf(fp, "%d", imagem[j]);
            // End of each row of WIDTH values gets a newline, otherwise a comma.
            if (((j + 1) % WIDTH) == 0) {
                fprintf(fp, "\n");
            } else {
                fprintf(fp, ",");
            }
        }
        cursor += pixels_por_slice;
        fclose(fp);
    }
    return 0;
}
// *********************************************************************
// obtm posio de um voxel no vetor linearizado
// *********************************************************************z
// Maps voxel coordinates (x, y, z) to an index into the flattened volume:
// x varies fastest, then y, then z (WIDTH x WIDTH slices).
__host__ __device__
int getFlat(int x, int y, int z){
    return z * (WIDTH * WIDTH) + y * WIDTH + x;
}
// *********************************************************************
// obtm coordenadas de um elemento do vetor linearizado
// *********************************************************************
// Inverse of getFlat: recovers (x, y, z) from a flattened index.
// Always returns 0.
__host__ __device__
int getCoord(int flat, int *x, int *y, int *z){
    const int slice = WIDTH * WIDTH;
    *z = flat / slice;
    int rem = flat - (*z) * slice;
    *y = rem / WIDTH;
    *x = rem - (*y) * WIDTH;
    return 0;
}
// *********************************************************************
// calcula o pixel semente
// *********************************************************************
// Picks a seed voxel: scans the central row of slice SEED_Z, from the
// middle column outward, for the first voxel whose HU value lies strictly
// inside (HU_PULMAO_MIN, HU_PULMAO_MAX) — typically lung tissue.
// Returns the flat index of that voxel, or -1 when none is found.
int calculateSeed(int *imagedata){
    int y = WIDTH / 2;
    int z = SEED_Z;
    for (int i = WIDTH / 2; i < WIDTH; i++){
        int flat = getFlat(i, y, z);
        if (imagedata[flat] > HU_PULMAO_MIN && imagedata[flat] < HU_PULMAO_MAX){
            printf("seed[%d] (%d, %d, %d): %d\n", flat, i, y, z, imagedata[flat]);
            return flat;
        }
    }
    return -1;
}
// *********************************************************************
// funcao para verificar se um pixel vizinho a regio
// *********************************************************************
// True when any of the (up to) 26 neighbours of `index` already belongs
// to the region (regiondata == 1).
// NOTE(review): the `> 0` bounds exclude coordinate 0, so voxels on the
// first plane/row/column are never considered neighbours — confirm intended.
__host__ __device__
bool isNeighbor(int index, int *regiondata, int depth){
    int x, y, z;
    getCoord(index, &x, &y, &z);
    for (int dz = -1; dz <= 1; dz++){
        for (int dy = -1; dy <= 1; dy++){
            for (int dx = -1; dx <= 1; dx++){
                int k = z + dz, j = y + dy, i = x + dx;
                if ((k > 0 && k < depth) && (j > 0 && j < WIDTH) && (i > 0 && i < WIDTH)){
                    int neighbor = getFlat(i, j, k);
                    if (neighbor != index && regiondata[neighbor] == 1)
                        return true;
                }
            }
        }
    }
    return false;
}
// *********************************************************************
// MIN-MAX HU normalization
// *********************************************************************
// Clamps a Hounsfield value to [MIN_HU, MAX_HU] and min-max normalizes it
// into [0, 1].
__host__ __device__
float normalizeHU(int hu){
    int clamped = hu;
    if (clamped < MIN_HU) clamped = MIN_HU;
    if (clamped > MAX_HU) clamped = MAX_HU;
    return ((float)abs(clamped - MIN_HU)) / abs(MAX_HU - MIN_HU);
}
// *********************************************************************
// funcao para calcular o vetor de caracteristicas (HU, MEAN, MIN, MAX, CVE)
// *********************************************************************
// Fills `vector` with the feature vector of voxel `index`:
//   [0] normalized HU of the voxel itself
//   [1] mean normalized HU over its (up to) 3x3x3 neighbourhood
//   [2] minimum, [3] maximum over the same neighbourhood
//   [4] CVE texture measure — not implemented yet, always 0
// Neighbour coordinates equal to 0 are excluded by the `> 0` bounds
// (mirrors isNeighbor). Always returns 0.
// Fixes vs. previous version: removed the seven dead CVE scratch locals
// that were declared but never read.
__host__ __device__
int calculateFeatures(int index, int *pixeldata, int depth, float *vector){
    vector[0] = normalizeHU(pixeldata[index]); // HU of the voxel itself
    vector[4] = 0;                             // CVE placeholder
    int x, y, z;
    getCoord(index, &x, &y, &z);
    float min = 1;
    float max = 0;
    float sum = 0;
    float qtde = 0;
    // Accumulate mean/min/max over the in-bounds neighbourhood (voxel included).
    for (int k = z - 1; k <= z + 1; k++){
        for (int j = y - 1; j <= y + 1; j++){
            for (int i = x - 1; i <= x + 1; i++){
                if (((k > 0) && (k < depth)) && ((j > 0) && (j < WIDTH)) && ((i > 0) && (i < WIDTH))){
                    float hu = normalizeHU(pixeldata[getFlat(i, j, k)]);
                    sum += hu;
                    if (hu < min) min = hu;
                    if (hu > max) max = hu;
                    qtde++;
                }
            }
        }
    }
    vector[1] = sum / qtde; // MEAN (NaN if the whole neighbourhood is out of bounds, as before)
    vector[2] = min;        // MIN
    vector[3] = max;        // MAX
    return 0;
}
// Euclidean distance between two NUM_FEATURES-dimensional feature vectors.
// Fix vs. previous version: `pow(x, 2)` invoked the double-precision pow
// routine per element inside a float device kernel; a plain multiply is
// equivalent for squaring and far cheaper.
__device__
float calculateDistance(float *vector, float *seed){
    float sum = 0;
    for (int i = 0; i < NUM_FEATURES; i++){
        float diff = vector[i] - seed[i];
        sum += diff * diff;
    }
    return (float)sqrt(sum);
}
// One region-growing sweep over the whole volume: every voxel not yet in
// the region whose neighbourhood touches the region is compared against
// the seed's feature vector; voxels closer than LIMIAR are added.
// *incluidos accumulates the number of voxels added this sweep — the host
// relaunches until it stays 0.
// Fix vs. previous version: `*incluidos += 1` was an unsynchronized
// read-modify-write racing across all threads; atomicAdd makes the
// per-sweep count reliable.
__global__
void regionGrowing(int *imagedata, int *regiondata, float *seed_vector, int *incluidos, int depth){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int z = blockIdx.z * blockDim.z + threadIdx.z;
    if ((x < WIDTH) && (y < WIDTH) && (z < depth)){
        int i = getFlat(x, y, z);
        if ((regiondata[i] != 1) && (isNeighbor(i, regiondata, depth))){
            float vector[NUM_FEATURES];
            calculateFeatures(i, imagedata, depth, vector);
            float distance = calculateDistance(vector, seed_vector);
            if (distance < LIMIAR){
                regiondata[i] = 1; // concurrent writes of the same value are benign
                atomicAdd(incluidos, 1);
            }
        }
    }
}
// Masks the original volume with the grown region, in place in
// `regiondata`: voxels outside the region become MIN_HU (air), voxels
// inside keep their original HU value.
__global__
void regionMask(int *imagedata, int *regiondata, int depth){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int z = blockIdx.z * blockDim.z + threadIdx.z;
    if (x >= WIDTH || y >= WIDTH || z >= depth) return;
    int i = getFlat(x, y, z);
    regiondata[i] = (regiondata[i] == 0) ? MIN_HU : imagedata[i];
}
// *********************************************************************
// funo principal do programa
// *********************************************************************
// Entry point: loads the CT volume from CSV slices, grows a 3D region from
// a fixed seed voxel on the GPU (iterating until no voxel is added), and
// writes the resulting 0/1 region mask back to disk as CSV slices.
int main(void)
{
// 1. initialize host-side variables
int num_slices = 0;
num_slices = countSlices();
int num_elementos = num_slices * WIDTH * WIDTH;
size_t sizect = num_elementos * sizeof(int);
int *h_imagedata = (int *)malloc(sizect);
int *h_regiondata = (int *)malloc(sizect);
// initialize the region vector with zeros
for (int i = 0; i < num_elementos; i++) h_regiondata[i] = 0;
// 2. load the slices into main memory
printf(">>> carregando a tomografia na memria principal \n");
if (loadCT(h_imagedata) != 0){
printf("erro ao carregar arquivos da tomografia\n");
return(-1);
}
// 3. allocate device buffers
// NOTE(review): hipMalloc/hipMemcpy return codes are never checked here.
int *d_imagedata;
hipMalloc((void **)&d_imagedata, sizect);
int *d_regiondata;
hipMalloc((void **)&d_regiondata, sizect);
// 4. pick the seed voxel (fixed coordinates; the automatic search is disabled)
printf(">>> identificando a semente\n");
int index_seed = 0;
//index_seed = calculateSeed(h_imagedata);
index_seed = getFlat(SEED_X, SEED_Y, SEED_Z);
if ( index_seed < 0){
printf("couldn't find seed pixel, try another slice\n");
return(-1);
}
if (index_seed == 0){
printf("no obteve a semente para o crescimento de regio\n");
return(-1);
}else{
printf("seed position: %d\n", index_seed);
}
h_regiondata[index_seed] = 1;
// compute the seed's feature vector (HU, MEAN, MIN, MAX, CVE)
size_t size_vector = 5 * sizeof(float);
float *h_seed_vector = (float *)malloc(size_vector);
float *d_seed_vector;
hipMalloc((void **)&d_seed_vector, size_vector);
calculateFeatures(index_seed, h_imagedata, num_slices, h_seed_vector);
// 5. copy the input data to the device
hipMemcpy(d_imagedata, h_imagedata, sizect, hipMemcpyHostToDevice);
hipMemcpy(d_regiondata, h_regiondata, sizect, hipMemcpyHostToDevice);
hipMemcpy(d_seed_vector, h_seed_vector, size_vector, hipMemcpyHostToDevice);
// 4. region-growing loop: relaunch until a sweep adds no new voxels
int *h_incluidos = (int *)malloc(sizeof(int));
int *d_incluidos;
hipMalloc((void **)&d_incluidos, sizeof(int));
// launch configuration: 16x16x4 threads per block
// NOTE(review): dimGrid.z uses (num_slices+4)/4; ceil-division would be
// (num_slices+3)/4 — the extra block is harmless given the kernel's bounds guard.
dim3 dimBlock(16, 16, 4);
dim3 dimGrid(32, 32, (num_slices+4)/4);
int iteracao = 0;
do{
*h_incluidos = 0;
hipMemcpy(d_incluidos, h_incluidos, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( regionGrowing), dim3(dimGrid),dim3(dimBlock), 0, 0, d_imagedata, d_regiondata, d_seed_vector, d_incluidos, num_slices);
hipDeviceSynchronize();
hipMemcpy(h_incluidos, d_incluidos, sizeof(int), hipMemcpyDeviceToHost);
printf("%d) incluidos=%d\n", iteracao++, *h_incluidos); //debug
} while(*h_incluidos != 0);
// 5. kernel that would overlay the region mask on the original image
// (disabled: the raw 0/1 region is saved instead)
// regionMask<<<dimGrid,dimBlock>>>(d_imagedata, d_regiondata, num_slices);
// 6. copy the result back to main memory
hipMemcpy(h_regiondata, d_regiondata, sizect, hipMemcpyDeviceToHost);
// 7. save to disk
printf(">>> carregando a regio em disco \n");
if (saveCT(h_regiondata, num_slices) != 0){
printf("erro ao salvar o resultado em disco\n");
return(-1);
}
// 8. summary
printf(">>> resumo da TC \n");
printf("num slices da TC: %d\n", num_slices);
printf("tamanho da TC (elementos): %d\n", num_elementos);
printf("tamanho da TC (bytes): %lu\n", sizect);
int volume = 0;
for (int i = 0; i<num_elementos;i++){
if (h_regiondata[i] == 1) volume +=1;
}
printf("volume da regio (pixels): %d\n", volume);
// 9. cleaning
free (h_imagedata);
free (h_regiondata);
free(h_seed_vector);
free(h_incluidos);
hipFree(d_imagedata);
hipFree(d_regiondata);
hipFree(d_seed_vector);
hipFree(d_incluidos);
printf("Done\n");
return 0;
}
| 537f3f1a7982a8128bbb63dcdfd6dd9737068d23.cu | #include <stdio.h>
#include <dirent.h>
#include <string.h>
#include <cuda.h>
#include <sys/stat.h>
// input and outpt files
#define IMAGE_INPUT_DIR "../dataset/Imacx01Animal2/csv"
#define IMAGE_OUTPUT_DIR "../dataset/Imacx01Animal2/region"
// region growing parameters
#define SEED_X 193
#define SEED_Y 197
#define SEED_Z 147
#define NUM_FEATURES 5
#define LIMIAR 0.3
// image properties
#define WIDTH 512
#define MAX_NUMBER_CORTES 400
// lung area thresholds
#define HU_PULMAO_MIN -700
#define HU_PULMAO_MAX -600
// MIN/MAX das tomografias dos ratos
#define MIN_HU -1024
#define MAX_HU 100
// file constants
#define MAX_LINE_SIZE 3072 // se pixel value 16bit: valores -32768 a +32767: 6 caracteres * 512 elementos por linha = 3.072
#define MAX_TOKEN_SIZE 6 // tamanho de cada valor do csv
#define MAX_FILENAME 1024
// *********************************************************************
// funcao que informa a quantidade de cortes no diretório
// *********************************************************************
// Counts the regular files in IMAGE_INPUT_DIR (one CSV per CT slice).
// Returns the slice count, or -1 if the directory cannot be read.
int countSlices(){
    DIR *dirp = opendir(IMAGE_INPUT_DIR);
    if (!dirp){
        printf("countSlices: não conseguiu ler o diretório\n");
        return(-1);
    }
    int num_slices = 0;
    struct dirent *entry;
    while ((entry = readdir(dirp)) != NULL){
        if (entry->d_type == DT_REG)
            num_slices++;
    }
    closedir(dirp);
    return num_slices;
}
// *********************************************************************
// funcao para carregar os cortes do filesystem para a memória principal
// *********************************************************************
// Loads every CSV slice found in IMAGE_INPUT_DIR into `imagem`, in
// lexicographic filename order. `imagem` must hold WIDTH*WIDTH ints per
// slice. Returns 0 on success, -1 if the directory cannot be read,
// -2 if a file cannot be opened.
// Fixes vs. previous version: bounds the file list at MAX_NUMBER_CORTES
// (a larger directory overflowed the stack array), clamps tokens to
// MAX_TOKEN_SIZE-1 characters (6-char values like "-32768" overflowed
// token[]), and drops the unused `nlines` counter.
int loadCT(int *imagem){
    // Collect the names of the regular files in the input directory.
    DIR *d = opendir(IMAGE_INPUT_DIR);
    char files[MAX_NUMBER_CORTES][MAX_FILENAME];
    int num_files = 0;
    if (d){
        struct dirent *dir;
        while ((dir = readdir(d)) != NULL)
        {
            // Bound check: extra files beyond the array capacity are ignored.
            if (dir->d_type == DT_REG && num_files < MAX_NUMBER_CORTES){
                char filename[MAX_FILENAME] = IMAGE_INPUT_DIR "/";
                strcpy(files[num_files++], strcat(filename, dir->d_name));
            }
        }
        closedir(d);
    } else {
        printf("loadCT: não conseguiu ler o diretório\n");
        return(-1);
    }
    // Sort the file list lexicographically (simple exchange sort).
    for (int i = 0; i < num_files; i++){
        for (int j = 0; j < num_files; j++){
            if (strcmp(files[i], files[j]) < 0){
                char temp[MAX_FILENAME] = {};
                strcpy(temp, files[i]);
                strcpy(files[i], files[j]);
                strcpy(files[j], temp);
            }
        }
    }
    // Parse each CSV file: every comma- or newline-terminated token is one
    // pixel value, appended sequentially to `imagem`.
    int ntoken = 0;
    for (int i = 0; i < num_files; i++){
        FILE *file = fopen(files[i], "r");
        if (!file){
            printf("loadCT: não conseguiu abrir arquivo\n");
            return(-2);
        }
        char buf[MAX_LINE_SIZE] = {};
        char *pbuf;
        while ((pbuf = fgets(buf, sizeof(buf), file)) != NULL){ // one CSV row
            char *p;
            while ((p = strchr(pbuf, ',')) != NULL || (p = strchr(pbuf, '\n')) != NULL){
                int len = p - pbuf;
                if (len >= MAX_TOKEN_SIZE) len = MAX_TOKEN_SIZE - 1; // clamp malformed/long tokens
                char token[MAX_TOKEN_SIZE];
                int k = 0;
                for (; k < len; k++){
                    token[k] = pbuf[k];
                }
                token[k] = '\0';
                pbuf = p + 1;
                imagem[ntoken++] = atoi(token);
            }
        }
        fclose(file);
    }
    return(0);
}
// *********************************************************************
// funcao para salvar os arquivos em disco
// *********************************************************************
// Writes `imagem` back to disk as one CSV file per slice under
// IMAGE_OUTPUT_DIR (created on first use). Returns 0 on success, -1 if an
// output file cannot be opened.
int saveCT(int *imagem, int num_slices){
    // Create the output directory if it does not exist yet.
    struct stat st = {0};
    if (stat(IMAGE_OUTPUT_DIR, &st) == -1) {
        mkdir(IMAGE_OUTPUT_DIR, 0700);
    }
    const int pixels_por_slice = WIDTH * WIDTH;
    char filename[MAX_FILENAME];
    char filepath[MAX_FILENAME];
    int cursor = 0;
    for (int slice = 0; slice < num_slices; slice++){
        snprintf(filename, 16, "/SLICE-%04d.txt", slice + 1);
        strcpy(filepath, IMAGE_OUTPUT_DIR);
        strcat(filepath, filename);
        FILE *fp = fopen(filepath, "w");
        if (fp == NULL){
            return -1;
        }
        for (int j = cursor; j < cursor + pixels_por_slice; j++){
            fprintf(fp, "%d", imagem[j]);
            // End of each row of WIDTH values gets a newline, otherwise a comma.
            if (((j + 1) % WIDTH) == 0) {
                fprintf(fp, "\n");
            } else {
                fprintf(fp, ",");
            }
        }
        cursor += pixels_por_slice;
        fclose(fp);
    }
    return 0;
}
// *********************************************************************
// obtém posição de um voxel no vetor linearizado
// *********************************************************************z
// Maps voxel coordinates (x, y, z) to an index into the flattened volume:
// x varies fastest, then y, then z (WIDTH x WIDTH slices).
__host__ __device__
int getFlat(int x, int y, int z){
    return z * (WIDTH * WIDTH) + y * WIDTH + x;
}
// *********************************************************************
// obtém coordenadas de um elemento do vetor linearizado
// *********************************************************************
// Inverse of getFlat: recovers (x, y, z) from a flattened index.
// Always returns 0.
__host__ __device__
int getCoord(int flat, int *x, int *y, int *z){
    const int slice = WIDTH * WIDTH;
    *z = flat / slice;
    int rem = flat - (*z) * slice;
    *y = rem / WIDTH;
    *x = rem - (*y) * WIDTH;
    return 0;
}
// *********************************************************************
// calcula o pixel semente
// *********************************************************************
// Picks a seed voxel: scans the central row of slice SEED_Z, from the
// middle column outward, for the first voxel whose HU value lies strictly
// inside (HU_PULMAO_MIN, HU_PULMAO_MAX) — typically lung tissue.
// Returns the flat index of that voxel, or -1 when none is found.
int calculateSeed(int *imagedata){
    int y = WIDTH / 2;
    int z = SEED_Z;
    for (int i = WIDTH / 2; i < WIDTH; i++){
        int flat = getFlat(i, y, z);
        if (imagedata[flat] > HU_PULMAO_MIN && imagedata[flat] < HU_PULMAO_MAX){
            printf("seed[%d] (%d, %d, %d): %d\n", flat, i, y, z, imagedata[flat]);
            return flat;
        }
    }
    return -1;
}
// *********************************************************************
// funcao para verificar se é um pixel vizinho a região
// *********************************************************************
// True when any of the (up to) 26 neighbours of `index` already belongs
// to the region (regiondata == 1).
// NOTE(review): the `> 0` bounds exclude coordinate 0, so voxels on the
// first plane/row/column are never considered neighbours — confirm intended.
__host__ __device__
bool isNeighbor(int index, int *regiondata, int depth){
    int x, y, z;
    getCoord(index, &x, &y, &z);
    for (int dz = -1; dz <= 1; dz++){
        for (int dy = -1; dy <= 1; dy++){
            for (int dx = -1; dx <= 1; dx++){
                int k = z + dz, j = y + dy, i = x + dx;
                if ((k > 0 && k < depth) && (j > 0 && j < WIDTH) && (i > 0 && i < WIDTH)){
                    int neighbor = getFlat(i, j, k);
                    if (neighbor != index && regiondata[neighbor] == 1)
                        return true;
                }
            }
        }
    }
    return false;
}
// *********************************************************************
// MIN-MAX HU normalization
// *********************************************************************
// Clamps a Hounsfield value to [MIN_HU, MAX_HU] and min-max normalizes it
// into [0, 1].
__host__ __device__
float normalizeHU(int hu){
    int clamped = hu;
    if (clamped < MIN_HU) clamped = MIN_HU;
    if (clamped > MAX_HU) clamped = MAX_HU;
    return ((float)abs(clamped - MIN_HU)) / abs(MAX_HU - MIN_HU);
}
// *********************************************************************
// funcao para calcular o vetor de caracteristicas (HU, MEAN, MIN, MAX, CVE)
// *********************************************************************
// Fills `vector` with the feature vector of voxel `index`:
//   [0] normalized HU of the voxel itself
//   [1] mean normalized HU over its (up to) 3x3x3 neighbourhood
//   [2] minimum, [3] maximum over the same neighbourhood
//   [4] CVE texture measure — not implemented yet, always 0
// Neighbour coordinates equal to 0 are excluded by the `> 0` bounds
// (mirrors isNeighbor). Always returns 0.
// Fixes vs. previous version: removed the seven dead CVE scratch locals
// that were declared but never read.
__host__ __device__
int calculateFeatures(int index, int *pixeldata, int depth, float *vector){
    vector[0] = normalizeHU(pixeldata[index]); // HU of the voxel itself
    vector[4] = 0;                             // CVE placeholder
    int x, y, z;
    getCoord(index, &x, &y, &z);
    float min = 1;
    float max = 0;
    float sum = 0;
    float qtde = 0;
    // Accumulate mean/min/max over the in-bounds neighbourhood (voxel included).
    for (int k = z - 1; k <= z + 1; k++){
        for (int j = y - 1; j <= y + 1; j++){
            for (int i = x - 1; i <= x + 1; i++){
                if (((k > 0) && (k < depth)) && ((j > 0) && (j < WIDTH)) && ((i > 0) && (i < WIDTH))){
                    float hu = normalizeHU(pixeldata[getFlat(i, j, k)]);
                    sum += hu;
                    if (hu < min) min = hu;
                    if (hu > max) max = hu;
                    qtde++;
                }
            }
        }
    }
    vector[1] = sum / qtde; // MEAN (NaN if the whole neighbourhood is out of bounds, as before)
    vector[2] = min;        // MIN
    vector[3] = max;        // MAX
    return 0;
}
// Euclidean distance between two feature vectors of length NUM_FEATURES.
// Single-precision throughout: the original pow(x, 2)/sqrt calls promoted to
// double-precision transcendentals inside a float-only device function.
__device__
float calculateDistance(float *vector, float *seed){
    float sum = 0.0f;
    for (int i = 0; i < NUM_FEATURES; i++){
        float diff = vector[i] - seed[i];
        sum += diff * diff;   // cheaper and more precise than pow(diff, 2)
    }
    return sqrtf(sum);
}
// One region-growing sweep. A voxel joins the region (regiondata[i] = 1) when
// it is adjacent to the current region and its feature vector is within LIMIAR
// of the seed's. *incluidos accumulates the number of voxels added in this
// sweep so the host loop can detect convergence.
__global__
void regionGrowing(int *imagedata, int *regiondata, float *seed_vector, int *incluidos, int depth){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int z = blockIdx.z * blockDim.z + threadIdx.z;
    if ((x < WIDTH) && (y < WIDTH) && (z < depth)){
        int i = getFlat(x, y, z);
        if ((regiondata[i] != 1) && (isNeighbor(i, regiondata, depth))){
            float vector[NUM_FEATURES];
            calculateFeatures(i, imagedata, depth, vector);
            float distance = calculateDistance(vector, seed_vector);
            if (distance < LIMIAR){
                regiondata[i] = 1;
                // Many threads can pass the test in the same sweep; the
                // original `*incluidos += 1` was a data race that could lose
                // increments and terminate the host loop prematurely.
                atomicAdd(incluidos, 1);
            }
        }
    }
}
// Applies the grown region as a mask over the original volume: voxels outside
// the region become MIN_HU (background); voxels inside keep their HU value.
__global__
void regionMask(int *imagedata, int *regiondata, int depth){
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    const int z = blockIdx.z * blockDim.z + threadIdx.z;
    if (x >= WIDTH || y >= WIDTH || z >= depth)
        return;
    const int idx = getFlat(x, y, z);
    regiondata[idx] = (regiondata[idx] == 0) ? MIN_HU : imagedata[idx];
}
// *********************************************************************
// função principal do programa
// *********************************************************************
// Program entry point: loads a CT volume, grows a region from a fixed seed on
// the GPU until no new voxels are included, then saves the region and prints
// a summary.
int main(void)
{
    // 1. initialize host-side variables
    int num_slices = 0;
    num_slices = countSlices();
    int num_elementos = num_slices * WIDTH * WIDTH;
    size_t sizect = num_elementos * sizeof(int);
    // NOTE(review): malloc results are not checked; a very large CT could
    // return NULL here and crash in the initialization loop below.
    int *h_imagedata = (int *)malloc(sizect);
    int *h_regiondata = (int *)malloc(sizect);
    // initialize the region buffer with zeros
    for (int i = 0; i < num_elementos; i++) h_regiondata[i] = 0;
    // 2. load the CT slices into main memory
    printf(">>> carregando a tomografia na memória principal \n");
    if (loadCT(h_imagedata) != 0){
    printf("erro ao carregar arquivos da tomografia\n");
    return(-1);
    }
    // 3. allocate device buffers
    // NOTE(review): cudaMalloc return codes are not checked anywhere below.
    int *d_imagedata;
    cudaMalloc((void **)&d_imagedata, sizect);
    int *d_regiondata;
    cudaMalloc((void **)&d_regiondata, sizect);
    // 4. pick the seed voxel and compute its feature vector (HU, MEAN, MIN, MAX, CVE)
    printf(">>> identificando a semente\n");
    int index_seed = 0;
    //index_seed = calculateSeed(h_imagedata);
    index_seed = getFlat(SEED_X, SEED_Y, SEED_Z);
    if ( index_seed < 0){
    printf("couldn't find seed pixel, try another slice\n");
    return(-1);
    }
    if (index_seed == 0){
    printf("não obteve a semente para o crescimento de região\n");
    return(-1);
    }else{
    printf("seed position: %d\n", index_seed);
    }
    // mark the seed as the initial region
    h_regiondata[index_seed] = 1;
    // compute the seed's feature vector (HU, MEAN, MIN, MAX, CVE)
    size_t size_vector = 5 * sizeof(float);
    float *h_seed_vector = (float *)malloc(size_vector);
    float *d_seed_vector;
    cudaMalloc((void **)&d_seed_vector, size_vector);
    calculateFeatures(index_seed, h_imagedata, num_slices, h_seed_vector);
    // 5. copy the data to device memory
    cudaMemcpy(d_imagedata, h_imagedata, sizect, cudaMemcpyHostToDevice);
    cudaMemcpy(d_regiondata, h_regiondata, sizect, cudaMemcpyHostToDevice);
    cudaMemcpy(d_seed_vector, h_seed_vector, size_vector, cudaMemcpyHostToDevice);
    // 4. region-growing loop: iterate until a sweep includes no new voxels
    int *h_incluidos = (int *)malloc(sizeof(int));
    int *d_incluidos;
    cudaMalloc((void **)&d_incluidos, sizeof(int));
    // launch configuration: 16x16x4 threads per block
    dim3 dimBlock(16, 16, 4);
    // NOTE(review): ceil-division over z would be (num_slices+3)/4;
    // (num_slices+4)/4 launches one extra (empty) z-block when num_slices is
    // a multiple of 4 — harmless, since the kernel bounds-checks.
    dim3 dimGrid(32, 32, (num_slices+4)/4);
    int iteracao = 0;
    do{
    *h_incluidos = 0;
    cudaMemcpy(d_incluidos, h_incluidos, sizeof(int), cudaMemcpyHostToDevice);
    regionGrowing<<<dimGrid,dimBlock>>>(d_imagedata, d_regiondata, d_seed_vector, d_incluidos, num_slices);
    cudaDeviceSynchronize();
    cudaMemcpy(h_incluidos, d_incluidos, sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d) incluidos=%d\n", iteracao++, *h_incluidos); //debug
    } while(*h_incluidos != 0);
    // 5. Kernel that would mask the original image with the grown region
    // (result would be stored in d_regiondata) — currently disabled.
    // regionMask<<<dimGrid,dimBlock>>>(d_imagedata, d_regiondata, num_slices);
    // 6. copy the result back to main memory
    cudaMemcpy(h_regiondata, d_regiondata, sizect, cudaMemcpyDeviceToHost);
    // 7. save to disk
    printf(">>> carregando a região em disco \n");
    if (saveCT(h_regiondata, num_slices) != 0){
    printf("erro ao salvar o resultado em disco\n");
    return(-1);
    }
    // 8. summary
    printf(">>> resumo da TC \n");
    printf("num slices da TC: %d\n", num_slices);
    printf("tamanho da TC (elementos): %d\n", num_elementos);
    printf("tamanho da TC (bytes): %lu\n", sizect);
    int volume = 0;
    for (int i = 0; i<num_elementos;i++){
    if (h_regiondata[i] == 1) volume +=1;
    }
    printf("volume da região (pixels): %d\n", volume);
    // 9. cleaning
    free (h_imagedata);
    free (h_regiondata);
    free(h_seed_vector);
    free(h_incluidos);
    cudaFree(d_imagedata);
    cudaFree(d_regiondata);
    cudaFree(d_seed_vector);
    cudaFree(d_incluidos);
    printf("Done\n");
    return 0;
}
|
e39b607074563336a1959f38eb1501d6125dda7c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "convlayer.h"
// Gradient step: output[i] += dt * grad[i] for the first N elements,
// one thread per element.
__global__ void calc_gradient(float *output, float *grad, int N)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    output[idx] += dt * grad[idx];
}
// Forward 5x5 convolution: 28x28 input -> 6 feature maps of 24x24.
// One thread per (kernel-row, kernel-col, map, out-row, out-col) tuple;
// partial products are accumulated into `middle` with atomics.
__global__ void apply_convolve_1(float input[28][28], float middle[6][24][24], float weight[6][5][5], float * bias) {
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    int total_operations = 5 * 5 * 6 * 24 * 24;
    if(pos < total_operations) {
        int i1 = (pos /= 1) % 5;   // kernel row
        int i2 = (pos /= 5) % 5;   // kernel col
        int i3 = (pos /= 5) % 6;   // output feature map
        int i4 = (pos /= 6) % 24;  // output row
        int i5 = (pos /= 24) % 24; // output col
        atomicAdd(&middle[i3][i4][i5], weight[i3][i1][i2] * input[i4 + i1][i5 + i2]);
        if(i1 == 0 && i2 == 0) {
            // Bias is added once per output element, but runs concurrently
            // with the atomicAdds above; the original non-atomic `+=` was a
            // read-modify-write race that could drop accumulated products.
            atomicAdd(&middle[i3][i4][i5], bias[i3]);
        }
    }
}
// Stride-4 convolution with one shared 4x4 kernel: 6x24x24 -> 6x6x6
// (acts like a learned downsampling/pooling stage).
__global__ void apply_strided_convolve_2(float input[6][24][24], float middle[6][6][6], float weight[1][4][4], float * bias) {
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    int total_operations = 4 * 4 * 6 * 6 * 6;
    if(pos < total_operations) {
        int i1 = (pos /= 1) % 4;  // kernel row
        int i2 = (pos /= 4) % 4;  // kernel col
        int i3 = (pos /= 4) % 6;  // feature map
        int i4 = (pos /= 6) % 6;  // output row
        int i5 = (pos /= 6) % 6;  // output col
        atomicAdd(&middle[i3][i4][i5], weight[0][i1][i2] * input[i3][i4 * 4 + i1][i5 * 4 + i2]);
        if(i1 == 0 && i2 == 0) {
            // The original non-atomic `+=` raced with the concurrent
            // atomicAdds on the same element; make the bias add atomic too.
            atomicAdd(&middle[i3][i4][i5], bias[0]);
        }
    }
}
// Fully-connected output layer expressed as a convolution: 6x6x6 -> 10 logits.
__global__ void final_convolve_3(float input[6][6][6], float middle[10], float weight[10][6][6][6], float * bias)
{
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    int total_operations = 10 * 6 * 6 * 6;
    if(pos < total_operations) {
        int i1 = (pos /= 1) % 10;  // output neuron
        int i2 = (pos /= 10) % 6;
        int i3 = (pos /= 6) % 6;
        int i4 = (pos /= 6) % 6;
        atomicAdd(&middle[i1], weight[i1][i2][i3][i4] * input[i2][i3][i4]);
        if(i2 == 0 && i3 == 0 && i4 == 0) {
            // The original non-atomic `+=` raced with the concurrent
            // atomicAdds on middle[i1]; make the bias add atomic too.
            atomicAdd(&middle[i1], bias[i1]);
        }
    }
}
// Elementwise logistic sigmoid: output[i] = 1 / (1 + e^{-middle[i]}).
// NOTE: output_size is declared float but is used as an element count.
__global__ void apply_sigmoid(float * middle, float * output, float output_size) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= output_size)
        return;
    output[idx] = 1 / (1 + exp(-middle[idx]));
}
// Weight gradient of the final layer: d_weight[o][z][y][x] = middle[o] * output[z][y][x].
__global__ void backpass_final_3(float d_weight[10][6][6][6], float middle[10], float output[6][6][6]) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    const int total = 10 * 6 * 6 * 6;
    if (idx < total) {
        const int o = idx % 10;        // output neuron
        const int z = (idx / 10) % 6;
        const int y = (idx / 60) % 6;
        const int x = (idx / 360) % 6;
        d_weight[o][z][y][x] = middle[o] * output[z][y][x];
    }
}
// Applies the learning-rate-scaled output gradient to the 10 final-layer biases.
__global__ void backpass_final_bias_3(float bias[10], float middle[10]) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= 10)
        return;
    bias[idx] += dt * middle[idx];
}
// Back-propagates the output-layer gradient to the 6x6x6 activations:
// output[z][y][x] += sum_o weight[o][z][y][x] * middle[o].
__global__ void backpass_strided_convolve_2(float output[6][6][6], float weight[10][6][6][6], float middle[10]) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int total = 10 * 6 * 6 * 6;
    if (idx < total) {
        const int o = idx % 10;        // output neuron
        const int z = (idx / 10) % 6;
        const int y = (idx / 60) % 6;
        const int x = (idx / 360) % 6;
        atomicAdd(&output[z][y][x], weight[o][z][y][x] * middle[o]);
    }
}
// Chain rule through the sigmoid: d_middle = output_grad * s * (1 - s),
// where s = sigmoid(middle).
__global__ void backpass_strided_convolve_middle_2(float d_middle[6][6][6], float output[6][6][6], float middle[6][6][6])
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < 6 * 6 * 6) {
        const int a = idx % 6;
        const int b = (idx / 6) % 6;
        const int c = (idx / 36) % 6;
        const float s = 1 / (1 + exp(-middle[a][b][c]));
        d_middle[a][b][c] = output[a][b][c] * s * (1 - s);
    }
}
// Gradient of the shared 4x4 kernel of the strided layer: every (map, row,
// col) output position contributes to each kernel tap via an atomicAdd.
__global__ void backpass_strided_convolve_weight_2(float weight[1][4][4], float middle[6][6][6], float output[6][24][24])
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int total = 1 * 4 * 4 * 6 * 6 * 6;
    if (idx < total) {
        const int kr = idx % 4;         // kernel row (leading %1 index is always 0)
        const int kc = (idx / 4) % 4;   // kernel col
        const int m  = (idx / 16) % 6;  // feature map
        const int r  = (idx / 96) % 6;  // output row
        const int c  = (idx / 576) % 6; // output col
        atomicAdd(&weight[0][kr][kc], middle[m][r][c] * output[m][r * 4 + kr][c * 4 + kc]);
    }
}
// Accumulates the averaged, learning-rate-scaled gradient into the single
// shared bias of the strided layer.
__global__ void backpass_strided_convolve_bias_2(float bias[1], float middle[6][6][6])
{
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    int total_operations = 6*6*6;
    // 6*6*6 = 216 contributions are averaged. The original computed this as
    // pow(6.0f, 3.0f) — a per-thread double-precision libm call.
    const float d = 6.0f * 6.0f * 6.0f;
    if(pos < total_operations) {
        int i1 = (pos /= 1) % 6;
        int i2 = (pos /= 6) % 6;
        int i3 = (pos /= 6) % 6;
        atomicAdd(&bias[0], dt * middle[i1][i2][i3] / d);
    }
}
// Back-propagates the strided-layer gradient to the 6x24x24 activations:
// scatter weight * middle into the stride-4 input positions.
__global__ void backpass_convolve_1(float output[6][24][24], float weight[1][4][4], float middle[6][6][6])
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int total = 1 * 4 * 4 * 6 * 6 * 6;
    if (idx < total) {
        const int kr = idx % 4;         // kernel row (leading %1 index is always 0)
        const int kc = (idx / 4) % 4;   // kernel col
        const int m  = (idx / 16) % 6;  // feature map
        const int r  = (idx / 96) % 6;  // downsampled row
        const int c  = (idx / 576) % 6; // downsampled col
        atomicAdd(&output[m][r * 4 + kr][c * 4 + kc], weight[0][kr][kc] * middle[m][r][c]);
    }
}
// Chain rule through the sigmoid of the first conv layer:
// d_middle = output_grad * s * (1 - s), with s = sigmoid(middle).
__global__ void backpass_convolve_middle_1(float d_middle[6][24][24], float output[6][24][24], float middle[6][24][24])
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < 6 * 24 * 24) {
        const int m = idx % 6;
        const int r = (idx / 6) % 24;
        const int c = (idx / 144) % 24;
        const float s = 1 / (1 + exp(-middle[m][r][c]));
        d_middle[m][r][c] = output[m][r][c] * s * (1 - s);
    }
}
// Gradient of the first-layer 5x5 kernels, averaged over the 24x24 output
// positions. (Name keeps the original "backpas" spelling: callers depend on it.)
__global__ void backpas_convolve_weight_1(float weight[6][5][5], float middle[6][24][24], float output[28][28])
{
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    int total_operations = 6*5*5*24*24;
    // 24*24 = 576 contributions per weight are averaged. The original used
    // pow(24.0f, 2.0f) — a per-thread double-precision libm call.
    const float d = 24.0f * 24.0f;
    if(pos < total_operations) {
        int i1 = (pos /= 1) % 6;   // feature map
        int i2 = (pos /= 6) % 5;   // kernel row
        int i3 = (pos /= 5) % 5;   // kernel col
        int i4 = (pos /= 5) % 24;  // output row
        int i5 = (pos /= 24) % 24; // output col
        atomicAdd(&weight[i1][i2][i3], middle[i1][i4][i5] * output[i4 + i2][i5 + i3] / d);
    }
}
// Accumulates the averaged, learning-rate-scaled gradient into the 6
// first-layer biases.
__global__ void backpass_convolve_bias_1(float bias[6], float middle[6][24][24])
{
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    int total_operations = 6*24*24;
    // 24*24 = 576 contributions per bias are averaged. The original used
    // pow(24.0f, 2.0f) — a per-thread double-precision libm call.
    const float d = 24.0f * 24.0f;
    if(pos < total_operations) {
        int i1 = (pos /= 1) % 6;
        int i2 = (pos /= 6) % 24;
        int i3 = (pos /= 24) % 24;
        atomicAdd(&bias[i1], dt * middle[i1][i2][i3] / d);
    }
}
// Output error against a one-hot label: err[i] = one_hot(Y)[i] - output[i].
__global__ void calcError(float *err, float *output, unsigned int Y, int N)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    const float target = (Y == idx ? 1.0f : 0.0f);  // 1 only at the label class
    err[idx] = target - output[idx];
}
| e39b607074563336a1959f38eb1501d6125dda7c.cu | #include "convlayer.h"
// Gradient step: output[pos] += dt * grad[pos], one thread per element.
__global__ void calc_gradient(float *output, float *grad, int N)
{
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    if(pos < N){
        output[pos] += dt * grad[pos];
    }
}
// Forward 5x5 convolution: 28x28 input -> 6 feature maps of 24x24.
// One thread per (kernel-row, kernel-col, map, out-row, out-col) tuple;
// partial products are accumulated into `middle` with atomics.
__global__ void apply_convolve_1(float input[28][28], float middle[6][24][24], float weight[6][5][5], float * bias) {
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    int total_operations = 5 * 5 * 6 * 24 * 24;
    if(pos < total_operations) {
        int i1 = (pos /= 1) % 5;   // kernel row
        int i2 = (pos /= 5) % 5;   // kernel col
        int i3 = (pos /= 5) % 6;   // output feature map
        int i4 = (pos /= 6) % 24;  // output row
        int i5 = (pos /= 24) % 24; // output col
        atomicAdd(&middle[i3][i4][i5], weight[i3][i1][i2] * input[i4 + i1][i5 + i2]);
        if(i1 == 0 && i2 == 0) {
            // Bias is added once per output element, but runs concurrently
            // with the atomicAdds above; the original non-atomic `+=` was a
            // read-modify-write race that could drop accumulated products.
            atomicAdd(&middle[i3][i4][i5], bias[i3]);
        }
    }
}
// Stride-4 convolution with one shared 4x4 kernel: 6x24x24 -> 6x6x6
// (acts like a learned downsampling/pooling stage).
__global__ void apply_strided_convolve_2(float input[6][24][24], float middle[6][6][6], float weight[1][4][4], float * bias) {
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    int total_operations = 4 * 4 * 6 * 6 * 6;
    if(pos < total_operations) {
        int i1 = (pos /= 1) % 4;  // kernel row
        int i2 = (pos /= 4) % 4;  // kernel col
        int i3 = (pos /= 4) % 6;  // feature map
        int i4 = (pos /= 6) % 6;  // output row
        int i5 = (pos /= 6) % 6;  // output col
        atomicAdd(&middle[i3][i4][i5], weight[0][i1][i2] * input[i3][i4 * 4 + i1][i5 * 4 + i2]);
        if(i1 == 0 && i2 == 0) {
            // The original non-atomic `+=` raced with the concurrent
            // atomicAdds on the same element; make the bias add atomic too.
            atomicAdd(&middle[i3][i4][i5], bias[0]);
        }
    }
}
// Fully-connected output layer expressed as a convolution: 6x6x6 -> 10 logits.
__global__ void final_convolve_3(float input[6][6][6], float middle[10], float weight[10][6][6][6], float * bias)
{
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    int total_operations = 10 * 6 * 6 * 6;
    if(pos < total_operations) {
        int i1 = (pos /= 1) % 10;  // output neuron
        int i2 = (pos /= 10) % 6;
        int i3 = (pos /= 6) % 6;
        int i4 = (pos /= 6) % 6;
        atomicAdd(&middle[i1], weight[i1][i2][i3][i4] * input[i2][i3][i4]);
        if(i2 == 0 && i3 == 0 && i4 == 0) {
            // The original non-atomic `+=` raced with the concurrent
            // atomicAdds on middle[i1]; make the bias add atomic too.
            atomicAdd(&middle[i1], bias[i1]);
        }
    }
}
// Elementwise logistic sigmoid: output[pos] = 1 / (1 + e^{-middle[pos]}).
// NOTE: output_size is declared float but is used as an element count.
__global__ void apply_sigmoid(float * middle, float * output, float output_size) {
    int pos = blockDim.x * blockIdx.x + threadIdx.x;
    if(pos < output_size) {
        output[pos] = 1 / (1 + exp(-middle[pos]));
    }
}
// Weight gradient of the final layer:
// d_weight[i1][i2][i3][i4] = middle[i1] * output[i2][i3][i4].
__global__ void backpass_final_3(float d_weight[10][6][6][6], float middle[10], float output[6][6][6]) {
    int pos = blockDim.x * blockIdx.x + threadIdx.x;
    int total_operations = 10 * 6 * 6 * 6;
    if(pos < total_operations) {
        int i1 = (pos /= 1) % 10;  // output neuron
        int i2 = (pos /= 10) % 6;
        int i3 = (pos /= 6) % 6;
        int i4 = (pos /= 6) % 6;
        d_weight[i1][i2][i3][i4] = middle[i1] * output[i2][i3][i4];
    }
}
// Applies the learning-rate-scaled output gradient to the 10 final-layer biases.
__global__ void backpass_final_bias_3(float bias[10], float middle[10]) {
    int pos = blockDim.x * blockIdx.x + threadIdx.x;
    int total_operations = 10;
    if(pos < total_operations) {
        bias[pos] += dt * middle[pos];
    }
}
// Back-propagates the output-layer gradient to the 6x6x6 activations:
// output[i2][i3][i4] += sum_{i1} weight[i1][i2][i3][i4] * middle[i1].
__global__ void backpass_strided_convolve_2(float output[6][6][6], float weight[10][6][6][6], float middle[10]) {
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    int total_operations = 10 * 6 * 6 * 6;
    if (pos < total_operations) {
        int i1 = (pos /= 1) % 10;  // output neuron
        int i2 = (pos /= 10) % 6;
        int i3 = (pos /= 6) % 6;
        int i4 = (pos /= 6) % 6;
        atomicAdd(&output[i2][i3][i4], weight[i1][i2][i3][i4] * middle[i1]);
    }
}
// Chain rule through the sigmoid: d_middle = output_grad * s * (1 - s),
// where s = sigmoid(middle).
__global__ void backpass_strided_convolve_middle_2(float d_middle[6][6][6], float output[6][6][6], float middle[6][6][6])
{
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    int total_operations = 6*6*6;
    if(pos < total_operations){
        int i1 = (pos /= 1) % 6;
        int i2 = (pos /= 6) % 6;
        int i3 = (pos /= 6) % 6;
        float sigm = 1 / (1 + exp(-middle[i1][i2][i3]));
        d_middle[i1][i2][i3] = output[i1][i2][i3] * sigm * (1 - sigm);
    }
}
// Gradient of the shared 4x4 kernel of the strided layer: every (map, row,
// col) output position contributes to each kernel tap via an atomicAdd.
__global__ void backpass_strided_convolve_weight_2(float weight[1][4][4], float middle[6][6][6], float output[6][24][24])
{
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    int total_operations = 1*4*4*6*6*6;
    if(pos < total_operations){
        int i1 = (pos /= 1) % 1;  // always 0 (single shared kernel)
        int i2 = (pos /= 1) % 4;  // kernel row
        int i3 = (pos /= 4) % 4;  // kernel col
        int i4 = (pos /= 4) % 6;  // feature map
        int i5 = (pos /= 6) % 6;  // output row
        int i6 = (pos /= 6) % 6;  // output col
        atomicAdd(&weight[i1][i2][i3], middle[i4][i5][i6] * output[i4][i5 * 4 + i2][i6 * 4 + i3]);
    }
}
// Accumulates the averaged, learning-rate-scaled gradient into the single
// shared bias of the strided layer.
__global__ void backpass_strided_convolve_bias_2(float bias[1], float middle[6][6][6])
{
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    int total_operations = 6*6*6;
    // 6*6*6 = 216 contributions are averaged. The original computed this as
    // pow(6.0f, 3.0f) — a per-thread double-precision libm call.
    const float d = 6.0f * 6.0f * 6.0f;
    if(pos < total_operations) {
        int i1 = (pos /= 1) % 6;
        int i2 = (pos /= 6) % 6;
        int i3 = (pos /= 6) % 6;
        atomicAdd(&bias[0], dt * middle[i1][i2][i3] / d);
    }
}
// Back-propagates the strided-layer gradient to the 6x24x24 activations:
// scatter weight * middle into the stride-4 input positions.
__global__ void backpass_convolve_1(float output[6][24][24], float weight[1][4][4], float middle[6][6][6])
{
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    int total_operations = 1*4*4*6*6*6;
    if(pos < total_operations) {
        int i1 = (pos /= 1) % 1;  // always 0 (single shared kernel)
        int i2 = (pos /= 1) % 4;  // kernel row
        int i3 = (pos /= 4) % 4;  // kernel col
        int i4 = (pos /= 4) % 6;  // feature map
        int i5 = (pos /= 6) % 6;  // downsampled row
        int i6 = (pos /= 6) % 6;  // downsampled col
        atomicAdd(&output[i4][i5 * 4 + i2][i6 * 4 + i3], weight[i1][i2][i3] * middle[i4][i5][i6]);
    }
}
// Chain rule through the sigmoid of the first conv layer:
// d_middle = output_grad * o * (1 - o), with o = sigmoid(middle).
__global__ void backpass_convolve_middle_1(float d_middle[6][24][24], float output[6][24][24], float middle[6][24][24])
{
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    int total_operations = 6*24*24;
    if(pos < total_operations) {
        int i1 = (pos /= 1 ) % 6;
        int i2 = (pos /= 6 ) % 24;
        int i3 = (pos /= 24 ) % 24;
        float o = 1 / (1 + exp(-middle[i1][i2][i3]));
        d_middle[i1][i2][i3] = output[i1][i2][i3] * o * (1 - o);
    }
}
// Gradient of the first-layer 5x5 kernels, averaged over the 24x24 output
// positions. (Name keeps the original "backpas" spelling: callers depend on it.)
__global__ void backpas_convolve_weight_1(float weight[6][5][5], float middle[6][24][24], float output[28][28])
{
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    int total_operations = 6*5*5*24*24;
    // 24*24 = 576 contributions per weight are averaged. The original used
    // pow(24.0f, 2.0f) — a per-thread double-precision libm call.
    const float d = 24.0f * 24.0f;
    if(pos < total_operations) {
        int i1 = (pos /= 1) % 6;   // feature map
        int i2 = (pos /= 6) % 5;   // kernel row
        int i3 = (pos /= 5) % 5;   // kernel col
        int i4 = (pos /= 5) % 24;  // output row
        int i5 = (pos /= 24) % 24; // output col
        atomicAdd(&weight[i1][i2][i3], middle[i1][i4][i5] * output[i4 + i2][i5 + i3] / d);
    }
}
// Accumulates the averaged, learning-rate-scaled gradient into the 6
// first-layer biases.
__global__ void backpass_convolve_bias_1(float bias[6], float middle[6][24][24])
{
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    int total_operations = 6*24*24;
    // 24*24 = 576 contributions per bias are averaged. The original used
    // pow(24.0f, 2.0f) — a per-thread double-precision libm call.
    const float d = 24.0f * 24.0f;
    if(pos < total_operations) {
        int i1 = (pos /= 1) % 6;
        int i2 = (pos /= 6) % 24;
        int i3 = (pos /= 24) % 24;
        atomicAdd(&bias[i1], dt * middle[i1][i2][i3] / d);
    }
}
// Output error against a one-hot label: err[pos] = one_hot(Y)[pos] - output[pos].
__global__ void calcError(float *err, float *output, unsigned int Y, int N)
{
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    if(pos < N) {
        err[pos] = ((Y == pos ? 1.0f : 0.0f) - output[pos]);
    }
}
|
e611eca9aad9e296c59aa53fce131511d0cd7224.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "layer.h"
// --------------------------------------------------------------------------
// kernel code
// roi_pool_{gpu, cpu}
// --------------------------------------------------------------------------
// RoI pooling bottom3d (C x H x W) -> top4d (R x C x H' x W')
// given pixel (r, c, h, w) at top4d and RoI (x1, y1,, x2, y2),
// top4d[r][c][h][w] = max_{hb,wb}{ bottom3d[c][hb][wb] }
// hb, wb: pooling region corresponding to (h, w)
#ifdef GPU
// GPU RoI pooling: one thread per output element (r, c, h, w). Each thread
// max-pools its RoI sub-window of the bottom feature map and records the
// argmax (flat h*W+w index within the channel plane) for the backward pass.
__global__
void roi_pool_gpu(const real* const bottom3d,
                  const real* const roi2d,
                  real* const top4d,
                  int* const argmax4d,
                  const int R, const int C, const int H, const int W,
                  const int top_H, const int top_W,
                  const real spatial_scale)
{
  // thread index: (r, c, h, w) = r*C*H'*W' + c*H'*W' + h*W' + w
  const int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < R * C * top_W * top_H) {
    // parse thread index -> (r, c, h, w)
    const int r = index / top_W / top_H / C;
    const int c = (index / top_W / top_H) % C;
    const int h = (index / top_W) % top_H;
    const int w = index % top_W;
    // RoI in the bottom plane (image coords scaled down to feature-map coords)
    const int x1 = ROUND(roi2d[r * 4 + 0] * spatial_scale);
    const int y1 = ROUND(roi2d[r * 4 + 1] * spatial_scale);
    const int x2 = ROUND(roi2d[r * 4 + 2] * spatial_scale);
    const int y2 = ROUND(roi2d[r * 4 + 3] * spatial_scale);
    const int roi_W = x2 - x1 + 1;
    const int roi_H = y2 - y1 + 1;
    // pooling region for pixel top[r][c][h][w], clipped to [0, H) x [0, W)
    const int hb_start = MIN(H, MAX(0,
                         y1 + (h * roi_H) / top_H));
    const int hb_end = MIN(H, MAX(0,
                       y1 + DIV_THEN_CEIL((h + 1) * roi_H, top_H)));
    const int wb_start = MIN(W, MAX(0,
                         x1 + (w * roi_W) / top_W));
    const int wb_end = MIN(W, MAX(0,
                     x1 + DIV_THEN_CEIL((w + 1) * roi_W, top_W)));
    // find maximum in the bottom region
    // (the clipped start indices are always in-bounds, so this speculative
    //  read is safe even when the region turns out to be empty)
    const real* p_bottom3d = bottom3d + c * H * W;
    int maxidx = hb_start * W + wb_start;
    real maxval = p_bottom3d[maxidx];
    for (int hb = hb_start; hb < hb_end; ++hb) {
      for (int wb = wb_start; wb < wb_end; ++wb) {
        const int bottom_index = hb * W + wb;
        if (p_bottom3d[bottom_index] > maxval) {
          maxval = p_bottom3d[bottom_index];
          maxidx = bottom_index;
        }
      }
    }
    // if the bottom region is not empty,
    //   top[r][c][h][w] = "max in the region"
    // otherwise, assign 0 (and argmax -1), branchlessly
    {
      const int not_empty = (hb_start < hb_end) * (wb_start < wb_end);
      top4d[index] = not_empty * maxval;
      argmax4d[index] = not_empty * maxidx + (1 - not_empty) * (-1);
    }
  }
}
#else
// CPU fallback of RoI pooling: for each output element (r, c, h, w), max-pool
// the corresponding sub-window of the RoI in the bottom feature map and record
// the argmax for the backward pass. Empty windows produce 0 with argmax -1.
void roi_pool_cpu(const real* const bottom3d,
                  const real* const roi2d,
                  real* const top4d,
                  int* const argmax4d,
                  const int R, const int C, const int H, const int W,
                  const int top_H, const int top_W,
                  const real spatial_scale)
{
  // linear index: (r, c, h, w) = r*C*H'*W' + c*H'*W' + h*W' + w
  const int top_size = R * C * top_H * top_W;
  for (int index = 0; index < top_size; ++index) {
    // decompose index -> (r, c, h, w)
    const int r = index / top_W / top_H / C;
    const int c = (index / top_W / top_H) % C;
    const int h = (index / top_W) % top_H;
    const int w = index % top_W;
    // RoI in the bottom plane (image coords scaled down to feature-map coords)
    const int x1 = ROUND(roi2d[r * 4 + 0] * spatial_scale);
    const int y1 = ROUND(roi2d[r * 4 + 1] * spatial_scale);
    const int x2 = ROUND(roi2d[r * 4 + 2] * spatial_scale);
    const int y2 = ROUND(roi2d[r * 4 + 3] * spatial_scale);
    const int roi_W = x2 - x1 + 1;
    const int roi_H = y2 - y1 + 1;
    // pooling region for pixel top[r][c][h][w], clipped to [0, H) x [0, W)
    const int hb_start = MIN(H, MAX(0,
                         y1 + (h * roi_H) / top_H));
    const int hb_end = MIN(H, MAX(0,
                       y1 + DIV_THEN_CEIL((h + 1) * roi_H, top_H)));
    const int wb_start = MIN(W, MAX(0,
                         x1 + (w * roi_W) / top_W));
    const int wb_end = MIN(W, MAX(0,
                     x1 + DIV_THEN_CEIL((w + 1) * roi_W, top_W)));
    // empty pooling window: emit 0 / -1, matching the GPU kernel
    if (hb_start >= hb_end || wb_start >= wb_end) {
      top4d[index] = 0;
      argmax4d[index] = -1;
      continue;
    }
    // find maximum in the bottom region
    const real* p_bottom3d = bottom3d + c * H * W;
    int maxidx = hb_start * W + wb_start;
    real maxval = p_bottom3d[maxidx];
    for (int hb = hb_start; hb < hb_end; ++hb) {
      for (int wb = wb_start; wb < wb_end; ++wb) {
        const int bottom_index = hb * W + wb;
        if (p_bottom3d[bottom_index] > maxval) {
          maxval = p_bottom3d[bottom_index];
          maxidx = bottom_index;
        }
      }
    }
    top4d[index] = maxval;
    argmax4d[index] = maxidx;
    // (an unreachable copy of the GPU-path branchless epilogue that followed
    //  an unconditional `continue` here has been removed; behavior unchanged)
  }
}
#endif
// --------------------------------------------------------------------------
// layer operator code
// roipool_forward
// --------------------------------------------------------------------------
// RoI pooling: bottom -> top
// bottom: C x H x W
// roi: R x 4
// top: R x C x H' x W'
// argmax: R * C * H' * W' array
// RoI pooling forward pass over a batch: bottom -> top.
//   bottom: C x H x W per item;  roi: R x 4 per item
//   top: R x C x H' x W';        argmax: R * C * H' * W' ints (for backward)
// Walks the per-item data with running pointers, launching one kernel (GPU)
// or one CPU call per batch item, then fixes up the top tensor's shape
// metadata (optionally flattened to 2-D).
void roipool_forward(const Tensor* const bottom3d,
                     const Tensor* const roi2d,
                     Tensor* const top4d,
                     int* const argmax_data,
                     const LayerOption* option)
{
  // top height & width
  const int top_H = option->pooled_height; // H'
  const int top_W = option->pooled_width; // W'
  // do forward-pass for each item in the batch
  const real* p_bottom_item = bottom3d->data;
  const real* p_roi_item = roi2d->data;
  real* p_top_item = top4d->data;
  int* p_argmax_item = argmax_data;
  for (int n = 0; n < bottom3d->num_items; ++n) {
    // bottom shape: R x C x H X W
    const int R = roi2d->shape[n][0];
    const int C = bottom3d->shape[n][0];
    const int H = bottom3d->shape[n][1];
    const int W = bottom3d->shape[n][2];
    // set top shape: R x C x H' x W'
    top4d->shape[n][0] = R;
    top4d->shape[n][1] = C;
    top4d->shape[n][2] = top_H;
    top4d->shape[n][3] = top_W;
    // RoI pooling
    //   bottom3d (C x H x W) -> top4d (R x C x H' x W')
    {
  #ifdef GPU
      // one thread per top element
      const int num_threads = R * C * top_H * top_W;
      const int threads_per_block = 512;
      const int num_blocks = DIV_THEN_CEIL(num_threads, threads_per_block);
      hipLaunchKernelGGL(( roi_pool_gpu), dim3(num_blocks), dim3(threads_per_block), 0, 0,
          p_bottom_item, p_roi_item, p_top_item, p_argmax_item,
          R, C, H, W, top_H, top_W, option->spatial_scale);
  #else
      roi_pool_cpu(
          p_bottom_item, p_roi_item, p_top_item, p_argmax_item,
          R, C, H, W, top_H, top_W, option->spatial_scale);
  #endif
    }
    // locate next item: advance the running pointers past this item's data
    {
      const int bottom_size = C * H * W;
      const int roi_size = R * 4;
      const int top_size = R * C * top_H * top_W;
      p_bottom_item += bottom_size;
      p_roi_item += roi_size;
      p_top_item += top_size;
      p_argmax_item += top_size;
    }
  } // endfor batch
  // if option->flatten = true,
  //   reshape to 2d tensor: total_num_rois x (C * H' * W')
  if (option->flatten) {
    // for all items, C should be equal to each other
    const int C = bottom3d->shape[0][0];
    // calculate total number of RoI-pooled data
    int total_num_rois = 0;
    for (int n = 0; n < roi2d->num_items; ++n) {
      total_num_rois += roi2d->shape[n][0];
    }
    // reshape to 2d tensor: total_num_rois x (C * H' * W')
    top4d->ndim = 2;
    top4d->num_items = 1;
    top4d->shape[0][0] = total_num_rois;
    top4d->shape[0][1] = C * top_H * top_W;
    top4d->start[0] = 0;
  }
  else {
    // keep 4-D layout; record each item's start offset into the flat buffer
    top4d->ndim = 4;
    top4d->num_items = bottom3d->num_items;
    {
      int total_size = 0;
      for (int n = 0; n < bottom3d->num_items; ++n) {
        const int R = roi2d->shape[n][0];
        const int C = bottom3d->shape[n][0];
        const int top_size = R * C * top_H * top_W;
        top4d->start[n] = total_size;
        total_size += top_size;
      }
    }
  }
}
// --------------------------------------------------------------------------
// layer shape calculator code
// --------------------------------------------------------------------------
// Shape calculator for the RoI pooling layer: fills top4d's shape/start
// metadata (without touching data) and reports the argmax scratch size
// (one int per top element) so the caller can size its workspace.
void roipool_shape(const Tensor* const bottom3d,
                   const Tensor* const roi2d,
                   Tensor* const top4d,
                   int* const argmax_size,
                   const LayerOption* option)
{
  // top height & width
  const int top_H = option->pooled_height; // H'
  const int top_W = option->pooled_width; // W'
  // if option->flatten = true,
  //   reshape to 2d tensor: total_num_rois x (C * H' * W')
  if (option->flatten) {
    // for all items, C should be equal to each other
    const int C = bottom3d->shape[0][0];
    // calculate total number of RoI-pooled data
    int total_num_rois = 0;
    for (int n = 0; n < roi2d->num_items; ++n) {
      total_num_rois += roi2d->shape[n][0];
    }
    // reshape to 2d tensor: total_num_rois x (C * H' * W')
    top4d->ndim = 2;
    top4d->num_items = 1;
    top4d->shape[0][0] = total_num_rois;
    top4d->shape[0][1] = C * top_H * top_W;
    top4d->start[0] = 0;
    // argmax data size = total top size
    *argmax_size = top4d->shape[0][0] * top4d->shape[0][1];
    return;
  }
  // otherwise, calculate shape for each item in the batch
  for (int n = 0; n < bottom3d->num_items; ++n) {
    // bottom shape: R x C x H X W
    const int R = roi2d->shape[n][0];
    const int C = bottom3d->shape[n][0];
    // top shape: R x C x H' x W'
    top4d->shape[n][0] = R;
    top4d->shape[n][1] = C;
    top4d->shape[n][2] = top_H;
    top4d->shape[n][3] = top_W;
  }
  top4d->ndim = 4;
  top4d->num_items = bottom3d->num_items;
  {
    int total_size = 0;
    for (int n = 0; n < bottom3d->num_items; ++n) {
      const int R = roi2d->shape[n][0];
      const int C = bottom3d->shape[n][0];
      const int top_size = R * C * top_H * top_W;
      top4d->start[n] = total_size;
      total_size += top_size;
    }
    // argmax data size = total top size
    *argmax_size = total_size;
  }
}
// --------------------------------------------------------------------------
// API code
// --------------------------------------------------------------------------
// API adapter: unpacks the opaque net/layer handles and runs the RoI pooling
// forward pass (bottoms[0] = feature map, bottoms[1] = RoIs), then logs the
// resulting top tensor.
void forward_roipool_layer(void* const net_, void* const layer_)
{
  Net* const net = (Net*)net_;
  Layer* const layer = (Layer*)layer_;
  Tensor* const top = &layer->tops[0];
  roipool_forward(layer->p_bottoms[0], layer->p_bottoms[1], top,
                  net->tempint_data, &layer->option);
  print_tensor_info(layer->name, top);
}
// API adapter: computes the RoI pooling layer's output shape and registers
// the required temp-int workspace size with the network.
void shape_roipool_layer(void* const net_, void* const layer_)
{
  Net* const net = (Net*)net_;
  Layer* const layer = (Layer*)layer_;
  int tempint_size = 0;
  roipool_shape(layer->p_bottoms[0], layer->p_bottoms[1], &layer->tops[0],
                &tempint_size, &layer->option);
  update_net_size(net, layer, 0, tempint_size, 0);
}
// --------------------------------------------------------------------------
// test code
// --------------------------------------------------------------------------
#ifdef TEST
// Standalone test driver (built under -DTEST): loads reference bottom/roi/top
// dumps from disk, runs roipool_forward (GPU or CPU), and compares against
// the saved reference output.
int main(int argc, char* argv[])
{
  // variable declaration & memory allocation
  Tensor X, Y, roi;
  real *X_data = NULL, *Y_data = NULL, *Y_true_data = NULL;
  real *roi_data = NULL;
  int* p_argmax_data = NULL;
  LayerOption option;
  int argmax_size;
  // set option
  {
    option.pooled_height = 6;
    option.pooled_width = 6;
    option.spatial_scale = 0.0625;
    option.flatten = 0;
  }
  // load data
  {
    int ndim;
    int shape[g_max_ndim];
    int total_size;
    X_data = load_data("../data/temp/roipool_bottom0.bin",
                       &ndim, shape, NULL);
    X.num_items = shape[0];
    X.ndim = ndim - 1;
    total_size = 0;
    for (int n = 0; n < X.num_items; ++n) {
      int size_n = 1;
      for (int i = 0; i < X.ndim; ++i) {
        X.shape[n][i] = shape[i + 1];
        size_n *= shape[i + 1];
      }
      X.start[n] = total_size;
      total_size += size_n;
    }
    roi_data = load_data("../data/temp/roipool_bottom1.bin",
                         &ndim, shape, NULL);
    roi.num_items = X.num_items;
    roi.ndim = 2;
    for (int n = 0; n < roi.num_items; ++n) {
      roi.shape[n][1] = 4;
    }
    // repack the 5-column dump (item, x1, y1, x2, y2) into 4-column RoIs
    // in place — safe because each write index is behind its read index —
    // and count RoIs per batch item.
    {
      const int num_rois = shape[0];
      for (int i = 0; i < num_rois; ++i) {
        const int n = (int)ROUND(roi_data[i * 5 + 0]);
        const real x1 = roi_data[i * 5 + 1];
        const real y1 = roi_data[i * 5 + 2];
        const real x2 = roi_data[i * 5 + 3];
        const real y2 = roi_data[i * 5 + 4];
        // NOTE(review): roi.shape[n][0] is incremented without being zeroed
        // first; `roi` is a stack Tensor, so this relies on Tensor (or its
        // shape array) being zero-initialized — confirm.
        ++roi.shape[n][0];
        roi_data[i * 4 + 0] = x1;
        roi_data[i * 4 + 1] = y1;
        roi_data[i * 4 + 2] = x2;
        roi_data[i * 4 + 3] = y2;
      }
    }
    roipool_shape(&X, &roi, &Y, &argmax_size, &option);
    Y_true_data = load_data("../data/temp/roipool_top0.bin",
                            &ndim, shape, NULL);
    Y_data = (real*)malloc(flatten_size(&Y) * sizeof(real));
  }
  // CUDA initialization
#ifdef GPU
  {
    printf("set device\n");
    hipSetDevice(0);
  }
#endif
  // bind loaded data to corresponding tensors
#ifdef GPU
  {
    const long int X_size = flatten_size(&X);
    const long int Y_size = flatten_size(&Y);
    const long int roi_size = flatten_size(&roi);
    printf("gpu malloc\n");
    hipMalloc(&X.data, X_size * sizeof(real));
    hipMalloc(&roi.data, roi_size * sizeof(real));
    hipMalloc(&Y.data, Y_size * sizeof(real));
    hipMalloc(&p_argmax_data, argmax_size * sizeof(int));
    printf("memcpy: cpu -> gpu\n");
    // async copies on the default stream; the kernel below runs on the same
    // stream, so ordering is preserved
    hipMemcpyAsync(X.data, X_data, X_size * sizeof(real),
                   hipMemcpyHostToDevice);
    hipMemcpyAsync(roi.data, roi_data, roi_size * sizeof(real),
                   hipMemcpyHostToDevice);
  }
#else
  {
    X.data = X_data;
    Y.data = Y_data;
    roi.data = roi_data;
    p_argmax_data = (int*)malloc(argmax_size * sizeof(int));
  }
#endif
  // do forward operation
  {
    printf("do forward\n");
    roipool_forward(&X, &roi, &Y, p_argmax_data, &option);
  }
  // copy GPU data to main memory
#ifdef GPU
  {
    const long int Y_size = flatten_size(&Y);
    printf("memcpy: cpu <- gpu\n");
    // NOTE(review): async D2H copy into pageable memory is followed by a host
    // read with no explicit synchronization — add hipDeviceSynchronize() (or
    // use a blocking hipMemcpy) before the verification loop.
    hipMemcpyAsync(Y_data, Y.data, Y_size * sizeof(real),
                   hipMemcpyDeviceToHost);
  }
#endif
  // verify results against the reference dump (exact float comparison)
  {
    const long int Y_size = flatten_size(&Y);
    printf("verification\n");
    for (int i = 0; i < Y_size; ++i) {
      if (Y_data[i] != Y_true_data[i]) {
        printf("Y[%d] = %.6f  Y_true[%d] = %.6f\n",
               i, Y_data[i], i, Y_true_data[i]);
      }
    }
  }
  // memory deallocation
  {
    printf("free\n");
    free(X_data);
    free(roi_data);
    free(Y_data);
    free(Y_true_data);
  }
#ifdef GPU
  {
    printf("gpu free\n");
    hipFree(X.data);
    hipFree(roi.data);
    hipFree(Y.data);
    hipFree(p_argmax_data);
  }
#else
  {
    free(p_argmax_data);
  }
#endif
  return 0;
}
#endif // endifdef TEST
| e611eca9aad9e296c59aa53fce131511d0cd7224.cu | #include "layer.h"
// --------------------------------------------------------------------------
// kernel code
// roi_pool_{gpu, cpu}
// --------------------------------------------------------------------------
// RoI pooling bottom3d (C x H x W) -> top4d (R x C x H' x W')
// given pixel (r, c, h, w) at top4d and RoI (x1, y1,, x2, y2),
// top4d[r][c][h][w] = max_{hb,wb}{ bottom3d[c][hb][wb] }
// hb, wb: pooling region corresponding to (h, w)
#ifdef GPU
// GPU RoI pooling: one thread per output element (r, c, h, w). Each thread
// max-pools its RoI sub-window of the bottom feature map and records the
// argmax (flat h*W+w index within the channel plane) for the backward pass.
__global__
void roi_pool_gpu(const real* const bottom3d,
                  const real* const roi2d,
                  real* const top4d,
                  int* const argmax4d,
                  const int R, const int C, const int H, const int W,
                  const int top_H, const int top_W,
                  const real spatial_scale)
{
  // thread index: (r, c, h, w) = r*C*H'*W' + c*H'*W' + h*W' + w
  const int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < R * C * top_W * top_H) {
    // parse thread index -> (r, c, h, w)
    const int r = index / top_W / top_H / C;
    const int c = (index / top_W / top_H) % C;
    const int h = (index / top_W) % top_H;
    const int w = index % top_W;
    // RoI in the bottom plane (image coords scaled down to feature-map coords)
    const int x1 = ROUND(roi2d[r * 4 + 0] * spatial_scale);
    const int y1 = ROUND(roi2d[r * 4 + 1] * spatial_scale);
    const int x2 = ROUND(roi2d[r * 4 + 2] * spatial_scale);
    const int y2 = ROUND(roi2d[r * 4 + 3] * spatial_scale);
    const int roi_W = x2 - x1 + 1;
    const int roi_H = y2 - y1 + 1;
    // pooling region for pixel top[r][c][h][w], clipped to [0, H) x [0, W)
    const int hb_start = MIN(H, MAX(0,
                         y1 + (h * roi_H) / top_H));
    const int hb_end = MIN(H, MAX(0,
                       y1 + DIV_THEN_CEIL((h + 1) * roi_H, top_H)));
    const int wb_start = MIN(W, MAX(0,
                         x1 + (w * roi_W) / top_W));
    const int wb_end = MIN(W, MAX(0,
                     x1 + DIV_THEN_CEIL((w + 1) * roi_W, top_W)));
    // find maximum in the bottom region
    // (the clipped start indices are always in-bounds, so this speculative
    //  read is safe even when the region turns out to be empty)
    const real* p_bottom3d = bottom3d + c * H * W;
    int maxidx = hb_start * W + wb_start;
    real maxval = p_bottom3d[maxidx];
    for (int hb = hb_start; hb < hb_end; ++hb) {
      for (int wb = wb_start; wb < wb_end; ++wb) {
        const int bottom_index = hb * W + wb;
        if (p_bottom3d[bottom_index] > maxval) {
          maxval = p_bottom3d[bottom_index];
          maxidx = bottom_index;
        }
      }
    }
    // if the bottom region is not empty,
    //   top[r][c][h][w] = "max in the region"
    // otherwise, assign 0 (and argmax -1), branchlessly
    {
      const int not_empty = (hb_start < hb_end) * (wb_start < wb_end);
      top4d[index] = not_empty * maxval;
      argmax4d[index] = not_empty * maxidx + (1 - not_empty) * (-1);
    }
  }
}
#else
void roi_pool_cpu(const real* const bottom3d,
const real* const roi2d,
real* const top4d,
int* const argmax4d,
const int R, const int C, const int H, const int W,
const int top_H, const int top_W,
const real spatial_scale)
{
// thread index: (r, c, h, w) = r*C*H'*W' + c*H'*W' + h*W' + w
const int top_size = R * C * top_H * top_W;
for (int index = 0; index < top_size; ++index) {
// parse thread index -> (r, c, h, w)
const int r = index / top_W / top_H / C;
const int c = (index / top_W / top_H) % C;
const int h = (index / top_W) % top_H;
const int w = index % top_W;
// RoI in the bottom plane
const int x1 = ROUND(roi2d[r * 4 + 0] * spatial_scale);
const int y1 = ROUND(roi2d[r * 4 + 1] * spatial_scale);
const int x2 = ROUND(roi2d[r * 4 + 2] * spatial_scale);
const int y2 = ROUND(roi2d[r * 4 + 3] * spatial_scale);
const int roi_W = x2 - x1 + 1;
const int roi_H = y2 - y1 + 1;
// pooling region for pixel top[r][c][h][w]
const int hb_start = MIN(H, MAX(0,
y1 + (h * roi_H) / top_H));
const int hb_end = MIN(H, MAX(0,
y1 + DIV_THEN_CEIL((h + 1) * roi_H, top_H)));
const int wb_start = MIN(W, MAX(0,
x1 + (w * roi_W) / top_W));
const int wb_end = MIN(W, MAX(0,
x1 + DIV_THEN_CEIL((w + 1) * roi_W, top_W)));
if (hb_start >= hb_end || wb_start >= wb_end) {
top4d[index] = 0;
argmax4d[index] = -1;
continue;
}
// find maximum in the bottom region
const real* p_bottom3d = bottom3d + c * H * W;
int maxidx = hb_start * W + wb_start;
real maxval = p_bottom3d[maxidx];
for (int hb = hb_start; hb < hb_end; ++hb) {
for (int wb = wb_start; wb < wb_end; ++wb) {
const int bottom_index = hb * W + wb;
if (p_bottom3d[bottom_index] > maxval) {
maxval = p_bottom3d[bottom_index];
maxidx = bottom_index;
}
}
}
top4d[index] = maxval;
argmax4d[index] = maxidx;
continue;
// if the bottom region is not empty,
// top[r][c][h][w] = "max in the region"
// otherwise, assign 0
{
const int not_empty = (hb_start < hb_end) * (wb_start < wb_end);
top4d[index] = not_empty * maxval;
argmax4d[index] = not_empty * maxidx + (1 - not_empty) * (-1);
}
}
}
#endif
// --------------------------------------------------------------------------
// layer operator code
// roipool_forward
// --------------------------------------------------------------------------
// RoI pooling: bottom -> top
// bottom: C x H x W
// roi: R x 4
// top: R x C x H' x W'
// argmax: R * C * H' * W' array
void roipool_forward(const Tensor* const bottom3d,
const Tensor* const roi2d,
Tensor* const top4d,
int* const argmax_data,
const LayerOption* option)
{
// top height & width
const int top_H = option->pooled_height; // H'
const int top_W = option->pooled_width; // W'
// do forward-pass for each item in the batch
const real* p_bottom_item = bottom3d->data;
const real* p_roi_item = roi2d->data;
real* p_top_item = top4d->data;
int* p_argmax_item = argmax_data;
for (int n = 0; n < bottom3d->num_items; ++n) {
// bottom shape: R x C x H X W
const int R = roi2d->shape[n][0];
const int C = bottom3d->shape[n][0];
const int H = bottom3d->shape[n][1];
const int W = bottom3d->shape[n][2];
// set top shape: R x C x H' x W'
top4d->shape[n][0] = R;
top4d->shape[n][1] = C;
top4d->shape[n][2] = top_H;
top4d->shape[n][3] = top_W;
// RoI pooling
// bottom3d (C x H x W) -> top4d (R x C x H' x W')
{
#ifdef GPU
const int num_threads = R * C * top_H * top_W;
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(num_threads, threads_per_block);
roi_pool_gpu<<<num_blocks, threads_per_block>>>(
p_bottom_item, p_roi_item, p_top_item, p_argmax_item,
R, C, H, W, top_H, top_W, option->spatial_scale);
#else
roi_pool_cpu(
p_bottom_item, p_roi_item, p_top_item, p_argmax_item,
R, C, H, W, top_H, top_W, option->spatial_scale);
#endif
}
// locate next item
{
const int bottom_size = C * H * W;
const int roi_size = R * 4;
const int top_size = R * C * top_H * top_W;
p_bottom_item += bottom_size;
p_roi_item += roi_size;
p_top_item += top_size;
p_argmax_item += top_size;
}
} // endfor batch
// if option->flatten = true,
// reshape to 2d tensor: total_num_rois x (C * H' * W')
if (option->flatten) {
// for all items, C should be equal to each other
const int C = bottom3d->shape[0][0];
// calculate total number of RoI-pooled data
int total_num_rois = 0;
for (int n = 0; n < roi2d->num_items; ++n) {
total_num_rois += roi2d->shape[n][0];
}
// reshape to 2d tensor: total_num_rois x (C * H' * W')
top4d->ndim = 2;
top4d->num_items = 1;
top4d->shape[0][0] = total_num_rois;
top4d->shape[0][1] = C * top_H * top_W;
top4d->start[0] = 0;
}
else {
top4d->ndim = 4;
top4d->num_items = bottom3d->num_items;
{
int total_size = 0;
for (int n = 0; n < bottom3d->num_items; ++n) {
const int R = roi2d->shape[n][0];
const int C = bottom3d->shape[n][0];
const int top_size = R * C * top_H * top_W;
top4d->start[n] = total_size;
total_size += top_size;
}
}
}
}
// --------------------------------------------------------------------------
// layer shape calculator code
// --------------------------------------------------------------------------
void roipool_shape(const Tensor* const bottom3d,
const Tensor* const roi2d,
Tensor* const top4d,
int* const argmax_size,
const LayerOption* option)
{
// top height & width
const int top_H = option->pooled_height; // H'
const int top_W = option->pooled_width; // W'
// if option->flatten = true,
// reshape to 2d tensor: total_num_rois x (C * H' * W')
if (option->flatten) {
// for all items, C should be equal to each other
const int C = bottom3d->shape[0][0];
// calculate total number of RoI-pooled data
int total_num_rois = 0;
for (int n = 0; n < roi2d->num_items; ++n) {
total_num_rois += roi2d->shape[n][0];
}
// reshape to 2d tensor: total_num_rois x (C * H' * W')
top4d->ndim = 2;
top4d->num_items = 1;
top4d->shape[0][0] = total_num_rois;
top4d->shape[0][1] = C * top_H * top_W;
top4d->start[0] = 0;
// argmax data size = total top size
*argmax_size = top4d->shape[0][0] * top4d->shape[0][1];
return;
}
// otherwise, calculate shape for each item in the batch
for (int n = 0; n < bottom3d->num_items; ++n) {
// bottom shape: R x C x H X W
const int R = roi2d->shape[n][0];
const int C = bottom3d->shape[n][0];
// top shape: R x C x H' x W'
top4d->shape[n][0] = R;
top4d->shape[n][1] = C;
top4d->shape[n][2] = top_H;
top4d->shape[n][3] = top_W;
}
top4d->ndim = 4;
top4d->num_items = bottom3d->num_items;
{
int total_size = 0;
for (int n = 0; n < bottom3d->num_items; ++n) {
const int R = roi2d->shape[n][0];
const int C = bottom3d->shape[n][0];
const int top_size = R * C * top_H * top_W;
top4d->start[n] = total_size;
total_size += top_size;
}
// argmax data size = total top size
*argmax_size = total_size;
}
}
// --------------------------------------------------------------------------
// API code
// --------------------------------------------------------------------------
void forward_roipool_layer(void* const net_, void* const layer_)
{
Net* const net = (Net*)net_;
Layer* const layer = (Layer*)layer_;
roipool_forward(layer->p_bottoms[0], layer->p_bottoms[1],
&layer->tops[0],
net->tempint_data, &layer->option);
print_tensor_info(layer->name, &layer->tops[0]);
}
void shape_roipool_layer(void* const net_, void* const layer_)
{
Net* const net = (Net*)net_;
Layer* const layer = (Layer*)layer_;
int tempint_size;
roipool_shape(layer->p_bottoms[0], layer->p_bottoms[1], &layer->tops[0],
&tempint_size, &layer->option);
update_net_size(net, layer, 0, tempint_size, 0);
}
// --------------------------------------------------------------------------
// test code
// --------------------------------------------------------------------------
#ifdef TEST
int main(int argc, char* argv[])
{
// variable declaration & memory allocation
Tensor X, Y, roi;
real *X_data = NULL, *Y_data = NULL, *Y_true_data = NULL;
real *roi_data = NULL;
int* p_argmax_data = NULL;
LayerOption option;
int argmax_size;
// set option
{
option.pooled_height = 6;
option.pooled_width = 6;
option.spatial_scale = 0.0625;
option.flatten = 0;
}
// load data
{
int ndim;
int shape[g_max_ndim];
int total_size;
X_data = load_data("../data/temp/roipool_bottom0.bin",
&ndim, shape, NULL);
X.num_items = shape[0];
X.ndim = ndim - 1;
total_size = 0;
for (int n = 0; n < X.num_items; ++n) {
int size_n = 1;
for (int i = 0; i < X.ndim; ++i) {
X.shape[n][i] = shape[i + 1];
size_n *= shape[i + 1];
}
X.start[n] = total_size;
total_size += size_n;
}
roi_data = load_data("../data/temp/roipool_bottom1.bin",
&ndim, shape, NULL);
roi.num_items = X.num_items;
roi.ndim = 2;
for (int n = 0; n < roi.num_items; ++n) {
roi.shape[n][1] = 4;
}
{
const int num_rois = shape[0];
for (int i = 0; i < num_rois; ++i) {
const int n = (int)ROUND(roi_data[i * 5 + 0]);
const real x1 = roi_data[i * 5 + 1];
const real y1 = roi_data[i * 5 + 2];
const real x2 = roi_data[i * 5 + 3];
const real y2 = roi_data[i * 5 + 4];
++roi.shape[n][0];
roi_data[i * 4 + 0] = x1;
roi_data[i * 4 + 1] = y1;
roi_data[i * 4 + 2] = x2;
roi_data[i * 4 + 3] = y2;
}
}
roipool_shape(&X, &roi, &Y, &argmax_size, &option);
Y_true_data = load_data("../data/temp/roipool_top0.bin",
&ndim, shape, NULL);
Y_data = (real*)malloc(flatten_size(&Y) * sizeof(real));
}
// CUDA initialization
#ifdef GPU
{
printf("set device\n");
cudaSetDevice(0);
}
#endif
// bind loaded data to corresponding tensors
#ifdef GPU
{
const long int X_size = flatten_size(&X);
const long int Y_size = flatten_size(&Y);
const long int roi_size = flatten_size(&roi);
printf("gpu malloc\n");
cudaMalloc(&X.data, X_size * sizeof(real));
cudaMalloc(&roi.data, roi_size * sizeof(real));
cudaMalloc(&Y.data, Y_size * sizeof(real));
cudaMalloc(&p_argmax_data, argmax_size * sizeof(int));
printf("memcpy: cpu -> gpu\n");
cudaMemcpyAsync(X.data, X_data, X_size * sizeof(real),
cudaMemcpyHostToDevice);
cudaMemcpyAsync(roi.data, roi_data, roi_size * sizeof(real),
cudaMemcpyHostToDevice);
}
#else
{
X.data = X_data;
Y.data = Y_data;
roi.data = roi_data;
p_argmax_data = (int*)malloc(argmax_size * sizeof(int));
}
#endif
// do forward operation
{
printf("do forward\n");
roipool_forward(&X, &roi, &Y, p_argmax_data, &option);
}
// copy GPU data to main memory
#ifdef GPU
{
const long int Y_size = flatten_size(&Y);
printf("memcpy: cpu <- gpu\n");
cudaMemcpyAsync(Y_data, Y.data, Y_size * sizeof(real),
cudaMemcpyDeviceToHost);
}
#endif
// verify results
{
const long int Y_size = flatten_size(&Y);
printf("verification\n");
for (int i = 0; i < Y_size; ++i) {
if (Y_data[i] != Y_true_data[i]) {
printf("Y[%d] = %.6f Y_true[%d] = %.6f\n",
i, Y_data[i], i, Y_true_data[i]);
}
}
}
// memory deallocation
{
printf("free\n");
free(X_data);
free(roi_data);
free(Y_data);
free(Y_true_data);
}
#ifdef GPU
{
printf("gpu free\n");
cudaFree(X.data);
cudaFree(roi.data);
cudaFree(Y.data);
cudaFree(p_argmax_data);
}
#else
{
free(p_argmax_data);
}
#endif
return 0;
}
#endif // endifdef TEST
|
a3a80982e40aee84e36ddff5b2b0d77aecbbd05f.hip | // !!! This is a file automatically generated by hipify!!!
/**
* 3DConvolution.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Will Killian <killian@udel.edu>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <hip/hip_runtime.h>
#define POLYBENCH_TIME 1
#include "3DConvolution.cuh"
#include "../../common/polybench.h"
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
#define RUN_ON_CPU
void conv3D(int ni, int nj, int nk, DATA_TYPE POLYBENCH_3D(A, NI, NJ, NK, ni, nj, nk), DATA_TYPE POLYBENCH_3D(B, NI, NJ, NK, ni, nj, nk))
{
int i, j, k;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +2; c21 = +5; c31 = -8;
c12 = -3; c22 = +6; c32 = -9;
c13 = +4; c23 = +7; c33 = +10;
for (i = 1; i < _PB_NI - 1; ++i) // 0
{
for (j = 1; j < _PB_NJ - 1; ++j) // 1
{
for (k = 1; k < _PB_NK -1; ++k) // 2
{
B[i][j][k] = c11 * A[(i - 1)][(j - 1)][(k - 1)] + c13 * A[(i + 1)][(j - 1)][(k - 1)]
+ c21 * A[(i - 1)][(j - 1)][(k - 1)] + c23 * A[(i + 1)][(j - 1)][(k - 1)]
+ c31 * A[(i - 1)][(j - 1)][(k - 1)] + c33 * A[(i + 1)][(j - 1)][(k - 1)]
+ c12 * A[(i + 0)][(j - 1)][(k + 0)] + c22 * A[(i + 0)][(j + 0)][(k + 0)]
+ c32 * A[(i + 0)][(j + 1)][(k + 0)] + c11 * A[(i - 1)][(j - 1)][(k + 1)]
+ c13 * A[(i + 1)][(j - 1)][(k + 1)] + c21 * A[(i - 1)][(j + 0)][(k + 1)]
+ c23 * A[(i + 1)][(j + 0)][(k + 1)] + c31 * A[(i - 1)][(j + 1)][(k + 1)]
+ c33 * A[(i + 1)][(j + 1)][(k + 1)];
}
}
}
}
void init(int ni, int nj, int nk, DATA_TYPE POLYBENCH_3D(A, NI, NJ, NK, ni, nj, nk))
{
int i, j, k;
for (i = 0; i < ni; ++i)
{
for (j = 0; j < nj; ++j)
{
for (k = 0; k < nk; ++k)
{
A[i][j][k] = i % 12 + 2 * (j % 7) + 3 * (k % 13);
}
}
}
}
void compareResults(int ni, int nj, int nk, DATA_TYPE POLYBENCH_3D(B, NI, NJ, NK, ni, nj, nk), DATA_TYPE POLYBENCH_3D(B_outputFromGpu, NI, NJ, NK, ni, nj, nk))
{
int i, j, k, fail;
fail = 0;
// Compare result from cpu and gpu
for (i = 1; i < ni - 1; ++i) // 0
{
for (j = 1; j < nj - 1; ++j) // 1
{
for (k = 1; k < nk - 1; ++k) // 2
{
if (percentDiff(B[i][j][k], B_outputFromGpu[i][j][k]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( GPU_DEVICE );
}
__global__ void convolution3D_kernel(int ni, int nj, int nk, DATA_TYPE* A, DATA_TYPE* B, int i)
{
int k = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +2; c21 = +5; c31 = -8;
c12 = -3; c22 = +6; c32 = -9;
c13 = +4; c23 = +7; c33 = +10;
if ((i < (_PB_NI-1)) && (j < (_PB_NJ-1)) && (k < (_PB_NK-1)) && (i > 0) && (j > 0) && (k > 0))
{
B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)] + c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)]
+ c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)] + c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]
+ c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)] + c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]
+ c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)] + c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]
+ c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)];
}
}
void convolution3DCuda(int ni, int nj, int nk, DATA_TYPE POLYBENCH_3D(A, NI, NJ, NK, ni, nj, nk), DATA_TYPE POLYBENCH_3D(B, NI, NJ, NK, ni, nj, nk), DATA_TYPE POLYBENCH_3D(B_outputFromGpu, NI, NJ, NK, ni, nj, nk))
{
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NJ * NK, hipMemcpyHostToDevice);
hipMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NI * NJ * NK, hipMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)(ceil( ((float)NK) / ((float)block.x) )), (size_t)(ceil( ((float)NJ) / ((float)block.y) )));
/* Start timer. */
polybench_start_instruments;
int i;
for (i = 1; i < _PB_NI - 1; ++i) // 0
{
hipLaunchKernelGGL(( convolution3D_kernel), dim3(grid), dim3(block) , 0, 0, ni, nj, nk, A_gpu, B_gpu, i);
}
hipDeviceSynchronize();
printf("GPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
hipMemcpy(B_outputFromGpu, B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK, hipMemcpyDeviceToHost);
hipFree(A_gpu);
hipFree(B_gpu);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj, int nk,
DATA_TYPE POLYBENCH_3D(B,NI,NJ,NK,ni,nj,nk))
{
int i, j, k;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++)
for (k = 0; k < nk; k++)
{
fprintf (stderr, DATA_PRINTF_MODIFIER, B[i][j][k]);
if ((i * (nj*nk) + j*nk + k) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
int main(int argc, char *argv[])
{
int ni = NI;
int nj = NJ;
int nk = NK;
POLYBENCH_3D_ARRAY_DECL(A,DATA_TYPE,NI,NJ,NK,ni,nj,nk);
POLYBENCH_3D_ARRAY_DECL(B,DATA_TYPE,NI,NJ,NK,ni,nj,nk);
POLYBENCH_3D_ARRAY_DECL(B_outputFromGpu,DATA_TYPE,NI,NJ,NK,ni,nj,nk);
init(ni, nj, nk, POLYBENCH_ARRAY(A));
GPU_argv_init();
convolution3DCuda(ni, nj, nk, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(B_outputFromGpu));
#ifdef RUN_ON_CPU
/* Start timer. */
polybench_start_instruments;
conv3D(ni, nj, nk, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
printf("CPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
compareResults(ni, nj, nk, POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(B_outputFromGpu));
#else //print output to stderr so no dead code elimination
print_array(ni, nj, nk, POLYBENCH_ARRAY(B_outputFromGpu));
#endif //RUN_ON_CPU
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
POLYBENCH_FREE_ARRAY(B_outputFromGpu);
return 0;
}
#include "../../common/polybench.c"
| a3a80982e40aee84e36ddff5b2b0d77aecbbd05f.cu | /**
* 3DConvolution.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Will Killian <killian@udel.edu>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <cuda.h>
#define POLYBENCH_TIME 1
#include "3DConvolution.cuh"
#include "../../common/polybench.h"
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
#define RUN_ON_CPU
void conv3D(int ni, int nj, int nk, DATA_TYPE POLYBENCH_3D(A, NI, NJ, NK, ni, nj, nk), DATA_TYPE POLYBENCH_3D(B, NI, NJ, NK, ni, nj, nk))
{
int i, j, k;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +2; c21 = +5; c31 = -8;
c12 = -3; c22 = +6; c32 = -9;
c13 = +4; c23 = +7; c33 = +10;
for (i = 1; i < _PB_NI - 1; ++i) // 0
{
for (j = 1; j < _PB_NJ - 1; ++j) // 1
{
for (k = 1; k < _PB_NK -1; ++k) // 2
{
B[i][j][k] = c11 * A[(i - 1)][(j - 1)][(k - 1)] + c13 * A[(i + 1)][(j - 1)][(k - 1)]
+ c21 * A[(i - 1)][(j - 1)][(k - 1)] + c23 * A[(i + 1)][(j - 1)][(k - 1)]
+ c31 * A[(i - 1)][(j - 1)][(k - 1)] + c33 * A[(i + 1)][(j - 1)][(k - 1)]
+ c12 * A[(i + 0)][(j - 1)][(k + 0)] + c22 * A[(i + 0)][(j + 0)][(k + 0)]
+ c32 * A[(i + 0)][(j + 1)][(k + 0)] + c11 * A[(i - 1)][(j - 1)][(k + 1)]
+ c13 * A[(i + 1)][(j - 1)][(k + 1)] + c21 * A[(i - 1)][(j + 0)][(k + 1)]
+ c23 * A[(i + 1)][(j + 0)][(k + 1)] + c31 * A[(i - 1)][(j + 1)][(k + 1)]
+ c33 * A[(i + 1)][(j + 1)][(k + 1)];
}
}
}
}
void init(int ni, int nj, int nk, DATA_TYPE POLYBENCH_3D(A, NI, NJ, NK, ni, nj, nk))
{
int i, j, k;
for (i = 0; i < ni; ++i)
{
for (j = 0; j < nj; ++j)
{
for (k = 0; k < nk; ++k)
{
A[i][j][k] = i % 12 + 2 * (j % 7) + 3 * (k % 13);
}
}
}
}
void compareResults(int ni, int nj, int nk, DATA_TYPE POLYBENCH_3D(B, NI, NJ, NK, ni, nj, nk), DATA_TYPE POLYBENCH_3D(B_outputFromGpu, NI, NJ, NK, ni, nj, nk))
{
int i, j, k, fail;
fail = 0;
// Compare result from cpu and gpu
for (i = 1; i < ni - 1; ++i) // 0
{
for (j = 1; j < nj - 1; ++j) // 1
{
for (k = 1; k < nk - 1; ++k) // 2
{
if (percentDiff(B[i][j][k], B_outputFromGpu[i][j][k]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( GPU_DEVICE );
}
__global__ void convolution3D_kernel(int ni, int nj, int nk, DATA_TYPE* A, DATA_TYPE* B, int i)
{
int k = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +2; c21 = +5; c31 = -8;
c12 = -3; c22 = +6; c32 = -9;
c13 = +4; c23 = +7; c33 = +10;
if ((i < (_PB_NI-1)) && (j < (_PB_NJ-1)) && (k < (_PB_NK-1)) && (i > 0) && (j > 0) && (k > 0))
{
B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)] + c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)]
+ c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)] + c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]
+ c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)] + c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]
+ c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)] + c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]
+ c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)];
}
}
void convolution3DCuda(int ni, int nj, int nk, DATA_TYPE POLYBENCH_3D(A, NI, NJ, NK, ni, nj, nk), DATA_TYPE POLYBENCH_3D(B, NI, NJ, NK, ni, nj, nk), DATA_TYPE POLYBENCH_3D(B_outputFromGpu, NI, NJ, NK, ni, nj, nk))
{
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NJ * NK, cudaMemcpyHostToDevice);
cudaMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NI * NJ * NK, cudaMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)(ceil( ((float)NK) / ((float)block.x) )), (size_t)(ceil( ((float)NJ) / ((float)block.y) )));
/* Start timer. */
polybench_start_instruments;
int i;
for (i = 1; i < _PB_NI - 1; ++i) // 0
{
convolution3D_kernel<<< grid, block >>>(ni, nj, nk, A_gpu, B_gpu, i);
}
cudaThreadSynchronize();
printf("GPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
cudaMemcpy(B_outputFromGpu, B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK, cudaMemcpyDeviceToHost);
cudaFree(A_gpu);
cudaFree(B_gpu);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj, int nk,
DATA_TYPE POLYBENCH_3D(B,NI,NJ,NK,ni,nj,nk))
{
int i, j, k;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++)
for (k = 0; k < nk; k++)
{
fprintf (stderr, DATA_PRINTF_MODIFIER, B[i][j][k]);
if ((i * (nj*nk) + j*nk + k) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
int main(int argc, char *argv[])
{
int ni = NI;
int nj = NJ;
int nk = NK;
POLYBENCH_3D_ARRAY_DECL(A,DATA_TYPE,NI,NJ,NK,ni,nj,nk);
POLYBENCH_3D_ARRAY_DECL(B,DATA_TYPE,NI,NJ,NK,ni,nj,nk);
POLYBENCH_3D_ARRAY_DECL(B_outputFromGpu,DATA_TYPE,NI,NJ,NK,ni,nj,nk);
init(ni, nj, nk, POLYBENCH_ARRAY(A));
GPU_argv_init();
convolution3DCuda(ni, nj, nk, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(B_outputFromGpu));
#ifdef RUN_ON_CPU
/* Start timer. */
polybench_start_instruments;
conv3D(ni, nj, nk, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
printf("CPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
compareResults(ni, nj, nk, POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(B_outputFromGpu));
#else //print output to stderr so no dead code elimination
print_array(ni, nj, nk, POLYBENCH_ARRAY(B_outputFromGpu));
#endif //RUN_ON_CPU
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
POLYBENCH_FREE_ARRAY(B_outputFromGpu);
return 0;
}
#include "../../common/polybench.c"
|
71fc2f7ec9225843c3e12d039b16537ff29f0de6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void select(const double* ran, const double* total, const int* size,
double** frac, double** xs, int* selection) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
double cutoff = total[idx] * ran[idx];
const double* el_frac = frac[idx];
const double* el_xs = xs[idx];
unsigned int i = size[idx] - 1;
for (; i > 0; --i) {
cutoff -= el_frac[i] * el_xs[i];
if (cutoff <= 0)
break;
}
selection[idx] = i;
}
| 71fc2f7ec9225843c3e12d039b16537ff29f0de6.cu | __global__ void select(const double* ran, const double* total, const int* size,
double** frac, double** xs, int* selection) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
double cutoff = total[idx] * ran[idx];
const double* el_frac = frac[idx];
const double* el_xs = xs[idx];
unsigned int i = size[idx] - 1;
for (; i > 0; --i) {
cutoff -= el_frac[i] * el_xs[i];
if (cutoff <= 0)
break;
}
selection[idx] = i;
}
|
2843e407a1ba6195cff6f0ea1364fbc8a7847d7a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Fast Accurate Fourier Transform (FAFT) was written by Oscar R. Cabrera L.
// Contributors: Renan Cabrera, Denys I. Bondar.
// Copyright (c) 2016
// All rights reserved.
#include "FAFTp_R2C_C2R.h"
// ax Split 1
__global__ void FAFT128_R2C_ax1_dev( float *re, float *im, float2 *data65, float dx, float delta, int segment )
{
int tid = threadIdx.x;
size_t sector = blockIdx.y*gridDim.x + blockIdx.x;
re += (sector*64) + tid;
im += (sector*64) + tid + 32;
if (tid == 0) data65 += sector;
float2 y[16];
load128_half_R2C_ax1( 8, y, re, 16 );
GENERAL_FAFT128( y, dx, delta, segment, tid );
store128_half_R2C_ax1<8>( y, re, im, data65, 16, tid );
}
extern "C" int FAFT128_1D_R2C( float *data, float2 *data65, float dx, float delta, int segment )
{
int success = 1;
dim3 grid_R2C(1, 1);
hipLaunchKernelGGL(( FAFT128_R2C_ax1_dev), dim3(grid_R2C), dim3(16) , 0, 0, data, data, data65, dx, delta, segment );
hipDeviceSynchronize();
return success;
}
| 2843e407a1ba6195cff6f0ea1364fbc8a7847d7a.cu | // Fast Accurate Fourier Transform (FAFT) was written by Oscar R. Cabrera L.
// Contributors: Renan Cabrera, Denys I. Bondar.
// Copyright (c) 2016
// All rights reserved.
#include "FAFTp_R2C_C2R.h"
// ax Split 1
__global__ void FAFT128_R2C_ax1_dev( float *re, float *im, float2 *data65, float dx, float delta, int segment )
{
int tid = threadIdx.x;
size_t sector = blockIdx.y*gridDim.x + blockIdx.x;
re += (sector*64) + tid;
im += (sector*64) + tid + 32;
if (tid == 0) data65 += sector;
float2 y[16];
load128_half_R2C_ax1( 8, y, re, 16 );
GENERAL_FAFT128( y, dx, delta, segment, tid );
store128_half_R2C_ax1<8>( y, re, im, data65, 16, tid );
}
extern "C" int FAFT128_1D_R2C( float *data, float2 *data65, float dx, float delta, int segment )
{
int success = 1;
dim3 grid_R2C(1, 1);
FAFT128_R2C_ax1_dev<<< grid_R2C, 16 >>>( data, data, data65, dx, delta, segment );
cudaThreadSynchronize();
return success;
}
|
9117a3413bd4e89808a1ed411f751857e5e33234.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gtest/gtest.h>
#include <ATen/ATen.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/MemoryAccess.cuh>
#include <ATen/hip/HIPContext.h>
using namespace at::native;
using namespace at::native::memory;
__managed__ double4 buffer1[1024];
__managed__ double4 buffer2[1024];
void reset_buffers() {
for (int i = 0; i < 1024; i++) {
buffer1[i].x = i;
buffer1[i].y = i + 0.1;
buffer1[i].z = i + 0.2;
buffer1[i].w = i + 0.3;
buffer2[2].x = -i;
buffer2[2].y = -(i + 0.1);
buffer2[2].z = -(i + 0.2);
buffer2[2].w = -(i + 0.3);
}
}
TEST(TestLoops, HasSameArgTypes) {
// This is a compile-time unit test. If this file compiles without error,
// then the test passes and during runtime, we just need to return.
using namespace at::native::modern::detail;
using func1_t = int (*)(float, float);
using func2_t = int (*)(bool, float, float);
using func3_t = int (*)(float);
using func4_t = int (*)();
static_assert(has_same_arg_types<func1_t>::value, "func1_t has the same argument types");
static_assert(!has_same_arg_types<func2_t>::value, "func2_t does not have the same argument types");
static_assert(has_same_arg_types<func3_t>::value, "func3_t has the same argument types");
static_assert(has_same_arg_types<func4_t>::value, "func4_t has the same argument types");
return;
}
TEST(TestVectorizedMemoryAccess, CanVectorizeUpTo) {
char *ptr = reinterpret_cast<char *>(buffer1);
ASSERT_EQ(can_vectorize_up_to<bool>(ptr), 4);
ASSERT_EQ(can_vectorize_up_to<int8_t>(ptr), 4);
ASSERT_EQ(can_vectorize_up_to<int16_t>(ptr), 4);
ASSERT_EQ(can_vectorize_up_to<int>(ptr), 4);
ASSERT_EQ(can_vectorize_up_to<int64_t>(ptr), 4);
ASSERT_EQ(can_vectorize_up_to<bool>(ptr + 1), 1);
ASSERT_EQ(can_vectorize_up_to<int8_t>(ptr + 1), 1);
ASSERT_EQ(can_vectorize_up_to<bool>(ptr + 2), 2);
ASSERT_EQ(can_vectorize_up_to<int8_t>(ptr + 2), 2);
ASSERT_EQ(can_vectorize_up_to<int16_t>(ptr + 2), 1);
ASSERT_EQ(can_vectorize_up_to<bool>(ptr + 4), 4);
ASSERT_EQ(can_vectorize_up_to<int8_t>(ptr + 4), 4);
ASSERT_EQ(can_vectorize_up_to<int16_t>(ptr + 4), 2);
ASSERT_EQ(can_vectorize_up_to<int>(ptr + 4), 1);
ASSERT_EQ(can_vectorize_up_to<bool>(ptr + 8), 4);
ASSERT_EQ(can_vectorize_up_to<int8_t>(ptr + 8), 4);
ASSERT_EQ(can_vectorize_up_to<int16_t>(ptr + 8), 4);
ASSERT_EQ(can_vectorize_up_to<int>(ptr + 8), 2);
ASSERT_EQ(can_vectorize_up_to<int64_t>(ptr + 8), 1);
}
// The following kernel copy values by using vectorized policies
// defined in `ATen/native/cuda/MemoryAccess.cuh`
template <typename scalar_t, int vec_size>
__global__ void vectorized_copy(scalar_t *dst, scalar_t *src) {
using vectorized = policies::vectorized<vec_size>;
auto policy = vectorized();
scalar_t buf[thread_work_size];
auto accessor = [&](int index) -> scalar_t & { return buf[index]; };
policy.load(accessor, src + 256 * blockIdx.x);
policy.store(accessor, dst + 256 * blockIdx.x);
}
// End-to-end check of the vectorized<N> copy policies on HIP: copy buffer1
// into buffer2 on the GPU for vec4/vec2/vec1, verify every element, then
// verify that a misaligned base pointer makes the vec4 kernel fault.
// Launch shape 16 blocks x 64 threads; each block copies a 256-double chunk
// (see vectorized_copy), covering all 1024 double4 (4096 doubles).
TEST(TestVectorizedMemoryAccess, CopyKernel) {
if (!at::cuda::is_available()) {
return;
}
double *b1 = reinterpret_cast<double *>(buffer1);
double *b2 = reinterpret_cast<double *>(buffer2);
// vec4 copy
reset_buffers();
hipDeviceSynchronize();
hipLaunchKernelGGL(( vectorized_copy<double, 4>), dim3(16), dim3(64), 0, 0, b2, b1);
hipDeviceSynchronize();
ASSERT_EQ(hipGetLastError(), hipSuccess);
for (int i = 0; i < 1024; i++) {
ASSERT_EQ(buffer1[i].x, buffer2[i].x);
ASSERT_EQ(buffer1[i].y, buffer2[i].y);
ASSERT_EQ(buffer1[i].z, buffer2[i].z);
ASSERT_EQ(buffer1[i].w, buffer2[i].w);
}
// vec2 copy
reset_buffers();
hipDeviceSynchronize();
hipLaunchKernelGGL(( vectorized_copy<double, 2>), dim3(16), dim3(64), 0, 0, b2, b1);
hipDeviceSynchronize();
ASSERT_EQ(hipGetLastError(), hipSuccess);
for (int i = 0; i < 1024; i++) {
ASSERT_EQ(buffer1[i].x, buffer2[i].x);
ASSERT_EQ(buffer1[i].y, buffer2[i].y);
ASSERT_EQ(buffer1[i].z, buffer2[i].z);
ASSERT_EQ(buffer1[i].w, buffer2[i].w);
}
// vec1 copy
reset_buffers();
hipDeviceSynchronize();
hipLaunchKernelGGL(( vectorized_copy<double, 1>), dim3(16), dim3(64), 0, 0, b2, b1);
hipDeviceSynchronize();
ASSERT_EQ(hipGetLastError(), hipSuccess);
for (int i = 0; i < 1024; i++) {
ASSERT_EQ(buffer1[i].x, buffer2[i].x);
ASSERT_EQ(buffer1[i].y, buffer2[i].y);
ASSERT_EQ(buffer1[i].z, buffer2[i].z);
ASSERT_EQ(buffer1[i].w, buffer2[i].w);
}
// unaligned
// Offsets the base pointers by i/j bytes; with i,j in [0,16) only i==j==0 is
// aligned (i % 16 == 0 is equivalent to i == 0 here), every other combination
// is expected to trigger a misaligned-address error from the vec4 kernel.
for (int i = 0; i < 16; i++) {
for (int j = 0; j < 16; j++) {
b1 = reinterpret_cast<double *>(reinterpret_cast<char *>(buffer1) + i);
b2 = reinterpret_cast<double *>(reinterpret_cast<char *>(buffer2) + j);
hipGetLastError();
hipDeviceSynchronize();
hipLaunchKernelGGL(( vectorized_copy<double, 4>), dim3(1), dim3(64), 0, 0, b2, b1);
hipDeviceSynchronize();
auto err = hipGetLastError();
if (i % 16 == 0 && j % 16 == 0) {
ASSERT_EQ(err, hipSuccess);
} else {
ASSERT_EQ(err, hipErrorMisalignedAddress);
}
}
}
}
| 9117a3413bd4e89808a1ed411f751857e5e33234.cu | #include <gtest/gtest.h>
#include <ATen/ATen.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/MemoryAccess.cuh>
#include <ATen/cuda/CUDAContext.h>
using namespace at::native;
using namespace at::native::memory;
__managed__ double4 buffer1[1024];
__managed__ double4 buffer2[1024];
// Reinitializes both managed buffers before each copy test: buffer1 gets a
// deterministic ascending pattern and buffer2 gets the negated pattern, so
// that a failed or partial device copy is detectable when the test compares
// the two buffers element by element afterwards.
void reset_buffers() {
  for (int i = 0; i < 1024; i++) {
    buffer1[i].x = i;
    buffer1[i].y = i + 0.1;
    buffer1[i].z = i + 0.2;
    buffer1[i].w = i + 0.3;
    // Bug fix: the original wrote only buffer2[2] on every iteration, leaving
    // the other 1023 elements of buffer2 stale from the previous run; every
    // element must be reset for the post-copy comparison to be meaningful.
    buffer2[i].x = -i;
    buffer2[i].y = -(i + 0.1);
    buffer2[i].z = -(i + 0.2);
    buffer2[i].w = -(i + 0.3);
  }
}
// Compile-time check of the has_same_arg_types trait: function-pointer types
// whose parameters all share one type (or take no parameters) must satisfy
// the trait; mixed parameter lists must not.
TEST(TestLoops, HasSameArgTypes) {
// This is a compile-time unit test. If this file compiles without error,
// then the test passes and during runtime, we just need to return.
using namespace at::native::modern::detail;
using func1_t = int (*)(float, float);
using func2_t = int (*)(bool, float, float);
using func3_t = int (*)(float);
using func4_t = int (*)();
static_assert(has_same_arg_types<func1_t>::value, "func1_t has the same argument types");
static_assert(!has_same_arg_types<func2_t>::value, "func2_t does not have the same argument types");
static_assert(has_same_arg_types<func3_t>::value, "func3_t has the same argument types");
static_assert(has_same_arg_types<func4_t>::value, "func4_t has the same argument types");
return;
}
// Exercises can_vectorize_up_to for several element types at byte offsets
// from the base of buffer1 (a double4 array, so the base itself supports the
// widest access tested). The expected factor shrinks as the pointer offset
// reduces the available alignment.
TEST(TestVectorizedMemoryAccess, CanVectorizeUpTo) {
char *ptr = reinterpret_cast<char *>(buffer1);
// Fully aligned base pointer: every type can use the maximum factor of 4.
ASSERT_EQ(can_vectorize_up_to<bool>(ptr), 4);
ASSERT_EQ(can_vectorize_up_to<int8_t>(ptr), 4);
ASSERT_EQ(can_vectorize_up_to<int16_t>(ptr), 4);
ASSERT_EQ(can_vectorize_up_to<int>(ptr), 4);
ASSERT_EQ(can_vectorize_up_to<int64_t>(ptr), 4);
// Odd byte offset: only scalar (factor 1) access for 1-byte types.
ASSERT_EQ(can_vectorize_up_to<bool>(ptr + 1), 1);
ASSERT_EQ(can_vectorize_up_to<int8_t>(ptr + 1), 1);
// 2-byte offset: 1-byte types can pair up, 2-byte types cannot vectorize.
ASSERT_EQ(can_vectorize_up_to<bool>(ptr + 2), 2);
ASSERT_EQ(can_vectorize_up_to<int8_t>(ptr + 2), 2);
ASSERT_EQ(can_vectorize_up_to<int16_t>(ptr + 2), 1);
// 4-byte offset.
ASSERT_EQ(can_vectorize_up_to<bool>(ptr + 4), 4);
ASSERT_EQ(can_vectorize_up_to<int8_t>(ptr + 4), 4);
ASSERT_EQ(can_vectorize_up_to<int16_t>(ptr + 4), 2);
ASSERT_EQ(can_vectorize_up_to<int>(ptr + 4), 1);
// 8-byte offset.
ASSERT_EQ(can_vectorize_up_to<bool>(ptr + 8), 4);
ASSERT_EQ(can_vectorize_up_to<int8_t>(ptr + 8), 4);
ASSERT_EQ(can_vectorize_up_to<int16_t>(ptr + 8), 4);
ASSERT_EQ(can_vectorize_up_to<int>(ptr + 8), 2);
ASSERT_EQ(can_vectorize_up_to<int64_t>(ptr + 8), 1);
}
// The following kernel copy values by using vectorized policies
// defined in `ATen/native/cuda/MemoryAccess.cuh`
// Copies one fixed 256-element chunk per block using the vectorized
// load/store policy from ATen/native/cuda/MemoryAccess.cuh: each thread
// stages its elements in registers via `load`, then writes them with `store`.
template <typename scalar_t, int vec_size>
__global__ void vectorized_copy(scalar_t *dst, scalar_t *src) {
  const auto chunk_offset = 256 * blockIdx.x;  // this block's slice of src/dst
  auto policy = policies::vectorized<vec_size>();
  scalar_t staging[thread_work_size];  // per-thread register staging buffer
  auto elem = [&](int index) -> scalar_t & { return staging[index]; };
  policy.load(elem, src + chunk_offset);
  policy.store(elem, dst + chunk_offset);
}
// End-to-end check of the vectorized<N> copy policies: copy buffer1 into
// buffer2 on the GPU for vec4/vec2/vec1, verify every element, then verify
// that a misaligned base pointer makes the vec4 kernel fault.
// Launch shape 16 blocks x 64 threads; each block copies a 256-double chunk
// (see vectorized_copy), covering all 1024 double4 (4096 doubles).
TEST(TestVectorizedMemoryAccess, CopyKernel) {
if (!at::cuda::is_available()) {
return;
}
double *b1 = reinterpret_cast<double *>(buffer1);
double *b2 = reinterpret_cast<double *>(buffer2);
// vec4 copy
reset_buffers();
cudaDeviceSynchronize();
vectorized_copy<double, 4><<<16, 64>>>(b2, b1);
cudaDeviceSynchronize();
ASSERT_EQ(cudaGetLastError(), cudaSuccess);
for (int i = 0; i < 1024; i++) {
ASSERT_EQ(buffer1[i].x, buffer2[i].x);
ASSERT_EQ(buffer1[i].y, buffer2[i].y);
ASSERT_EQ(buffer1[i].z, buffer2[i].z);
ASSERT_EQ(buffer1[i].w, buffer2[i].w);
}
// vec2 copy
reset_buffers();
cudaDeviceSynchronize();
vectorized_copy<double, 2><<<16, 64>>>(b2, b1);
cudaDeviceSynchronize();
ASSERT_EQ(cudaGetLastError(), cudaSuccess);
for (int i = 0; i < 1024; i++) {
ASSERT_EQ(buffer1[i].x, buffer2[i].x);
ASSERT_EQ(buffer1[i].y, buffer2[i].y);
ASSERT_EQ(buffer1[i].z, buffer2[i].z);
ASSERT_EQ(buffer1[i].w, buffer2[i].w);
}
// vec1 copy
reset_buffers();
cudaDeviceSynchronize();
vectorized_copy<double, 1><<<16, 64>>>(b2, b1);
cudaDeviceSynchronize();
ASSERT_EQ(cudaGetLastError(), cudaSuccess);
for (int i = 0; i < 1024; i++) {
ASSERT_EQ(buffer1[i].x, buffer2[i].x);
ASSERT_EQ(buffer1[i].y, buffer2[i].y);
ASSERT_EQ(buffer1[i].z, buffer2[i].z);
ASSERT_EQ(buffer1[i].w, buffer2[i].w);
}
// unaligned
// Offsets the base pointers by i/j bytes; with i,j in [0,16) only i==j==0 is
// aligned (i % 16 == 0 is equivalent to i == 0 here), every other combination
// is expected to trigger a misaligned-address error from the vec4 kernel.
for (int i = 0; i < 16; i++) {
for (int j = 0; j < 16; j++) {
b1 = reinterpret_cast<double *>(reinterpret_cast<char *>(buffer1) + i);
b2 = reinterpret_cast<double *>(reinterpret_cast<char *>(buffer2) + j);
cudaGetLastError();
cudaDeviceSynchronize();
vectorized_copy<double, 4><<<1, 64>>>(b2, b1);
cudaDeviceSynchronize();
auto err = cudaGetLastError();
if (i % 16 == 0 && j % 16 == 0) {
ASSERT_EQ(err, cudaSuccess);
} else {
ASSERT_EQ(err, cudaErrorMisalignedAddress);
}
}
}
}
|
1ac1078e4c3607322d597fb6f98dc758ee42f22f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// One stereo sample pair per thread: ip[z] packs two 32-bit samples (left in
// the low word, right in the high word). The masked low bits of each sample
// are stashed in shiftUV before shifting, u receives the weighted mix and v
// the channel difference. Looks like ALAC-style mid/side mixing -- TODO
// confirm against the CPU reference implementation.
// NOTE(review): the `stride` parameter is unused in this kernel; presumably
// mask == (1 << shift) - 1 so shiftUV preserves the shifted-out bits -- verify
// against the caller.
__global__ void gpu_mix32_1(int64_t * ip, uint32_t stride, int32_t * u, int32_t * v, int32_t numSamples, uint16_t * shiftUV, int32_t mixres, uint32_t mask, int32_t m2, int32_t mixbits, int32_t shift)
{
int z = threadIdx.x + blockIdx.x * blockDim.x; // one sample index per thread
if (z < numSamples)
{
int32_t l, r;
int32_t k = z * 2; // two uint16 slots per sample in shiftUV
int64_t temp = ip[z];
l = (int32_t)temp; // low 32 bits = left channel
r = temp >> 32; // high 32 bits = right channel
shiftUV[k + 0] = (uint16_t)(l & mask);
shiftUV[k + 1] = (uint16_t)(r & mask);
l >>= shift;
r >>= shift;
u[z] = (mixres * l + m2 * r) >> mixbits; // weighted mix channel
v[z] = l - r; // difference (side) channel
}
} | 1ac1078e4c3607322d597fb6f98dc758ee42f22f.cu | #include "includes.h"
// One stereo sample pair per thread: ip[z] packs two 32-bit samples (left in
// the low word, right in the high word). The masked low bits of each sample
// are stashed in shiftUV before shifting, u receives the weighted mix and v
// the channel difference. Looks like ALAC-style mid/side mixing -- TODO
// confirm against the CPU reference implementation.
// NOTE(review): the `stride` parameter is unused in this kernel; presumably
// mask == (1 << shift) - 1 so shiftUV preserves the shifted-out bits -- verify
// against the caller.
__global__ void gpu_mix32_1(int64_t * ip, uint32_t stride, int32_t * u, int32_t * v, int32_t numSamples, uint16_t * shiftUV, int32_t mixres, uint32_t mask, int32_t m2, int32_t mixbits, int32_t shift)
{
int z = threadIdx.x + blockIdx.x * blockDim.x; // one sample index per thread
if (z < numSamples)
{
int32_t l, r;
int32_t k = z * 2; // two uint16 slots per sample in shiftUV
int64_t temp = ip[z];
l = (int32_t)temp; // low 32 bits = left channel
r = temp >> 32; // high 32 bits = right channel
shiftUV[k + 0] = (uint16_t)(l & mask);
shiftUV[k + 1] = (uint16_t)(r & mask);
l >>= shift;
r >>= shift;
u[z] = (mixres * l + m2 * r) >> mixbits; // weighted mix channel
v[z] = l - r; // difference (side) channel
}
} |
96e6b7f75369b22a21bae5cd28ab9cbeada091b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "cudf.h"
#include "rmm/rmm.h"
#include "utilities/cudf_utils.h"
#include "utilities/error_utils.hpp"
#include "utilities/type_dispatcher.hpp"
#include "utilities/wrapper_types.hpp"
#include <hipcub/hipcub.hpp>
// Execution plan/state for a cub-based radix sort: owns the double-buffer
// back arrays for keys/values plus the algorithm's temporary storage.
// Lifecycle: construct -> setup() -> RadixSort<>::sort() -> teardown().
struct RadixSortPlan{
const gdf_size_type num_items; // number of elements to sort
// temporary storage
void *storage; // cub scratch space; sized lazily on first sort() call
size_t storage_bytes;
void *back_key, *back_val; // back buffers for DoubleBuffer ping-pong
size_t back_key_size, back_val_size; // sizes in bytes of the back buffers
hipStream_t stream; // stream used for all allocations/copies/sorts
int descending; // nonzero -> sort in descending key order
unsigned begin_bit, end_bit; // bit subrange of the key to sort on
RadixSortPlan(size_t num_items, int descending,
unsigned begin_bit, unsigned end_bit)
: num_items(num_items),
storage(nullptr), storage_bytes(0),
back_key(nullptr), back_val(nullptr),
back_key_size(0), back_val_size(0),
stream(0), descending(descending),
begin_bit(begin_bit), end_bit(end_bit)
{}
// Allocates the key/value back buffers (num_items elements of each type).
gdf_error setup(size_t sizeof_key, size_t sizeof_val) {
back_key_size = num_items * sizeof_key;
back_val_size = num_items * sizeof_val;
RMM_TRY( RMM_ALLOC(&back_key, back_key_size, stream) ); // TODO: non-default stream
RMM_TRY( RMM_ALLOC(&back_val, back_val_size, stream) );
return GDF_SUCCESS;
}
// Releases the back buffers and the scratch storage.
gdf_error teardown() {
RMM_TRY( RMM_FREE(back_key, stream) );
RMM_TRY( RMM_FREE(back_val, stream) );
RMM_TRY( RMM_FREE(storage, stream) );
return GDF_SUCCESS;
}
};
// Sorts d_key_buf (and, when non-null, d_value_buf) in place using
// DeviceRadixSort with the plan's double buffers.
// Sizing pattern: the first invocation runs with plan->storage == nullptr,
// which makes the cub calls only report the required scratch size; the tail
// of this function then allocates the scratch and recurses exactly once to
// perform the actual sort. After sorting, if the result landed in the back
// buffer, it is copied back into the caller's front buffer.
// NOTE(review): cub::DoubleBuffer is used here alongside hipcub::
// DeviceRadixSort -- on ROCm this likely should be hipcub::DoubleBuffer;
// confirm against the hipCUB namespace mapping.
template <typename Tk, typename Tv>
struct RadixSort {
static
gdf_error sort( RadixSortPlan *plan, Tk *d_key_buf, Tv *d_value_buf) {
unsigned num_items = plan->num_items;
Tk *d_key_alt_buf = (Tk*)plan->back_key;
Tv *d_value_alt_buf = (Tv*)plan->back_val;
hipStream_t stream = plan->stream;
int descending = plan->descending;
unsigned begin_bit = plan->begin_bit;
unsigned end_bit = plan->end_bit;
cub::DoubleBuffer<Tk> d_keys(d_key_buf, d_key_alt_buf);
if (d_value_buf) {
// Sort KeyValue pairs
cub::DoubleBuffer<Tv> d_values(d_value_buf, d_value_alt_buf);
if (descending) {
hipcub::DeviceRadixSort::SortPairsDescending(plan->storage,
plan->storage_bytes,
d_keys,
d_values,
num_items,
begin_bit,
end_bit,
stream);
} else {
hipcub::DeviceRadixSort::SortPairs( plan->storage,
plan->storage_bytes,
d_keys,
d_values,
num_items,
begin_bit,
end_bit,
stream );
}
CUDA_CHECK_LAST();
// Result may be in the alternate buffer; copy values back if needed.
if (plan->storage && d_value_buf != d_values.Current()){
hipMemcpyAsync(d_value_buf, d_value_alt_buf,
num_items * sizeof(Tv),
hipMemcpyDeviceToDevice,
stream);
CUDA_CHECK_LAST();
}
} else {
// Sort Keys only
if (descending) {
hipcub::DeviceRadixSort::SortKeysDescending( plan->storage,
plan->storage_bytes,
d_keys,
num_items,
begin_bit,
end_bit,
stream );
CUDA_CHECK_LAST()
} else {
hipcub::DeviceRadixSort::SortKeys( plan->storage,
plan->storage_bytes,
d_keys,
num_items,
begin_bit,
end_bit,
stream );
}
CUDA_CHECK_LAST();
}
if ( plan->storage ) {
// We have operated and the result is not in front buffer
if (d_key_buf != d_keys.Current()){
hipMemcpyAsync(d_key_buf, d_key_alt_buf, num_items * sizeof(Tk),
hipMemcpyDeviceToDevice, stream);
CUDA_CHECK_LAST();
}
} else {
// We have not operated.
// Just checking for temporary storage requirement
RMM_TRY( RMM_ALLOC(&plan->storage, plan->storage_bytes, plan->stream) ); // TODO: non-default stream
CUDA_CHECK_LAST();
// Now that we have allocated, do real work.
return sort(plan, d_key_buf, d_value_buf);
}
return GDF_SUCCESS;
}
};
// Converts an owned plan into the opaque handle type exposed through the C API.
gdf_radixsort_plan_type* cffi_wrap(RadixSortPlan* obj){
return reinterpret_cast<gdf_radixsort_plan_type*>(obj);
}
// Recovers the concrete plan from the opaque C-API handle (inverse of cffi_wrap).
RadixSortPlan* cffi_unwrap(gdf_radixsort_plan_type* hdl){
return reinterpret_cast<RadixSortPlan*>(hdl);
}
// Creates a radix-sort plan and returns it as an opaque handle.
// The caller owns the handle and must release it with gdf_radixsort_plan_free.
gdf_radixsort_plan_type* gdf_radixsort_plan(size_t num_items, int descending,
                                            unsigned begin_bit, unsigned end_bit){
  auto* plan = new RadixSortPlan(num_items, descending, begin_bit, end_bit);
  return cffi_wrap(plan);
}
// Allocates the plan's back buffers for keys/values of the given element
// sizes. Must be called once before sorting with the plan.
gdf_error gdf_radixsort_plan_setup(gdf_radixsort_plan_type *hdl,
                                   size_t sizeof_key,
                                   size_t sizeof_val)
{
  RadixSortPlan* plan = cffi_unwrap(hdl);
  return plan->setup(sizeof_key, sizeof_val);
}
// Releases the plan's device buffers and destroys the plan object itself.
// Returns the status of the teardown; the handle is invalid afterwards.
gdf_error gdf_radixsort_plan_free(gdf_radixsort_plan_type *hdl) {
  RadixSortPlan* plan = cffi_unwrap(hdl);
  const gdf_error status = plan->teardown();
  delete plan;
  return status;
}
// Type-dispatched implementation of gdf_radixsort: Tk is the key column's
// element type (chosen by the dispatcher), Tv the value column's element type
// (fixed by the caller). Validates the columns against the plan before
// delegating to RadixSort<Tk, Tv>::sort.
template <typename Tv>
struct gdf_radixsort_functor
{
template <typename Tk>
gdf_error
operator()( gdf_radixsort_plan_type *hdl,
gdf_column *keycol,
gdf_column *valcol)
{
/* validity mask must be empty */
GDF_REQUIRE(!keycol->valid || !keycol->null_count, GDF_VALIDITY_UNSUPPORTED);
GDF_REQUIRE(!valcol->valid || !valcol->null_count, GDF_VALIDITY_UNSUPPORTED);
/* size of columns must match */
GDF_REQUIRE(keycol->size == valcol->size, GDF_COLUMN_SIZE_MISMATCH);
RadixSortPlan *plan = cffi_unwrap(hdl);
/* num_items must match */
GDF_REQUIRE(plan->num_items == keycol->size, GDF_COLUMN_SIZE_MISMATCH);
/* back buffer size must match */
GDF_REQUIRE(sizeof(Tk) * plan->num_items == plan->back_key_size,
GDF_COLUMN_SIZE_MISMATCH);
GDF_REQUIRE(sizeof(Tv) * plan->num_items == plan->back_val_size,
GDF_COLUMN_SIZE_MISMATCH);
/* Do sort */
return RadixSort<Tk, Tv>::sort(plan, (Tk*)keycol->data, (Tv*)valcol->data);
}
};
// C-API entry point: radix-sorts keycol, permuting valcol alongside it.
// The value column must be GDF_INT64 (typically a row-index payload); the key
// column's runtime dtype is dispatched to the templated functor above.
gdf_error gdf_radixsort(gdf_radixsort_plan_type *hdl,
gdf_column *keycol,
gdf_column *valcol)
{
GDF_REQUIRE(valcol->dtype == GDF_INT64, GDF_UNSUPPORTED_DTYPE);
return cudf::type_dispatcher(keycol->dtype,
gdf_radixsort_functor<int64_t>{},
hdl, keycol, valcol);
}
| 96e6b7f75369b22a21bae5cd28ab9cbeada091b5.cu | #include "cudf.h"
#include "rmm/rmm.h"
#include "utilities/cudf_utils.h"
#include "utilities/error_utils.hpp"
#include "utilities/type_dispatcher.hpp"
#include "utilities/wrapper_types.hpp"
#include <cub/device/device_radix_sort.cuh>
// Execution plan/state for a cub-based radix sort: owns the double-buffer
// back arrays for keys/values plus the algorithm's temporary storage.
// Lifecycle: construct -> setup() -> RadixSort<>::sort() -> teardown().
struct RadixSortPlan{
const gdf_size_type num_items; // number of elements to sort
// temporary storage
void *storage; // cub scratch space; sized lazily on first sort() call
size_t storage_bytes;
void *back_key, *back_val; // back buffers for cub::DoubleBuffer ping-pong
size_t back_key_size, back_val_size; // sizes in bytes of the back buffers
cudaStream_t stream; // stream used for all allocations/copies/sorts
int descending; // nonzero -> sort in descending key order
unsigned begin_bit, end_bit; // bit subrange of the key to sort on
RadixSortPlan(size_t num_items, int descending,
unsigned begin_bit, unsigned end_bit)
: num_items(num_items),
storage(nullptr), storage_bytes(0),
back_key(nullptr), back_val(nullptr),
back_key_size(0), back_val_size(0),
stream(0), descending(descending),
begin_bit(begin_bit), end_bit(end_bit)
{}
// Allocates the key/value back buffers (num_items elements of each type).
gdf_error setup(size_t sizeof_key, size_t sizeof_val) {
back_key_size = num_items * sizeof_key;
back_val_size = num_items * sizeof_val;
RMM_TRY( RMM_ALLOC(&back_key, back_key_size, stream) ); // TODO: non-default stream
RMM_TRY( RMM_ALLOC(&back_val, back_val_size, stream) );
return GDF_SUCCESS;
}
// Releases the back buffers and the scratch storage.
gdf_error teardown() {
RMM_TRY( RMM_FREE(back_key, stream) );
RMM_TRY( RMM_FREE(back_val, stream) );
RMM_TRY( RMM_FREE(storage, stream) );
return GDF_SUCCESS;
}
};
// Sorts d_key_buf (and, when non-null, d_value_buf) in place using
// cub::DeviceRadixSort with the plan's double buffers.
// Sizing pattern: the first invocation runs with plan->storage == nullptr,
// which makes the cub calls only report the required scratch size; the tail
// of this function then allocates the scratch and recurses exactly once to
// perform the actual sort. After sorting, if the result landed in the back
// buffer, it is copied back into the caller's front buffer.
template <typename Tk, typename Tv>
struct RadixSort {
static
gdf_error sort( RadixSortPlan *plan, Tk *d_key_buf, Tv *d_value_buf) {
unsigned num_items = plan->num_items;
Tk *d_key_alt_buf = (Tk*)plan->back_key;
Tv *d_value_alt_buf = (Tv*)plan->back_val;
cudaStream_t stream = plan->stream;
int descending = plan->descending;
unsigned begin_bit = plan->begin_bit;
unsigned end_bit = plan->end_bit;
cub::DoubleBuffer<Tk> d_keys(d_key_buf, d_key_alt_buf);
if (d_value_buf) {
// Sort KeyValue pairs
cub::DoubleBuffer<Tv> d_values(d_value_buf, d_value_alt_buf);
if (descending) {
cub::DeviceRadixSort::SortPairsDescending(plan->storage,
plan->storage_bytes,
d_keys,
d_values,
num_items,
begin_bit,
end_bit,
stream);
} else {
cub::DeviceRadixSort::SortPairs( plan->storage,
plan->storage_bytes,
d_keys,
d_values,
num_items,
begin_bit,
end_bit,
stream );
}
CUDA_CHECK_LAST();
// Result may be in the alternate buffer; copy values back if needed.
if (plan->storage && d_value_buf != d_values.Current()){
cudaMemcpyAsync(d_value_buf, d_value_alt_buf,
num_items * sizeof(Tv),
cudaMemcpyDeviceToDevice,
stream);
CUDA_CHECK_LAST();
}
} else {
// Sort Keys only
if (descending) {
cub::DeviceRadixSort::SortKeysDescending( plan->storage,
plan->storage_bytes,
d_keys,
num_items,
begin_bit,
end_bit,
stream );
CUDA_CHECK_LAST()
} else {
cub::DeviceRadixSort::SortKeys( plan->storage,
plan->storage_bytes,
d_keys,
num_items,
begin_bit,
end_bit,
stream );
}
CUDA_CHECK_LAST();
}
if ( plan->storage ) {
// We have operated and the result is not in front buffer
if (d_key_buf != d_keys.Current()){
cudaMemcpyAsync(d_key_buf, d_key_alt_buf, num_items * sizeof(Tk),
cudaMemcpyDeviceToDevice, stream);
CUDA_CHECK_LAST();
}
} else {
// We have not operated.
// Just checking for temporary storage requirement
RMM_TRY( RMM_ALLOC(&plan->storage, plan->storage_bytes, plan->stream) ); // TODO: non-default stream
CUDA_CHECK_LAST();
// Now that we have allocated, do real work.
return sort(plan, d_key_buf, d_value_buf);
}
return GDF_SUCCESS;
}
};
// Converts an owned plan into the opaque handle type exposed through the C API.
gdf_radixsort_plan_type* cffi_wrap(RadixSortPlan* obj){
return reinterpret_cast<gdf_radixsort_plan_type*>(obj);
}
// Recovers the concrete plan from the opaque C-API handle (inverse of cffi_wrap).
RadixSortPlan* cffi_unwrap(gdf_radixsort_plan_type* hdl){
return reinterpret_cast<RadixSortPlan*>(hdl);
}
// Creates a radix-sort plan and returns it as an opaque handle.
// The caller owns the handle and must release it with gdf_radixsort_plan_free.
gdf_radixsort_plan_type* gdf_radixsort_plan(size_t num_items, int descending,
                                            unsigned begin_bit, unsigned end_bit){
  auto* plan = new RadixSortPlan(num_items, descending, begin_bit, end_bit);
  return cffi_wrap(plan);
}
// Allocates the plan's back buffers for keys/values of the given element
// sizes. Must be called once before sorting with the plan.
gdf_error gdf_radixsort_plan_setup(gdf_radixsort_plan_type *hdl,
                                   size_t sizeof_key,
                                   size_t sizeof_val)
{
  RadixSortPlan* plan = cffi_unwrap(hdl);
  return plan->setup(sizeof_key, sizeof_val);
}
// Releases the plan's device buffers and destroys the plan object itself.
// Returns the status of the teardown; the handle is invalid afterwards.
gdf_error gdf_radixsort_plan_free(gdf_radixsort_plan_type *hdl) {
  RadixSortPlan* plan = cffi_unwrap(hdl);
  const gdf_error status = plan->teardown();
  delete plan;
  return status;
}
// Type-dispatched implementation of gdf_radixsort: Tk is the key column's
// element type (chosen by the dispatcher), Tv the value column's element type
// (fixed by the caller). Validates the columns against the plan before
// delegating to RadixSort<Tk, Tv>::sort.
template <typename Tv>
struct gdf_radixsort_functor
{
template <typename Tk>
gdf_error
operator()( gdf_radixsort_plan_type *hdl,
gdf_column *keycol,
gdf_column *valcol)
{
/* validity mask must be empty */
GDF_REQUIRE(!keycol->valid || !keycol->null_count, GDF_VALIDITY_UNSUPPORTED);
GDF_REQUIRE(!valcol->valid || !valcol->null_count, GDF_VALIDITY_UNSUPPORTED);
/* size of columns must match */
GDF_REQUIRE(keycol->size == valcol->size, GDF_COLUMN_SIZE_MISMATCH);
RadixSortPlan *plan = cffi_unwrap(hdl);
/* num_items must match */
GDF_REQUIRE(plan->num_items == keycol->size, GDF_COLUMN_SIZE_MISMATCH);
/* back buffer size must match */
GDF_REQUIRE(sizeof(Tk) * plan->num_items == plan->back_key_size,
GDF_COLUMN_SIZE_MISMATCH);
GDF_REQUIRE(sizeof(Tv) * plan->num_items == plan->back_val_size,
GDF_COLUMN_SIZE_MISMATCH);
/* Do sort */
return RadixSort<Tk, Tv>::sort(plan, (Tk*)keycol->data, (Tv*)valcol->data);
}
};
// C-API entry point: radix-sorts keycol, permuting valcol alongside it.
// The value column must be GDF_INT64 (typically a row-index payload); the key
// column's runtime dtype is dispatched to the templated functor above.
gdf_error gdf_radixsort(gdf_radixsort_plan_type *hdl,
gdf_column *keycol,
gdf_column *valcol)
{
GDF_REQUIRE(valcol->dtype == GDF_INT64, GDF_UNSUPPORTED_DTYPE);
return cudf::type_dispatcher(keycol->dtype,
gdf_radixsort_functor<int64_t>{},
hdl, keycol, valcol);
}
|
198bf5007422df7b3ea5a5eaad1fac4242923b0a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Modifications: CUDA implementation of CPU verison
// Copyright 2020 Netease Fuxi AI LAB
// SPDX-License-Identifier: Apache-2.0
#include "rasterize_triangles_cuda_impl.h"
namespace pytorch_mesh_renderer
{
// Takes the minimum of a, b, and c, rounds down, and converts to an integer
// in the range [low, high].
// Floor of min(a, b, c), clamped into [low, high] and converted to int32.
template <typename scalar_t>
__device__ __forceinline__ int32_t ClampedIntegerMin(scalar_t a, scalar_t b, scalar_t c, int32_t low, int32_t high)
{
    const scalar_t smallest = floor(fmin(fmin(a, b), c));
    const scalar_t clamped = fmin(fmax(smallest, (scalar_t)low), (scalar_t)high);
    return (int32_t)clamped;
}
// Takes the maximum of a, b, and c, rounds up, and converts to an integer
// in the range [low, high].
// Ceiling of max(a, b, c), clamped into [low, high] and converted to int32.
template <typename scalar_t>
__device__ __forceinline__ int32_t ClampedIntegerMax(scalar_t a, scalar_t b, scalar_t c, int32_t low, int32_t high)
{
    const scalar_t largest = ceil(fmax(fmax(a, b), c));
    const scalar_t clamped = fmin(fmax(largest, (scalar_t)low), (scalar_t)high);
    return (int32_t)clamped;
}
// Computes a 3x3 matrix inverse without dividing by the determinant.
// Instead, makes an unnormalized matrix inverse with the correct sign
// by flipping the sign of the matrix if the determinant is negative.
// By leaving out determinant division, the rows of M^-1 only depend on two out
// of three of the columns of M; i.e., the first row of M^-1 only depends on the
// second and third columns of M, the second only depends on the first and
// third, etc. This means we can compute edge functions for two neighboring
// triangles independently and produce exactly the same numerical result up to
// the sign. This in turn means we can avoid cracks in rasterization without
// using fixed-point arithmetic.
// See http://mathworld.wolfram.com/MatrixInverse.html
template <typename scalar_t>
__device__ void ComputeUnnormalizedMatrixInverse(
const scalar_t a11, const scalar_t a12,
const scalar_t a13, const scalar_t a21,
const scalar_t a22, const scalar_t a23,
const scalar_t a31, const scalar_t a32,
const scalar_t a33, scalar_t m_inv[9])
{
// Each entry is a 2x2 cofactor of M; together they form adj(M) = det(M)*M^-1,
// i.e. the inverse scaled by the (unapplied) determinant.
m_inv[0] = a22 * a33 - a32 * a23;
m_inv[1] = a13 * a32 - a33 * a12;
m_inv[2] = a12 * a23 - a22 * a13;
m_inv[3] = a23 * a31 - a33 * a21;
m_inv[4] = a11 * a33 - a31 * a13;
m_inv[5] = a13 * a21 - a23 * a11;
m_inv[6] = a21 * a32 - a31 * a22;
m_inv[7] = a12 * a31 - a32 * a11;
m_inv[8] = a11 * a22 - a21 * a12;
// The first column of the unnormalized M^-1 contains intermediate values for
// det(M).
const scalar_t det = a11 * m_inv[0] + a12 * m_inv[3] + a13 * m_inv[6];
// Transfer the sign of the determinant.
if (det < 0.0f)
{
// Negating all entries keeps the edge-function signs consistent between
// neighboring triangles (see the comment block above this function).
for (int32_t i = 0; i < 9; ++i)
{
m_inv[i] = -m_inv[i];
}
}
}
// Computes the edge functions from M^-1 as described by Olano and Greer,
// "Triangle Scan Conversion using 2D Homogeneous Coordinates."
//
// This function combines equations (3) and (4). It first computes
// [a b c] = u_i * M^-1, where u_0 = [1 0 0], u_1 = [0 1 0], etc.,
// then computes edge_i = aX + bY + c
// Evaluates the three homogeneous edge functions at pixel (px, py):
// row i of m_inv holds (a, b, c) for edge i, and values[i] = a*px + b*py + c.
template <typename scalar_t>
__device__ void ComputeEdgeFunctions(const scalar_t px, const scalar_t py,
                                     const scalar_t m_inv[9], scalar_t values[3])
{
    for (int32_t edge = 0; edge < 3; ++edge)
    {
        const scalar_t* row = m_inv + 3 * edge;
        values[edge] = row[0] * px + row[1] * py + row[2];
    }
}
// Determines whether the point p lies inside a front-facing triangle.
// Counts pixels exactly on an edge as inside the triangle, as long as the
// triangle is not degenerate. Degenerate (zero-area) triangles always fail the
// inside test.
// Inside test: every edge value non-negative (edges count as inside) and at
// least one strictly positive (rejects degenerate, zero-area triangles).
template <typename scalar_t>
__device__ __forceinline__ bool PixelIsInsideTriangle(const scalar_t edge_values[3])
{
    bool all_non_negative = true;
    bool any_positive = false;
    for (int32_t i = 0; i < 3; ++i)
    {
        all_non_negative = all_non_negative && (edge_values[i] >= 0);
        any_positive = any_positive || (edge_values[i] > 0);
    }
    return all_non_negative && any_positive;
}
// Forward rasterization: one thread per triangle. Each thread computes its
// triangle's screen-space bounding box, then scans every covered pixel,
// evaluating edge functions in double precision to decide coverage. Z-buffer
// updates are serialized per pixel with an atomicCAS spinlock in `locks`
// (one int32 per pixel, pre-zeroed by the host launcher).
// Outputs per pixel: nearest triangle id, its barycentric coordinates, and
// the NDC depth in z_buffer.
template <typename scalar_t>
__global__ void RasterizeTrianglesForwardCudaKernel(
const scalar_t* vertices, const int64_t* triangles,
int64_t triangle_count, int32_t image_width, int32_t image_height,
scalar_t* barycentric_coordinates, int64_t* triangle_ids,
scalar_t* z_buffer, int32_t* locks)
{
const int64_t triangle_id = blockIdx.x * blockDim.x + threadIdx.x;
if (triangle_id >= triangle_count)
{
return;
}
const scalar_t half_image_width = 0.5f * image_width;
const scalar_t half_image_height = 0.5f * image_height;
double unnormalized_matrix_inverse[9];
double b_over_w[3];
// Vertices are packed as 4 floats (x, y, z, w) per vertex.
const int64_t v0_x_id = 4 * triangles[3 * triangle_id];
const int64_t v1_x_id = 4 * triangles[3 * triangle_id + 1];
const int64_t v2_x_id = 4 * triangles[3 * triangle_id + 2];
const scalar_t v0w = vertices[v0_x_id + 3];
const scalar_t v1w = vertices[v1_x_id + 3];
const scalar_t v2w = vertices[v2_x_id + 3];
// Early exit: if all w < 0, triangle is entirely behind the eye.
if (v0w < 0 && v1w < 0 && v2w < 0)
{
return;
}
const scalar_t v0x = vertices[v0_x_id];
const scalar_t v0y = vertices[v0_x_id + 1];
const scalar_t v1x = vertices[v1_x_id];
const scalar_t v1y = vertices[v1_x_id + 1];
const scalar_t v2x = vertices[v2_x_id];
const scalar_t v2y = vertices[v2_x_id + 1];
// The nondeterminacy of GPU device in single precision may lead some pixel
// to be missing when a pixel is on the boundary of two triangles, so we use
// double precision to check the location of a pixel.
ComputeUnnormalizedMatrixInverse((double)v0x, (double)v1x, (double)v2x,
(double)v0y, (double)v1y, (double)v2y,
(double)v0w, (double)v1w, (double)v2w,
unnormalized_matrix_inverse);
// Initialize the bounding box to the entire screen.
int32_t left = 0, right = image_width, bottom = 0, top = image_height;
// If the triangle is entirely inside the screen, project the vertices to
// pixel coordinates and find the triangle bounding box enlarged to the
// nearest integer and clamped to the image boundaries.
if (v0w > 0 && v1w > 0 && v2w > 0)
{
const scalar_t p0x = (v0x / v0w + 1.0f) * half_image_width;
const scalar_t p1x = (v1x / v1w + 1.0f) * half_image_width;
const scalar_t p2x = (v2x / v2w + 1.0f) * half_image_width;
const scalar_t p0y = (v0y / v0w + 1.0f) * half_image_height;
const scalar_t p1y = (v1y / v1w + 1.0f) * half_image_height;
const scalar_t p2y = (v2y / v2w + 1.0f) * half_image_height;
left = ClampedIntegerMin(p0x, p1x, p2x, 0, image_width);
right = ClampedIntegerMax(p0x, p1x, p2x, 0, image_width);
bottom = ClampedIntegerMin(p0y, p1y, p2y, 0, image_height);
top = ClampedIntegerMax(p0y, p1y, p2y, 0, image_height);
}
// Iterate over each pixel in the bounding box.
for (int32_t iy = bottom; iy < top; ++iy)
{
for (int32_t ix = left; ix < right; ++ix)
{
// Pixel center in NDC [-1, 1].
const scalar_t px = ((ix + 0.5f) / half_image_width) - 1.0f;
const scalar_t py = ((iy + 0.5f) / half_image_height) - 1.0f;
const int32_t pixel_idx = iy * image_width + ix;
ComputeEdgeFunctions((double)px, (double)py, unnormalized_matrix_inverse, b_over_w);
if (!PixelIsInsideTriangle(b_over_w))
{
continue;
}
// Normalize the homogeneous edge values into barycentric coordinates.
const scalar_t one_over_w = scalar_t(b_over_w[0] + b_over_w[1] + b_over_w[2]);
const scalar_t b0 = scalar_t(b_over_w[0] / one_over_w);
const scalar_t b1 = scalar_t(b_over_w[1] / one_over_w);
const scalar_t b2 = scalar_t(b_over_w[2] / one_over_w);
const scalar_t v0z = vertices[v0_x_id + 2];
const scalar_t v1z = vertices[v1_x_id + 2];
const scalar_t v2z = vertices[v2_x_id + 2];
// Since we computed an unnormalized w above, we need to recompute
// a properly scaled clip-space w value and then divide clip-space z
// by that.
const scalar_t clip_z = b0 * v0z + b1 * v1z + b2 * v2z;
const scalar_t clip_w = b0 * v0w + b1 * v1w + b2 * v2w;
const scalar_t z = clip_z / clip_w;
// Skip the pixel if it is farther than the current z-buffer pixel or
// beyond the near or far clipping plane.
if (z < -1.0 || z > 1.0) // || z > z_buffer[pixel_idx]
{
continue;
}
// write z_buffer, triangle_ids and barycentric_coordinates by using cuda threads lock
// reference: https://stackoverflow.com/questions/21341495/cuda-mutex-and-atomiccas
// Spin until this thread wins the pixel's lock; the depth test and the
// three output writes happen inside the critical section so competing
// triangles cannot interleave their updates.
int32_t* mutex = locks + pixel_idx;
bool isSet = false;
do
{
if (isSet = atomicCAS(mutex, 0, 1) == 0)
{
if (z <= z_buffer[pixel_idx])
{
z_buffer[pixel_idx] = z;
triangle_ids[pixel_idx] = triangle_id;
barycentric_coordinates[3 * pixel_idx + 0] = b0;
barycentric_coordinates[3 * pixel_idx + 1] = b1;
barycentric_coordinates[3 * pixel_idx + 2] = b2;
}
}
if (isSet)
{
atomicExch(mutex, 0);
__threadfence();
}
} while (!isSet);
/* original
if (z < z_buffer[pixel_idx])
{
z_buffer[pixel_idx] = z;
triangle_ids[pixel_idx] = triangle_id;
barycentric_coordinates[3 * pixel_idx + 0] = b0;
barycentric_coordinates[3 * pixel_idx + 1] = b1;
barycentric_coordinates[3 * pixel_idx + 2] = b2;
}
*/
}
}
}
// Host launcher for the forward rasterization kernel: one thread per
// triangle, 512 threads per block. Allocates and zeroes a per-pixel spinlock
// array that the kernel uses to serialize z-buffer updates, then dispatches
// on the vertex tensor's floating-point dtype.
// NOTE(review): the hipMalloc/hipMemset return codes are unchecked; a failed
// allocation would hand the kernel a null `locks` pointer. Only the final
// hipGetLastError after the launch is reported.
void RasterizeTrianglesForwardCuda(
at::Tensor vertices, at::Tensor triangles,
int32_t image_width, int32_t image_height,
torch::Tensor barycentric, torch::Tensor triangle_ids, torch::Tensor z_buffer)
{
const int64_t triangle_count = triangles.size(0);
const int threads = 512;
const dim3 blocks((triangle_count - 1) / threads + 1);
int32_t* locks = NULL; // pixel locks
hipMalloc((void**)&locks, image_width * image_height * sizeof(int32_t));
hipMemset(locks, 0, image_width * image_height * sizeof(int32_t)); // all unlocked
AT_DISPATCH_FLOATING_TYPES(vertices.type(), "RasterizeTrianglesForwardCuda", ([&] {
RasterizeTrianglesForwardCudaKernel<scalar_t> << <blocks, threads >> > (
vertices.data<scalar_t>(),
triangles.data<int64_t>(),
triangle_count,
image_width,
image_height,
barycentric.data<scalar_t>(),
triangle_ids.data<int64_t>(),
z_buffer.data<scalar_t>(),
locks);
}));
hipFree(locks);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error in RasterizeTrianglesForwardCuda: %s\n", hipGetErrorString(err));
}
// Backward pass: one thread per pixel. Using the forward pass's per-pixel
// barycentric coordinates and triangle ids, propagates the upstream gradient
// df/dbarycentric into df/dvertices (x, y, w components only -- z receives no
// gradient here). Accumulation into df_dvertices uses atomicAdd because many
// pixels can map to the same vertex.
template <typename scalar_t>
__global__ void RasterizeTrianglesBackwardCudaKernel(
const scalar_t* vertices, const int64_t* triangles,
const scalar_t* barycentric_coordinates,
const int64_t* triangle_ids,
const scalar_t* df_dbarycentric_coordinates,
int32_t image_width, int32_t image_height,
scalar_t* df_dvertices)
{
const int32_t pixel_id = blockIdx.x * blockDim.x + threadIdx.x;
if (pixel_id >= image_width * image_height)
{
return;
}
// We first loop over each pixel in the output image, and compute
// dbarycentric_coordinate[0,1,2]/dvertex[0x, 0y, 1x, 1y, 2x, 2y].
// Next we compute each value above's contribution to
// df/dvertices, building up that matrix as the output of this iteration.
// b0, b1, and b2 are the three barycentric coordinate values
// rendered at pixel pixel_id.
const scalar_t b0 = barycentric_coordinates[3 * pixel_id];
const scalar_t b1 = barycentric_coordinates[3 * pixel_id + 1];
const scalar_t b2 = barycentric_coordinates[3 * pixel_id + 2];
// Pixels not covered by any triangle (near-zero barycentrics) contribute no
// gradient.
if (b0 + b1 + b2 < kDegenerateBarycentricCoordinatesCutoff)
{
return;
}
const scalar_t df_db0 = df_dbarycentric_coordinates[3 * pixel_id];
const scalar_t df_db1 = df_dbarycentric_coordinates[3 * pixel_id + 1];
const scalar_t df_db2 = df_dbarycentric_coordinates[3 * pixel_id + 2];
const int64_t triangle_at_current_pixel = triangle_ids[pixel_id];
const int64_t* vertices_at_current_pixel =
&triangles[3 * triangle_at_current_pixel];
// Extract vertex indices for the current triangle.
const int64_t v0_id = 4 * vertices_at_current_pixel[0];
const int64_t v1_id = 4 * vertices_at_current_pixel[1];
const int64_t v2_id = 4 * vertices_at_current_pixel[2];
// Extract x,y,w components of the vertices' clip space coordinates.
const scalar_t x0 = vertices[v0_id];
const scalar_t y0 = vertices[v0_id + 1];
const scalar_t w0 = vertices[v0_id + 3];
const scalar_t x1 = vertices[v1_id];
const scalar_t y1 = vertices[v1_id + 1];
const scalar_t w1 = vertices[v1_id + 3];
const scalar_t x2 = vertices[v2_id];
const scalar_t y2 = vertices[v2_id + 1];
const scalar_t w2 = vertices[v2_id + 3];
// Compute pixel's NDC-s.
const int32_t ix = pixel_id % image_width;
const int32_t iy = pixel_id / image_width;
const scalar_t px = 2 * (ix + 0.5f) / image_width - 1.0f;
const scalar_t py = 2 * (iy + 0.5f) / image_height - 1.0f;
// Baricentric gradients wrt each vertex coordinate share a common factor.
const scalar_t db0_dx = py * (w1 - w2) - (y1 - y2);
const scalar_t db1_dx = py * (w2 - w0) - (y2 - y0);
const scalar_t db2_dx = -(db0_dx + db1_dx);
const scalar_t db0_dy = (x1 - x2) - px * (w1 - w2);
const scalar_t db1_dy = (x2 - x0) - px * (w2 - w0);
const scalar_t db2_dy = -(db0_dy + db1_dy);
const scalar_t db0_dw = px * (y1 - y2) - py * (x1 - x2);
const scalar_t db1_dw = px * (y2 - y0) - py * (x2 - x0);
const scalar_t db2_dw = -(db0_dw + db1_dw);
// Combine them with chain rule.
const scalar_t df_dx = df_db0 * db0_dx + df_db1 * db1_dx + df_db2 * db2_dx;
const scalar_t df_dy = df_db0 * db0_dy + df_db1 * db1_dy + df_db2 * db2_dy;
const scalar_t df_dw = df_db0 * db0_dw + df_db1 * db1_dw + df_db2 * db2_dw;
// Values of edge equations and inverse w at the current pixel.
const scalar_t edge0_over_w = x2 * db0_dx + y2 * db0_dy + w2 * db0_dw;
const scalar_t edge1_over_w = x2 * db1_dx + y2 * db1_dy + w2 * db1_dw;
const scalar_t edge2_over_w = x1 * db2_dx + y1 * db2_dy + w1 * db2_dw;
const scalar_t w_inv = edge0_over_w + edge1_over_w + edge2_over_w;
// All gradients share a common denominator.
const scalar_t w_sqr = 1 / (w_inv * w_inv);
// Gradients wrt each vertex share a common factor.
const scalar_t edge0 = w_sqr * edge0_over_w;
const scalar_t edge1 = w_sqr * edge1_over_w;
const scalar_t edge2 = w_sqr * edge2_over_w;
// Scatter-add this pixel's contribution into the shared gradient buffer.
atomicAdd(&df_dvertices[v0_id + 0], edge0 * df_dx);
atomicAdd(&df_dvertices[v0_id + 1], edge0 * df_dy);
atomicAdd(&df_dvertices[v0_id + 3], edge0 * df_dw);
atomicAdd(&df_dvertices[v1_id + 0], edge1 * df_dx);
atomicAdd(&df_dvertices[v1_id + 1], edge1 * df_dy);
atomicAdd(&df_dvertices[v1_id + 3], edge1 * df_dw);
atomicAdd(&df_dvertices[v2_id + 0], edge2 * df_dx);
atomicAdd(&df_dvertices[v2_id + 1], edge2 * df_dy);
atomicAdd(&df_dvertices[v2_id + 3], edge2 * df_dw);
}
// Host wrapper for the backward rasterization kernel (HIP build).
// Launches one thread per pixel; gradients are accumulated into
// df_dvertices by the kernel via atomicAdd.
void RasterizeTrianglesBackwardCuda(
    at::Tensor vertices,                     // FloatTensor
    at::Tensor triangles,                    // LongTensor
    at::Tensor barycentric_coordinates,      // FloatTensor
    at::Tensor triangle_ids,                 // LongTensor
    at::Tensor df_dbarycentric_coordinates,  // FloatTensor
    int32_t image_width,
    int32_t image_height,
    at::Tensor df_dvertices)
{
    // Widen BEFORE multiplying: `image_width * image_height` is evaluated in
    // 32-bit int and can overflow for very large images even though the
    // result variable is int64_t.
    const int64_t pixel_count = static_cast<int64_t>(image_width) * image_height;
    const int threads = 512;
    const dim3 blocks((pixel_count - 1) / threads + 1);
    AT_DISPATCH_FLOATING_TYPES(vertices.type(), "RasterizeTrianglesBackwardCuda", ([&] {
        RasterizeTrianglesBackwardCudaKernel<scalar_t> << <blocks, threads >> > (
            vertices.data<scalar_t>(),
            triangles.data<int64_t>(),
            barycentric_coordinates.data<scalar_t>(),
            triangle_ids.data<int64_t>(),
            df_dbarycentric_coordinates.data<scalar_t>(),
            image_width,
            image_height,
            df_dvertices.data<scalar_t>());
    }));
    // Kernel launches are asynchronous; this only catches launch-config errors.
    hipError_t err = hipGetLastError();
    if (err != hipSuccess)
        printf("Error in RasterizeTrianglesBackwardCuda: %s\n", hipGetErrorString(err));
}
}// namespace pytorch_mesh_renderer
| 198bf5007422df7b3ea5a5eaad1fac4242923b0a.cu | // Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Modifications: CUDA implementation of CPU verison
// Copyright 2020 Netease Fuxi AI LAB
// SPDX-License-Identifier: Apache-2.0
#include "rasterize_triangles_cuda_impl.h"
namespace pytorch_mesh_renderer
{
// Returns floor(min(a, b, c)) clamped to the integer range [low, high].
template <typename scalar_t>
__device__ __forceinline__ int32_t ClampedIntegerMin(scalar_t a, scalar_t b, scalar_t c, int32_t low, int32_t high)
{
    const scalar_t smallest = fmin(a, fmin(b, c));
    const scalar_t clamped =
        fmin(fmax(floor(smallest), (scalar_t)low), (scalar_t)high);
    return (int32_t)clamped;
}
// Returns ceil(max(a, b, c)) clamped to the integer range [low, high].
template <typename scalar_t>
__device__ __forceinline__ int32_t ClampedIntegerMax(scalar_t a, scalar_t b, scalar_t c, int32_t low, int32_t high)
{
    const scalar_t largest = fmax(a, fmax(b, c));
    const scalar_t clamped =
        fmin(fmax(ceil(largest), (scalar_t)low), (scalar_t)high);
    return (int32_t)clamped;
}
// Computes a 3x3 matrix inverse without dividing by the determinant.
// Instead, makes an unnormalized matrix inverse with the correct sign
// by flipping the sign of the matrix if the determinant is negative.
// By leaving out determinant division, the rows of M^-1 only depend on two out
// of three of the columns of M; i.e., the first row of M^-1 only depends on the
// second and third columns of M, the second only depends on the first and
// third, etc. This means we can compute edge functions for two neighboring
// triangles independently and produce exactly the same numerical result up to
// the sign. This in turn means we can avoid cracks in rasterization without
// using fixed-point arithmetic.
// See http://mathworld.wolfram.com/MatrixInverse.html
//
// a11..a33: entries of M in row-major order.
// m_inv:    output, the 9 entries (row-major) of the sign-corrected adjugate.
template <typename scalar_t>
__device__ void ComputeUnnormalizedMatrixInverse(
    const scalar_t a11, const scalar_t a12,
    const scalar_t a13, const scalar_t a21,
    const scalar_t a22, const scalar_t a23,
    const scalar_t a31, const scalar_t a32,
    const scalar_t a33, scalar_t m_inv[9])
{
    // Each entry is a 2x2 cofactor of M. The operand order fixes the sign of
    // each cofactor, so these expressions must not be reordered.
    m_inv[0] = a22 * a33 - a32 * a23;
    m_inv[1] = a13 * a32 - a33 * a12;
    m_inv[2] = a12 * a23 - a22 * a13;
    m_inv[3] = a23 * a31 - a33 * a21;
    m_inv[4] = a11 * a33 - a31 * a13;
    m_inv[5] = a13 * a21 - a23 * a11;
    m_inv[6] = a21 * a32 - a31 * a22;
    m_inv[7] = a12 * a31 - a32 * a11;
    m_inv[8] = a11 * a22 - a21 * a12;
    // The first column of the unnormalized M^-1 contains intermediate values for
    // det(M): expanding along the first row of M.
    const scalar_t det = a11 * m_inv[0] + a12 * m_inv[3] + a13 * m_inv[6];
    // Transfer the sign of the determinant so edge tests keep a consistent
    // orientation.
    if (det < 0.0f)
    {
        for (int32_t i = 0; i < 9; ++i)
        {
            m_inv[i] = -m_inv[i];
        }
    }
}
// Evaluates the three homogeneous edge functions at pixel (px, py), as
// described by Olano and Greer, "Triangle Scan Conversion using 2D
// Homogeneous Coordinates."
//
// This combines equations (3) and (4): [a b c] = u_i * M^-1 with
// u_0 = [1 0 0], u_1 = [0 1 0], etc., then edge_i = a*px + b*py + c.
// m_inv is row-major; row i holds the coefficients of edge i.
template <typename scalar_t>
__device__ void ComputeEdgeFunctions(const scalar_t px, const scalar_t py,
    const scalar_t m_inv[9], scalar_t values[3])
{
    for (int32_t edge = 0; edge < 3; ++edge)
    {
        const scalar_t* row = m_inv + 3 * edge;
        values[edge] = row[0] * px + row[1] * py + row[2];
    }
}
// Returns true iff the pixel lies inside a front-facing, non-degenerate
// triangle. Pixels exactly on an edge count as inside; degenerate
// (zero-area) triangles — all edge values zero — always fail.
template <typename scalar_t>
__device__ __forceinline__ bool PixelIsInsideTriangle(const scalar_t edge_values[3])
{
    bool all_nonnegative = true;
    bool any_positive = false;
    for (int32_t i = 0; i < 3; ++i)
    {
        all_nonnegative = all_nonnegative && (edge_values[i] >= 0);
        any_positive = any_positive || (edge_values[i] > 0);
    }
    return all_nonnegative && any_positive;
}
// Forward rasterization kernel: one thread per triangle. Each thread walks
// the pixels inside its triangle's clipped screen-space bounding box and, for
// each covered pixel, atomically updates the z-buffer entry, triangle id and
// barycentric coordinates under a per-pixel spinlock.
//
// vertices:  packed XYZW clip-space coordinates, 4 values per vertex.
// triangles: 3 vertex indices per triangle.
// locks:     one int32 per pixel, MUST be zero-initialized by the caller.
//
// Fix vs. original: the release __threadfence() was issued AFTER
// atomicExch(mutex, 0). CUDA atomics carry no memory ordering, so another
// thread could acquire the lock and still observe stale z_buffer /
// barycentric writes. The fence must precede the release; a matching
// acquire fence is added after a successful atomicCAS.
template <typename scalar_t>
__global__ void RasterizeTrianglesForwardCudaKernel(
    const scalar_t* vertices, const int64_t* triangles,
    int64_t triangle_count, int32_t image_width, int32_t image_height,
    scalar_t* barycentric_coordinates, int64_t* triangle_ids,
    scalar_t* z_buffer, int32_t* locks)
{
    const int64_t triangle_id = blockIdx.x * blockDim.x + threadIdx.x;
    if (triangle_id >= triangle_count)
    {
        return;
    }
    const scalar_t half_image_width = 0.5f * image_width;
    const scalar_t half_image_height = 0.5f * image_height;
    double unnormalized_matrix_inverse[9];
    double b_over_w[3];
    const int64_t v0_x_id = 4 * triangles[3 * triangle_id];
    const int64_t v1_x_id = 4 * triangles[3 * triangle_id + 1];
    const int64_t v2_x_id = 4 * triangles[3 * triangle_id + 2];
    const scalar_t v0w = vertices[v0_x_id + 3];
    const scalar_t v1w = vertices[v1_x_id + 3];
    const scalar_t v2w = vertices[v2_x_id + 3];
    // Early exit: if all w < 0, triangle is entirely behind the eye.
    if (v0w < 0 && v1w < 0 && v2w < 0)
    {
        return;
    }
    const scalar_t v0x = vertices[v0_x_id];
    const scalar_t v0y = vertices[v0_x_id + 1];
    const scalar_t v1x = vertices[v1_x_id];
    const scalar_t v1y = vertices[v1_x_id + 1];
    const scalar_t v2x = vertices[v2_x_id];
    const scalar_t v2y = vertices[v2_x_id + 1];
    // The nondeterminacy of GPU device in single precision may lead some pixel
    // to be missing when a pixel is on the boundary of two triangles, so we use
    // double precision to check the location of a pixel.
    ComputeUnnormalizedMatrixInverse((double)v0x, (double)v1x, (double)v2x,
        (double)v0y, (double)v1y, (double)v2y,
        (double)v0w, (double)v1w, (double)v2w,
        unnormalized_matrix_inverse);
    // Initialize the bounding box to the entire screen.
    int32_t left = 0, right = image_width, bottom = 0, top = image_height;
    // If the triangle is entirely inside the screen, project the vertices to
    // pixel coordinates and find the triangle bounding box enlarged to the
    // nearest integer and clamped to the image boundaries.
    if (v0w > 0 && v1w > 0 && v2w > 0)
    {
        const scalar_t p0x = (v0x / v0w + 1.0f) * half_image_width;
        const scalar_t p1x = (v1x / v1w + 1.0f) * half_image_width;
        const scalar_t p2x = (v2x / v2w + 1.0f) * half_image_width;
        const scalar_t p0y = (v0y / v0w + 1.0f) * half_image_height;
        const scalar_t p1y = (v1y / v1w + 1.0f) * half_image_height;
        const scalar_t p2y = (v2y / v2w + 1.0f) * half_image_height;
        left = ClampedIntegerMin(p0x, p1x, p2x, 0, image_width);
        right = ClampedIntegerMax(p0x, p1x, p2x, 0, image_width);
        bottom = ClampedIntegerMin(p0y, p1y, p2y, 0, image_height);
        top = ClampedIntegerMax(p0y, p1y, p2y, 0, image_height);
    }
    // Iterate over each pixel in the bounding box.
    for (int32_t iy = bottom; iy < top; ++iy)
    {
        for (int32_t ix = left; ix < right; ++ix)
        {
            // Pixel center in NDC.
            const scalar_t px = ((ix + 0.5f) / half_image_width) - 1.0f;
            const scalar_t py = ((iy + 0.5f) / half_image_height) - 1.0f;
            const int32_t pixel_idx = iy * image_width + ix;
            ComputeEdgeFunctions((double)px, (double)py, unnormalized_matrix_inverse, b_over_w);
            if (!PixelIsInsideTriangle(b_over_w))
            {
                continue;
            }
            // Normalize the edge values into barycentric coordinates.
            const scalar_t one_over_w = scalar_t(b_over_w[0] + b_over_w[1] + b_over_w[2]);
            const scalar_t b0 = scalar_t(b_over_w[0] / one_over_w);
            const scalar_t b1 = scalar_t(b_over_w[1] / one_over_w);
            const scalar_t b2 = scalar_t(b_over_w[2] / one_over_w);
            const scalar_t v0z = vertices[v0_x_id + 2];
            const scalar_t v1z = vertices[v1_x_id + 2];
            const scalar_t v2z = vertices[v2_x_id + 2];
            // Since we computed an unnormalized w above, we need to recompute
            // a properly scaled clip-space w value and then divide clip-space z
            // by that.
            const scalar_t clip_z = b0 * v0z + b1 * v1z + b2 * v2z;
            const scalar_t clip_w = b0 * v0w + b1 * v1w + b2 * v2w;
            const scalar_t z = clip_z / clip_w;
            // Skip the pixel if it is beyond the near or far clipping plane.
            if (z < -1.0 || z > 1.0)
            {
                continue;
            }
            // Serialize the read-modify-write of this pixel's outputs with a
            // spinlock; plain compare-then-write would race between threads.
            // reference: https://stackoverflow.com/questions/21341495/cuda-mutex-and-atomiccas
            int32_t* mutex = locks + pixel_idx;
            bool isSet = false;
            do
            {
                if (isSet = atomicCAS(mutex, 0, 1) == 0)
                {
                    // Acquire fence: atomicCAS provides no ordering, so do not
                    // read a z-buffer value the previous lock holder may not
                    // have made visible yet.
                    __threadfence();
                    if (z <= z_buffer[pixel_idx])
                    {
                        z_buffer[pixel_idx] = z;
                        triangle_ids[pixel_idx] = triangle_id;
                        barycentric_coordinates[3 * pixel_idx + 0] = b0;
                        barycentric_coordinates[3 * pixel_idx + 1] = b1;
                        barycentric_coordinates[3 * pixel_idx + 2] = b2;
                    }
                    // Release fence BEFORE unlocking so the writes above are
                    // visible to the next thread that acquires the lock.
                    __threadfence();
                    atomicExch(mutex, 0);
                }
            } while (!isSet);
        }
    }
}
// Host wrapper for the forward rasterization kernel. Allocates and zeroes
// the per-pixel spinlock array the kernel requires, launches one thread per
// triangle, then frees the locks.
void RasterizeTrianglesForwardCuda(
    at::Tensor vertices, at::Tensor triangles,
    int32_t image_width, int32_t image_height,
    torch::Tensor barycentric, torch::Tensor triangle_ids, torch::Tensor z_buffer)
{
    const int64_t triangle_count = triangles.size(0);
    const int threads = 512;
    const dim3 blocks((triangle_count - 1) / threads + 1);
    // Widen before multiplying: width * height * sizeof(int32_t) would
    // otherwise be computed in 32-bit int and can overflow for large images.
    const size_t lock_bytes =
        static_cast<size_t>(image_width) * image_height * sizeof(int32_t);
    int32_t* locks = NULL; // per-pixel spinlocks, must start at 0
    cudaError_t err = cudaMalloc((void**)&locks, lock_bytes);
    if (err != cudaSuccess)
    {
        // Without the lock array the kernel cannot run safely; bail out.
        printf("Error in RasterizeTrianglesForwardCuda (cudaMalloc): %s\n", cudaGetErrorString(err));
        return;
    }
    cudaMemset(locks, 0, lock_bytes);
    AT_DISPATCH_FLOATING_TYPES(vertices.type(), "RasterizeTrianglesForwardCuda", ([&] {
        RasterizeTrianglesForwardCudaKernel<scalar_t> << <blocks, threads >> > (
            vertices.data<scalar_t>(),
            triangles.data<int64_t>(),
            triangle_count,
            image_width,
            image_height,
            barycentric.data<scalar_t>(),
            triangle_ids.data<int64_t>(),
            z_buffer.data<scalar_t>(),
            locks);
    }));
    cudaFree(locks);
    err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Error in RasterizeTrianglesForwardCuda: %s\n", cudaGetErrorString(err));
}
// Backward rasterization kernel: one thread per pixel. Converts incoming
// gradients wrt the pixel's barycentric coordinates into gradients wrt the
// x, y, w clip-space coordinates of the triangle rendered at that pixel,
// accumulating into df_dvertices with atomicAdd (several pixels may touch
// the same vertex).
// NOTE(review): pixel_id is int32, so image_width * image_height is assumed
// to fit in 32 bits — confirm for very large images.
template <typename scalar_t>
__global__ void RasterizeTrianglesBackwardCudaKernel(
    const scalar_t* vertices, const int64_t* triangles,
    const scalar_t* barycentric_coordinates,
    const int64_t* triangle_ids,
    const scalar_t* df_dbarycentric_coordinates,
    int32_t image_width, int32_t image_height,
    scalar_t* df_dvertices)
{
    const int32_t pixel_id = blockIdx.x * blockDim.x + threadIdx.x;
    if (pixel_id >= image_width * image_height)
    {
        return;
    }
    // We first loop over each pixel in the output image, and compute
    // dbarycentric_coordinate[0,1,2]/dvertex[0x, 0y, 1x, 1y, 2x, 2y].
    // Next we compute each value above's contribution to
    // df/dvertices, building up that matrix as the output of this iteration.
    // b0, b1, and b2 are the three barycentric coordinate values
    // rendered at pixel pixel_id.
    const scalar_t b0 = barycentric_coordinates[3 * pixel_id];
    const scalar_t b1 = barycentric_coordinates[3 * pixel_id + 1];
    const scalar_t b2 = barycentric_coordinates[3 * pixel_id + 2];
    // Pixels not covered by any triangle carry (near-)zero barycentrics;
    // skip them so degenerate data contributes no gradient.
    if (b0 + b1 + b2 < kDegenerateBarycentricCoordinatesCutoff)
    {
        return;
    }
    const scalar_t df_db0 = df_dbarycentric_coordinates[3 * pixel_id];
    const scalar_t df_db1 = df_dbarycentric_coordinates[3 * pixel_id + 1];
    const scalar_t df_db2 = df_dbarycentric_coordinates[3 * pixel_id + 2];
    const int64_t triangle_at_current_pixel = triangle_ids[pixel_id];
    const int64_t* vertices_at_current_pixel =
        &triangles[3 * triangle_at_current_pixel];
    // Extract vertex indices for the current triangle (x4: XYZW stride).
    const int64_t v0_id = 4 * vertices_at_current_pixel[0];
    const int64_t v1_id = 4 * vertices_at_current_pixel[1];
    const int64_t v2_id = 4 * vertices_at_current_pixel[2];
    // Extract x,y,w components of the vertices' clip space coordinates.
    const scalar_t x0 = vertices[v0_id];
    const scalar_t y0 = vertices[v0_id + 1];
    const scalar_t w0 = vertices[v0_id + 3];
    const scalar_t x1 = vertices[v1_id];
    const scalar_t y1 = vertices[v1_id + 1];
    const scalar_t w1 = vertices[v1_id + 3];
    const scalar_t x2 = vertices[v2_id];
    const scalar_t y2 = vertices[v2_id + 1];
    const scalar_t w2 = vertices[v2_id + 3];
    // Compute pixel's NDC-s (pixel-center convention, hence the +0.5).
    const int32_t ix = pixel_id % image_width;
    const int32_t iy = pixel_id / image_width;
    const scalar_t px = 2 * (ix + 0.5f) / image_width - 1.0f;
    const scalar_t py = 2 * (iy + 0.5f) / image_height - 1.0f;
    // Baricentric gradients wrt each vertex coordinate share a common factor.
    // The third gradient is the negated sum of the first two, so only two are
    // computed directly. Do not reorder: signs encode triangle orientation.
    const scalar_t db0_dx = py * (w1 - w2) - (y1 - y2);
    const scalar_t db1_dx = py * (w2 - w0) - (y2 - y0);
    const scalar_t db2_dx = -(db0_dx + db1_dx);
    const scalar_t db0_dy = (x1 - x2) - px * (w1 - w2);
    const scalar_t db1_dy = (x2 - x0) - px * (w2 - w0);
    const scalar_t db2_dy = -(db0_dy + db1_dy);
    const scalar_t db0_dw = px * (y1 - y2) - py * (x1 - x2);
    const scalar_t db1_dw = px * (y2 - y0) - py * (x2 - x0);
    const scalar_t db2_dw = -(db0_dw + db1_dw);
    // Combine them with chain rule.
    const scalar_t df_dx = df_db0 * db0_dx + df_db1 * db1_dx + df_db2 * db2_dx;
    const scalar_t df_dy = df_db0 * db0_dy + df_db1 * db1_dy + df_db2 * db2_dy;
    const scalar_t df_dw = df_db0 * db0_dw + df_db1 * db1_dw + df_db2 * db2_dw;
    // Values of edge equations and inverse w at the current pixel.
    // NOTE(review): edge0/edge1 are evaluated at vertex 2 and edge2 at
    // vertex 1 — presumably any vertex works for the respective plane;
    // verify against the reference CPU implementation.
    const scalar_t edge0_over_w = x2 * db0_dx + y2 * db0_dy + w2 * db0_dw;
    const scalar_t edge1_over_w = x2 * db1_dx + y2 * db1_dy + w2 * db1_dw;
    const scalar_t edge2_over_w = x1 * db2_dx + y1 * db2_dy + w1 * db2_dw;
    const scalar_t w_inv = edge0_over_w + edge1_over_w + edge2_over_w;
    // All gradients share a common denominator.
    const scalar_t w_sqr = 1 / (w_inv * w_inv);
    // Gradients wrt each vertex share a common factor.
    const scalar_t edge0 = w_sqr * edge0_over_w;
    const scalar_t edge1 = w_sqr * edge1_over_w;
    const scalar_t edge2 = w_sqr * edge2_over_w;
    // Scatter-add into df_dvertices; offsets 0/1/3 target x, y, w (z gets no
    // gradient from barycentric coordinates).
    atomicAdd(&df_dvertices[v0_id + 0], edge0 * df_dx);
    atomicAdd(&df_dvertices[v0_id + 1], edge0 * df_dy);
    atomicAdd(&df_dvertices[v0_id + 3], edge0 * df_dw);
    atomicAdd(&df_dvertices[v1_id + 0], edge1 * df_dx);
    atomicAdd(&df_dvertices[v1_id + 1], edge1 * df_dy);
    atomicAdd(&df_dvertices[v1_id + 3], edge1 * df_dw);
    atomicAdd(&df_dvertices[v2_id + 0], edge2 * df_dx);
    atomicAdd(&df_dvertices[v2_id + 1], edge2 * df_dy);
    atomicAdd(&df_dvertices[v2_id + 3], edge2 * df_dw);
}
// Host wrapper for the backward rasterization kernel (CUDA build).
// Launches one thread per pixel; gradients are accumulated into
// df_dvertices by the kernel via atomicAdd.
void RasterizeTrianglesBackwardCuda(
    at::Tensor vertices,                     // FloatTensor
    at::Tensor triangles,                    // LongTensor
    at::Tensor barycentric_coordinates,      // FloatTensor
    at::Tensor triangle_ids,                 // LongTensor
    at::Tensor df_dbarycentric_coordinates,  // FloatTensor
    int32_t image_width,
    int32_t image_height,
    at::Tensor df_dvertices)
{
    // Widen BEFORE multiplying: `image_width * image_height` is evaluated in
    // 32-bit int and can overflow for very large images even though the
    // result variable is int64_t.
    const int64_t pixel_count = static_cast<int64_t>(image_width) * image_height;
    const int threads = 512;
    const dim3 blocks((pixel_count - 1) / threads + 1);
    AT_DISPATCH_FLOATING_TYPES(vertices.type(), "RasterizeTrianglesBackwardCuda", ([&] {
        RasterizeTrianglesBackwardCudaKernel<scalar_t> << <blocks, threads >> > (
            vertices.data<scalar_t>(),
            triangles.data<int64_t>(),
            barycentric_coordinates.data<scalar_t>(),
            triangle_ids.data<int64_t>(),
            df_dbarycentric_coordinates.data<scalar_t>(),
            image_width,
            image_height,
            df_dvertices.data<scalar_t>());
    }));
    // Kernel launches are asynchronous; this only catches launch-config errors.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Error in RasterizeTrianglesBackwardCuda: %s\n", cudaGetErrorString(err));
}
}// namespace pytorch_mesh_renderer
|
391ec511f5664aabad356b601c9b4e84f79f35db.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Computes the L2 norm of each length-`code_length` code in a batch,
// writing one scalar per batch item into norm_vec.
// NOTE(review): with the default cuBLAS pointer mode (HOST), norm_vec must
// be a host pointer — confirm against callers.
template <>
void sll_gpu_nrm2(const int code_length, const int batch_size,
    const float* bottom, float* norm_vec)
{
    for (int i = 0; i < batch_size; ++i)
    {
        // Check the status like every other BLAS wrapper in this file does.
        CUBLAS_CHECK(hipblasSnrm2(Caffe::cublas_handle(), code_length,
            bottom + i * code_length, 1, norm_vec + i));
    }
}
// Double-precision specialization; see the float version for semantics.
template<>
void sll_gpu_nrm2(const int code_length, const int batch_size,
    const double* bottom, double* norm_vec)
{
    for (int i = 0; i < batch_size; ++i)
    {
        CUBLAS_CHECK(hipblasDnrm2(Caffe::cublas_handle(), code_length,
            bottom + i * code_length, 1, norm_vec + i));
    }
}
// Locally-connected FC layer GEMM (float). Multiplies `bottom` by `weight`
// into `top` with group-interleaved leading dimensions (C * K / C * N).
// NOTE(review): exact blob layout implied by the strides should be verified
// against the calling layer.
template <>
void lfcl_gpu_gemm<float>(const int M, const int N, const int K, const int C,
    const float alpha, const float* bottom, const float* weight, const float beta,
    float* top) {
  CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_N,
      N, M, K, &alpha, weight, N, bottom, C * K, &beta, top, C * N));
}
// Double-precision specialization; see the float version for semantics.
template <>
void lfcl_gpu_gemm<double>( const int M, const int N, const int K, const int C,
    const double alpha, const double* bottom, const double* weight, const double beta,
    double* top) {
  CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_N,
      N, M, K, &alpha, weight, N, bottom, C * K, &beta, top, C * N));
}
/*/////////////////////////////////////
*******LFCL GPU forward****************
/////////////////////////////////////*/
// Batched forward GEMM: C independent products, one per group, driven by
// arrays of C device pointers (bottom/weight/top). alpha and beta point to
// single scalars shared by all batches.
template <>
void lfcl_gpu_gemmBatched_forward(const int M, const int N, const int K, const int C,
    const float* alpha, const float* bottom[], const float* weight[], const float* beta,
    float* top[]) {
  CUBLAS_CHECK(hipblasSgemmBatched(Caffe::cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_N,
      N, M, K, alpha, weight, N, bottom, C * K, beta, top, C * N, C));
}
// Double-precision specialization; see the float version for semantics.
template <>
void lfcl_gpu_gemmBatched_forward(const int M, const int N, const int K, const int C,
    const double* alpha, const double* bottom[], const double* weight[], const double* beta,
    double* top[]) {
  CUBLAS_CHECK(hipblasDgemmBatched(Caffe::cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_N,
      N, M, K, alpha, weight, N, bottom, C * K, beta, top, C * N, C));
}
/*/////////////////////////////////////
*******LFCL GPU backward weight********
/////////////////////////////////////*/
// Batched weight-gradient GEMM: for each of the C groups, multiplies the
// top gradient by the transposed bottom activations (OP_T on bottom) to
// accumulate into that group's weight gradient.
template <>
void lfcl_gpu_gemmBatched_backward_weight(const int M, const int N, const int K, const int C,
    const float* alpha, const float* top[], const float* bottom[], const float* beta,
    float* weight[]) {
  CUBLAS_CHECK(hipblasSgemmBatched(Caffe::cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_T,
      N, K, M, alpha, top, C * N, bottom, C * K, beta, weight, N, C));
}
// Double-precision specialization; see the float version for semantics.
template <>
void lfcl_gpu_gemmBatched_backward_weight(const int M, const int N, const int K, const int C,
    const double* alpha, const double* top[], const double* bottom[], const double* beta,
    double* weight[]) {
  CUBLAS_CHECK(hipblasDgemmBatched(Caffe::cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_T,
      N, K, M, alpha, top, C * N, bottom, C * K, beta, weight, N, C));
}
/*/////////////////////////////////////
*******LFCL GPU backward bottom********
/////////////////////////////////////*/
// Batched bottom-gradient GEMM: for each of the C groups, multiplies the
// transposed weights (OP_T on weight) by the top gradient to accumulate
// into that group's bottom gradient.
template <>
void lfcl_gpu_gemmBatched_backward_bottom(const int M, const int N, const int K, const int C,
    const float* alpha, const float* weight[], const float* top[], const float* beta,
    float* bottom[]) {
  CUBLAS_CHECK(hipblasSgemmBatched(Caffe::cublas_handle(), HIPBLAS_OP_T, HIPBLAS_OP_N,
      K, M, N, alpha, weight, N, top, C * N, beta, bottom, C * K, C));
}
// Double-precision specialization; see the float version for semantics.
template <>
void lfcl_gpu_gemmBatched_backward_bottom(const int M, const int N, const int K, const int C,
    const double* alpha, const double* weight[], const double* top[], const double* beta,
    double* bottom[]) {
  CUBLAS_CHECK(hipblasDgemmBatched(Caffe::cublas_handle(), HIPBLAS_OP_T, HIPBLAS_OP_N,
      K, M, N, alpha, weight, N, top, C * N, beta, bottom, C * K, C));
}
// y = alpha * A * x + beta * y for the locally-connected FC layer, with A
// taken as-is (no transpose) and leading dimension M.
template <>
void lfcl_gpu_gemv<float>(const int M, const int N, const float alpha,
    const float* A, const float* x, const float beta, float* y) {
  CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), HIPBLAS_OP_N, M, N, &alpha,
      A, M, x, 1, &beta, y, 1));
}
// Double-precision specialization; see the float version for semantics.
template <>
void lfcl_gpu_gemv<double>(const int M, const int N, const double alpha,
    const double* A, const double* x, const double beta, double* y) {
  CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), HIPBLAS_OP_N, M, N, &alpha,
      A, M, x, 1, &beta, y, 1));
}
// C = alpha * op(A) * op(B) + beta * C for ROW-major A, B, C.
// cuBLAS is column-major, so the call computes the transposed product
// B^T * A^T = (A * B)^T, which is exactly row-major C: operands and the
// M/N dimensions are swapped relative to the mathematical expression.
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
    const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
    const float alpha, const float* A, const float* B, const float beta,
    float* C) {
  // Note that cublas follows fortran order.
  // Leading dimension is the row length of the (row-major) operand.
  int lda = (TransA == CblasNoTrans) ? K : M;
  int ldb = (TransB == CblasNoTrans) ? N : K;
  hipblasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  hipblasOperation_t cuTransB =
      (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
      N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
// Double-precision specialization; see the float version for semantics.
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
    const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
    const double alpha, const double* A, const double* B, const double beta,
    double* C) {
  // Note that cublas follows fortran order.
  int lda = (TransA == CblasNoTrans) ? K : M;
  int ldb = (TransB == CblasNoTrans) ? N : K;
  hipblasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  hipblasOperation_t cuTransB =
      (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
      N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
// y = alpha * op(A) * x + beta * y for a ROW-major M x N matrix A.
// Because cuBLAS is column-major, a row-major no-transpose gemv is expressed
// as a column-major TRANSPOSED gemv (hence the inverted op mapping and the
// swapped M/N arguments).
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
    const int N, const float alpha, const float* A, const float* x,
    const float beta, float* y) {
  hipblasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
  CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
      A, N, x, 1, &beta, y, 1));
}
// Double-precision specialization; see the float version for semantics.
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
    const int N, const double alpha, const double* A, const double* x,
    const double beta, double* y) {
  hipblasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
  CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
      A, N, x, 1, &beta, y, 1));
}
// Y = alpha * X + Y over N elements (device pointers).
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
    float* Y) {
  CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
// Double-precision specialization; see the float version for semantics.
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
    double* Y) {
  CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
// Copies N bytes from X to Y. hipMemcpyDefault lets the runtime infer the
// transfer direction, so either pointer may be host or device memory.
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
  if (X == Y) {
    return;  // self-copy: nothing to do
  }
  CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault));  // NOLINT(caffe/alt_fn)
}
// X = alpha * X over N elements (device pointer).
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
  CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
// Double-precision specialization; see the float version for semantics.
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
  CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
// Y = alpha * X + beta * Y, implemented as scal followed by axpy.
// The order matters: Y must be scaled by beta before X is accumulated.
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
    const float beta, float* Y) {
  caffe_gpu_scal<float>(N, beta, Y);
  caffe_gpu_axpy<float>(N, alpha, X, Y);
}
// Double-precision specialization; see the float version for semantics.
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
    const double beta, double* Y) {
  caffe_gpu_scal<double>(N, beta, Y);
  caffe_gpu_axpy<double>(N, alpha, X, Y);
}
// *out = dot(x, y) over n elements.
// NOTE(review): with the default cuBLAS pointer mode, `out` must be a host
// pointer — confirm against callers.
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
    float* out) {
  CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
// Double-precision specialization; see the float version for semantics.
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
    double * out) {
  CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
// *y = sum of absolute values of x over n elements.
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
  CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
// Double-precision specialization; see the float version for semantics.
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
  CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
// y = alpha * x, implemented as copy(x -> y) followed by in-place scal on y;
// the copy must precede the scal.
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
    float* y) {
  CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
  CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
// Double-precision specialization; see the float version for semantics.
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
    double* y) {
  CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
  CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
// Device kernel: fills y[0..n) with the scalar alpha (grid-stride via
// CUDA_KERNEL_LOOP).
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = alpha;
  }
}
// Sets all N elements of the device array Y to alpha.
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
  if (alpha == 0) {
    // Fast path: zero is all-zero bytes for int, float and double, so a
    // byte-wise memset is equivalent to the fill kernel.
    CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N));  // NOLINT(caffe/alt_fn)
    return;
  }
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, alpha, Y);
}
// Explicit instantiations for the element types Caffe uses.
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
// Device kernel: y[i] += alpha for i in [0, n).
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] += alpha;
  }
}
// Adds the scalar alpha to every element of the device array Y.
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, alpha, Y);
}
// Double-precision specialization; see the float version for semantics.
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, alpha, Y);
}
// Device kernel: elementwise y[i] = a[i] + b[i].
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
    const Dtype* b, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = a[index] + b[index];
  }
}
// y = a + b elementwise over N device elements.
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
    float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, b, y);
}
// Double-precision specialization; see the float version for semantics.
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
    double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, b, y);
}
// Device kernel: elementwise y[i] = a[i] - b[i].
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
    const Dtype* b, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = a[index] - b[index];
  }
}
// y = a - b elementwise over N device elements.
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
    float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, b, y);
}
// Double-precision specialization; see the float version for semantics.
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
    double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, b, y);
}
// Device kernel: elementwise y[i] = a[i] * b[i].
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
    const Dtype* b, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = a[index] * b[index];
  }
}
// y = a * b elementwise over N device elements.
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
    const float* b, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, b, y);
}
// Double-precision specialization; see the float version for semantics.
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
    const double* b, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, b, y);
}
// Device kernel: elementwise y[i] = a[i] / b[i]. No zero-divisor guard;
// callers are responsible for b != 0.
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
    const Dtype* b, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = a[index] / b[index];
  }
}
// y = a / b elementwise over N device elements.
template <>
void caffe_gpu_div<float>(const int N, const float* a,
    const float* b, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, b, y);
}
// Double-precision specialization; see the float version for semantics.
template <>
void caffe_gpu_div<double>(const int N, const double* a,
    const double* b, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, b, y);
}
// Device kernel: elementwise y[i] = |a[i]|.
// NOTE(review): relies on the floating-point abs() overload in device code;
// fabs() would be the unambiguous spelling.
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = abs(a[index]);
  }
}
// y = |a| elementwise over N device elements.
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, y);
}
// Double-precision specialization; see the float version for semantics.
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, y);
}
// Device kernel: elementwise y[i] = exp(a[i]).
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = exp(a[index]);
  }
}
// y = exp(a) elementwise over N device elements.
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, y);
}
// Double-precision specialization; see the float version for semantics.
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, y);
}
// Device kernel: elementwise y[i] = log(a[i]). No guard for a <= 0.
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = log(a[index]);
  }
}
// y = log(a) elementwise over N device elements.
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, y);
}
// Double-precision specialization; see the float version for semantics.
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, y);
}
// Device kernel: elementwise y[i] = a[i] ^ alpha.
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
    const Dtype alpha, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = pow(a[index], alpha);
  }
}
// y = a ^ alpha elementwise over N device elements.
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
    const float alpha, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, alpha, y);
}
// Double-precision specialization; see the float version for semantics.
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
    const double alpha, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
// Fills device array r with n uniformly distributed 32-bit unsigned integers
// (full 2^32 range) from the process-wide generator.
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
// Fills device array r with n floats uniform on (a, b]: the raw generator
// produces values in (0, 1], which are then scaled by (b - a) and shifted by
// a with device-side kernels.  Identity scale/shift launches are skipped.
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
// Double-precision variant of the scaled/shifted uniform fill.
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
// Fills device array r with n samples from N(mu, sigma).
// NOTE(review): cuRAND/hipRAND normal generation with pseudo-random engines
// requires an even n (samples are produced in pairs) -- confirm callers
// always pass even n.
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
// Double-precision variant of the Gaussian fill.
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
| 391ec511f5664aabad356b601c9b4e84f79f35db.cu | #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Computes the Euclidean (L2) norm of each length-`code_length` code in a
// batch: norm_vec[i] = || bottom[i*code_length .. (i+1)*code_length) ||_2.
// `bottom` is a device pointer; results are written through norm_vec+i
// (host or device depending on the handle's pointer mode -- TODO confirm
// with callers).  One cuBLAS call per sample on the handle's stream.
template <>
void sll_gpu_nrm2(const int code_length, const int batch_size,
    const float* bottom, float* norm_vec)
{
  for (int i = 0; i < batch_size; ++i)
  {
    // Check the status like every other cuBLAS call in this file
    // (the original silently discarded it).
    CUBLAS_CHECK(cublasSnrm2(Caffe::cublas_handle(), code_length,
        bottom + i * code_length, 1, norm_vec + i));
  }
}
// Double-precision counterpart of sll_gpu_nrm2<float>: per-sample L2 norms
// of a batch of length-`code_length` codes stored contiguously in `bottom`.
template <>
void sll_gpu_nrm2(const int code_length, const int batch_size,
    const double* bottom, double* norm_vec)
{
  for (int i = 0; i < batch_size; ++i)
  {
    // Check the status like every other cuBLAS call in this file
    // (the original silently discarded it).
    CUBLAS_CHECK(cublasDnrm2(Caffe::cublas_handle(), code_length,
        bottom + i * code_length, 1, norm_vec + i));
  }
}
template <>
void lfcl_gpu_gemm<float>(const int M, const int N, const int K, const int C,
const float alpha, const float* bottom, const float* weight, const float beta,
float* top) {
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_N,
N, M, K, &alpha, weight, N, bottom, C * K, &beta, top, C * N));
}
template <>
void lfcl_gpu_gemm<double>( const int M, const int N, const int K, const int C,
const double alpha, const double* bottom, const double* weight, const double beta,
double* top) {
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_N,
N, M, K, &alpha, weight, N, bottom, C * K, &beta, top, C * N));
}
/*/////////////////////////////////////
*******LFCL GPU forward****************
/////////////////////////////////////*/
template <>
void lfcl_gpu_gemmBatched_forward(const int M, const int N, const int K, const int C,
const float* alpha, const float* bottom[], const float* weight[], const float* beta,
float* top[]) {
CUBLAS_CHECK(cublasSgemmBatched(Caffe::cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_N,
N, M, K, alpha, weight, N, bottom, C * K, beta, top, C * N, C));
}
template <>
void lfcl_gpu_gemmBatched_forward(const int M, const int N, const int K, const int C,
const double* alpha, const double* bottom[], const double* weight[], const double* beta,
double* top[]) {
CUBLAS_CHECK(cublasDgemmBatched(Caffe::cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_N,
N, M, K, alpha, weight, N, bottom, C * K, beta, top, C * N, C));
}
/*/////////////////////////////////////
*******LFCL GPU backward weight********
/////////////////////////////////////*/
template <>
void lfcl_gpu_gemmBatched_backward_weight(const int M, const int N, const int K, const int C,
const float* alpha, const float* top[], const float* bottom[], const float* beta,
float* weight[]) {
CUBLAS_CHECK(cublasSgemmBatched(Caffe::cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_T,
N, K, M, alpha, top, C * N, bottom, C * K, beta, weight, N, C));
}
template <>
void lfcl_gpu_gemmBatched_backward_weight(const int M, const int N, const int K, const int C,
const double* alpha, const double* top[], const double* bottom[], const double* beta,
double* weight[]) {
CUBLAS_CHECK(cublasDgemmBatched(Caffe::cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_T,
N, K, M, alpha, top, C * N, bottom, C * K, beta, weight, N, C));
}
/*/////////////////////////////////////
*******LFCL GPU backward bottom********
/////////////////////////////////////*/
template <>
void lfcl_gpu_gemmBatched_backward_bottom(const int M, const int N, const int K, const int C,
const float* alpha, const float* weight[], const float* top[], const float* beta,
float* bottom[]) {
CUBLAS_CHECK(cublasSgemmBatched(Caffe::cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N,
K, M, N, alpha, weight, N, top, C * N, beta, bottom, C * K, C));
}
template <>
void lfcl_gpu_gemmBatched_backward_bottom(const int M, const int N, const int K, const int C,
const double* alpha, const double* weight[], const double* top[], const double* beta,
double* bottom[]) {
CUBLAS_CHECK(cublasDgemmBatched(Caffe::cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N,
K, M, N, alpha, weight, N, top, C * N, beta, bottom, C * K, C));
}
template <>
void lfcl_gpu_gemv<float>(const int M, const int N, const float alpha,
const float* A, const float* x, const float beta, float* y) {
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), CUBLAS_OP_N, M, N, &alpha,
A, M, x, 1, &beta, y, 1));
}
template <>
void lfcl_gpu_gemv<double>(const int M, const int N, const double alpha,
const double* A, const double* x, const double beta, double* y) {
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), CUBLAS_OP_N, M, N, &alpha,
A, M, x, 1, &beta, y, 1));
}
// C = alpha * op(A) * op(B) + beta * C for row-major M x K / K x N operands.
// cuBLAS assumes column-major storage, so we compute C^T = op(B)^T * op(A)^T
// by swapping the operand order and dimensions; the row-major C then comes
// out with leading dimension N.
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
// Leading dimensions of A and B as laid out row-major in memory.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
// Double-precision GEMM for row-major operands; same column-major swap trick
// as the float specialization: computes C^T = op(B)^T * op(A)^T via cuBLAS.
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
// Leading dimensions of A and B as laid out row-major in memory.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
// Copies N bytes from X to Y.  cudaMemcpyDefault infers the transfer
// direction from the pointer addresses (requires unified virtual
// addressing), so X and Y may each be host or device memory.
// A self-copy (X == Y) is skipped.
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
// Kernel: writes the constant `alpha` into every element of y over [0, n).
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
// Fills N elements of device array Y with `alpha`.
// alpha == 0 takes the cudaMemset fast path -- memset is byte-wise, so it is
// only correct for an all-zero bit pattern; every other value goes through
// the fill kernel above.
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
// Explicit instantiations for the element types Caffe uses.
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
|
magVtempMain.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <hiprand/hiprand.h>
#include <helper_cuda.h> //In samples/common/inc
#include <thrust/reduce.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
//#define DBUG //Save stuff to files
#define MOD(x, N) (((x < 0) ? ((x % N) + N) : x) % N)
#ifndef MIN
#define MIN(a, b) ((a > b) ? b : a)
#endif
#ifndef MAX
#define MAX(a, b) ((a > b) ? a : b)
#endif
#define THREADS_PER 30
__global__
void isingSample(int *d_spins, float *d_random, const float T,
const int L);
__device__
void chessBoardUpdate(int *s_spins, int *d_spins, float *d_random,
const float T, const int site, const int sharedsite);
/*
 * Driver: 2-D Ising-model sampling on the GPU (HIP build).
 *
 * Usage: ./cuising filename L Tmin Tmax numTs N_steps period burnin
 * Sweeps temperature from Tmax down toward Tmin in numTs steps; at each
 * temperature runs `burnin` equilibration sweeps followed by N_steps sweeps,
 * sampling the mean magnetization every `period` sweeps.  Writes
 * magnetization-vs-temperature pairs to `filename`; the initial spin and
 * random fields and the final spins go to "dbug.dat".
 */
int main(int argc, char **argv){
    int deviceCount;
    checkCudaErrors(hipGetDeviceCount(&deviceCount));
    if (deviceCount == 0){
        fprintf(stderr, "Error: no CUDA supporting devices.\n");
        exit(EXIT_FAILURE);
    }
    int dev = 0;
    hipSetDevice(dev);
    const char *printMSG = "Incorrect number of arguments: Usage: \n\
./cuising filename L Tmin Tmax numTs N_steps period burnin \n";
    if (argc != 9){
        printf("%s", printMSG);
        return 0;
    }
    char *filename = argv[1];
    int L = atoi(argv[2]);
    float Tmin = atof(argv[3]), Tmax = atof(argv[4]);
    int numTs = atoi(argv[5]);
    int N_steps = atoi(argv[6]);
    int period = atoi(argv[7]), burnin = atoi(argv[8]);
    printf("Saving to %s with L=%d, Tmin=%f, Tmax=%f, numTs=%d, N_steps=%d, period=%d, burnin=%d\n",
           filename, L, Tmin, Tmax, numTs, N_steps, period, burnin);
    int N = L*L;
    // Device-side RNG supplying one uniform variate per lattice site.
    hiprandGenerator_t rng;
    checkCudaErrors(hiprandCreateGenerator(&rng, HIPRAND_RNG_PSEUDO_DEFAULT));
    checkCudaErrors(hiprandSetPseudoRandomGeneratorSeed(rng, 920989ULL));
    // Random +/-1 initial configuration.  (The original also memset() the
    // buffer first; every element is assigned below, so that was dead code.)
    int *h_spins = (int *)malloc(sizeof(int) * N);
    for (int i = 0; i < N; i++){
        float r = (float)rand()/RAND_MAX;
        h_spins[i] = (r > 0.5) ? 1 : -1;
    }
    int *d_spins;
    float *d_random;
    checkCudaErrors(hipMalloc((void **)&d_spins, sizeof(int) * N));
    checkCudaErrors(hipMalloc((void **)&d_random, sizeof(float) * N));
    checkCudaErrors(hipMemcpy(d_spins, h_spins, sizeof(int) * N, hipMemcpyHostToDevice));
    checkCudaErrors(hipDeviceSynchronize());
    checkCudaErrors(hiprandGenerateUniform(rng, d_random, N));
    checkCudaErrors(hipDeviceSynchronize());
    float *h_random = (float *)malloc(sizeof(float) * N);
    // BUGFIX: copy sizeof(float) bytes per element (was sizeof(int)).
    checkCudaErrors(hipMemcpy(h_random, d_random, sizeof(float) * N, hipMemcpyDeviceToHost));
    checkCudaErrors(hipDeviceSynchronize());
    // Debug dump of the initial spins and first random field.
    FILE *fp = fopen("dbug.dat", "w");
    for (int i=0; i < N; i++){
        if (i%L ==0)
            fprintf(fp, "\n");
        fprintf(fp, "%d\t", h_spins[i]);
    }
    for (int i=0; i < N; i++){
        if (i%L ==0)
            fprintf(fp, "\n");
        fprintf(fp, "%f\t", h_random[i]);
    }
    FILE *fpMag = fopen(filename, "w");
    // One thread per interior tile site plus a one-cell halo per block edge;
    // the dynamic shared allocation holds the (THREADS_PER+2)^2 tile.
    int NUMBLOCKS = ceil((float)L/(float)THREADS_PER);
    int BLOCKMEM = sizeof(int) * (THREADS_PER+2) * (THREADS_PER+2);
    dim3 blocks(NUMBLOCKS, NUMBLOCKS);
    dim3 threads(THREADS_PER+2, THREADS_PER+2);
    hipEvent_t start, stop;
    float time = 0.f;
    checkCudaErrors(hipEventCreate(&start));
    checkCudaErrors(hipEventCreate(&stop));
    checkCudaErrors(hipEventRecord(start, 0));
    float Tstep = (Tmax - Tmin) / ((float) numTs);
    for (int iT = 0; iT < numTs; iT++){
        float temp = Tmax - Tstep * iT;
        float mag = 0.f;
        // Equilibration sweeps (not sampled); fresh randoms per sweep.
        for (int t = 0; t < burnin; t++){
            hipLaunchKernelGGL(( isingSample), dim3(blocks), dim3(threads),
                    BLOCKMEM, 0, d_spins, d_random, temp, L);
            checkCudaErrors(hiprandGenerateUniform(rng, d_random, N));
            checkCudaErrors(hipDeviceSynchronize());
        }
        // Measurement sweeps: accumulate mean magnetization every `period`.
        for (int t = 0; t < N_steps; t++){
            hipLaunchKernelGGL(( isingSample), dim3(blocks), dim3(threads),
                    BLOCKMEM, 0, d_spins, d_random, temp, L);
            checkCudaErrors(hiprandGenerateUniform(rng, d_random, N));
            checkCudaErrors(hipDeviceSynchronize());
            if (t % period == 0){
                thrust::device_ptr<int> spinPtr = thrust::device_pointer_cast(d_spins);
                mag += ((float) thrust::reduce(spinPtr, spinPtr + N))/((float) N);
            }
        }
        fprintf(fpMag, "%f\t%f\n", mag/((float)N_steps/period), temp);
    }
    fclose(fpMag);
    checkCudaErrors(hipEventRecord(stop, 0));
    checkCudaErrors(hipEventSynchronize(stop));
    hipEventElapsedTime(&time, start, stop);
    // BUGFIX: average over every sweep actually executed (the original
    // divided by `burnin` alone, mislabeling the per-step figure).
    int totalSweeps = numTs * (burnin + N_steps);
    printf("Elapsed time: %f ms, %f ms/step\n", time,
           time/(float)MAX(totalSweeps, 1));
    checkCudaErrors(hipMemcpy(h_spins, d_spins, sizeof(int) * N, hipMemcpyDeviceToHost));
    for (int i=0; i < N; i++){
        if (i%L ==0)
            fprintf(fp, "\n");
        fprintf(fp, "%d\t", h_spins[i]);
    }
    fclose(fp);
    // Release GPU and host resources (the original leaked the RNG and events).
    checkCudaErrors(hiprandDestroyGenerator(rng));
    checkCudaErrors(hipEventDestroy(start));
    checkCudaErrors(hipEventDestroy(stop));
    hipFree(d_spins);
    hipFree(d_random);
    free(h_spins);
    free(h_random);
    checkCudaErrors(hipGetLastError());
    return EXIT_SUCCESS;
}
// One lattice update.  Launch geometry (set in main): (THREADS_PER+2)^2
// threads per block; blocks advance by (blockDim - 2) so neighboring tiles
// overlap by a one-cell halo, and MOD() wraps indices onto the L x L torus.
// d_random supplies one uniform variate per site.
__global__
void isingSample(int *d_spins, float *d_random, const float T,
const int L){
int N = L*L;
int tidx = threadIdx.x, tidy = threadIdx.y;
int bdimx = blockDim.x, bdimy = blockDim.y;
// Global lattice coordinates of this thread's site (halo threads map to a
// neighboring tile's interior site via the -1 shift and torus wrap).
int col = MOD( (int)(tidx + blockIdx.x * (bdimx - 2) - 1), L);
int row = MOD( (int)(tidy + blockIdx.y * (bdimy - 2) - 1), L);
int site = row * L + col, sharedsite = tidy * bdimx + tidx;
// NOTE(review): threads returning here skip the __syncthreads() calls inside
// chessBoardUpdate that the rest of the block still executes; a barrier under
// divergent exit is undefined behavior -- TODO restructure the guard.
if (site >= N || col >=L || row >= L)
return;
int blockChess = (blockIdx.x%2 + blockIdx.y%2)%2;
extern __shared__ int s_spins[];//(blockDim+2)**2
// NOTE(review): both branches invoke the identical update, so blockChess
// currently has no effect -- presumably checkerboard ordering of blocks was
// intended (e.g. separate launches per color); confirm intent.
if (blockChess == 0)
chessBoardUpdate(s_spins, d_spins, d_random, T, site, sharedsite);
if (blockChess == 1)
chessBoardUpdate(s_spins, d_spins, d_random, T, site, sharedsite);
return;
}
// One two-phase checkerboard Metropolis sweep of this block's tile:
// load the tile (with halo) into shared memory, update the even sublattice,
// barrier, update the odd sublattice, then write interior sites back.
// NOTE(review): halo threads return after the first barrier and never reach
// the later __syncthreads() calls -- __syncthreads() under divergent exit is
// undefined behavior in the CUDA/HIP model; restructure so all threads reach
// every barrier.
// NOTE(review): the same variate d_random[site] gates both sublattice phases
// of one call, and exp(-dE/T) promotes to double (expf would stay in float).
__device__
void chessBoardUpdate(int *s_spins, int *d_spins, float *d_random,
const float T, const int site, const int sharedsite){
int row = threadIdx.y, col = threadIdx.x;
//Load spins to shared memory
s_spins[sharedsite] = d_spins[site];
__syncthreads();
if (row == 0 || col == 0 || row == blockDim.y-1 || col == blockDim.x-1)
return; //Edge site for shared memory filling
int neighSum = 0, r = row, c = col;
// Thread's sublattice color within the tile (0 = even, 1 = odd).
int chess = (row%2 + col%2)%2;
int spin = s_spins[sharedsite];
// Phase 1: even sublattice.  Sum the four von-Neumann neighbors from the
// shared tile and apply the Metropolis acceptance test.
if (chess == 0){
for (int i =-1; i < 2; i++){
for (int j=-1; j < 2; j++){
if (abs(i) != abs(j)){
r = row + i;
c = col + j;
neighSum += s_spins[r * blockDim.x + c];
}
}
}
float dE = 2 * spin * neighSum;
if (exp(- dE/T) > d_random[site])
s_spins[sharedsite] = -1 * spin;
}
__syncthreads();
neighSum = 0;
// Phase 2: odd sublattice, seeing phase-1 flips through shared memory.
if (chess == 1){
for (int i =-1; i < 2; i++){
for (int j=-1; j < 2; j++){
if (abs(i) != abs(j)){
r = row + i;
c = col + j;
neighSum += s_spins[r * blockDim.x + c];
}
}
}
float dE = 2 * spin * neighSum;
if (exp(- dE/T) > d_random[site])
s_spins[sharedsite] = -1 * spin;
}
__syncthreads();
//Update spins
// Only interior (non-halo) threads reach this store.
d_spins[site] = s_spins[sharedsite];
__syncthreads();
return;
}
| magVtempMain.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include <string.h>
#include <cuda.h>
#include <cublas_v2.h>
#include <curand.h>
#include <helper_cuda.h> //In samples/common/inc
#include <thrust/reduce.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
//#define DBUG //Save stuff to files
#define MOD(x, N) (((x < 0) ? ((x % N) + N) : x) % N)
#ifndef MIN
#define MIN(a, b) ((a > b) ? b : a)
#endif
#ifndef MAX
#define MAX(a, b) ((a > b) ? a : b)
#endif
#define THREADS_PER 30
__global__
void isingSample(int *d_spins, float *d_random, const float T,
const int L);
__device__
void chessBoardUpdate(int *s_spins, int *d_spins, float *d_random,
const float T, const int site, const int sharedsite);
/*
 * Driver: 2-D Ising-model sampling on the GPU (CUDA build).
 *
 * Usage: ./cuising filename L Tmin Tmax numTs N_steps period burnin
 * Sweeps temperature from Tmax down toward Tmin in numTs steps; at each
 * temperature runs `burnin` equilibration sweeps followed by N_steps sweeps,
 * sampling the mean magnetization every `period` sweeps.  Writes
 * magnetization-vs-temperature pairs to `filename`; the initial spin and
 * random fields and the final spins go to "dbug.dat".
 */
int main(int argc, char **argv){
    int deviceCount;
    checkCudaErrors(cudaGetDeviceCount(&deviceCount));
    if (deviceCount == 0){
        fprintf(stderr, "Error: no CUDA supporting devices.\n");
        exit(EXIT_FAILURE);
    }
    int dev = 0;
    cudaSetDevice(dev);
    const char *printMSG = "Incorrect number of arguments: Usage: \n\
./cuising filename L Tmin Tmax numTs N_steps period burnin \n";
    if (argc != 9){
        printf("%s", printMSG);
        return 0;
    }
    char *filename = argv[1];
    int L = atoi(argv[2]);
    float Tmin = atof(argv[3]), Tmax = atof(argv[4]);
    int numTs = atoi(argv[5]);
    int N_steps = atoi(argv[6]);
    int period = atoi(argv[7]), burnin = atoi(argv[8]);
    printf("Saving to %s with L=%d, Tmin=%f, Tmax=%f, numTs=%d, N_steps=%d, period=%d, burnin=%d\n",
           filename, L, Tmin, Tmax, numTs, N_steps, period, burnin);
    int N = L*L;
    // Device-side RNG supplying one uniform variate per lattice site.
    curandGenerator_t rng;
    checkCudaErrors(curandCreateGenerator(&rng, CURAND_RNG_PSEUDO_DEFAULT));
    checkCudaErrors(curandSetPseudoRandomGeneratorSeed(rng, 920989ULL));
    // Random +/-1 initial configuration.  (The original also memset() the
    // buffer first; every element is assigned below, so that was dead code.)
    int *h_spins = (int *)malloc(sizeof(int) * N);
    for (int i = 0; i < N; i++){
        float r = (float)rand()/RAND_MAX;
        h_spins[i] = (r > 0.5) ? 1 : -1;
    }
    int *d_spins;
    float *d_random;
    checkCudaErrors(cudaMalloc((void **)&d_spins, sizeof(int) * N));
    checkCudaErrors(cudaMalloc((void **)&d_random, sizeof(float) * N));
    checkCudaErrors(cudaMemcpy(d_spins, h_spins, sizeof(int) * N, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaDeviceSynchronize());
    checkCudaErrors(curandGenerateUniform(rng, d_random, N));
    checkCudaErrors(cudaDeviceSynchronize());
    float *h_random = (float *)malloc(sizeof(float) * N);
    // BUGFIX: copy sizeof(float) bytes per element (was sizeof(int)).
    checkCudaErrors(cudaMemcpy(h_random, d_random, sizeof(float) * N, cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaDeviceSynchronize());
    // Debug dump of the initial spins and first random field.
    FILE *fp = fopen("dbug.dat", "w");
    for (int i=0; i < N; i++){
        if (i%L ==0)
            fprintf(fp, "\n");
        fprintf(fp, "%d\t", h_spins[i]);
    }
    for (int i=0; i < N; i++){
        if (i%L ==0)
            fprintf(fp, "\n");
        fprintf(fp, "%f\t", h_random[i]);
    }
    FILE *fpMag = fopen(filename, "w");
    // One thread per interior tile site plus a one-cell halo per block edge;
    // the dynamic shared allocation holds the (THREADS_PER+2)^2 tile.
    int NUMBLOCKS = ceil((float)L/(float)THREADS_PER);
    int BLOCKMEM = sizeof(int) * (THREADS_PER+2) * (THREADS_PER+2);
    dim3 blocks(NUMBLOCKS, NUMBLOCKS);
    dim3 threads(THREADS_PER+2, THREADS_PER+2);
    cudaEvent_t start, stop;
    float time = 0.f;
    checkCudaErrors(cudaEventCreate(&start));
    checkCudaErrors(cudaEventCreate(&stop));
    checkCudaErrors(cudaEventRecord(start, 0));
    float Tstep = (Tmax - Tmin) / ((float) numTs);
    for (int iT = 0; iT < numTs; iT++){
        float temp = Tmax - Tstep * iT;
        float mag = 0.f;
        // Equilibration sweeps (not sampled); fresh randoms per sweep.
        for (int t = 0; t < burnin; t++){
            isingSample<<<blocks, threads,
                    BLOCKMEM>>>(d_spins, d_random, temp, L);
            checkCudaErrors(curandGenerateUniform(rng, d_random, N));
            checkCudaErrors(cudaDeviceSynchronize());
        }
        // Measurement sweeps: accumulate mean magnetization every `period`.
        for (int t = 0; t < N_steps; t++){
            isingSample<<<blocks, threads,
                    BLOCKMEM>>>(d_spins, d_random, temp, L);
            checkCudaErrors(curandGenerateUniform(rng, d_random, N));
            checkCudaErrors(cudaDeviceSynchronize());
            if (t % period == 0){
                thrust::device_ptr<int> spinPtr = thrust::device_pointer_cast(d_spins);
                mag += ((float) thrust::reduce(spinPtr, spinPtr + N))/((float) N);
            }
        }
        fprintf(fpMag, "%f\t%f\n", mag/((float)N_steps/period), temp);
    }
    fclose(fpMag);
    checkCudaErrors(cudaEventRecord(stop, 0));
    checkCudaErrors(cudaEventSynchronize(stop));
    cudaEventElapsedTime(&time, start, stop);
    // BUGFIX: average over every sweep actually executed (the original
    // divided by `burnin` alone, mislabeling the per-step figure).
    int totalSweeps = numTs * (burnin + N_steps);
    printf("Elapsed time: %f ms, %f ms/step\n", time,
           time/(float)MAX(totalSweeps, 1));
    checkCudaErrors(cudaMemcpy(h_spins, d_spins, sizeof(int) * N, cudaMemcpyDeviceToHost));
    for (int i=0; i < N; i++){
        if (i%L ==0)
            fprintf(fp, "\n");
        fprintf(fp, "%d\t", h_spins[i]);
    }
    fclose(fp);
    // Release GPU and host resources (the original leaked the RNG and events).
    checkCudaErrors(curandDestroyGenerator(rng));
    checkCudaErrors(cudaEventDestroy(start));
    checkCudaErrors(cudaEventDestroy(stop));
    cudaFree(d_spins);
    cudaFree(d_random);
    free(h_spins);
    free(h_random);
    checkCudaErrors(cudaGetLastError());
    return EXIT_SUCCESS;
}
// One lattice update.  Launch geometry (set in main): (THREADS_PER+2)^2
// threads per block; blocks advance by (blockDim - 2) so neighboring tiles
// overlap by a one-cell halo, and MOD() wraps indices onto the L x L torus.
// d_random supplies one uniform variate per site.
__global__
void isingSample(int *d_spins, float *d_random, const float T,
const int L){
int N = L*L;
int tidx = threadIdx.x, tidy = threadIdx.y;
int bdimx = blockDim.x, bdimy = blockDim.y;
// Global lattice coordinates of this thread's site (halo threads map to a
// neighboring tile's interior site via the -1 shift and torus wrap).
int col = MOD( (int)(tidx + blockIdx.x * (bdimx - 2) - 1), L);
int row = MOD( (int)(tidy + blockIdx.y * (bdimy - 2) - 1), L);
int site = row * L + col, sharedsite = tidy * bdimx + tidx;
// NOTE(review): threads returning here skip the __syncthreads() calls inside
// chessBoardUpdate that the rest of the block still executes; a barrier under
// divergent exit is undefined behavior -- TODO restructure the guard.
if (site >= N || col >=L || row >= L)
return;
int blockChess = (blockIdx.x%2 + blockIdx.y%2)%2;
extern __shared__ int s_spins[];//(blockDim+2)**2
// NOTE(review): both branches invoke the identical update, so blockChess
// currently has no effect -- presumably checkerboard ordering of blocks was
// intended (e.g. separate launches per color); confirm intent.
if (blockChess == 0)
chessBoardUpdate(s_spins, d_spins, d_random, T, site, sharedsite);
if (blockChess == 1)
chessBoardUpdate(s_spins, d_spins, d_random, T, site, sharedsite);
return;
}
// One two-phase checkerboard Metropolis sweep of this block's tile:
// load the tile (with halo) into shared memory, update the even sublattice,
// barrier, update the odd sublattice, then write interior sites back.
// NOTE(review): halo threads return after the first barrier and never reach
// the later __syncthreads() calls -- __syncthreads() under divergent exit is
// undefined behavior in the CUDA model; restructure so all threads reach
// every barrier.
// NOTE(review): the same variate d_random[site] gates both sublattice phases
// of one call, and exp(-dE/T) promotes to double (expf would stay in float).
__device__
void chessBoardUpdate(int *s_spins, int *d_spins, float *d_random,
const float T, const int site, const int sharedsite){
int row = threadIdx.y, col = threadIdx.x;
//Load spins to shared memory
s_spins[sharedsite] = d_spins[site];
__syncthreads();
if (row == 0 || col == 0 || row == blockDim.y-1 || col == blockDim.x-1)
return; //Edge site for shared memory filling
int neighSum = 0, r = row, c = col;
// Thread's sublattice color within the tile (0 = even, 1 = odd).
int chess = (row%2 + col%2)%2;
int spin = s_spins[sharedsite];
// Phase 1: even sublattice.  Sum the four von-Neumann neighbors from the
// shared tile and apply the Metropolis acceptance test.
if (chess == 0){
for (int i =-1; i < 2; i++){
for (int j=-1; j < 2; j++){
if (abs(i) != abs(j)){
r = row + i;
c = col + j;
neighSum += s_spins[r * blockDim.x + c];
}
}
}
float dE = 2 * spin * neighSum;
if (exp(- dE/T) > d_random[site])
s_spins[sharedsite] = -1 * spin;
}
__syncthreads();
neighSum = 0;
// Phase 2: odd sublattice, seeing phase-1 flips through shared memory.
if (chess == 1){
for (int i =-1; i < 2; i++){
for (int j=-1; j < 2; j++){
if (abs(i) != abs(j)){
r = row + i;
c = col + j;
neighSum += s_spins[r * blockDim.x + c];
}
}
}
float dE = 2 * spin * neighSum;
if (exp(- dE/T) > d_random[site])
s_spins[sharedsite] = -1 * spin;
}
__syncthreads();
//Update spins
// Only interior (non-halo) threads reach this store.
d_spins[site] = s_spins[sharedsite];
__syncthreads();
return;
}
|
964fa16e340bcc54a2972bcfa278122d050d43f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
// ELU applied to one triangle of an sd x sd column-major matrix:
//   b[i,j] = max(a[i,j], alpha * (exp(a[i,j]) - 1))
// Entries are selected by the sign flag `bottom` (presumably +/-1 picking the
// lower/upper triangle) and `unit` (132 makes the comparison strict, i.e.
// skips the diagonal -- confirm against callers).  One thread per entry,
// 2-D launch; ld_a/ld_b are leading dimensions, offset_a/offset_b element
// offsets into the buffers.
__global__ void uplo_elu (const int sd, const int unit, const int bottom, const REAL alpha, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
    const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
    const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
    const bool valid = (gid_0 < sd) && (gid_1 < sd);
    const bool check = valid &&
        ((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1);
    if (check) {
        const REAL val = a[offset_a + gid_0 + gid_1 * ld_a];
        // CAST(expm1) resolves to expm1f in the float build, avoiding the
        // silent promotion to double that plain expm1() caused
        // (CAST(fmax) already followed this pattern).
        b[offset_b + gid_0 + gid_1 * ld_b] = CAST(fmax)(val, alpha * CAST(expm1)(val));
    }
} | 964fa16e340bcc54a2972bcfa278122d050d43f8.cu | #include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
/* Applies ELU-style activation to one triangle of an sd x sd column-major matrix:
 * b = max(val, alpha * expm1(val)) for each element in the selected triangle.
 * 'bottom' (+1/-1) selects lower/upper triangle; unit == 132 excludes the diagonal.
 * Launch with a 2-D grid/block covering sd x sd; out-of-range threads are masked. */
__global__ void uplo_elu (const int sd, const int unit, const int bottom, const REAL alpha, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
    const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
    const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
    const bool valid = (gid_0 < sd) && (gid_1 < sd);
    const bool check = valid &&
        ((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1);
    if (check) {
        const REAL val = a[offset_a + gid_0 + gid_1 * ld_a];
        // BUGFIX: expm1 was the double-precision overload, silently promoting the
        // whole expression to double; route it through CAST (like fmax) so the
        // REAL=float build stays in single precision (expm1f).
        b[offset_b + gid_0 + gid_1 * ld_b] = CAST(fmax)(val, alpha * CAST(expm1)(val));
    }
} |
4def284210a0d4094cf324af404da3b0532ea09a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "CPUconvLayer.cuh"
#include "cuda_conv_layer_ring.cuh"
#include "cuda_conv_layer.cuh"
#define device 0
// Print the first n entries of A on a single line, followed by a blank line.
__host__ void printArray(float* A, int n)
{
    int idx = 0;
    while (idx < n) {
        printf("%f ", A[idx]);
        ++idx;
    }
    printf("\n\n");
}
// Make sure relative error is small
// Compare two float arrays element-wise using relative error.
// Returns the number of elements whose relative error exceeds 1e-5
// (0 means the arrays agree everywhere within tolerance).
__host__ int arrEq(float* a, float* b, int n)
{
    const double tol = 1e-5;   // was pow(10, -5), recomputed on every iteration
    int diff = 0;
    for (int i = 0; i < n; i++) {
        // BUGFIX: use floating-point fabs (plain abs can bind to the int
        // overload) and avoid 0/0 -> NaN when both entries are zero, which
        // previously made mismatches against b[i] == 0 behave inconsistently.
        double err   = fabs((double)a[i] - (double)b[i]);
        double denom = fabs((double)b[i]);
        if (denom == 0.0 ? (err > tol) : (err / denom > tol)) {
            diff += 1;
            //printf("a[%d] != b[%d]: |%f - %f| / %f \n", i, i, a[i], b[i], b[i]);
        }
    }
    return diff;
}
// Dump an S-sample, F-feature stack of N*N*N volumes to stdout, printing one
// 2-D slice at a time and labelling each sample and feature.
void printMap(float* map, int N, int F, int S) {
    const int featStride = N * N * N;
    const int sampStride = featStride * F;
    for (int s = 0; s < S; s++) {
        printf("\n ====== sample %d =========\n", s);
        for (int f = 0; f < F; f++) {
            printf("\n --- feature %d -----\n", f);
            for (int k = 0; k < N; k++) {
                for (int j = 0; j < N; j++) {
                    for (int i = 0; i < N; i++) {
                        printf("%f ", map[sampStride * s + featStride * f + (N * N) * k + N * j + i]);
                    }
                    printf("\n");
                }
                printf("\n");
            }
        }
    }
}
// Benchmark and validate the GPU 3-D convolution layer (convLayerRing)
// against the CPU reference (convLayerCPU) on random input with all-ones weights.
int main(int argc, char **argv)
{
    hipSetDevice(device);
    // Problem dimensions.
    int N = 32;       // input map edge length (maps are N*N*N)
    int K = 3;        // convolution kernel edge length
    int M = N - K + 1; // output map edge length ("valid" convolution)
    int S = 1;        // number of samples
    int F_in = 3;     // input feature maps
    int F_out = 16;   // output feature maps
    srand(time(NULL));
    // Host buffers; NOTE(review): malloc return values are not checked.
    float *inputMaps = (float *) malloc(N*N*N*F_in*S*sizeof(float));
    float *outputMaps = (float *) malloc(M*M*M*F_out*S*sizeof(float));
    float *weights = (float *) malloc(K*K*K*F_in*F_out*sizeof(float));
    float *outputMapsCPU = (float *) malloc(M*M*M*F_out*S*sizeof(float));
    //fill the input maps with uniform random values in [0,1]
    for (int s = 0; s < S; s++) {
        for (int f_in = 0; f_in < F_in; f_in++) {
            for (int i = 0; i < N; i++) {
                for (int j = 0; j < N; j++) {
                    for (int k = 0; k < N; k++) {
                        inputMaps[(N*N*N*F_in)*s + (N*N*N)*f_in + (N*N)*k + (N)*j + i ] = (float) (double)rand()/(double)RAND_MAX ;
                    }
                }
            }
        }
    }
    //fill the output maps (GPU result and CPU reference) with zeros
    for (int s = 0; s < S; s++) {
        for (int f_out = 0; f_out < F_out; f_out++) {
            for (int i = 0; i < M; i++) {
                for (int j = 0; j < M; j++) {
                    for (int k = 0; k < M; k++) {
                        outputMaps[(M*M*M*F_out)*s + (M*M*M)*f_out + (M*M)*k + (M)*j + i ] = 0;
                        outputMapsCPU[(M*M*M*F_out)*s + (M*M*M)*f_out + (M*M)*k + (M)*j + i ] = 0;
                    }
                }
            }
        }
    }
    //fill the weights with ones
    // NOTE(review): the layout strides f_in by K*K*K*F_out but f_out by K*K*K --
    // unusual but in-bounds; confirm it matches convLayerRing/convLayerCPU.
    for (int f_out = 0; f_out < F_out; f_out++) {
        for (int f_in = 0; f_in < F_in; f_in++) {
            for (int i = 0; i < K; i++) {
                for (int j = 0; j < K; j++) {
                    for (int k = 0; k < K; k++) {
                        weights[(K*K*K*F_out)*f_in + (K*K*K)*f_out + (K*K)*k + (K)*j + i ] = 1;
                    }
                }
            }
        }
    }
    //do convolution on the GPU, timed with clock() (process CPU time)
    clock_t start_t, end_t;
    double total_t;
    int num_runs = 1;
    start_t = clock();
    for (int t = 0; t < num_runs; t++) {
        convLayerRing(inputMaps, weights, outputMaps, N, M, F_in, F_out, K, S);
    }
    end_t = clock();
    total_t = (float)(end_t - start_t) / (CLOCKS_PER_SEC*num_runs);
    printf("\nTotal time (in seconds) on GPU averaged over %d runs: %f \n", num_runs, total_t);
    //make sure it matches CPU version
    start_t = clock();
    for (int t = 0; t < num_runs; t++) {
        convLayerCPU(inputMaps, weights, outputMapsCPU, N, M, F_in, F_out, K, S);
    }
    end_t = clock();
    total_t = (float)(end_t - start_t) / (CLOCKS_PER_SEC*num_runs);
    printf("\nTotal time (in seconds) on CPU averaged over %d runs: %f \n\n", num_runs, total_t);
    // arrEq returns the number of mismatching elements (0 == arrays agree)
    if (arrEq(outputMaps, outputMapsCPU, M*M*M*F_out*S)) {
        printf("%d: outputMaps != outputMapsCPU\n\n", N);
    } else {
        printf("%d: 1 outputMaps == outputMapsCPU\n\n", N);
    }
    //print input maps
    /*
    printArray(outputMaps, 20);//M*M*M*F_out*S);
    printArray(outputMapsCPU, 20);//M*M*M*F_out*S);
    printMap(outputMaps, M, F_out, S);
    printMap(outputMapsCPU, M, F_out, S);
    */
    free(inputMaps); free(weights); free(outputMaps); free(outputMapsCPU);
} | 4def284210a0d4094cf324af404da3b0532ea09a.cu | #include <stdio.h>
#include <stdlib.h>
#include "CPUconvLayer.cuh"
#include "cuda_conv_layer_ring.cuh"
#include "cuda_conv_layer.cuh"
#define device 0
// Print the first n entries of A on a single line, followed by a blank line.
__host__ void printArray(float* A, int n)
{
    int idx = 0;
    while (idx < n) {
        printf("%f ", A[idx]);
        ++idx;
    }
    printf("\n\n");
}
// Make sure relative error is small
// Compare two float arrays element-wise using relative error.
// Returns the number of elements whose relative error exceeds 1e-5
// (0 means the arrays agree everywhere within tolerance).
__host__ int arrEq(float* a, float* b, int n)
{
    const double tol = 1e-5;   // was pow(10, -5), recomputed on every iteration
    int diff = 0;
    for (int i = 0; i < n; i++) {
        // BUGFIX: use floating-point fabs (plain abs can bind to the int
        // overload) and avoid 0/0 -> NaN when both entries are zero, which
        // previously made mismatches against b[i] == 0 behave inconsistently.
        double err   = fabs((double)a[i] - (double)b[i]);
        double denom = fabs((double)b[i]);
        if (denom == 0.0 ? (err > tol) : (err / denom > tol)) {
            diff += 1;
            //printf("a[%d] != b[%d]: |%f - %f| / %f \n", i, i, a[i], b[i], b[i]);
        }
    }
    return diff;
}
// Dump an S-sample, F-feature stack of N*N*N volumes to stdout, printing one
// 2-D slice at a time and labelling each sample and feature.
void printMap(float* map, int N, int F, int S) {
    const int featStride = N * N * N;
    const int sampStride = featStride * F;
    for (int s = 0; s < S; s++) {
        printf("\n ====== sample %d =========\n", s);
        for (int f = 0; f < F; f++) {
            printf("\n --- feature %d -----\n", f);
            for (int k = 0; k < N; k++) {
                for (int j = 0; j < N; j++) {
                    for (int i = 0; i < N; i++) {
                        printf("%f ", map[sampStride * s + featStride * f + (N * N) * k + N * j + i]);
                    }
                    printf("\n");
                }
                printf("\n");
            }
        }
    }
}
// Benchmark and validate the GPU 3-D convolution layer (convLayerRing)
// against the CPU reference (convLayerCPU) on random input with all-ones weights.
int main(int argc, char **argv)
{
    cudaSetDevice(device);
    // Problem dimensions.
    int N = 32;       // input map edge length (maps are N*N*N)
    int K = 3;        // convolution kernel edge length
    int M = N - K + 1; // output map edge length ("valid" convolution)
    int S = 1;        // number of samples
    int F_in = 3;     // input feature maps
    int F_out = 16;   // output feature maps
    srand(time(NULL));
    // Host buffers; NOTE(review): malloc return values are not checked.
    float *inputMaps = (float *) malloc(N*N*N*F_in*S*sizeof(float));
    float *outputMaps = (float *) malloc(M*M*M*F_out*S*sizeof(float));
    float *weights = (float *) malloc(K*K*K*F_in*F_out*sizeof(float));
    float *outputMapsCPU = (float *) malloc(M*M*M*F_out*S*sizeof(float));
    //fill the input maps with uniform random values in [0,1]
    for (int s = 0; s < S; s++) {
        for (int f_in = 0; f_in < F_in; f_in++) {
            for (int i = 0; i < N; i++) {
                for (int j = 0; j < N; j++) {
                    for (int k = 0; k < N; k++) {
                        inputMaps[(N*N*N*F_in)*s + (N*N*N)*f_in + (N*N)*k + (N)*j + i ] = (float) (double)rand()/(double)RAND_MAX ;
                    }
                }
            }
        }
    }
    //fill the output maps (GPU result and CPU reference) with zeros
    for (int s = 0; s < S; s++) {
        for (int f_out = 0; f_out < F_out; f_out++) {
            for (int i = 0; i < M; i++) {
                for (int j = 0; j < M; j++) {
                    for (int k = 0; k < M; k++) {
                        outputMaps[(M*M*M*F_out)*s + (M*M*M)*f_out + (M*M)*k + (M)*j + i ] = 0;
                        outputMapsCPU[(M*M*M*F_out)*s + (M*M*M)*f_out + (M*M)*k + (M)*j + i ] = 0;
                    }
                }
            }
        }
    }
    //fill the weights with ones
    // NOTE(review): the layout strides f_in by K*K*K*F_out but f_out by K*K*K --
    // unusual but in-bounds; confirm it matches convLayerRing/convLayerCPU.
    for (int f_out = 0; f_out < F_out; f_out++) {
        for (int f_in = 0; f_in < F_in; f_in++) {
            for (int i = 0; i < K; i++) {
                for (int j = 0; j < K; j++) {
                    for (int k = 0; k < K; k++) {
                        weights[(K*K*K*F_out)*f_in + (K*K*K)*f_out + (K*K)*k + (K)*j + i ] = 1;
                    }
                }
            }
        }
    }
    //do convolution on the GPU, timed with clock() (process CPU time)
    clock_t start_t, end_t;
    double total_t;
    int num_runs = 1;
    start_t = clock();
    for (int t = 0; t < num_runs; t++) {
        convLayerRing(inputMaps, weights, outputMaps, N, M, F_in, F_out, K, S);
    }
    end_t = clock();
    total_t = (float)(end_t - start_t) / (CLOCKS_PER_SEC*num_runs);
    printf("\nTotal time (in seconds) on GPU averaged over %d runs: %f \n", num_runs, total_t);
    //make sure it matches CPU version
    start_t = clock();
    for (int t = 0; t < num_runs; t++) {
        convLayerCPU(inputMaps, weights, outputMapsCPU, N, M, F_in, F_out, K, S);
    }
    end_t = clock();
    total_t = (float)(end_t - start_t) / (CLOCKS_PER_SEC*num_runs);
    printf("\nTotal time (in seconds) on CPU averaged over %d runs: %f \n\n", num_runs, total_t);
    // arrEq returns the number of mismatching elements (0 == arrays agree)
    if (arrEq(outputMaps, outputMapsCPU, M*M*M*F_out*S)) {
        printf("%d: outputMaps != outputMapsCPU\n\n", N);
    } else {
        printf("%d: 1 outputMaps == outputMapsCPU\n\n", N);
    }
    //print input maps
    /*
    printArray(outputMaps, 20);//M*M*M*F_out*S);
    printArray(outputMapsCPU, 20);//M*M*M*F_out*S);
    printMap(outputMaps, M, F_out, S);
    printMap(outputMapsCPU, M, F_out, S);
    */
    free(inputMaps); free(weights); free(outputMaps); free(outputMapsCPU);
} |
c31e8a6516b44e32f1962c338bf8060f2d3043ef.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "rocblas.h"
#include "cudaCommon.h"
/* THIS FUNCTION
This function takes momentum and density arrays to interpolate to second order the velocity fields
required by the MHD routines to perform magnetic field evolution.
The interpolation is two dimensional, involving a backwards average in the I (velocity) direction
and a centered 2nd order average in the X (magnetic) direction, such that the stencil is
+---+-------+
|I\X|-1 0 1 |
+---+-------+
A = | 0 | 1 2 1 | / 4
|-1 | 1 2 1 |
+---+-------+
where A is computed using
[ (p(i-1,x+1)+p(i,x+1))/(m(i-1,x+1)+m(i,x+1))
2*(p(i-1,x )+p(i,x ))/(m(i-1,x )+m(i,x ))
(p(i-1,x-1)+p(i,x-1))/(m(i-1,x-1)+m(i,x-1)) ] / 4
This presents the velocities interpolated at the corners of cells
*/
__global__ void cukern_SimpleVelocity(double *v, double *p, double *m, int numel);
__global__ void cukern_VelocityBkwdAverage_X(double *v, double *p, double *m, int nx);
__global__ void cukern_VelocityBkwdAverage_Y(double *v, double *p, double *m, int nx, int ny);
__global__ void cukern_VelocityBkwdAverage_Z(double *v, double *p, double *m, int nx, int nz);
__global__ void cukern_CentralAverage_X(double *out, double *in, int nx);
__global__ void cukern_CentralAverage_Y(double *out, double *in, int nx, int ny);
__global__ void cukern_magVelInterp_Z(double *velout, double *velin, int3 dims);
// MATLAB entry point: velInterp = cudaMagPrep(mom, mass, [dirvel dirmag])
// Interpolates velocity (= mom ./ mass) to cell corners for MHD field
// evolution: a backwards average along dirvel followed by a centered
// 2nd-order average along dirmag.
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
    // At least 2 arguments expected
    // Input and result
    if ((nrhs!=3) || (nlhs != 1)) mexErrMsgTxt("Wrong number of arguments: need velInterp = cudaMagPrep(mom, mass, [dirvel dirmag])\n");
    CHECK_CUDA_ERROR("entering cudaMagPrep");
    // Get source array info (src[0] = momentum, src[1] = mass) and create destination arrays
    MGArray src[2];
    int worked = MGA_accessMatlabArrays(prhs, 0, 1, src);
    // NOTE(review): 'worked' is never checked -- confirm the error convention of
    // MGA_accessMatlabArrays and bail out on failure if appropriate.
    MGArray *dst = MGA_createReturnedArrays(plhs, 1, src);
    // Scratch buffer for the intermediate (backwards-averaged) velocity
    double *tempVelocity;
    hipError_t fail = hipMalloc((void **)&tempVelocity, src->numel*sizeof(double));
    if(fail != hipSuccess) {
        printf("%s\n", hipGetErrorString(fail));
        mexErrMsgTxt("cudaMagPrep: I have an hipMalloc fail. And a sad.");
    }
    // Establish launch dimensions & a few other parameters
    double *directs = mxGetPr(prhs[2]);
    int velDirection = (int)directs[0]; // direction of the backwards velocity average (1=X, 2=Y, 3=Z)
    int magDirection = (int)directs[1]; // direction of the centered magnetic average
    int3 arraySize;
    arraySize = makeInt3(&src->dim[0]);
    dim3 blocksize, gridsize;
    blocksize.z = 1;
    gridsize.z = 1;
    hipSetDevice(src->deviceID[0]);
    CHECK_CUDA_ERROR("hipSetDevice()");
    // NOTE(review): the kernels below read mass via src[1].devicePtr[1] while every
    // other array uses devicePtr[0]; confirm this index against the MGArray layout.
    if(src->dim[velDirection-1] > 1) {
        // Interpolate the grid-aligned velocity
        switch(velDirection) {
        case 1:
            blocksize = makeDim3(128,1,1); // the X kernel requires blockDim.x == 128
            gridsize.x = arraySize.y; // / 16; gridsize.x += (16 * gridsize.x < arraySize.x);
            gridsize.y = arraySize.z;// / blocksize.y; gridsize.y += (blocksize.y * gridsize.y < arraySize.y);
            hipLaunchKernelGGL(( cukern_VelocityBkwdAverage_X), dim3(gridsize), dim3(blocksize), 0, 0, tempVelocity, src[0].devicePtr[0], src[1].devicePtr[1], arraySize.x);
            break;
        case 2:
            blocksize = makeDim3(64,1,1);
            gridsize.x = arraySize.x/64; gridsize.x += (gridsize.x*64 < arraySize.x);
            gridsize.y = arraySize.z;
            hipLaunchKernelGGL(( cukern_VelocityBkwdAverage_Y), dim3(gridsize), dim3(blocksize), 0, 0, tempVelocity, src[0].devicePtr[0], src[1].devicePtr[1], arraySize.x, arraySize.y);
            break;
        case 3:
            blocksize = makeDim3(64,1,1);
            gridsize.x = arraySize.x/64; gridsize.x += (gridsize.x*64 < arraySize.x);
            gridsize.y = arraySize.y;
            hipLaunchKernelGGL(( cukern_VelocityBkwdAverage_Z), dim3(gridsize), dim3(blocksize), 0, 0, tempVelocity, src[0].devicePtr[0], src[1].devicePtr[1], arraySize.x, arraySize.z);
            break;
        }
    } else {
        // Degenerate extent along the averaging direction: plain v = p/m
        blocksize = makeDim3(512,1,1);
        gridsize.x = src->numel / 512; gridsize.x += (gridsize.x * 512 < src->numel); gridsize.y = gridsize.z = 1;
        hipLaunchKernelGGL(( cukern_SimpleVelocity), dim3(gridsize), dim3(blocksize), 0, 0, tempVelocity, src[0].devicePtr[0], src[1].devicePtr[1], src->numel);
    }
    CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, src, velDirection, "mag prep velocity avg");
    hipDeviceSynchronize(); // surface any asynchronous kernel faults before stage 2
    // Interpolate the velocity to 2nd order
    if(src->dim[magDirection-1] > 1) {
        switch(magDirection) {
        case 1:
            blocksize.x = 128; blocksize.y = blocksize.z = 1;
            gridsize.x = arraySize.y; // / 16; gridsize.x += (16 * gridsize.x < arraySize.x);
            gridsize.y = arraySize.z;// / blocksize.y; gridsize.y += (blocksize.y * gridsize.y < arraySize.y);
            hipLaunchKernelGGL(( cukern_CentralAverage_X), dim3(gridsize), dim3(blocksize), 0, 0, dst->devicePtr[0], tempVelocity, arraySize.x);
            break;
        case 2:
            blocksize.x = 64; blocksize.y = blocksize.z = 1;
            gridsize.x = arraySize.x/64; gridsize.x += (gridsize.x*64 < arraySize.x);
            gridsize.y = arraySize.z;
            hipLaunchKernelGGL(( cukern_CentralAverage_Y), dim3(gridsize), dim3(blocksize), 0, 0, dst->devicePtr[0], tempVelocity, arraySize.x, arraySize.y);
            break;
        case 3:
            blocksize.x = 18; blocksize.y = 8;
            gridsize.x = arraySize.z / 14; gridsize.x += (14 * gridsize.x < arraySize.z);
            gridsize.y = arraySize.x / blocksize.y; gridsize.y += (blocksize.y * gridsize.y < arraySize.x);
            hipLaunchKernelGGL(( cukern_magVelInterp_Z), dim3(gridsize), dim3(blocksize), 0, 0, dst->devicePtr[0], tempVelocity, arraySize);
            break;
        }
    } else {
        // BUGFIX: hipMemcpy takes a byte count; this previously passed src->numel
        // (an element count) and copied only numel bytes -- 1/8 of the buffer.
        hipMemcpy(dst->devicePtr[0], tempVelocity, src->numel*sizeof(double), hipMemcpyDeviceToDevice);
        // FIXME: Detect this condition ahead of time and never bother with this array in the first place
    }
    hipDeviceSynchronize();
    CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, dst, magDirection, "mag prep interpolation");
    hipFree(tempVelocity); // Because only YOU can prevent memory leaks!
    // (and this one would be a whopper...)
    free(dst);
}
/* Computes v = p / m elementwise over numel entries.
 * Launched with 512-thread 1-D blocks (the stride constant below must match
 * the host-side launch configuration). */
__global__ void cukern_SimpleVelocity(double *v, double *p, double *m, int numel)
{
    int addr = threadIdx.x + 512*blockIdx.x;
    // BUGFIX: the guard was 'addr > numel', which let the thread with
    // addr == numel read and write one element past the end of the arrays.
    if(addr >= numel) return;
    v[addr] = p[addr] / m[addr];
}
/*cukern_Vavg_vx_by(double *v, double *p, double *rho, int3 dim);
{
int nx = dim.x; int ny = dim.y; int nz = dim.z; // Kill them all and let the compiler sort them out. The Optimizer will recognize his own.
__shared__ double tileA[TILEX][TILEY]
__shared__ double tileB[TILEX][TILEY]
int myx = threadIdx.x + (TILEX-2)*blockIdx.x - 1;
int myy = threadIdx.y + (TILEY-2)*blockIdx.y - 1;
if(myx >= nx) return;
if(myy >= ny) return;
myx += nx; myy += ny;
myx %= nx; myy %= ny;
int globAddr = myx + nx*myy;
int z;
int xwrite = (
int iwrite = (threadIdx.x > 0) && (threadIdx.x < (TILEX-1)) && (threadIdx.y > 0) && (threadIdx.y < (TILEY-1));
for(z = 0; z < nz; z++) {
tileA[threadIdx.x][threadIdx.y] = p[globAddr];
tileB[threadIdx.x][threadIdx.y] = rho[globAddr];
__syncthreads();
if(iwrite) {
v[globAddr] = (tileA[threadIdx.x-1][threadIdx.y] + tileA[threadIdx.x][threadIdx.y])/ \
(tileB[threadIdx.x-1][threadIdx.y] + tileB[threadIdx.x][threadIdx.y]);
}
//[ (p(i-1,x+1)+p(i,x+1))/(m(i-1,x+1)+m(i,x+1))
// 2*(p(i-1,x )+p(i,x ))/(m(i-1,x )+m(i,x ))
// (p(i-1,x-1)+p(i,x-1))/(m(i-1,x-1)+m(i,x-1)) ]
//
globAddr += nx*ny;
}
}
*/
/* Backwards average of velocity along X:
 *   v(x) = (p(x-1) + p(x)) / (m(x-1) + m(x)),  with x wrapping circularly.
 * Requires blockDim.x == 128 (a 256-entry shared double buffer is split into
 * two 128-wide halves); launch grid = <ny, nz>, one block per (y,z) row. */
__global__ void cukern_VelocityBkwdAverage_X(double *v, double *p, double *m, int nx)
{
    int xAddr = threadIdx.x;
    int yAddr = blockIdx.x;
    int zAddr = blockIdx.y;
    int ny = gridDim.x;
    int addrMax = nx*(yAddr + ny*zAddr + 1); // The address which we must not reach or go beyond is the start of the next line
    int writeBase = xAddr + nx*(yAddr + ny*zAddr); // the write start position is the left edge
    int readBase = writeBase - 1; // the read start position is one to the left of the write start position.
    if (threadIdx.x == 0) readBase += nx; // leftmost reads right edge (circular wrap)
    readBase -= nx*(readBase >= addrMax);
    // Double buffer: halves [0..127] and [128..255] alternate between the
    // "current" and the "read-ahead" segment of the row.
    __shared__ double lMom[256];
    __shared__ double lRho[256];
    int locAddr = threadIdx.x;
    //lStore[locAddr] = in[globBase + readX]; // load the first memory segment
    lMom[locAddr] = p[readBase]; // load the first memory segment
    lRho[locAddr] = m[readBase];
    do {
        readBase += 128; // move over one block
        readBase -= nx*(readBase >= addrMax); // loop around if x overflows
        lMom[(locAddr + 128) % 256] = p[readBase];
        lRho[(locAddr + 128) % 256] = m[readBase];
        __syncthreads(); // We have now read ahead by a segment. Calculate forward average, comrades!
        if(writeBase < addrMax) { v[writeBase] = (lMom[locAddr] + lMom[(locAddr+1)%256])/(lRho[locAddr] + lRho[(locAddr+1)%256]); } // If we are within range, that is.
        writeBase += 128; // Advance write address
        if(writeBase >= addrMax) return; // If write address is beyond nx, we're finished.
        locAddr ^= 128; // flip to the other half of the double buffer
        __syncthreads();
    } while(1);
}
/* Invoke with a blockdim of <64, 1, 1> threads
Invoke with a griddim = <ceil[nx / 64], nz, 1> */
/* Backwards average of velocity along Y:
 *   v(x,y,z) = (p(x,y-1,z) + p(x,y,z)) / (m(x,y-1,z) + m(x,y,z)),
 * with y wrapping circularly (row y=0 pairs with row y=ny-1). Each thread
 * walks one X column; shared tiles hold p in slots [0..63] and m in [64..127]
 * (each thread only touches its own two slots). */
__global__ void cukern_VelocityBkwdAverage_Y(double *v, double *p, double *m, int nx, int ny)
{
    int xaddr = blockDim.x * blockIdx.x + threadIdx.x; // There are however many X threads
    if(xaddr >= nx) return; // truncate this right off
    __shared__ double tileA[128];
    __shared__ double tileB[128];
    double *setA = tileA;
    double *setB = tileB;
    double *swap;
    int writeBase = xaddr + nx*ny*blockIdx.y; // set Raddr to x + nx ny z
    int addrMax = writeBase + nx*(ny - 1); // Set this to the max address we want to handle in the loop
    setB[threadIdx.x] = p[addrMax]; // load row (y=-1), i.e. the last row (circular wrap), into set B
    setB[threadIdx.x+64] = m[addrMax];
    while(writeBase <= addrMax) { // handle every row y = 0 .. ny-1 (writeBase steps by nx, addrMax inclusive)
        swap = setB; // exchange A/B pointers: the previous iteration's row becomes "previous"
        setB = setA;
        setA = swap;
        // __syncthreads();
        setB[threadIdx.x] = p[writeBase]; // load the current row into set B
        setB[threadIdx.x+64] = m[writeBase];
        v[writeBase] = (setA[threadIdx.x] + setB[threadIdx.x])/(setA[threadIdx.x+64] + setB[threadIdx.x+64]); // average written to output
        __syncthreads();
        writeBase += nx; // advance one row in y
    }
}
/* Invoke with a blockdim of <64, 1, 1> threads
Invoke with a griddim = <ceil[nx / 64], ny, 1> */
/* Backwards average of velocity along Z:
 *   v(x,y,z) = (p(x,y,z-1) + p(x,y,z)) / (m(x,y,z-1) + m(x,y,z)),
 * with z wrapping circularly. gridDim.y supplies ny; each thread walks one
 * (x,y) column through z. Shared tiles hold p in slots [0..63] and m in
 * [64..127] (each thread only touches its own two slots). */
__global__ void cukern_VelocityBkwdAverage_Z(double *v, double *p, double *m, int nx, int nz)
{
    int xaddr = blockDim.x * blockIdx.x + threadIdx.x; // There are however many X threads
    if(xaddr >= nx) return; // truncate this right off
    __shared__ double tileA[128];
    __shared__ double tileB[128];
    double *setA = tileA;
    double *setB = tileB;
    double *swap;
    int writeBase = xaddr + nx*blockIdx.y; // set Raddr to x + nx*y
    int addrMax = writeBase + nx*gridDim.y*(nz-1); // Set this to the max address we want to handle in the loop
    setB[threadIdx.x] = p[addrMax]; // load plane (z=-1), i.e. the last plane (circular wrap), into set B
    setB[threadIdx.x+64] = m[addrMax];
    while(writeBase <= addrMax) { // handle every plane z = 0 .. nz-1 (addrMax inclusive)
        swap = setB; // exchange A/B pointers: the previous iteration's plane becomes "previous"
        setB = setA;
        setA = swap;
        // __syncthreads();
        setB[threadIdx.x] = p[writeBase]; // load the current plane into set B
        setB[threadIdx.x+64] = m[writeBase];
        v[writeBase] = (setA[threadIdx.x] + setB[threadIdx.x])/(setA[threadIdx.x+64] + setB[threadIdx.x+64]); // average written to output
        __syncthreads();
        writeBase += nx*gridDim.y; // advance one z plane
    }
}
//################ MAGNETIC INTERPOLATION KERNELS
/* Centered 2nd-order average along X with circular wrap:
 *   out(x) = ( in(x-1) + 2*in(x) + in(x+1) ) / 4.
 * Requires blockDim.x == 128 (256-entry shared double buffer);
 * launch grid = <ny, nz>, one block per (y,z) row. */
__global__ void cukern_CentralAverage_X(double *out, double *in, int nx)
{
    int xAddr = threadIdx.x;
    int yAddr = blockIdx.x;
    int zAddr = blockIdx.y;
    int ny = gridDim.x;
    int addrMax = nx*(yAddr + ny*zAddr + 1); // The address which we must not reach or go beyond is the start of the next line
    int writeBase = xAddr + nx*(yAddr + ny*zAddr); // the write start position is the left edge
    int readBase = writeBase - 1; // the read start position is one to the left of the write start position.
    if (threadIdx.x == 0) readBase += nx; // leftmost reads right edge (circular wrap)
    readBase -= nx*(readBase >= addrMax);
    __shared__ double funcBuffer[256];
    int locAddr = threadIdx.x;
    //lStore[locAddr] = in[globBase + readX]; // load the first memory segment
    funcBuffer[locAddr] = in[readBase]; // load the first memory segment
    do {
        readBase += 128; // move over one block
        readBase -= nx*(readBase >= addrMax); // loop around if x overflows
        funcBuffer[(locAddr + 128) % 256] = in[readBase];
        __syncthreads(); // We have now read ahead by a segment. Calculate forward average, comrades!
        if(writeBase < addrMax) { out[writeBase] = .25*(funcBuffer[locAddr] + funcBuffer[(locAddr+2)%256]+ 2*funcBuffer[(locAddr+1)%256]); } // If we are within range, that is.
        writeBase += 128; // Advance write address
        if(writeBase >= addrMax) return; // If write address is beyond nx, we're finished.
        locAddr ^= 128; // flip to the other half of the double buffer
        __syncthreads();
    } while(1);
}
/* Invoke with a blockdim of <64, 1, 1> threads
Invoke with a griddim = <ceil[nx / 64], nz, 1> */
/* Centered 2nd-order average along Y with circular wrap:
 *   out(y) = ( in(y-1) + 2*in(y) + in(y+1) ) / 4.
 * Three shared tiles rotate through (previous, current, next) rows; the last
 * row is handled after the loop because its "next" wraps back to row y=0.
 * Invoke with a blockdim of <64, 1, 1> threads and
 * griddim = <ceil[nx / 64], nz, 1>. */
__global__ void cukern_CentralAverage_Y(double *out, double *in, int nx, int ny)
{
    int xaddr = blockDim.x * blockIdx.x + threadIdx.x; // There are however many X threads
    if(xaddr >= nx) return; // truncate this right off
    __shared__ double tileA[64];
    __shared__ double tileB[64];
    __shared__ double tileC[64];
    double *setA = tileA;
    double *setB = tileB;
    double *setC = tileC;
    double *swap;
    int writeBase = xaddr + nx*ny*blockIdx.y; // set Raddr to x + nx ny z
    int addrMax = writeBase + nx*(ny - 1); // Set this to the max address we want to handle in the loop
    setB[threadIdx.x] = in[addrMax]; // load row (y=-1), i.e. the last row (circular wrap)
    setC[threadIdx.x] = in[writeBase]; // load row y=0
    while(writeBase < addrMax) { // Exit one BEFORE the max address to handle (since the max is a special case)
        swap = setA; // Rotate pointers: A <- B <- C <- (new row)
        setA = setB;
        setB = setC;
        setC = swap;
        // __syncthreads();
        setC[threadIdx.x] = in[writeBase + nx]; // load the next row (y+1) into set C
        out[writeBase] = .25*(setA[threadIdx.x] + setC[threadIdx.x]) + .5*setB[threadIdx.x]; // average written to output
        __syncthreads();
        writeBase += nx; // advance one row in y
    }
    // We arrive here when writeBase == addrMax, i.e. we are at the last Y index
    setA[threadIdx.x] = in[xaddr + nx*ny*blockIdx.y]; // its "next" row wraps to y=0
    // The weights change because we haven't cycled the pointers
    out[writeBase] = .25*(setA[threadIdx.x] + setB[threadIdx.x]) + .5*setC[threadIdx.x];
}
#undef TILEDIM_X
#undef TILEDIM_Y
#undef DIFFEDGE
#undef FD_DIMENSION
#undef FD_MEMSTEP
#undef OTHER_DIMENSION
#undef OTHER_MEMSTEP
#undef ORTHOG_DIMENSION
#undef ORTHOG_MEMSTEP
/* These define the size of the "tile" each element loads
which is contrained, basically, by available local memory.
diffedge determines how wide the buffer zone is for taking
derivatives. */
#define TILEDIM_X 18
#define TILEDIM_Y 8
#define DIFFEDGE 2
/* These determine how we look at the array. The array is assumed to be 3D
(though possibly with z extent 1) and stored in C row-major format:
index = [i j k], size = [Nx Ny Nz], memory step = [1 Nx NxNy]
Choosing these determines how this operator sees the array: FD_DIM is the
one we're taking derivatives in, OTHER forms a plane to it, and ORTHOG
is the final dimension */
#define FD_DIMENSION dims.z
#define FD_MEMSTEP (dims.x*dims.y)
#define OTHER_DIMENSION dims.x
#define OTHER_MEMSTEP 1
#define ORTHOG_DIMENSION dims.y
#define ORTHOG_MEMSTEP dims.x
/* Centered 2nd-order average along Z (FD_DIMENSION) with circular wrap:
 *   velout(z) = ( velin(z-1) + 2*velin(z) + velin(z+1) ) / 4,
 * computed one XY plane at a time using a TILEDIM_X x TILEDIM_Y thread tile
 * with DIFFEDGE halo columns. Invoke with blockdim <TILEDIM_X, TILEDIM_Y>. */
__global__ void cukern_magVelInterp_Z(double *velout, double *velin, int3 dims)
{
    /* Declare any arrays to be used for storage/differentiation similarly. */
    __shared__ double cellVel[TILEDIM_X * TILEDIM_Y+2];
    /* Our assumption implicitly is that differencing occurs in the X direction in the local tile */
    int tileAddr = threadIdx.x + TILEDIM_X*threadIdx.y + 1;
    int addrX = (threadIdx.x - DIFFEDGE) + blockIdx.x * (TILEDIM_X - 2*DIFFEDGE);
    int addrY = threadIdx.y + blockIdx.y * TILEDIM_Y;
    addrX += (addrX < 0)*FD_DIMENSION; // wrap negative halo indices around
    /* Nuke the threads hanging out past the end of the X extent of the array */
    /* addrX is zero indexed, mind */
    if(addrX >= FD_DIMENSION - 1 + DIFFEDGE) return;
    if(addrY >= OTHER_DIMENSION) return;
    /* Mask out threads who are near the edges to prevent seg violation upon differencing */
    bool ITakeDerivative = (threadIdx.x >= DIFFEDGE) && (threadIdx.x < (TILEDIM_X - DIFFEDGE)) && (addrX < FD_DIMENSION);
    addrX %= FD_DIMENSION; /* Wraparound (circular boundary conditions) */
    /* NOTE: This chooses which direction we "actually" take derivatives in
       along with the conditional add a few lines up */
    int globAddr = FD_MEMSTEP * addrX + OTHER_MEMSTEP * addrY;
    /* Stick whatever local variables we care to futz with here */
    /* We step through the array, one XY plane at a time */
    int z;
    for(z = 0; z < ORTHOG_DIMENSION; z++) {
        cellVel[tileAddr] = velin[globAddr]; // stage this plane's strip into shared memory
        __syncthreads();
        // Keep in mind, ANY operation that refers to other than register variables or flux[tileAddr] MUST have a __syncthreads() after it or there will be sadness.
        if(ITakeDerivative) {
            velout[globAddr] = .25*(cellVel[tileAddr-1] + 2.0*cellVel[tileAddr] + cellVel[tileAddr+1]);
        }
        __syncthreads();
        /* This determines the "Z" direction */
        globAddr += ORTHOG_MEMSTEP;
    }
}
| c31e8a6516b44e32f1962c338bf8060f2d3043ef.cu | #include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "cuda.h"
#include "cuda_runtime.h"
#include "cublas.h"
#include "cudaCommon.h"
/* THIS FUNCTION
This function takes momentum and density arrays to interpolate to second order the velocity fields
required by the MHD routines to perform magnetic field evolution.
The interpolation is two dimensional, involving a backwards average in the I (velocity) direction
and a centered 2nd order average in the X (magnetic) direction, such that the stencil is
+---+-------+
|I\X|-1 0 1 |
+---+-------+
A = | 0 | 1 2 1 | / 4
|-1 | 1 2 1 |
+---+-------+
where A is computed using
[ (p(i-1,x+1)+p(i,x+1))/(m(i-1,x+1)+m(i,x+1))
2*(p(i-1,x )+p(i,x ))/(m(i-1,x )+m(i,x ))
(p(i-1,x-1)+p(i,x-1))/(m(i-1,x-1)+m(i,x-1)) ] / 4
This presents the velocities interpolated at the corners of cells
*/
__global__ void cukern_SimpleVelocity(double *v, double *p, double *m, int numel);
__global__ void cukern_VelocityBkwdAverage_X(double *v, double *p, double *m, int nx);
__global__ void cukern_VelocityBkwdAverage_Y(double *v, double *p, double *m, int nx, int ny);
__global__ void cukern_VelocityBkwdAverage_Z(double *v, double *p, double *m, int nx, int nz);
__global__ void cukern_CentralAverage_X(double *out, double *in, int nx);
__global__ void cukern_CentralAverage_Y(double *out, double *in, int nx, int ny);
__global__ void cukern_magVelInterp_Z(double *velout, double *velin, int3 dims);
// MATLAB entry point: velInterp = cudaMagPrep(mom, mass, [dirvel dirmag])
// Interpolates velocity (= mom ./ mass) to cell corners for MHD field
// evolution: a backwards average along dirvel followed by a centered
// 2nd-order average along dirmag.
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
    // At least 2 arguments expected
    // Input and result
    if ((nrhs!=3) || (nlhs != 1)) mexErrMsgTxt("Wrong number of arguments: need velInterp = cudaMagPrep(mom, mass, [dirvel dirmag])\n");
    CHECK_CUDA_ERROR("entering cudaMagPrep");
    // Get source array info (src[0] = momentum, src[1] = mass) and create destination arrays
    MGArray src[2];
    int worked = MGA_accessMatlabArrays(prhs, 0, 1, src);
    // NOTE(review): 'worked' is never checked -- confirm the error convention of
    // MGA_accessMatlabArrays and bail out on failure if appropriate.
    MGArray *dst = MGA_createReturnedArrays(plhs, 1, src);
    // Scratch buffer for the intermediate (backwards-averaged) velocity
    double *tempVelocity;
    cudaError_t fail = cudaMalloc((void **)&tempVelocity, src->numel*sizeof(double));
    if(fail != cudaSuccess) {
        printf("%s\n", cudaGetErrorString(fail));
        mexErrMsgTxt("cudaMagPrep: I have an cudaMalloc fail. And a sad.");
    }
    // Establish launch dimensions & a few other parameters
    double *directs = mxGetPr(prhs[2]);
    int velDirection = (int)directs[0]; // direction of the backwards velocity average (1=X, 2=Y, 3=Z)
    int magDirection = (int)directs[1]; // direction of the centered magnetic average
    int3 arraySize;
    arraySize = makeInt3(&src->dim[0]);
    dim3 blocksize, gridsize;
    blocksize.z = 1;
    gridsize.z = 1;
    cudaSetDevice(src->deviceID[0]);
    CHECK_CUDA_ERROR("cudaSetDevice()");
    // NOTE(review): the kernels below read mass via src[1].devicePtr[1] while every
    // other array uses devicePtr[0]; confirm this index against the MGArray layout.
    if(src->dim[velDirection-1] > 1) {
        // Interpolate the grid-aligned velocity
        switch(velDirection) {
        case 1:
            blocksize = makeDim3(128,1,1); // the X kernel requires blockDim.x == 128
            gridsize.x = arraySize.y; // / 16; gridsize.x += (16 * gridsize.x < arraySize.x);
            gridsize.y = arraySize.z;// / blocksize.y; gridsize.y += (blocksize.y * gridsize.y < arraySize.y);
            cukern_VelocityBkwdAverage_X<<<gridsize, blocksize>>>(tempVelocity, src[0].devicePtr[0], src[1].devicePtr[1], arraySize.x);
            break;
        case 2:
            blocksize = makeDim3(64,1,1);
            gridsize.x = arraySize.x/64; gridsize.x += (gridsize.x*64 < arraySize.x);
            gridsize.y = arraySize.z;
            cukern_VelocityBkwdAverage_Y<<<gridsize, blocksize>>>(tempVelocity, src[0].devicePtr[0], src[1].devicePtr[1], arraySize.x, arraySize.y);
            break;
        case 3:
            blocksize = makeDim3(64,1,1);
            gridsize.x = arraySize.x/64; gridsize.x += (gridsize.x*64 < arraySize.x);
            gridsize.y = arraySize.y;
            cukern_VelocityBkwdAverage_Z<<<gridsize, blocksize>>>(tempVelocity, src[0].devicePtr[0], src[1].devicePtr[1], arraySize.x, arraySize.z);
            break;
        }
    } else {
        // Degenerate extent along the averaging direction: plain v = p/m
        blocksize = makeDim3(512,1,1);
        gridsize.x = src->numel / 512; gridsize.x += (gridsize.x * 512 < src->numel); gridsize.y = gridsize.z = 1;
        cukern_SimpleVelocity<<<gridsize, blocksize>>>(tempVelocity, src[0].devicePtr[0], src[1].devicePtr[1], src->numel);
    }
    CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, src, velDirection, "mag prep velocity avg");
    cudaDeviceSynchronize(); // surface any asynchronous kernel faults before stage 2
    // Interpolate the velocity to 2nd order
    if(src->dim[magDirection-1] > 1) {
        switch(magDirection) {
        case 1:
            blocksize.x = 128; blocksize.y = blocksize.z = 1;
            gridsize.x = arraySize.y; // / 16; gridsize.x += (16 * gridsize.x < arraySize.x);
            gridsize.y = arraySize.z;// / blocksize.y; gridsize.y += (blocksize.y * gridsize.y < arraySize.y);
            cukern_CentralAverage_X<<<gridsize, blocksize>>>(dst->devicePtr[0], tempVelocity, arraySize.x);
            break;
        case 2:
            blocksize.x = 64; blocksize.y = blocksize.z = 1;
            gridsize.x = arraySize.x/64; gridsize.x += (gridsize.x*64 < arraySize.x);
            gridsize.y = arraySize.z;
            cukern_CentralAverage_Y<<<gridsize, blocksize>>>(dst->devicePtr[0], tempVelocity, arraySize.x, arraySize.y);
            break;
        case 3:
            blocksize.x = 18; blocksize.y = 8;
            gridsize.x = arraySize.z / 14; gridsize.x += (14 * gridsize.x < arraySize.z);
            gridsize.y = arraySize.x / blocksize.y; gridsize.y += (blocksize.y * gridsize.y < arraySize.x);
            cukern_magVelInterp_Z<<<gridsize, blocksize>>>(dst->devicePtr[0], tempVelocity, arraySize);
            break;
        }
    } else {
        // BUGFIX: cudaMemcpy takes a byte count; this previously passed src->numel
        // (an element count) and copied only numel bytes -- 1/8 of the buffer.
        cudaMemcpy(dst->devicePtr[0], tempVelocity, src->numel*sizeof(double), cudaMemcpyDeviceToDevice);
        // FIXME: Detect this condition ahead of time and never bother with this array in the first place
    }
    cudaDeviceSynchronize();
    CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, dst, magDirection, "mag prep interpolation");
    cudaFree(tempVelocity); // Because only YOU can prevent memory leaks!
    // (and this one would be a whopper...)
    free(dst);
}
/* Computes v = p / m elementwise over numel entries.
 * Launched with 512-thread 1-D blocks (the stride constant below must match
 * the host-side launch configuration). */
__global__ void cukern_SimpleVelocity(double *v, double *p, double *m, int numel)
{
    int addr = threadIdx.x + 512*blockIdx.x;
    // BUGFIX: the guard was 'addr > numel', which let the thread with
    // addr == numel read and write one element past the end of the arrays.
    if(addr >= numel) return;
    v[addr] = p[addr] / m[addr];
}
/*cukern_Vavg_vx_by(double *v, double *p, double *rho, int3 dim);
{
int nx = dim.x; int ny = dim.y; int nz = dim.z; // Kill them all and let the compiler sort them out. The Optimizer will recognize his own.
__shared__ double tileA[TILEX][TILEY]
__shared__ double tileB[TILEX][TILEY]
int myx = threadIdx.x + (TILEX-2)*blockIdx.x - 1;
int myy = threadIdx.y + (TILEY-2)*blockIdx.y - 1;
if(myx >= nx) return;
if(myy >= ny) return;
myx += nx; myy += ny;
myx %= nx; myy %= ny;
int globAddr = myx + nx*myy;
int z;
int xwrite = (
int iwrite = (threadIdx.x > 0) && (threadIdx.x < (TILEX-1)) && (threadIdx.y > 0) && (threadIdx.y < (TILEY-1));
for(z = 0; z < nz; z++) {
tileA[threadIdx.x][threadIdx.y] = p[globAddr];
tileB[threadIdx.x][threadIdx.y] = rho[globAddr];
__syncthreads();
if(iwrite) {
v[globAddr] = (tileA[threadIdx.x-1][threadIdx.y] + tileA[threadIdx.x][threadIdx.y])/ \
(tileB[threadIdx.x-1][threadIdx.y] + tileB[threadIdx.x][threadIdx.y]);
}
//[ (p(i-1,x+1)+p(i,x+1))/(m(i-1,x+1)+m(i,x+1))
// 2*(p(i-1,x )+p(i,x ))/(m(i-1,x )+m(i,x ))
// (p(i-1,x-1)+p(i,x-1))/(m(i-1,x-1)+m(i,x-1)) ]
//
globAddr += nx*ny;
}
}
*/
/* Backwards average of velocity along X:
 *   v(x) = (p(x-1) + p(x)) / (m(x-1) + m(x)),  with x wrapping circularly.
 * Requires blockDim.x == 128 (a 256-entry shared double buffer is split into
 * two 128-wide halves); launch grid = <ny, nz>, one block per (y,z) row. */
__global__ void cukern_VelocityBkwdAverage_X(double *v, double *p, double *m, int nx)
{
    int xAddr = threadIdx.x;
    int yAddr = blockIdx.x;
    int zAddr = blockIdx.y;
    int ny = gridDim.x;
    int addrMax = nx*(yAddr + ny*zAddr + 1); // The address which we must not reach or go beyond is the start of the next line
    int writeBase = xAddr + nx*(yAddr + ny*zAddr); // the write start position is the left edge
    int readBase = writeBase - 1; // the read start position is one to the left of the write start position.
    if (threadIdx.x == 0) readBase += nx; // leftmost reads right edge (circular wrap)
    readBase -= nx*(readBase >= addrMax);
    // Double buffer: halves [0..127] and [128..255] alternate between the
    // "current" and the "read-ahead" segment of the row.
    __shared__ double lMom[256];
    __shared__ double lRho[256];
    int locAddr = threadIdx.x;
    //lStore[locAddr] = in[globBase + readX]; // load the first memory segment
    lMom[locAddr] = p[readBase]; // load the first memory segment
    lRho[locAddr] = m[readBase];
    do {
        readBase += 128; // move over one block
        readBase -= nx*(readBase >= addrMax); // loop around if x overflows
        lMom[(locAddr + 128) % 256] = p[readBase];
        lRho[(locAddr + 128) % 256] = m[readBase];
        __syncthreads(); // We have now read ahead by a segment. Calculate forward average, comrades!
        if(writeBase < addrMax) { v[writeBase] = (lMom[locAddr] + lMom[(locAddr+1)%256])/(lRho[locAddr] + lRho[(locAddr+1)%256]); } // If we are within range, that is.
        writeBase += 128; // Advance write address
        if(writeBase >= addrMax) return; // If write address is beyond nx, we're finished.
        locAddr ^= 128; // flip to the other half of the double buffer
        __syncthreads();
    } while(1);
}
/* Invoke with a blockdim of <64, 1, 1> threads
Invoke with a griddim = <ceil[nx / 64], nz, 1> */
/* Backward average along Y, periodic in Y:
 *   v[y] = (p[y-1] + p[y]) / (m[y-1] + m[y]),  row -1 taken from row ny-1.
 * Per the launch comment above: blockDim = <64,1,1>, blockIdx.y indexes z.
 * Each 128-entry tile packs momentum in [0,64) and density in [64,128);
 * the A/B pointer swap turns the tiles into a previous-row/current-row pair. */
__global__ void cukern_VelocityBkwdAverage_Y(double *v, double *p, double *m, int nx, int ny)
{
int xaddr = blockDim.x * blockIdx.x + threadIdx.x; // There are however many X threads
if(xaddr >= nx) return; // truncate this right off
__shared__ double tileA[128];
__shared__ double tileB[128];
double *setA = tileA;
double *setB = tileB;
double *swap;
int writeBase = xaddr + nx*ny*blockIdx.y; // set Raddr to x + nx ny z
int addrMax = writeBase + nx*(ny - 1); // Set this to the max address we want to handle in the loop
setB[threadIdx.x] = p[addrMax]; // load the last row (y = ny-1) as the periodic "y = -1" seed
setB[threadIdx.x+64] = m[addrMax];
while(writeBase <= addrMax) { // walk rows 0..ny-1 (condition is <=, so the last row IS handled here)
swap = setB; // exchange A/B pointers
setB = setA;
setA = swap;
// __syncthreads();
setB[threadIdx.x] = p[writeBase]; // load the current row into set B
setB[threadIdx.x+64] = m[writeBase];
v[writeBase] = (setA[threadIdx.x] + setB[threadIdx.x])/(setA[threadIdx.x+64] + setB[threadIdx.x+64]); // average written to output
__syncthreads();
writeBase += nx; // increment rw address to the next row
}
}
/* Invoke with a blockdim of <64, 1, 1> threads
Invoke with a griddim = <ceil[nx / 64], ny, 1> */
/* Backward average along Z, periodic in Z:
 *   v[z] = (p[z-1] + p[z]) / (m[z-1] + m[z]),  plane -1 taken from plane nz-1.
 * Per the launch comment above: blockDim = <64,1,1>, gridDim.y = ny, so
 * blockIdx.y indexes y and the loop strides nx*gridDim.y from plane to plane.
 * Each 128-entry tile packs momentum in [0,64) and density in [64,128). */
__global__ void cukern_VelocityBkwdAverage_Z(double *v, double *p, double *m, int nx, int nz)
{
int xaddr = blockDim.x * blockIdx.x + threadIdx.x; // There are however many X threads
if(xaddr >= nx) return; // truncate this right off
__shared__ double tileA[128];
__shared__ double tileB[128];
double *setA = tileA;
double *setB = tileB;
double *swap;
int writeBase = xaddr + nx*blockIdx.y; // set Raddr to x + nx*y
int addrMax = writeBase + nx*gridDim.y*(nz-1); // Set this to the max address we want to handle in the loop
setB[threadIdx.x] = p[addrMax]; // load the last plane (z = nz-1) as the periodic "z = -1" seed
setB[threadIdx.x+64] = m[addrMax];
while(writeBase <= addrMax) { // walk planes 0..nz-1 (condition is <=, so the last plane IS handled here)
swap = setB; // exchange A/B pointers
setB = setA;
setA = swap;
// __syncthreads();
setB[threadIdx.x] = p[writeBase]; // load the current plane into set B
setB[threadIdx.x+64] = m[writeBase];
v[writeBase] = (setA[threadIdx.x] + setB[threadIdx.x])/(setA[threadIdx.x+64] + setB[threadIdx.x+64]); // average written to output
__syncthreads();
writeBase += nx*gridDim.y; // advance to the next Z plane
}
}
//################ MAGNETIC INTERPOLATION KERNELS
/* 1-2-1 smoothing along X with periodic wrap:
 *   out[x] = .25*(in[x-1] + 2*in[x] + in[x+1]).
 * (readBase starts one left of writeBase, so funcBuffer[locAddr] holds
 * in[x-1], +1 holds in[x], +2 holds in[x+1].) One block per X line with
 * gridDim.x = ny and gridDim.y = nz; double-buffered 128-wide segments,
 * presumably blockDim.x == 128 -- TODO confirm at the launch site. */
__global__ void cukern_CentralAverage_X(double *out, double *in, int nx)
{
int xAddr = threadIdx.x;
int yAddr = blockIdx.x;
int zAddr = blockIdx.y;
int ny = gridDim.x;
int addrMax = nx*(yAddr + ny*zAddr + 1); // The address which we must not reach or go beyond is the start of the next line
int writeBase = xAddr + nx*(yAddr + ny*zAddr); // the write start position is the left edge
int readBase = writeBase - 1; // the read start position is one to the left of the write start position.
if (threadIdx.x == 0) readBase += nx; // leftmost reads right edge
readBase -= nx*(readBase >= addrMax); // branchless wrap back into this line
__shared__ double funcBuffer[256]; // two 128-wide double-buffered halves
int locAddr = threadIdx.x;
//lStore[locAddr] = in[globBase + readX]; // load the first memory segment
funcBuffer[locAddr] = in[readBase]; // load the first memory segment
do {
readBase += 128; // move over one block
readBase -= nx*(readBase >= addrMax); // loop around if x overflows
funcBuffer[(locAddr + 128) % 256] = in[readBase]; // prefetch the next segment into the other half
__syncthreads(); // We have now read ahead by a segment. Calculate forward average, comrades!
if(writeBase < addrMax) { out[writeBase] = .25*(funcBuffer[locAddr] + funcBuffer[(locAddr+2)%256]+ 2*funcBuffer[(locAddr+1)%256]); } // If we are within range, that is.
writeBase += 128; // Advance write address
if(writeBase >= addrMax) return; // If write address is beyond nx, we're finished.
locAddr ^= 128; // flip to the other half of the double buffer
__syncthreads(); // don't overwrite a half other threads may still be reading
} while(1);
}
/* Invoke with a blockdim of <64, 1, 1> threads
Invoke with a griddim = <ceil[nx / 64], nz, 1> */
/* 1-2-1 smoothing along Y with periodic wrap:
 *   out[y] = .25*(in[y-1] + in[y+1]) + .5*in[y].
 * Rotates three 64-entry shared rows (A = prev, B = cur, C = next). Per the
 * launch comment above blockDim = <64,1,1>; blockIdx.y indexes z. The last
 * row is special-cased after the loop because its y+1 neighbor wraps to
 * row 0 and the pointers are not rotated for it. */
__global__ void cukern_CentralAverage_Y(double *out, double *in, int nx, int ny)
{
int xaddr = blockDim.x * blockIdx.x + threadIdx.x; // There are however many X threads
if(xaddr >= nx) return; // truncate this right off
__shared__ double tileA[64];
__shared__ double tileB[64];
__shared__ double tileC[64];
double *setA = tileA;
double *setB = tileB;
double *setC = tileC;
double *swap;
int writeBase = xaddr + nx*ny*blockIdx.y; // set Raddr to x + nx ny z
int addrMax = writeBase + nx*(ny - 1); // Set this to the max address we want to handle in the loop
setB[threadIdx.x] = in[addrMax]; // load row (y=-1) into set b -- periodic seed from the last row
setC[threadIdx.x] = in[writeBase];
while(writeBase < addrMax) { // Exit one BEFORE the max address to handle (since the max is a special case)
swap = setA; // Rotate pointers
setA = setB;
setB = setC;
setC = swap;
// __syncthreads();
setC[threadIdx.x] = in[writeBase + nx]; // load the next row (y+1) into set C
out[writeBase] = .25*(setA[threadIdx.x] + setC[threadIdx.x]) + .5*setB[threadIdx.x]; // average written to output
__syncthreads();
writeBase += nx; // increment rw address to the next row
}
// We arrive here when writeBase == addrMax, i.e. we are at the last Y index
setA[threadIdx.x] = in[xaddr + nx*ny*blockIdx.y]; // wrap: the y+1 neighbor of the last row is row 0
// The weights change because we haven't cycled the pointers
out[writeBase] = .25*(setA[threadIdx.x] + setB[threadIdx.x]) + .5*setC[threadIdx.x];
}
#undef TILEDIM_X
#undef TILEDIM_Y
#undef DIFFEDGE
#undef FD_DIMENSION
#undef FD_MEMSTEP
#undef OTHER_DIMENSION
#undef OTHER_MEMSTEP
#undef ORTHOG_DIMENSION
#undef ORTHOG_MEMSTEP
/* These define the size of the "tile" each element loads,
which is constrained, basically, by available local memory.
DIFFEDGE determines how wide the buffer zone is for taking
derivatives. */
#define TILEDIM_X 18
#define TILEDIM_Y 8
#define DIFFEDGE 2
/* These determine how we look at the array. The array is assumed to be 3D
(though possibly with z extent 1) and stored in C row-major format:
index = [i j k], size = [Nx Ny Nz], memory step = [1 Nx NxNy]
Choosing these determines how this operator sees the array: FD_DIM is the
one we're taking derivatives in, OTHER forms a plane to it, and ORTHOG
is the final dimension */
#define FD_DIMENSION dims.z
#define FD_MEMSTEP (dims.x*dims.y)
#define OTHER_DIMENSION dims.x
#define OTHER_MEMSTEP 1
#define ORTHOG_DIMENSION dims.y
#define ORTHOG_MEMSTEP dims.x
/* 1-2-1 smoothing of velocity along Z (FD_DIMENSION = dims.z, memory step
 * dims.x*dims.y) with circular wrap, marching one XY plane at a time along
 * the ORTHOG dimension. Threads form a TILEDIM_X x TILEDIM_Y tile whose
 * DIFFEDGE-wide edge columns only load halo data and never write. */
__global__ void cukern_magVelInterp_Z(double *velout, double *velin, int3 dims)
{
/* Declare any arrays to be used for storage/differentiation similarly. */
__shared__ double cellVel[TILEDIM_X * TILEDIM_Y+2];
/* Our assumption implicitly is that differencing occurs in the X direction in the local tile */
int tileAddr = threadIdx.x + TILEDIM_X*threadIdx.y + 1;
int addrX = (threadIdx.x - DIFFEDGE) + blockIdx.x * (TILEDIM_X - 2*DIFFEDGE);
int addrY = threadIdx.y + blockIdx.y * TILEDIM_Y;
addrX += (addrX < 0)*FD_DIMENSION; /* left halo of block 0 wraps to the far edge */
/* Nuke the threads hanging out past the end of the X extent of the array */
/* addrX is zero indexed, mind */
if(addrX >= FD_DIMENSION - 1 + DIFFEDGE) return;
if(addrY >= OTHER_DIMENSION) return;
/* Mask out threads who are near the edges to prevent seg violation upon differencing */
bool ITakeDerivative = (threadIdx.x >= DIFFEDGE) && (threadIdx.x < (TILEDIM_X - DIFFEDGE)) && (addrX < FD_DIMENSION);
addrX %= FD_DIMENSION; /* Wraparound (circular boundary conditions) */
/* NOTE: This chooses which direction we "actually" take derivatives in
along with the conditional add a few lines up */
int globAddr = FD_MEMSTEP * addrX + OTHER_MEMSTEP * addrY;
/* Stick whatever local variables we care to futz with here */
/* We step through the array, one XY plane at a time */
int z;
for(z = 0; z < ORTHOG_DIMENSION; z++) {
cellVel[tileAddr] = velin[globAddr];
__syncthreads();
// Keep in mind, ANY operation that refers to other than register variables or flux[tileAddr] MUST have a __syncthreads() after it or there will be sadness.
if(ITakeDerivative) {
velout[globAddr] = .25*(cellVel[tileAddr-1] + 2.0*cellVel[tileAddr] + cellVel[tileAddr+1]);
}
__syncthreads();
/* This determines the "Z" direction */
globAddr += ORTHOG_MEMSTEP;
}
}
|
622c765d94d9e46fb2ae8cca3b32eb1d771904fb.hip | // !!! This is a file automatically generated by hipify!!!
#define DATATYPE 0
#include "marvin.hpp"
#include <opencv2/opencv.hpp>
/* Reads a binary file of TDF voxel grids (header: numVolumes and grid sizes
 * X/Y/Z stored as floats, then numVolumes * X*Y*Z float TDF values), runs
 * the volumes through the 3DMatch Marvin network in batches of 50, and
 * writes the "feat" response to feat.bin (numVolumes + 4 feature dims,
 * then the feature values, all as floats). */
int main(int argc, char * argv[]) {
    // Fix: the original dereferenced argv[1] without checking argc.
    if (argc < 2) {
        std::cerr << "Usage: " << argv[0] << " <tdf_voxel_file>" << std::endl;
        return 1;
    }
    std::cout << argv[1] << std::endl;
    std::string filename(argv[1]);
    // Read binary file containing TDF voxel grid values
    FILE * fp = fopen(argv[1],"rb");
    if (fp == NULL) { // fix: fopen failure previously crashed inside fread
        std::cerr << "Could not open " << argv[1] << std::endl;
        return 1;
    }
    float numVolumesf;
    float volumeGridSizeXf;
    float volumeGridSizeYf;
    float volumeGridSizeZf;
    int iret = fread((void*)(&numVolumesf), sizeof(float), 1, fp);
    int numVolumes = (int)numVolumesf;
    iret = fread((void*)(&volumeGridSizeXf), sizeof(float), 1, fp);
    iret = fread((void*)(&volumeGridSizeYf), sizeof(float), 1, fp);
    iret = fread((void*)(&volumeGridSizeZf), sizeof(float), 1, fp);
    int numVolumeGridPoints = (int)(volumeGridSizeXf * volumeGridSizeYf * volumeGridSizeZf);
    float * tudfVoxf = new float[numVolumes*numVolumeGridPoints];
    iret = fread((void*)(tudfVoxf), sizeof(float), numVolumes*numVolumeGridPoints, fp);
    fclose(fp);
    // Start Marvin network
    marvin::Net convnet("tmp.json");
    convnet.Malloc(marvin::Testing);
    convnet.loadWeights("3dmatch-weights-snapshot-137000.marvin");
    marvin::Response * rData;
    marvin::Response * rFeat;
    rData = convnet.getResponse("data");
    rFeat = convnet.getResponse("feat");
    // Output header: volume count followed by the four feature dimensions.
    fp = fopen("feat.bin", "wb");
    fwrite(&numVolumesf, sizeof(float), 1, fp);
    for (int i = 1; i < 5; i++) {
        float value = (float)(rFeat->dim[i]);
        fwrite(&value, sizeof(float), 1, fp);
    }
    std::cout << numVolumes << std::endl;
    StorageT * itudfVox = new StorageT[50*numVolumeGridPoints];
    int numFeatVolumeGridPoints = rFeat->dim[0] * rFeat->dim[1] * rFeat->dim[2] * rFeat->dim[3] * rFeat->dim[4];
    StorageT * featVolume = new StorageT[numFeatVolumeGridPoints];
    // NOTE(review): volumes are consumed in fixed batches of 50; any
    // remainder (numVolumes % 50) is silently dropped -- confirm that the
    // input is always a multiple of 50.
    for (int vox_idx = 0; vox_idx < numVolumes/50; ++vox_idx) {
        // Convert this batch to the network's storage type.
        for (int i = 0; i < 50*numVolumeGridPoints; ++i) {
            itudfVox[i] = CPUCompute2StorageT(tudfVoxf[vox_idx*50*numVolumeGridPoints + i]);
        }
        hipMemcpy(rData->dataGPU, itudfVox, rData->numBytes(), hipMemcpyHostToDevice);
        marvin::checkCUDA(__LINE__, hipGetLastError());
        convnet.forward();
        hipMemcpy(featVolume, rFeat->dataGPU, rFeat->numBytes(), hipMemcpyDeviceToHost);
        for (int i = 0; i < numFeatVolumeGridPoints; i++) {
            float value = CPUStorage2ComputeT(featVolume[i]);
            fwrite(&value, sizeof(float), 1, fp);
        }
    }
    fclose(fp);
    // Fix: these three heap buffers leaked in the original.
    delete[] tudfVoxf;
    delete[] itudfVox;
    delete[] featVolume;
    return 0;
}
| 622c765d94d9e46fb2ae8cca3b32eb1d771904fb.cu | #define DATATYPE 0
#include "marvin.hpp"
#include <opencv2/opencv.hpp>
/* Reads a binary file of TDF voxel grids (header: numVolumes and grid sizes
 * X/Y/Z stored as floats, then numVolumes * X*Y*Z float TDF values), runs
 * the volumes through the 3DMatch Marvin network in batches of 50, and
 * writes the "feat" response to feat.bin (numVolumes + 4 feature dims,
 * then the feature values, all as floats). */
int main(int argc, char * argv[]) {
    // Fix: the original dereferenced argv[1] without checking argc.
    if (argc < 2) {
        std::cerr << "Usage: " << argv[0] << " <tdf_voxel_file>" << std::endl;
        return 1;
    }
    std::cout << argv[1] << std::endl;
    std::string filename(argv[1]);
    // Read binary file containing TDF voxel grid values
    FILE * fp = fopen(argv[1],"rb");
    if (fp == NULL) { // fix: fopen failure previously crashed inside fread
        std::cerr << "Could not open " << argv[1] << std::endl;
        return 1;
    }
    float numVolumesf;
    float volumeGridSizeXf;
    float volumeGridSizeYf;
    float volumeGridSizeZf;
    int iret = fread((void*)(&numVolumesf), sizeof(float), 1, fp);
    int numVolumes = (int)numVolumesf;
    iret = fread((void*)(&volumeGridSizeXf), sizeof(float), 1, fp);
    iret = fread((void*)(&volumeGridSizeYf), sizeof(float), 1, fp);
    iret = fread((void*)(&volumeGridSizeZf), sizeof(float), 1, fp);
    int numVolumeGridPoints = (int)(volumeGridSizeXf * volumeGridSizeYf * volumeGridSizeZf);
    float * tudfVoxf = new float[numVolumes*numVolumeGridPoints];
    iret = fread((void*)(tudfVoxf), sizeof(float), numVolumes*numVolumeGridPoints, fp);
    fclose(fp);
    // Start Marvin network
    marvin::Net convnet("tmp.json");
    convnet.Malloc(marvin::Testing);
    convnet.loadWeights("3dmatch-weights-snapshot-137000.marvin");
    marvin::Response * rData;
    marvin::Response * rFeat;
    rData = convnet.getResponse("data");
    rFeat = convnet.getResponse("feat");
    // Output header: volume count followed by the four feature dimensions.
    fp = fopen("feat.bin", "wb");
    fwrite(&numVolumesf, sizeof(float), 1, fp);
    for (int i = 1; i < 5; i++) {
        float value = (float)(rFeat->dim[i]);
        fwrite(&value, sizeof(float), 1, fp);
    }
    std::cout << numVolumes << std::endl;
    StorageT * itudfVox = new StorageT[50*numVolumeGridPoints];
    int numFeatVolumeGridPoints = rFeat->dim[0] * rFeat->dim[1] * rFeat->dim[2] * rFeat->dim[3] * rFeat->dim[4];
    StorageT * featVolume = new StorageT[numFeatVolumeGridPoints];
    // NOTE(review): volumes are consumed in fixed batches of 50; any
    // remainder (numVolumes % 50) is silently dropped -- confirm that the
    // input is always a multiple of 50.
    for (int vox_idx = 0; vox_idx < numVolumes/50; ++vox_idx) {
        // Convert this batch to the network's storage type.
        for (int i = 0; i < 50*numVolumeGridPoints; ++i) {
            itudfVox[i] = CPUCompute2StorageT(tudfVoxf[vox_idx*50*numVolumeGridPoints + i]);
        }
        cudaMemcpy(rData->dataGPU, itudfVox, rData->numBytes(), cudaMemcpyHostToDevice);
        marvin::checkCUDA(__LINE__, cudaGetLastError());
        convnet.forward();
        cudaMemcpy(featVolume, rFeat->dataGPU, rFeat->numBytes(), cudaMemcpyDeviceToHost);
        for (int i = 0; i < numFeatVolumeGridPoints; i++) {
            float value = CPUStorage2ComputeT(featVolume[i]);
            fwrite(&value, sizeof(float), 1, fp);
        }
    }
    fclose(fp);
    // Fix: these three heap buffers leaked in the original.
    delete[] tudfVoxf;
    delete[] itudfVox;
    delete[] featVolume;
    return 0;
}
|
efa3090796993f5a6b7b2a07fa2da3af941393c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// cuda_comm.cu
// Cuda GMRES
//
// Created by Tim Ioannidis on 3/06/12.
// Copyright 2012 Chemeng NTUA. All rights reserved.
//
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_config.h"
#include "cuda_methods.h"
//dot product dot_res=a<dot>b me diastasi dim
/* In-place combine of two device-resident scalars:
 *   choice == 0: *dest = sqrt(*dest + *source)   (norm finalization)
 *   otherwise:   *dest = *dest + *source         (plain accumulation)
 * NOTE(review): writes a single shared location -- presumably launched with
 * one thread; multiple threads would race. Confirm at the call site. */
__global__ void cuda_comm_kernel(double *dest,double *source,int choice)
{
    double combined = (*dest) + (*source);
    *dest = (choice == 0) ? sqrt(combined) : combined;
}
| efa3090796993f5a6b7b2a07fa2da3af941393c1.cu | //
// cuda_comm.cu
// Cuda GMRES
//
// Created by Tim Ioannidis on 3/06/12.
// Copyright 2012 Chemeng NTUA. All rights reserved.
//
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_config.h"
#include "cuda_methods.h"
//dot product dot_res=a<dot>b me diastasi dim
// In-place combine of two device-resident scalars.
// NOTE(review): writes a single shared location -- presumably launched with
// one thread; multiple threads would race. Confirm at the call site.
__global__ void cuda_comm_kernel(double *dest,double *source,int choice)
{
if (choice==0) {
// Combine and take the square root (e.g. finishing a norm from partial sums).
(*dest)=sqrt((*dest)+(*source));
}
else
{
// Plain accumulation: dest += source.
*dest += *source;
}
}
|
305fe554c0e63ba14ab1ae07f725c602046a469c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Udacity HW5
Histogramming for Speed
The goal of this assignment is compute a histogram
as fast as possible. We have simplified the problem as much as
possible to allow you to focus solely on the histogramming algorithm.
The input values that you need to histogram are already the exact
bins that need to be updated. This is unlike in HW3 where you needed
to compute the range of the data and then do:
bin = (val - valMin) / valRange to determine the bin.
Here the bin is just:
bin = val
so the serial histogram calculation looks like:
for (i = 0; i < numElems; ++i)
histo[val[i]]++;
That's it! Your job is to make it run as fast as possible!
The values are normally distributed - you may take
advantage of this fact in your implementation.
*/
#include "utils.h"
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/device_ptr.h> // thrust::device_ptr
/**
* global version of histogram
*
* on GeForce GTX 980 Ti, 4.338208 msecs 4.589024 msecs 4.319296 msec.
**/
/**
__global__
void yourHisto(const unsigned int* const vals, //INPUT
unsigned int* const d_histo, //OUPUT
const unsigned int numVals)
{
unsigned int id = threadIdx.x + blockIdx.x*blockDim.x;
// if (id >= numVals) { return ; } // sanity check for accessing the input values in global memory
int stride = blockDim.x * gridDim.x;
for (unsigned int i = id; i < numVals; i += stride) {
unsigned int bin = vals[i];
atomicAdd(&(d_histo[bin]),1);
}
}
*/
/**
* shared version of histogram
*
* on GeForce GTX 980 Ti, 0.569824 msecs 0.570016 msecs 0.570144 msec.
**/
// GPU with atomics in shared memory with final summation of partial histograms
/****************************************/
/* GPU HISTOGRAM SHARED MEMORY */
/****************************************/
/* Shared-memory histogram; input values in d_vals are already bin indices.
 * Each block (1) zeroes a numBins-entry shared histogram, (2) accumulates its
 * grid-stride slice of d_vals with shared-memory atomics, then (3) merges the
 * block-private counts into d_Histo with one global atomicAdd per bin.
 * Launch with dynamic shared memory = numBins * sizeof(unsigned int). */
__global__ void yourHisto( const unsigned int* const d_vals, unsigned int *d_Histo,
const unsigned int numBins, const unsigned int numElems ) {
int k_x = threadIdx.x + blockDim.x * blockIdx.x ; // k_x = 0, 1, ... numElems-1
int i_x = threadIdx.x ;
int M_x = blockDim.x ;
int offset = blockDim.x * gridDim.x ; // grid stride
extern __shared__ unsigned int s[] ; // |s| = numBins ; i.e. size of s, shared memory, is numBins
for (unsigned int i = i_x; i < numBins; i += M_x ) {
s[i] = 0; }
__syncthreads();
for (unsigned int i = k_x; i < numElems; i += offset) {
atomicAdd( &s[ d_vals[i] ], 1) ;
}
__syncthreads(); // ensure last of our writes have been committed
// Merge this block's private histogram into the global one.
for (unsigned int i = i_x; i < numBins; i += M_x ) {
atomicAdd( &(d_Histo[ i ]), s[ i ] ) ;
}
__syncthreads();
}
////////////////////////////////////////////////////////////////////////
/** *******************************************************************/
/** GPU shared atomics fast histogram
*
* on GeForce GTX 980 Ti, 1.905600 msecs 1.879360 msecs 1.914016 msec.
*
* ********************************************************************/
////////////////////////////////////////////////////////////////////////
// shared atomics, fast histogram
// cf. https://devblogs.nvidia.com/parallelforall/gpu-pro-tip-fast-histograms-using-shared-atomics-maxwell/
// 1st phase
/* Phase 1 of the two-phase histogram: each block builds a private histogram
 * of its grid-stride slice of d_vals in shared memory, then copies it (no
 * global atomics needed) into its own numBins-wide row of PartialHisto.
 * Launch with dynamic shared memory = numBins * sizeof(unsigned int). */
__global__ void histogram_smem_atomics( const unsigned int* const d_vals, //INPUT
unsigned int* PartialHisto ,
// unsigned int* const d_histo, //OUTPUT
const unsigned int numBins,
const unsigned int numElems ) {
// global position
int k_x = threadIdx.x + blockDim.x * blockIdx.x ; // k_x = [0, numElems)
// threads (thread position) in workgroup
int i_x = threadIdx.x ;
// group index in 0 .. ngroups -1, i.e.
// thread block index j_x in 0 .. L_x-1 blocks on a grid in x-direction, where L_x = (numElems + M_x - 1)/M_x
int j_x = blockIdx.x ;
int M_x = blockDim.x ;
int offset = blockDim.x * gridDim.x ; // grid stride
// initialize smem
//__shared__ unsigned int smem[ numBins ] ;
extern __shared__ unsigned int smem[] ; // |smem| = numBins ; i.e. size of smem, shared memory, is numBins
for (int i = i_x; i < numBins ; i += M_x) {
smem[i] = 0 ; }
__syncthreads();
// process input values
// updates our group (i.e. (single) thread block)'s partial histogram in smem
for (int pos = k_x; pos < numElems; pos += offset ) {
// unsigned int temp_val = d_vals[pos] ;
// atomicAdd( &smem[ temp_val ], 1);
atomicAdd( &smem[ d_vals[pos] ], 1 ) ;
}
__syncthreads();
// Publish the block-private histogram to this block's row of PartialHisto.
for (int i = i_x; i < numBins; i += M_x ) {
PartialHisto[i + numBins * j_x] = smem[ i ] ;
}
}
// shared atomics, fast histogram
// cf. https://devblogs.nvidia.com/parallelforall/gpu-pro-tip-fast-histograms-using-shared-atomics-maxwell/
// 2nd phase
/* Phase 2 of the two-phase histogram: reduce the per-block partial
 * histograms into the final histogram. One thread owns one bin and sums
 * that bin across all L_x partial histograms (no atomics required). */
__global__ void histogram_final_accum(const unsigned int *PartialHisto,
                                      unsigned int *d_Histo,
                                      unsigned int L_x, // number of partial histograms (== phase-1 grid size)
                                      const unsigned int numBins) {
    unsigned int bin = threadIdx.x + blockIdx.x * blockDim.x;
    if (bin >= numBins) { return; }
    unsigned int total = 0;
    for (unsigned int j = 0; j < L_x; ++j) {
        total += PartialHisto[bin + j * numBins];
    }
    d_Histo[bin] = total;
}
/* Host driver for the two-phase shared-atomics histogram: allocates one
 * numBins-wide partial histogram per phase-1 block, launches phase 1
 * (per-block shared histograms) and phase 2 (per-bin accumulation), then
 * frees the scratch. blockSize sizes phase 1; blockSize2 sizes phase 2. */
void histogram_shared_atomics_kernel( const unsigned int* const d_vals, unsigned int *d_Histo,
const unsigned int numBins, const unsigned int numElems,
const unsigned int blockSize, const unsigned int blockSize2) {
unsigned int gridSize = (numElems + blockSize - 1)/blockSize ; // ceil-div
unsigned int * d_PartialHisto ; // Partial Histograms, of size numBins * gridSize
checkCudaErrors( hipMalloc( &d_PartialHisto, sizeof(unsigned int) * numBins * gridSize ) ) ;
checkCudaErrors( hipMemset( d_PartialHisto, 0, sizeof(unsigned int) * numBins * gridSize));
hipLaunchKernelGGL(( histogram_smem_atomics), dim3(gridSize),dim3(blockSize), numBins * sizeof(unsigned int), 0, d_vals, d_PartialHisto, numBins, numElems) ;
hipLaunchKernelGGL(( histogram_final_accum), dim3(numBins), dim3(blockSize2), 0, 0, d_PartialHisto, d_Histo, gridSize, numBins) ;
checkCudaErrors( hipFree( d_PartialHisto) );
}
/** *******************************************************************/
/** END OF GPU shared atomics fast histogram
* ********************************************************************/
////////////////////////////////////////////////////////////////////////
/* Entry point: histograms numElems pre-binned values into d_histo[numBins].
 * The single-kernel shared-memory variant is the one launched below; the
 * global-atomics and two-phase shared-atomics variants are retained
 * (commented out) together with their measured timings. */
void computeHistogram(const unsigned int* const d_vals, //INPUT
unsigned int* const d_histo, //OUTPUT
const unsigned int numBins,
const unsigned int numElems)
{
/**
* *********** block, grid dimensions *************
* */
// histogram global
// const unsigned int BLOCK_SIZE = 256;
// const unsigned int GRID_SIZE = (numElems + BLOCK_SIZE - 1)/ BLOCK_SIZE ;
// histogram shared memory
const unsigned int blocksize_s = 1024; // numBins = 1024
const unsigned int gridsize_s = (numElems + blocksize_s - 1)/blocksize_s ;
// histogram, shared atomics (used only by the commented-out variant below)
// 1st. phase - input values from global memory into shared memory local histograms
// notice that gridsize1 will be necessary for size of PartialHisto
const unsigned int blocksize1 = 1024; // it is correct for blocksize1 = 256, block_accum = 64; or 512, 64, resp.; or 1024, 1024, resp.
// 2nd. phase - merge, concatenate, accumulate - block, grid dimensions
const unsigned int block_accum = 1024 ; // it is correct for block_accum = 64, blocksize1 = 256, or 64, 512, resp.; or 1024, 1024, resp.
/** END of block, grid dimensions *****************/
// END of block, grid dimensions
/**
* global version of histogram
* on GeForce GTX 980 Ti, 4.338208 msecs 4.589024 msecs 4.319296 msec.
*
yourHisto<<<GRID_SIZE, BLOCK_SIZE>>>(d_vals,d_histo,numElems);
*/
/**
* shared memory version of histogram
* on GeForce GTX 980 Ti, 0.569824 msecs 0.570016 msecs 0.570144 msec. for blocksize_s = 1024
* */
hipLaunchKernelGGL(( yourHisto), dim3(gridsize_s), dim3(blocksize_s), numBins * sizeof(unsigned int), 0, d_vals,d_histo,numBins,numElems);
/**
* on GeForce GTX 980 Ti, 1.905600 msecs 1.879360 msecs 1.914016 msec.
*
* ********************************************************************
* shared atomics version of histogram
* ********************************************************************/
/*
histogram_shared_atomics_kernel( d_vals, d_histo, numBins, numElems,
blocksize1, block_accum) ;
*/
}
| 305fe554c0e63ba14ab1ae07f725c602046a469c.cu | /* Udacity HW5
Histogramming for Speed
The goal of this assignment is compute a histogram
as fast as possible. We have simplified the problem as much as
possible to allow you to focus solely on the histogramming algorithm.
The input values that you need to histogram are already the exact
bins that need to be updated. This is unlike in HW3 where you needed
to compute the range of the data and then do:
bin = (val - valMin) / valRange to determine the bin.
Here the bin is just:
bin = val
so the serial histogram calculation looks like:
for (i = 0; i < numElems; ++i)
histo[val[i]]++;
That's it! Your job is to make it run as fast as possible!
The values are normally distributed - you may take
advantage of this fact in your implementation.
*/
#include "utils.h"
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/device_ptr.h> // thrust::device_ptr
/**
* global version of histogram
*
* on GeForce GTX 980 Ti, 4.338208 msecs 4.589024 msecs 4.319296 msec.
**/
/**
__global__
void yourHisto(const unsigned int* const vals, //INPUT
unsigned int* const d_histo, //OUPUT
const unsigned int numVals)
{
unsigned int id = threadIdx.x + blockIdx.x*blockDim.x;
// if (id >= numVals) { return ; } // sanity check for accessing the input values in global memory
int stride = blockDim.x * gridDim.x;
for (unsigned int i = id; i < numVals; i += stride) {
unsigned int bin = vals[i];
atomicAdd(&(d_histo[bin]),1);
}
}
*/
/**
* shared version of histogram
*
* on GeForce GTX 980 Ti, 0.569824 msecs 0.570016 msecs 0.570144 msec.
**/
// GPU with atomics in shared memory with final summation of partial histograms
/****************************************/
/* GPU HISTOGRAM SHARED MEMORY */
/****************************************/
/* Shared-memory histogram; input values in d_vals are already bin indices.
 * Each block (1) zeroes a numBins-entry shared histogram, (2) accumulates its
 * grid-stride slice of d_vals with shared-memory atomics, then (3) merges the
 * block-private counts into d_Histo with one global atomicAdd per bin.
 * Launch with dynamic shared memory = numBins * sizeof(unsigned int). */
__global__ void yourHisto( const unsigned int* const d_vals, unsigned int *d_Histo,
const unsigned int numBins, const unsigned int numElems ) {
int k_x = threadIdx.x + blockDim.x * blockIdx.x ; // k_x = 0, 1, ... numElems-1
int i_x = threadIdx.x ;
int M_x = blockDim.x ;
int offset = blockDim.x * gridDim.x ; // grid stride
extern __shared__ unsigned int s[] ; // |s| = numBins ; i.e. size of s, shared memory, is numBins
for (unsigned int i = i_x; i < numBins; i += M_x ) {
s[i] = 0; }
__syncthreads();
for (unsigned int i = k_x; i < numElems; i += offset) {
atomicAdd( &s[ d_vals[i] ], 1) ;
}
__syncthreads(); // ensure last of our writes have been committed
// Merge this block's private histogram into the global one.
for (unsigned int i = i_x; i < numBins; i += M_x ) {
atomicAdd( &(d_Histo[ i ]), s[ i ] ) ;
}
__syncthreads();
}
////////////////////////////////////////////////////////////////////////
/** *******************************************************************/
/** GPU shared atomics fast histogram
*
* on GeForce GTX 980 Ti, 1.905600 msecs 1.879360 msecs 1.914016 msec.
*
* ********************************************************************/
////////////////////////////////////////////////////////////////////////
// shared atomics, fast histogram
// cf. https://devblogs.nvidia.com/parallelforall/gpu-pro-tip-fast-histograms-using-shared-atomics-maxwell/
// 1st phase
/* Phase 1 of the two-phase histogram: each block builds a private histogram
 * of its grid-stride slice of d_vals in shared memory, then copies it (no
 * global atomics needed) into its own numBins-wide row of PartialHisto.
 * Launch with dynamic shared memory = numBins * sizeof(unsigned int). */
__global__ void histogram_smem_atomics( const unsigned int* const d_vals, //INPUT
unsigned int* PartialHisto ,
// unsigned int* const d_histo, //OUTPUT
const unsigned int numBins,
const unsigned int numElems ) {
// global position
int k_x = threadIdx.x + blockDim.x * blockIdx.x ; // k_x = [0, numElems)
// threads (thread position) in workgroup
int i_x = threadIdx.x ;
// group index in 0 .. ngroups -1, i.e.
// thread block index j_x in 0 .. L_x-1 blocks on a grid in x-direction, where L_x = (numElems + M_x - 1)/M_x
int j_x = blockIdx.x ;
int M_x = blockDim.x ;
int offset = blockDim.x * gridDim.x ; // grid stride
// initialize smem
//__shared__ unsigned int smem[ numBins ] ;
extern __shared__ unsigned int smem[] ; // |smem| = numBins ; i.e. size of smem, shared memory, is numBins
for (int i = i_x; i < numBins ; i += M_x) {
smem[i] = 0 ; }
__syncthreads();
// process input values
// updates our group (i.e. (single) thread block)'s partial histogram in smem
for (int pos = k_x; pos < numElems; pos += offset ) {
// unsigned int temp_val = d_vals[pos] ;
// atomicAdd( &smem[ temp_val ], 1);
atomicAdd( &smem[ d_vals[pos] ], 1 ) ;
}
__syncthreads();
// Publish the block-private histogram to this block's row of PartialHisto.
for (int i = i_x; i < numBins; i += M_x ) {
PartialHisto[i + numBins * j_x] = smem[ i ] ;
}
}
// shared atomics, fast histogram
// cf. https://devblogs.nvidia.com/parallelforall/gpu-pro-tip-fast-histograms-using-shared-atomics-maxwell/
// 2nd phase
/* Phase 2 of the two-phase histogram: reduce the per-block partial
 * histograms into the final histogram. One thread owns one bin and sums
 * that bin across all L_x partial histograms (no atomics required). */
__global__ void histogram_final_accum(const unsigned int *PartialHisto,
                                      unsigned int *d_Histo,
                                      unsigned int L_x, // number of partial histograms (== phase-1 grid size)
                                      const unsigned int numBins) {
    unsigned int bin = threadIdx.x + blockIdx.x * blockDim.x;
    if (bin >= numBins) { return; }
    unsigned int total = 0;
    for (unsigned int j = 0; j < L_x; ++j) {
        total += PartialHisto[bin + j * numBins];
    }
    d_Histo[bin] = total;
}
/* Host driver for the two-phase shared-atomics histogram: allocates one
 * numBins-wide partial histogram per phase-1 block, launches phase 1
 * (per-block shared histograms) and phase 2 (per-bin accumulation), then
 * frees the scratch. blockSize sizes phase 1; blockSize2 sizes phase 2. */
void histogram_shared_atomics_kernel( const unsigned int* const d_vals, unsigned int *d_Histo,
const unsigned int numBins, const unsigned int numElems,
const unsigned int blockSize, const unsigned int blockSize2) {
unsigned int gridSize = (numElems + blockSize - 1)/blockSize ; // ceil-div
unsigned int * d_PartialHisto ; // Partial Histograms, of size numBins * gridSize
checkCudaErrors( cudaMalloc( &d_PartialHisto, sizeof(unsigned int) * numBins * gridSize ) ) ;
checkCudaErrors( cudaMemset( d_PartialHisto, 0, sizeof(unsigned int) * numBins * gridSize));
histogram_smem_atomics<<<gridSize,blockSize, numBins * sizeof(unsigned int)>>>(d_vals, d_PartialHisto, numBins, numElems) ;
histogram_final_accum<<<numBins, blockSize2>>>(d_PartialHisto, d_Histo, gridSize, numBins) ;
checkCudaErrors( cudaFree( d_PartialHisto) );
}
/** *******************************************************************/
/** END OF GPU shared atomics fast histogram
* ********************************************************************/
////////////////////////////////////////////////////////////////////////
/* Entry point: histograms numElems pre-binned values into d_histo[numBins].
 * The single-kernel shared-memory variant is the one launched below; the
 * global-atomics and two-phase shared-atomics variants are retained
 * (commented out) together with their measured timings. */
void computeHistogram(const unsigned int* const d_vals, //INPUT
unsigned int* const d_histo, //OUTPUT
const unsigned int numBins,
const unsigned int numElems)
{
/**
* *********** block, grid dimensions *************
* */
// histogram global
// const unsigned int BLOCK_SIZE = 256;
// const unsigned int GRID_SIZE = (numElems + BLOCK_SIZE - 1)/ BLOCK_SIZE ;
// histogram shared memory
const unsigned int blocksize_s = 1024; // numBins = 1024
const unsigned int gridsize_s = (numElems + blocksize_s - 1)/blocksize_s ;
// histogram, shared atomics (used only by the commented-out variant below)
// 1st. phase - input values from global memory into shared memory local histograms
// notice that gridsize1 will be necessary for size of PartialHisto
const unsigned int blocksize1 = 1024; // it is correct for blocksize1 = 256, block_accum = 64; or 512, 64, resp.; or 1024, 1024, resp.
// 2nd. phase - merge, concatenate, accumulate - block, grid dimensions
const unsigned int block_accum = 1024 ; // it is correct for block_accum = 64, blocksize1 = 256, or 64, 512, resp.; or 1024, 1024, resp.
/** END of block, grid dimensions *****************/
// END of block, grid dimensions
/**
* global version of histogram
* on GeForce GTX 980 Ti, 4.338208 msecs 4.589024 msecs 4.319296 msec.
*
yourHisto<<<GRID_SIZE, BLOCK_SIZE>>>(d_vals,d_histo,numElems);
*/
/**
* shared memory version of histogram
* on GeForce GTX 980 Ti, 0.569824 msecs 0.570016 msecs 0.570144 msec. for blocksize_s = 1024
* */
yourHisto<<<gridsize_s, blocksize_s, numBins * sizeof(unsigned int)>>>(d_vals,d_histo,numBins,numElems);
/**
* on GeForce GTX 980 Ti, 1.905600 msecs 1.879360 msecs 1.914016 msec.
*
* ********************************************************************
* shared atomics version of histogram
* ********************************************************************/
/*
histogram_shared_atomics_kernel( d_vals, d_histo, numBins, numElems,
blocksize1, block_accum) ;
*/
}
|
20fbff2e07b3667ac35dec5f03bb10b3a0f8e1c4.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by joe on 4/5/16.
//
#include"../CSR.h"
#include"cuHelper.h"
#include<hipsparse.h>
/* Copies a CSR matrix (dimensions plus ptr/indx/val arrays) in the direction
 * given by dir. ptr is copied with n+1 entries, so n appears to be the
 * row-pointer dimension. memCopy is handed &dst->ptr etc., so it presumably
 * allocates the destination buffers itself -- TODO confirm memCopy's contract. */
extern "C" void csr_memCpy(csr *src, csr *dst, enum DeviceCopyDIR dir) {
dst->m = src->m;
dst->n = src->n;
dst->nnz = src->nnz;
memCopy((void **) &(dst->ptr), (void *) src->ptr, sizeof(int) * (dst->n + 1), dir);
memCopy((void **) &(dst->indx), (void *) src->indx, sizeof(int) * (src->nnz), dir);
memCopy((void **) &(dst->val), (void *) src->val, sizeof(elem_t) * (src->nnz), dir);
}
/* Sparse matrix-vector product via hipSPARSE: r = 1*A*v + 1*r.
 * NOTE(review): both alpha and beta point at `unit` (== 1), so the product
 * ACCUMULATES into r; the caller must initialize r.
 * NOTE(review): *Scsrmv is the single-precision entry point -- if elem_t is
 * double this is a type mismatch; verify elem_t's definition.
 * NOTE(review): rows/cols are passed as m->n and m->m respectively, and this
 * call's status is not checked, unlike the surrounding setup calls. */
extern "C" void csr_CUDA_SpMV(csr *m, vector *v, vector *r) {
hipsparseMatDescr_t descr = 0;
hipsparseHandle_t handle = 0;
cuSparseCheck(hipsparseCreateMatDescr(&descr));
cuSparseCheck(hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL));
cuSparseCheck(hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO));
cuSparseCheck(hipsparseCreate(&handle));
elem_t unit = 1; // serves as both alpha and beta below
hipsparseScsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, m->n, m->m, m->nnz, &unit, descr, m->val, m->ptr,
m->indx, v->val, &unit, r->val);
cuSparseCheck(hipsparseDestroy(handle));
cuSparseCheck(hipsparseDestroyMatDescr(descr));
}
/* Frees the device-side val/indx/ptr buffers of a csr matrix handed over as
 * an opaque pointer (the void* signature keeps it usable as a generic
 * cleanup callback). */
extern "C" void csr_CUDA_destroy(void *c) {
    csr *matrix = (csr *) c;
    safeCudaFree(matrix->ptr);
    safeCudaFree(matrix->indx);
    safeCudaFree(matrix->val);
}
| 20fbff2e07b3667ac35dec5f03bb10b3a0f8e1c4.cu | //
// Created by joe on 4/5/16.
//
#include"../CSR.h"
#include"cuHelper.h"
#include<cusparse.h>
extern "C" void csr_memCpy(csr *src, csr *dst, enum DeviceCopyDIR dir) {
    /* Copy a CSR matrix between host and device; `dir` selects the copy
     * direction. Scalar fields are assigned directly; the three backing
     * arrays go through the memCopy helper. */
    const int rowPtrCount = src->n + 1; /* ptr holds n+1 offsets in this codebase */
    const int nnzCount = src->nnz;
    dst->m = src->m;
    dst->n = src->n;
    dst->nnz = nnzCount;
    memCopy((void **) &(dst->ptr), (void *) src->ptr, rowPtrCount * sizeof(int), dir);
    memCopy((void **) &(dst->indx), (void *) src->indx, nnzCount * sizeof(int), dir);
    memCopy((void **) &(dst->val), (void *) src->val, nnzCount * sizeof(elem_t), dir);
}
/*
 * Sparse matrix-vector product via cuSPARSE: r = 1 * m * v + 1 * r.
 * Note beta == 1, so the result ACCUMULATES into r; callers must zero r
 * first if they want a plain product.
 */
extern "C" void csr_CUDA_SpMV(csr *m, vector *v, vector *r) {
    cusparseMatDescr_t descr = 0;
    cusparseHandle_t handle = 0;
    cuSparseCheck(cusparseCreateMatDescr(&descr));
    cuSparseCheck(cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL));
    cuSparseCheck(cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO));
    cuSparseCheck(cusparseCreate(&handle));
    elem_t unit = 1;
    /* Fix: the SpMV call itself was the only cuSPARSE call whose status was
     * not checked; wrap it like every sibling call so failures surface. */
    cuSparseCheck(cusparseScsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, m->n, m->m, m->nnz, &unit, descr,
                                 m->val, m->ptr, m->indx, v->val, &unit, r->val));
    cuSparseCheck(cusparseDestroy(handle));
    cuSparseCheck(cusparseDestroyMatDescr(descr));
}
/* Release the device-side arrays of a csr matrix handed in as void*. */
extern "C" void csr_CUDA_destroy(void *c) {
    csr *matrix = (csr *) c;
    safeCudaFree(matrix->val);
    safeCudaFree(matrix->indx);
    safeCudaFree(matrix->ptr);
}
|
c0a5daaf43e9e320d4575575a1aeb519fda18105.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Fills a dimx x dimy row-major array so that every element holds the
// linear (row-major) index of the block that wrote it.
__global__ void kernel2( int *a, int dimx, int dimy )
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= dimx || row >= dimy)
    return;  // guard the partial tiles at the right/bottom edges
  a[row * dimx + col] = blockIdx.y * gridDim.x + blockIdx.x;
}
// Fills a dimx x dimy row-major array so that every element holds the
// linear (row-major) index of the block that wrote it.
__global__ void kernel2( int *a, int dimx, int dimy )
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= dimx || row >= dimy)
    return;  // guard the partial tiles at the right/bottom edges
  a[row * dimx + col] = blockIdx.y * gridDim.x + blockIdx.x;
}
848dfb2d9adf6828b488e96c4dec5b20e710412d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/sigmoid_cross_entropy_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Per-element sigmoid cross-entropy term. Writes one value per input into
// loss[] and a 0/1 validity flag into counts[] (0 when the label matches
// ignore_label_). Each written term equals t*x - log(1 + exp(x)), which is
// non-positive for targets in [0, 1]; the caller sums absolute values, so
// the sign folds into a positive loss.
template <typename Dtype>
__global__ void SigmoidCrossEntropyLossForwardGPU(const int nthreads,
    const Dtype* input_data, const Dtype* target, Dtype* loss,
    const bool has_ignore_label_, const int ignore_label_,
    Dtype* counts) {
  CUDA_KERNEL_LOOP(i, nthreads) {
    const int target_value = static_cast<int>(target[i]);
    if (has_ignore_label_ && target_value == ignore_label_) {
      // Ignored element: contributes neither loss nor to the valid count.
      loss[i] = 0;
      counts[i] = 0;
    } else {
      // Numerically stable rearrangement of t*x - log(1 + exp(x)):
      // the (input_data[i] >= 0) indicator flips the sign of the exp()
      // argument so exp() never sees a large positive value.
      loss[i] = input_data[i] * (target[i] - (input_data[i] >= 0)) -
          log(1 + exp(input_data[i] - 2 * input_data[i] *
          (input_data[i] >= 0)));
      counts[i] = 1;
    }
  }
}
// Zeroes the gradient entries whose label equals ignore_label so that
// ignored targets contribute nothing to backpropagation.
template <typename Dtype>
__global__ void SigmoidCrossEntropyLossIgnoreDiffGPU(const int count,
    const int ignore_label, const Dtype* target, Dtype* diff) {
  CUDA_KERNEL_LOOP(index, count) {
    if (static_cast<int>(target[index]) == ignore_label) {
      diff[index] = 0;
    }
  }
}
// Forward pass: runs the internal sigmoid layer, then computes the
// normalized sigmoid cross-entropy loss on the GPU. Per-element terms are
// staged in the bottom blobs' diff buffers (safe: those diffs are fully
// overwritten in Backward) and reduced with asum.
template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // The forward pass computes the sigmoid outputs.
  sigmoid_bottom_vec_[0] = bottom[0];
  sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
  // Compute the loss (negative log likelihood)
  const int count = bottom[0]->count();
  // Stable version of loss computation from input data
  const Dtype* input_data = bottom[0]->gpu_data();
  const Dtype* target = bottom[1]->gpu_data();
  // Since this memory is not used for anything until it is overwritten
  // on the backward pass, we use it here to avoid having to allocate new GPU
  // memory to accumulate intermediate results in the kernel.
  Dtype* loss_data = bottom[0]->mutable_gpu_diff();
  Dtype* count_data = bottom[1]->mutable_gpu_diff();
  Dtype valid_count;
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( SigmoidCrossEntropyLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, input_data, target, loss_data,
      has_ignore_label_, ignore_label_, count_data);
  // Only launch another CUDA kernel if we actually need the valid count.
  if (normalization_ == LossParameter_NormalizationMode_VALID &&
      has_ignore_label_) {
    caffe_gpu_asum(count, count_data, &valid_count);
  } else {
    valid_count = count;
  }
  Dtype loss;
  // asum: per-element terms are non-positive, so the absolute sum yields the
  // positive total loss.
  caffe_gpu_asum(count, loss_data, &loss);
  normalizer_ = get_normalizer(normalization_, valid_count);
  top[0]->mutable_cpu_data()[0] = loss / normalizer_;
}
// Backward pass: gradient of the sigmoid cross-entropy loss w.r.t. the
// logits is simply sigmoid(x) - target, scaled by top diff / normalizer.
// Propagation to the label input (bottom[1]) is not supported.
template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Backward_gpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    // First, compute the diff: bottom_diff = sigmoid(x) - target
    const int count = bottom[0]->count();
    const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data();
    const Dtype* target = bottom[1]->gpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    caffe_copy(count, sigmoid_output_data, bottom_diff);
    caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff);
    // Zero out gradient of ignored targets.
    if (has_ignore_label_) {
      // NOLINT_NEXT_LINE(whitespace/operators)
      hipLaunchKernelGGL(( SigmoidCrossEntropyLossIgnoreDiffGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
          dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, ignore_label_, target, bottom_diff);
    }
    // Scale down gradient by the chain-rule factor from the loss output.
    Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer_;
    caffe_gpu_scal(count, loss_weight, bottom_diff);
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(SigmoidCrossEntropyLossLayer);
} // namespace caffe
| 848dfb2d9adf6828b488e96c4dec5b20e710412d.cu | #include <vector>
#include "caffe/layers/sigmoid_cross_entropy_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Per-element sigmoid cross-entropy term. Writes one value per input into
// loss[] and a 0/1 validity flag into counts[] (0 when the label matches
// ignore_label_). Each written term equals t*x - log(1 + exp(x)), which is
// non-positive for targets in [0, 1]; the caller sums absolute values, so
// the sign folds into a positive loss.
template <typename Dtype>
__global__ void SigmoidCrossEntropyLossForwardGPU(const int nthreads,
    const Dtype* input_data, const Dtype* target, Dtype* loss,
    const bool has_ignore_label_, const int ignore_label_,
    Dtype* counts) {
  CUDA_KERNEL_LOOP(i, nthreads) {
    const int target_value = static_cast<int>(target[i]);
    if (has_ignore_label_ && target_value == ignore_label_) {
      // Ignored element: contributes neither loss nor to the valid count.
      loss[i] = 0;
      counts[i] = 0;
    } else {
      // Numerically stable rearrangement of t*x - log(1 + exp(x)):
      // the (input_data[i] >= 0) indicator flips the sign of the exp()
      // argument so exp() never sees a large positive value.
      loss[i] = input_data[i] * (target[i] - (input_data[i] >= 0)) -
          log(1 + exp(input_data[i] - 2 * input_data[i] *
          (input_data[i] >= 0)));
      counts[i] = 1;
    }
  }
}
// Zeroes the gradient entries whose label equals ignore_label so that
// ignored targets contribute nothing to backpropagation.
template <typename Dtype>
__global__ void SigmoidCrossEntropyLossIgnoreDiffGPU(const int count,
    const int ignore_label, const Dtype* target, Dtype* diff) {
  CUDA_KERNEL_LOOP(index, count) {
    if (static_cast<int>(target[index]) == ignore_label) {
      diff[index] = 0;
    }
  }
}
// Forward pass: runs the internal sigmoid layer, then computes the
// normalized sigmoid cross-entropy loss on the GPU. Per-element terms are
// staged in the bottom blobs' diff buffers (safe: those diffs are fully
// overwritten in Backward) and reduced with asum.
template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // The forward pass computes the sigmoid outputs.
  sigmoid_bottom_vec_[0] = bottom[0];
  sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
  // Compute the loss (negative log likelihood)
  const int count = bottom[0]->count();
  // Stable version of loss computation from input data
  const Dtype* input_data = bottom[0]->gpu_data();
  const Dtype* target = bottom[1]->gpu_data();
  // Since this memory is not used for anything until it is overwritten
  // on the backward pass, we use it here to avoid having to allocate new GPU
  // memory to accumulate intermediate results in the kernel.
  Dtype* loss_data = bottom[0]->mutable_gpu_diff();
  Dtype* count_data = bottom[1]->mutable_gpu_diff();
  Dtype valid_count;
  // NOLINT_NEXT_LINE(whitespace/operators)
  SigmoidCrossEntropyLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(count),
      CAFFE_CUDA_NUM_THREADS>>>(count, input_data, target, loss_data,
      has_ignore_label_, ignore_label_, count_data);
  // Only launch another CUDA kernel if we actually need the valid count.
  if (normalization_ == LossParameter_NormalizationMode_VALID &&
      has_ignore_label_) {
    caffe_gpu_asum(count, count_data, &valid_count);
  } else {
    valid_count = count;
  }
  Dtype loss;
  // asum: per-element terms are non-positive, so the absolute sum yields the
  // positive total loss.
  caffe_gpu_asum(count, loss_data, &loss);
  normalizer_ = get_normalizer(normalization_, valid_count);
  top[0]->mutable_cpu_data()[0] = loss / normalizer_;
}
// Backward pass: gradient of the sigmoid cross-entropy loss w.r.t. the
// logits is simply sigmoid(x) - target, scaled by top diff / normalizer.
// Propagation to the label input (bottom[1]) is not supported.
template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Backward_gpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    // First, compute the diff: bottom_diff = sigmoid(x) - target
    const int count = bottom[0]->count();
    const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data();
    const Dtype* target = bottom[1]->gpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    caffe_copy(count, sigmoid_output_data, bottom_diff);
    caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff);
    // Zero out gradient of ignored targets.
    if (has_ignore_label_) {
      // NOLINT_NEXT_LINE(whitespace/operators)
      SigmoidCrossEntropyLossIgnoreDiffGPU<Dtype><<<CAFFE_GET_BLOCKS(count),
          CAFFE_CUDA_NUM_THREADS>>>(count, ignore_label_, target, bottom_diff);
    }
    // Scale down gradient by the chain-rule factor from the loss output.
    Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer_;
    caffe_gpu_scal(count, loss_weight, bottom_diff);
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(SigmoidCrossEntropyLossLayer);
} // namespace caffe
|
7451f60984f68247b78caa6a5726118617cd97e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/pack_segments.h"
namespace caffe2 {
namespace {
// Scatters a ragged (sum(lengths) x cell_size) input into a dense
// (num_seq x max_length x cell_size) output; positions past a sequence's
// length are filled with `padding`. lengths_cum_sum is the exclusive prefix
// sum of lengths_ptr.
template <typename T, typename Data_T>
__global__ void PackSegmentsKernel(
    const Data_T* data_ptr,
    const T* lengths_ptr,
    const T* lengths_cum_sum,
    const T max_length,
    const int64_t num_seq,
    const int64_t cell_size,
    Data_T padding,
    Data_T* out_ptr) {
  CUDA_1D_KERNEL_LOOP(i, num_seq * max_length * cell_size) {
    int seq = (i / cell_size) / max_length;
    int cell = (i / cell_size) % max_length;
    int offset = i % cell_size;
    if (cell >= lengths_ptr[seq]) {
      out_ptr[i] = padding;
    } else {
      // Fix: keep the source index in 64 bits. The RHS is evaluated in
      // int64_t (cell_size is int64_t); the old int32_t destination
      // truncated it and read the wrong element for tensors > 2^31 cells.
      const int64_t idx = (lengths_cum_sum[seq] + cell) * cell_size + offset;
      out_ptr[i] = data_ptr[idx];
    }
  }
}
// Inverse of PackSegmentsKernel: gathers the valid cells of a dense
// (num_seq x max_length x cell_size) input back into a ragged
// (sum(lengths) x cell_size) output; padded cells are skipped.
template <typename T, typename Data_T>
__global__ void UnpackSegmentsKernel(
    const Data_T* data_ptr,
    const T* lengths_ptr,
    const T* lengths_cum_sum,
    const T max_length,
    const int64_t num_seq,
    const int64_t cell_size,
    Data_T* out_ptr) {
  CUDA_1D_KERNEL_LOOP(i, num_seq * max_length * cell_size) {
    int seq = (i / cell_size) / max_length;
    int cell = (i / cell_size) % max_length;
    int offset = i % cell_size;
    if (cell < lengths_ptr[seq]) {
      // Fix: keep the destination index in 64 bits. The RHS is evaluated in
      // int64_t (cell_size is int64_t); the old int destination truncated it
      // and wrote the wrong element for tensors > 2^31 cells.
      const int64_t idx = (lengths_cum_sum[seq] + cell) * cell_size + offset;
      out_ptr[idx] = data_ptr[i];
    }
  }
}
// Device-wide sum of num_items elements of dev_array, returned on the host.
// dev_buffer supplies CUB scratch space; dev_sum/host_sum are one-element
// staging tensors. Blocking: synchronizes the context before returning.
template <typename T>
int64_t int_array_sum(
    const T* dev_array,
    int64_t num_items,
    Tensor& dev_buffer,
    Tensor& dev_sum,
    Tensor& host_sum,
    CUDAContext& context) {
  // Retrieve buffer size (CUB convention: a nullptr scratch pointer makes
  // the call only report the scratch bytes needed)
  size_t temp_storage_bytes = 0;
  hipcub::DeviceReduce::Sum(
      nullptr,
      temp_storage_bytes,
      dev_array,
      dev_sum.mutable_data<int64_t>(),
      num_items,
      context.cuda_stream());
  // Allocate temporary storage (rounded up to whole T elements)
  auto buffer_size = (temp_storage_bytes + sizeof(T)) / sizeof(T);
  dev_buffer.Resize(buffer_size);
  void* dev_temp_storage = static_cast<void*>(dev_buffer.mutable_data<T>());
  // Compute the sum
  hipcub::DeviceReduce::Sum(
      dev_temp_storage,
      temp_storage_bytes,
      dev_array,
      dev_sum.mutable_data<int64_t>(),
      num_items,
      context.cuda_stream());
  // Copy to host
  host_sum.CopyFrom(dev_sum);
  context.FinishDeviceComputation();
  return *host_sum.data<int64_t>();
}
// Device-wide maximum of num_items elements of dev_array, returned on the
// host. dev_max_buffer supplies CUB scratch space; dev_max/host_max are
// one-element staging tensors. Blocking: synchronizes the context.
// NOTE(review): callers must ensure num_items > 0 — CUB's Max over an empty
// range leaves the output undefined.
template <typename T>
T array_max(
    const T* dev_array,
    int64_t num_items,
    Tensor& dev_max_buffer,
    Tensor& dev_max,
    Tensor& host_max,
    CUDAContext& context) {
  // Retrieve buffer size (CUB convention: nullptr scratch => size query only)
  size_t temp_storage_bytes = 0;
  hipcub::DeviceReduce::Max(
      nullptr,
      temp_storage_bytes,
      dev_array,
      dev_max.mutable_data<T>(),
      num_items,
      context.cuda_stream());
  // Allocate temporary storage (rounded up to whole T elements)
  auto buffer_size = (temp_storage_bytes + sizeof(T)) / sizeof(T);
  dev_max_buffer.Resize(buffer_size);
  void* dev_temp_storage = static_cast<void*>(dev_max_buffer.mutable_data<T>());
  // Find maximum
  hipcub::DeviceReduce::Max(
      dev_temp_storage,
      temp_storage_bytes,
      dev_array,
      dev_max.mutable_data<T>(),
      num_items,
      context.cuda_stream());
  // Copy to host
  host_max.CopyFrom(dev_max)
;
  context.FinishDeviceComputation();
  return *host_max.data<T>();
}
// Exclusive prefix sum of num_items elements of dev_array into prefix_sum
// (resized here). prefix_buffer supplies CUB scratch space. Asynchronous on
// the context's stream — no host synchronization is performed.
template <typename T>
void array_prefix_sum_exclusive(
    const T* dev_array,
    const int32_t num_items,
    Tensor& prefix_buffer,
    Tensor& prefix_sum,
    CUDAContext& context) {
  // Retrieve buffer size (CUB convention: nullptr scratch => size query only)
  size_t temp_storage_bytes = 0;
  prefix_sum.Resize(num_items);
  hipcub::DeviceScan::ExclusiveSum(
      nullptr,
      temp_storage_bytes,
      dev_array,
      prefix_sum.mutable_data<T>(),
      num_items,
      context.cuda_stream());
  // Allocate temporary storage (rounded up to whole T elements)
  auto buffer_size = (temp_storage_bytes + sizeof(T)) / sizeof(T);
  prefix_buffer.Resize(buffer_size);
  void* dev_temp_storage = static_cast<void*>(prefix_buffer.mutable_data<T>());
  // Exclusive sum
  hipcub::DeviceScan::ExclusiveSum(
      dev_temp_storage,
      temp_storage_bytes,
      dev_array,
      prefix_sum.mutable_data<T>(),
      num_items,
      context.cuda_stream());
}
} // namespace
// First-level dispatch: T is the LENGTHS element type; forward to
// DoRunWithType2 with the DATA element type resolved from the listed set.
template <>
template <typename T>
bool PackSegmentsOp<CUDAContext>::DoRunWithType() {
  return DispatchHelper<TensorTypes2<char, int32_t, int64_t, float>, T>::call(
      this, Input(DATA));
}
// Pack a ragged batch: DATA is (sum(lengths), d1, ...) and LENGTHS gives the
// per-sequence lengths; output is (num_seq, max_length, d1, ...) with the
// tail of each sequence padded. T = lengths type, Data_T = payload type.
template <>
template <typename T, typename Data_T>
bool PackSegmentsOp<CUDAContext>::DoRunWithType2() {
  const auto& data = Input(DATA);
  const auto& lengths = Input(LENGTHS);
  int64_t num_seq = lengths.dim(0);
  const Data_T* data_ptr = data.data<Data_T>();
  const T* lengths_ptr = lengths.data<T>();
  auto* out = Output(0);
  if (return_presence_mask_) {
    CAFFE_THROW("CUDA version of PackSegments does not support presence mask.");
  }
  CAFFE_ENFORCE_GE(data.ndim(), 1, "DATA should be at least 1-D");
  CAFFE_ENFORCE_EQ(lengths.ndim(), 1, "LENGTH should be 1-D");
  // Find the length of the longest sequence. The num_seq > 0 guard avoids
  // reducing an empty array.
  dev_max_length_.Resize(1);
  host_max_length_.Resize(1);
  T temp = num_seq > 0 ? array_max<T>(
                             lengths_ptr,
                             num_seq,
                             dev_buffer_,
                             dev_max_length_,
                             host_max_length_,
                             context_)
                       : 0;
  if (max_length_ != -1) {
    // A user-specified max_length overrides the observed one, but must be
    // large enough to hold every sequence.
    CAFFE_ENFORCE_GE(
        max_length_,
        temp,
        "Pre-defined max_length should be greater than the real max_length");
    temp = max_length_;
  }
  const T& max_length = temp;
  // Compute prefix sum over the lengths (exclusive): start offset of each
  // sequence in the ragged input.
  array_prefix_sum_exclusive<T>(
      lengths_ptr, num_seq, dev_buffer_, dev_lengths_prefix_sum_, context_);
  // create output tensor
  auto shape = data.dims().vec(); // Shape of out is batch_size x max_len x ...
  shape[0] = max_length;
  shape.insert(shape.begin(), lengths.size());
  out->Resize(shape);
  Data_T* out_ptr = static_cast<Data_T*>(out->raw_mutable_data(data.meta()));
  // Return empty out (with the proper shape) if first dim is 0.
  if (!data.dim(0)) {
    return true;
  }
  // Do padding: the configured padding value is only honored for float
  // outputs; other types pad with 0.
  Data_T padding = out->IsType<float>() ? padding_ : 0;
  int64_t cell_size = data.size() / data.dim(0);
  hipLaunchKernelGGL(( PackSegmentsKernel),
      dim3(CAFFE_GET_BLOCKS(num_seq * max_length * cell_size)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context_.cuda_stream(),
      data_ptr,
      lengths_ptr,
      dev_lengths_prefix_sum_.data<T>(),
      max_length,
      num_seq,
      cell_size,
      padding,
      out_ptr);
  return true;
}
// First-level dispatch: T is the LENGTHS element type; forward to
// DoRunWithType2 with the DATA element type resolved from the listed set.
template <>
template <typename T>
bool UnpackSegmentsOp<CUDAContext>::DoRunWithType() {
  return DispatchHelper<TensorTypes2<char, int32_t, int64_t, float>, T>::call(
      this, Input(DATA));
}
// Unpack a padded batch: DATA is (num_seq, max_length, d1, ...) and LENGTHS
// gives the valid prefix of each sequence; output is the ragged
// (sum(lengths), d1, ...) tensor. T = lengths type, Data_T = payload type.
template <>
template <typename T, typename Data_T>
bool UnpackSegmentsOp<CUDAContext>::DoRunWithType2() {
  const auto& data = Input(DATA);
  const auto& lengths = Input(LENGTHS);
  int64_t num_seq = lengths.dim(0);
  const Data_T* data_ptr = data.data<Data_T>();
  const T* lengths_ptr = lengths.data<T>();
  auto* out = Output(0);
  CAFFE_ENFORCE_GE(data.ndim(), 1, "DATA should be at least 1-D");
  CAFFE_ENFORCE_EQ(lengths.ndim(), 1, "LENGTH should be 1-D");
  if (max_length_ != -1) {
    CAFFE_ENFORCE_EQ(
        max_length_,
        data.dim(1),
        "max_length should be equal to the packed segments");
  }
  // Compute prefix sum over the lengths (exclusive): start offset of each
  // sequence in the ragged output.
  array_prefix_sum_exclusive<T>(
      lengths_ptr, num_seq, dev_buffer_, dev_lengths_prefix_sum_, context_);
  // compute max of the lengths (guarded for an empty batch)
  dev_max_length_.Resize(1);
  host_max_length_.Resize(1);
  const T max_length = num_seq > 0 ? array_max<T>(
                                         lengths_ptr,
                                         num_seq,
                                         dev_buffer_,
                                         dev_max_length_,
                                         host_max_length_,
                                         context_)
                                   : 0;
  // compute num of cells: sum of the lengths (= first dim of the output)
  dev_num_cell_.Resize(1);
  host_num_cell_.Resize(1);
  const int64_t num_cell = int_array_sum<T>(
      lengths_ptr,
      num_seq,
      dev_buffer_,
      dev_num_cell_,
      host_num_cell_,
      context_);
  // create output tensor
  auto shape = data.dims().vec();
  CAFFE_ENFORCE_EQ(
      shape[0], lengths.dim(0), "LENGTH should match DATA in dimension 0");
  shape.erase(shape.begin());
  shape[0] = num_cell;
  out->Resize(shape);
  Data_T* out_ptr = static_cast<Data_T*>(out->raw_mutable_data(data.meta()));
  // Return empty out (with the proper shape) if any of the dimensions is 0.
  if (data.dim(0) == 0 || data.dim(1) == 0) {
    return true;
  }
  // Unpack
  int64_t cell_size = data.size() / (data.dim(0) * data.dim(1));
  hipLaunchKernelGGL(( UnpackSegmentsKernel),
      dim3(CAFFE_GET_BLOCKS(num_seq * max_length * cell_size)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context_.cuda_stream(),
      data_ptr,
      lengths_ptr,
      dev_lengths_prefix_sum_.data<T>(),
      max_length,
      num_seq,
      cell_size,
      out_ptr);
  return true;
}
REGISTER_CUDA_OPERATOR(UnpackSegments, UnpackSegmentsOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(PackSegments, PackSegmentsOp<CUDAContext>);
} // namespace caffe2
| 7451f60984f68247b78caa6a5726118617cd97e6.cu | #include <cub/cub.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/pack_segments.h"
namespace caffe2 {
namespace {
// Scatters a ragged (sum(lengths) x cell_size) input into a dense
// (num_seq x max_length x cell_size) output; positions past a sequence's
// length are filled with `padding`. lengths_cum_sum is the exclusive prefix
// sum of lengths_ptr.
template <typename T, typename Data_T>
__global__ void PackSegmentsKernel(
    const Data_T* data_ptr,
    const T* lengths_ptr,
    const T* lengths_cum_sum,
    const T max_length,
    const int64_t num_seq,
    const int64_t cell_size,
    Data_T padding,
    Data_T* out_ptr) {
  CUDA_1D_KERNEL_LOOP(i, num_seq * max_length * cell_size) {
    int seq = (i / cell_size) / max_length;
    int cell = (i / cell_size) % max_length;
    int offset = i % cell_size;
    if (cell >= lengths_ptr[seq]) {
      out_ptr[i] = padding;
    } else {
      // Fix: keep the source index in 64 bits. The RHS is evaluated in
      // int64_t (cell_size is int64_t); the old int32_t destination
      // truncated it and read the wrong element for tensors > 2^31 cells.
      const int64_t idx = (lengths_cum_sum[seq] + cell) * cell_size + offset;
      out_ptr[i] = data_ptr[idx];
    }
  }
}
// Inverse of PackSegmentsKernel: gathers the valid cells of a dense
// (num_seq x max_length x cell_size) input back into a ragged
// (sum(lengths) x cell_size) output; padded cells are skipped.
template <typename T, typename Data_T>
__global__ void UnpackSegmentsKernel(
    const Data_T* data_ptr,
    const T* lengths_ptr,
    const T* lengths_cum_sum,
    const T max_length,
    const int64_t num_seq,
    const int64_t cell_size,
    Data_T* out_ptr) {
  CUDA_1D_KERNEL_LOOP(i, num_seq * max_length * cell_size) {
    int seq = (i / cell_size) / max_length;
    int cell = (i / cell_size) % max_length;
    int offset = i % cell_size;
    if (cell < lengths_ptr[seq]) {
      // Fix: keep the destination index in 64 bits. The RHS is evaluated in
      // int64_t (cell_size is int64_t); the old int destination truncated it
      // and wrote the wrong element for tensors > 2^31 cells.
      const int64_t idx = (lengths_cum_sum[seq] + cell) * cell_size + offset;
      out_ptr[idx] = data_ptr[i];
    }
  }
}
// Device-wide sum of num_items elements of dev_array, returned on the host.
// dev_buffer supplies CUB scratch space; dev_sum/host_sum are one-element
// staging tensors. Blocking: synchronizes the context before returning.
template <typename T>
int64_t int_array_sum(
    const T* dev_array,
    int64_t num_items,
    Tensor& dev_buffer,
    Tensor& dev_sum,
    Tensor& host_sum,
    CUDAContext& context) {
  // Retrieve buffer size (CUB convention: a nullptr scratch pointer makes
  // the call only report the scratch bytes needed)
  size_t temp_storage_bytes = 0;
  cub::DeviceReduce::Sum(
      nullptr,
      temp_storage_bytes,
      dev_array,
      dev_sum.mutable_data<int64_t>(),
      num_items,
      context.cuda_stream());
  // Allocate temporary storage (rounded up to whole T elements)
  auto buffer_size = (temp_storage_bytes + sizeof(T)) / sizeof(T);
  dev_buffer.Resize(buffer_size);
  void* dev_temp_storage = static_cast<void*>(dev_buffer.mutable_data<T>());
  // Compute the sum
  cub::DeviceReduce::Sum(
      dev_temp_storage,
      temp_storage_bytes,
      dev_array,
      dev_sum.mutable_data<int64_t>(),
      num_items,
      context.cuda_stream());
  // Copy to host
  host_sum.CopyFrom(dev_sum);
  context.FinishDeviceComputation();
  return *host_sum.data<int64_t>();
}
// Device-wide maximum of num_items elements of dev_array, returned on the
// host. dev_max_buffer supplies CUB scratch space; dev_max/host_max are
// one-element staging tensors. Blocking: synchronizes the context.
// NOTE(review): callers must ensure num_items > 0 — CUB's Max over an empty
// range leaves the output undefined.
template <typename T>
T array_max(
    const T* dev_array,
    int64_t num_items,
    Tensor& dev_max_buffer,
    Tensor& dev_max,
    Tensor& host_max,
    CUDAContext& context) {
  // Retrieve buffer size (CUB convention: nullptr scratch => size query only)
  size_t temp_storage_bytes = 0;
  cub::DeviceReduce::Max(
      nullptr,
      temp_storage_bytes,
      dev_array,
      dev_max.mutable_data<T>(),
      num_items,
      context.cuda_stream());
  // Allocate temporary storage (rounded up to whole T elements)
  auto buffer_size = (temp_storage_bytes + sizeof(T)) / sizeof(T);
  dev_max_buffer.Resize(buffer_size);
  void* dev_temp_storage = static_cast<void*>(dev_max_buffer.mutable_data<T>());
  // Find maximum
  cub::DeviceReduce::Max(
      dev_temp_storage,
      temp_storage_bytes,
      dev_array,
      dev_max.mutable_data<T>(),
      num_items,
      context.cuda_stream());
  // Copy to host
  host_max.CopyFrom(dev_max);
  context.FinishDeviceComputation();
  return *host_max.data<T>();
}
// Exclusive prefix sum of num_items elements of dev_array into prefix_sum
// (resized here). prefix_buffer supplies CUB scratch space. Asynchronous on
// the context's stream — no host synchronization is performed.
template <typename T>
void array_prefix_sum_exclusive(
    const T* dev_array,
    const int32_t num_items,
    Tensor& prefix_buffer,
    Tensor& prefix_sum,
    CUDAContext& context) {
  // Retrieve buffer size (CUB convention: nullptr scratch => size query only)
  size_t temp_storage_bytes = 0;
  prefix_sum.Resize(num_items);
  cub::DeviceScan::ExclusiveSum(
      nullptr,
      temp_storage_bytes,
      dev_array,
      prefix_sum.mutable_data<T>(),
      num_items,
      context.cuda_stream());
  // Allocate temporary storage (rounded up to whole T elements)
  auto buffer_size = (temp_storage_bytes + sizeof(T)) / sizeof(T);
  prefix_buffer.Resize(buffer_size);
  void* dev_temp_storage = static_cast<void*>(prefix_buffer.mutable_data<T>());
  // Exclusive sum
  cub::DeviceScan::ExclusiveSum(
      dev_temp_storage,
      temp_storage_bytes,
      dev_array,
      prefix_sum.mutable_data<T>(),
      num_items,
      context.cuda_stream());
}
} // namespace
// First-level dispatch: T is the LENGTHS element type; forward to
// DoRunWithType2 with the DATA element type resolved from the listed set.
template <>
template <typename T>
bool PackSegmentsOp<CUDAContext>::DoRunWithType() {
  return DispatchHelper<TensorTypes2<char, int32_t, int64_t, float>, T>::call(
      this, Input(DATA));
}
// Pack a ragged batch: DATA is (sum(lengths), d1, ...) and LENGTHS gives the
// per-sequence lengths; output is (num_seq, max_length, d1, ...) with the
// tail of each sequence padded. T = lengths type, Data_T = payload type.
template <>
template <typename T, typename Data_T>
bool PackSegmentsOp<CUDAContext>::DoRunWithType2() {
  const auto& data = Input(DATA);
  const auto& lengths = Input(LENGTHS);
  int64_t num_seq = lengths.dim(0);
  const Data_T* data_ptr = data.data<Data_T>();
  const T* lengths_ptr = lengths.data<T>();
  auto* out = Output(0);
  if (return_presence_mask_) {
    CAFFE_THROW("CUDA version of PackSegments does not support presence mask.");
  }
  CAFFE_ENFORCE_GE(data.ndim(), 1, "DATA should be at least 1-D");
  CAFFE_ENFORCE_EQ(lengths.ndim(), 1, "LENGTH should be 1-D");
  // Find the length of the longest sequence. The num_seq > 0 guard avoids
  // reducing an empty array.
  dev_max_length_.Resize(1);
  host_max_length_.Resize(1);
  T temp = num_seq > 0 ? array_max<T>(
                             lengths_ptr,
                             num_seq,
                             dev_buffer_,
                             dev_max_length_,
                             host_max_length_,
                             context_)
                       : 0;
  if (max_length_ != -1) {
    // A user-specified max_length overrides the observed one, but must be
    // large enough to hold every sequence.
    CAFFE_ENFORCE_GE(
        max_length_,
        temp,
        "Pre-defined max_length should be greater than the real max_length");
    temp = max_length_;
  }
  const T& max_length = temp;
  // Compute prefix sum over the lengths (exclusive): start offset of each
  // sequence in the ragged input.
  array_prefix_sum_exclusive<T>(
      lengths_ptr, num_seq, dev_buffer_, dev_lengths_prefix_sum_, context_);
  // create output tensor
  auto shape = data.dims().vec(); // Shape of out is batch_size x max_len x ...
  shape[0] = max_length;
  shape.insert(shape.begin(), lengths.size());
  out->Resize(shape);
  Data_T* out_ptr = static_cast<Data_T*>(out->raw_mutable_data(data.meta()));
  // Return empty out (with the proper shape) if first dim is 0.
  if (!data.dim(0)) {
    return true;
  }
  // Do padding: the configured padding value is only honored for float
  // outputs; other types pad with 0.
  Data_T padding = out->IsType<float>() ? padding_ : 0;
  int64_t cell_size = data.size() / data.dim(0);
  PackSegmentsKernel<<<
      CAFFE_GET_BLOCKS(num_seq * max_length * cell_size),
      CAFFE_CUDA_NUM_THREADS,
      0,
      context_.cuda_stream()>>>(
      data_ptr,
      lengths_ptr,
      dev_lengths_prefix_sum_.data<T>(),
      max_length,
      num_seq,
      cell_size,
      padding,
      out_ptr);
  return true;
}
// First-level dispatch: T is the LENGTHS element type; forward to
// DoRunWithType2 with the DATA element type resolved from the listed set.
template <>
template <typename T>
bool UnpackSegmentsOp<CUDAContext>::DoRunWithType() {
  return DispatchHelper<TensorTypes2<char, int32_t, int64_t, float>, T>::call(
      this, Input(DATA));
}
// Unpack a padded batch: DATA is (num_seq, max_length, d1, ...) and LENGTHS
// gives the valid prefix of each sequence; output is the ragged
// (sum(lengths), d1, ...) tensor. T = lengths type, Data_T = payload type.
template <>
template <typename T, typename Data_T>
bool UnpackSegmentsOp<CUDAContext>::DoRunWithType2() {
  const auto& data = Input(DATA);
  const auto& lengths = Input(LENGTHS);
  int64_t num_seq = lengths.dim(0);
  const Data_T* data_ptr = data.data<Data_T>();
  const T* lengths_ptr = lengths.data<T>();
  auto* out = Output(0);
  CAFFE_ENFORCE_GE(data.ndim(), 1, "DATA should be at least 1-D");
  CAFFE_ENFORCE_EQ(lengths.ndim(), 1, "LENGTH should be 1-D");
  if (max_length_ != -1) {
    CAFFE_ENFORCE_EQ(
        max_length_,
        data.dim(1),
        "max_length should be equal to the packed segments");
  }
  // Compute prefix sum over the lengths (exclusive): start offset of each
  // sequence in the ragged output.
  array_prefix_sum_exclusive<T>(
      lengths_ptr, num_seq, dev_buffer_, dev_lengths_prefix_sum_, context_);
  // compute max of the lengths (guarded for an empty batch)
  dev_max_length_.Resize(1);
  host_max_length_.Resize(1);
  const T max_length = num_seq > 0 ? array_max<T>(
                                         lengths_ptr,
                                         num_seq,
                                         dev_buffer_,
                                         dev_max_length_,
                                         host_max_length_,
                                         context_)
                                   : 0;
  // compute num of cells: sum of the lengths (= first dim of the output)
  dev_num_cell_.Resize(1);
  host_num_cell_.Resize(1);
  const int64_t num_cell = int_array_sum<T>(
      lengths_ptr,
      num_seq,
      dev_buffer_,
      dev_num_cell_,
      host_num_cell_,
      context_);
  // create output tensor
  auto shape = data.dims().vec();
  CAFFE_ENFORCE_EQ(
      shape[0], lengths.dim(0), "LENGTH should match DATA in dimension 0");
  shape.erase(shape.begin());
  shape[0] = num_cell;
  out->Resize(shape);
  Data_T* out_ptr = static_cast<Data_T*>(out->raw_mutable_data(data.meta()));
  // Return empty out (with the proper shape) if any of the dimensions is 0.
  if (data.dim(0) == 0 || data.dim(1) == 0) {
    return true;
  }
  // Unpack
  int64_t cell_size = data.size() / (data.dim(0) * data.dim(1));
  UnpackSegmentsKernel<<<
      CAFFE_GET_BLOCKS(num_seq * max_length * cell_size),
      CAFFE_CUDA_NUM_THREADS,
      0,
      context_.cuda_stream()>>>(
      data_ptr,
      lengths_ptr,
      dev_lengths_prefix_sum_.data<T>(),
      max_length,
      num_seq,
      cell_size,
      out_ptr);
  return true;
}
}
REGISTER_CUDA_OPERATOR(UnpackSegments, UnpackSegmentsOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(PackSegments, PackSegmentsOp<CUDAContext>);
} // namespace caffe2
|
077644c302197438b727980a4a86d60d73bb8558.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cutil_inline.h>
#include <nvmatrix.cuh>
#include <cudaconv2.cuh>
/*
* Block size B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * imgsPerThread images.
* threadIdx.x determines image
* threadIdx.y determines filter
*
* blockIdx.x determines image batch of B_X * imgsPerThread
* blockIdx.y determines filter batch of module and B_Y * filtersPerThread
*
* images: (numColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numColors, filterPixels, numFilters) if conv
* (numModules, numColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModulesY, numModulesX, numImages)
*
* B_Y one of 4, 8, 16
* B_X one of 16, 32
* imgsPerThread one of 1, 2, 4
* filtersPerThread one of 1, 2, 4, 8
*
* Number of filters per module should be divisible by B_Y * filtersPerThread
* checkImgBounds indicates whether number of images is divisible by B_X * imgsPerThread
*
* The imgSize here is the size of the actual image without the padding.
*
*/
// Shared-memory tiled convolution (cuda-convnet filterActs, few-color path).
// Each block computes B_Y*filtersPerThread filters for B_X*imgsPerThread
// images at one module (output position). Per iteration it stages B_Y filter
// pixels (all colors) and the matching B_Y image pixels in shared memory,
// then accumulates the dot products in registers. See the comment block
// above for the full layout contract.
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int numColors,
          bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_color(float* images, float* filters, float* targets,
                                   const int numImages, const int numFilters,
                                   const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
                                   const int moduleStride,
                                   const int numModulesY, const int numModulesX, const int imgStride,
                                   const float scaleTargets, const float scaleOutputs,
                                   const bool conv) {
    __shared__ float shFilters[B_Y*numColors][B_Y * filtersPerThread]; // pre-load B_Y pixels from B_Y*filtersPerThread filters
    __shared__ float shImages[B_Y*numColors][B_X * imgsPerThread]; // pre-load B_Y pixels from B_X*imgsPerThread images
    const int imgPixels = imgSizeY * imgSizeX;
    const int filterPixels = filterSize * filterSize;
    const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
    const int moduleIdx = blockIdx.y / blocksPerModule; // which position on the output image is it computing (2D position)
    const int blockFilterIdx = blockIdx.y % blocksPerModule; // indicating which filters (batch) the block is going to compute
    const int tidx = threadIdx.y * B_X + threadIdx.x; // 1D index of the thread in the block
    const int imgLoadModPosY = (moduleIdx / numModulesX) * moduleStride; // corresponding Y position on the original image plane regarding the module index
    const int imgLoadModPosX = (moduleIdx % numModulesX) * moduleStride; // corresponding X position on the original image plane regarding the module index
    const int shFilterLoadY = tidx / (B_Y * filtersPerThread); // which pixel(s) of the filter this thread is dealing with?
    const int shFilterLoadX = tidx % (B_Y * filtersPerThread); // which filter this thread is dealing with?
    const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; // which image are we computing (starting index; image alignment in a stride pattern, i.e. processing threadIdx.x, threadIdx.x+BX, threadIdx+2*BX, ... threadIdx+imgsPerThread*BX)
    images += myImgIdx; // pick that image
    filters += filtersPerThread * B_Y * blockFilterIdx
             + shFilterLoadY * numFilters + shFilterLoadX; // pick that filter
    if (!conv) {
        // Locally-connected (non-shared) filters: each module has its own set.
        filters += moduleIdx * numColors * filterPixels * numFilters;
    }
    targets += moduleIdx * numImages
            + (blockFilterIdx * B_Y * filtersPerThread + threadIdx.y) * numImages * numModulesY * numModulesX
            + myImgIdx;
    // Register accumulators: one partial sum per (filter, image) pair owned
    // by this thread.
    float prod[filtersPerThread][imgsPerThread];
    #pragma unroll
    for(int f = 0; f < filtersPerThread; f++) {
        #pragma unroll
        for(int g = 0; g < imgsPerThread; g++) {
            prod[f][g] = 0;
        }
    }
    // March over the filter in strips of B_Y pixels.
    for (int p = 0; p < filterPixels; p += B_Y) {
        /*
         * Load B_Y pixels from B_Y*filtersPerThread filters
         */
        if (shFilterLoadY < B_Y) {
            #pragma unroll
            for (int p2 = 0; p2 < B_Y; p2 += B_X/filtersPerThread) {
                if (p + p2 + shFilterLoadY < filterPixels) {
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = filters[(c * filterPixels + p + p2) * numFilters];
                    }
                } else {
                    // Tail strip beyond the last filter pixel: zero-fill.
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = 0;
                    }
                }
            }
        }
        /*
         * Load B_Y pixels from B_X*imgsPerThread images
         */
        const int pixIdx = p + threadIdx.y;
        if (pixIdx < filterPixels) {
            const int x = paddingStart + imgLoadModPosX + pixIdx % filterSize;
            const int y = paddingStart + imgLoadModPosY + pixIdx / filterSize;
            if (y >= 0 && y< imgSizeY && x >= 0 && x < imgSizeX) {
                #pragma unroll
                for (int i = 0; i < imgsPerThread; i++) {
                    if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
                        #pragma unroll
                        for (int c = 0; c < numColors; c++) {
                            shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = images[imgStride * (c * imgPixels + y * imgSizeX + x) + i * B_X];
                        }
                    } else {
                        #pragma unroll
                        for (int c = 0; c < numColors; c++) {
                            shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
                        }
                    }
                }
            } else { // Padding
                #pragma unroll
                for (int i = 0; i < imgsPerThread; i++) {
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
                    }
                }
            }
        }
        __syncthreads();
        #pragma unroll
        for (int i = 0; i < B_Y*numColors; i++) {
            #pragma unroll
            for(int f = 0; f < filtersPerThread; f++) {
                #pragma unroll
                for(int g = 0; g < imgsPerThread; g++) {
                    prod[f][g] += shImages[i][g * B_X + threadIdx.x] * shFilters[i][threadIdx.y + f * B_Y]; // time domain convolution computation, did not use fft! (shouldn't, when the convolution kernel is very small)
                }
            }
        }
        // Barrier before the next strip overwrites the shared tiles.
        __syncthreads();
    }
    if (scale) {
        // Blend into existing targets: t = scaleTargets*t + scaleOutputs*prod.
        #pragma unroll
        for (int g = 0; g < imgsPerThread; g++) {
            if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    targets[g * B_X + f * B_Y * numImages * numModulesY * numModulesX] = scaleTargets * targets[g * B_X + f * B_Y * numImages * numModulesY * numModulesX] + scaleOutputs * prod[f][g];
                }
            }
        }
    } else {
        // Overwrite targets with the scaled products.
        #pragma unroll
        for (int g = 0; g < imgsPerThread; g++) {
            if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    targets[g * B_X + f * B_Y * numImages * numModulesY * numModulesX] = scaleOutputs * prod[f][g];
                }
            }
        }
    }
}
/*
* Block size B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * imgsPerThread images.
* threadIdx.x determines image
* threadIdx.y determines filter
*
* blockIdx.x determines image batch of B_X * imgsPerThread
* blockIdx.y determines filter batch of B_Y * filtersPerThread
*
* images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModulesY, numModulesX, numImages)
*
* B_Y one of 4, 8, 16
* B_X one of 16, 32
* imgsPerThread one of 1, 2, 4
* filtersPerThread one of 1, 2, 4, 8
* colorCache: how many colors to put into shmem
*
* numFilters should be divisible by B_Y * filtersPerThread
 * numImages should be divisible by B_X * imgsPerThread
* numFilterColors should be divisible by colorCache.
* numImgColors must be even.
* numFilters must be divisible by numGroups.
*
* The imgSize here is the size of the actual image without the padding.
*
*/
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache,
          bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_sparse(float* images, float* filters, float* targets,
                                   const int numImages, const int numFilters,
                                   const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
                                   const int moduleStride,
                                   const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors,
                                   const int numGroups,
                                   const float scaleTargets, const float scaleOutputs,
                                   const bool conv) {
    // Per-block staging tiles. A tile of filter weights and a tile of image pixels are
    // cooperatively loaded each iteration, then every thread accumulates its partial
    // products from shared memory (registers: prod[f][g]).
    __shared__ float shFilters[B_Y*colorCache][B_Y * filtersPerThread]; // pre-load B_Y pixels from B_Y*filtersPerThread filters
    __shared__ float shImages[B_Y*colorCache][B_X * imgsPerThread]; // pre-load B_Y pixels from B_X*imgsPerThread images
    const int imgPixels = imgSizeY * imgSizeX;
    const int filterPixels = filterSize * filterSize;
    const int numFilterColors = numImgColors / numGroups;
    // blockIdx.y encodes both the output module and the filter batch within that module.
    const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
    const int moduleIdx = blockIdx.y / blocksPerModule;
    const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule);
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
    const int numModules = numModulesX * numModulesY;
    // First image color channel this block's filter group reads.
    const int blockColorIdx = numFilterColors * blockGroupIdx;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    // Top-left image coordinate of this module's receptive field; may be negative
    // because paddingStart <= 0 (padding handled by the bounds check below).
    const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride;
    const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
    // Flattened thread id remapped to a (row, col) position for the filter-tile load.
    const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
    const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
    const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x;
    // Advance base pointers so the loops below use small relative offsets only.
    images += blockColorIdx * imgPixels * imgStride + myImgIdx;
    filters +=blockFilterIdx
            + shFilterLoadY * numFilters + shFilterLoadX;
    if (!conv) {
        // Locally-connected (non-convolutional) layers store one filter set per module.
        filters += moduleIdx * numFilterColors * filterPixels * numFilters;
    }
    targets += moduleIdx * numImages
            + (blockFilterIdx + threadIdx.y) * numImages * numModules
            + myImgIdx;
    // Register accumulators: one partial sum per (filter, image) pair owned by this thread.
    float prod[filtersPerThread][imgsPerThread];
    #pragma unroll
    for(int f = 0; f < filtersPerThread; f++) {
        #pragma unroll
        for(int g = 0; g < imgsPerThread; g++) {
            prod[f][g] = 0;
        }
    }
    // __shared__ int imgPos[]
    for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop)
        for (int p = 0; p < filterPixels; p += B_Y) {
            /*
             * Load B_Y pixels from B_Y*filtersPerThread filters
             * (out-of-range filter pixels are zero-filled so the compute loop needs no guard).
             */
            if (shFilterLoadY < B_Y) {
                #pragma unroll
                for (int p2 = 0; p2 < B_Y; p2 += B_X/filtersPerThread) {
                    if (p + p2 + shFilterLoadY < filterPixels) {
                        #pragma unroll
                        for (int c = 0; c < colorCache; c++) {
                            shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = filters[((oc+c) * filterPixels + p + p2) * numFilters];
                        }
                    } else {
                        #pragma unroll
                        for (int c = 0; c < colorCache; c++) {
                            shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = 0;
                        }
                    }
                }
            }
            /*
             * Load B_Y pixels from B_X*imgsPerThread images
             * (pixels falling in the padding region or past numImages are zero-filled).
             */
            const int pixIdx = p + threadIdx.y;
            if (pixIdx < filterPixels) {
                const int x = imgLoadModPosX + pixIdx % filterSize;
                const int y = imgLoadModPosY + pixIdx / filterSize;
                if (y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) {
                    float* m = &images[imgStride * (oc * imgPixels + y * imgSizeX + x)];
                    #pragma unroll
                    for (int i = 0; i < imgsPerThread; i++) {
                        if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
                            #pragma unroll
                            for (int c = 0; c < colorCache; c++) {
                                shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = m[c * imgStride * imgPixels + i * B_X];
                            }
                        } else {
                            #pragma unroll
                            for (int c = 0; c < colorCache; c++) {
                                shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
                            }
                        }
                    }
                } else { // Padding
                    #pragma unroll
                    for (int i = 0; i < imgsPerThread; i++) {
                        #pragma unroll
                        for (int c = 0; c < colorCache; c++) {
                            shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
                        }
                    }
                }
            }
            __syncthreads(); // all tile loads must finish before any thread reads shared memory
            // Accumulate: dot product over the B_Y*colorCache staged (pixel, color) rows.
            #pragma unroll
            for (int i = 0; i < B_Y*colorCache; i++) {
                #pragma unroll
                for(int f = 0; f < filtersPerThread; f++) {
                    #pragma unroll
                    for(int g = 0; g < imgsPerThread; g++) {
                        prod[f][g] += shImages[i][g * B_X + threadIdx.x] * shFilters[i][threadIdx.y + f * B_Y];
                    }
                }
            }
            __syncthreads(); // tiles are reused next iteration; wait until everyone is done reading
        }
    }
    // Write back: either blend into existing targets (scale) or overwrite.
    if (scale) {
        #pragma unroll
        for (int g = 0; g < imgsPerThread; g++) {
            if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    targets[g * B_X + f * B_Y * numImages * numModules] = scaleTargets * targets[g * B_X + f * B_Y * numImages * numModules] + scaleOutputs * prod[f][g];
                }
            }
        }
    } else {
        #pragma unroll
        for (int g = 0; g < imgsPerThread; g++) {
            if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    targets[g * B_X + f * B_Y * numImages * numModules] = scaleOutputs * prod[f][g];
                }
            }
        }
    }
}
/*
* Block size B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * imgsPerThread images.
* threadIdx.x determines image
* threadIdx.y determines filter
*
* blockIdx.x determines image batch of B_X * imgsPerThread
* blockIdx.y determines filter batch of B_Y * filtersPerThread
*
* images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModulesY, numModulesX, numImages)
 * colorIndices: (numGroups, numFilterColors)
*
* B_Y one of 4, 8, 16
* B_X one of 16, 32
* imgsPerThread one of 1, 2, 4
* filtersPerThread one of 1, 2, 4, 8
* colorCache: how many colors to put into shmem
*
* numFilters should be divisible by B_Y * filtersPerThread
 * numImages should be divisible by B_X * imgsPerThread
* numFilterColors should be divisible by colorCache.
* numImgColors must be even.
* numFilters must be divisible by numGroups.
*
* The imgSize here is the size of the actual image without the padding.
*/
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache, bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_sparse_random(float* images, float* filters, float* targets, int* colorIndices,
                                   const int numImages, const int numFilters,
                                   const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
                                   const int moduleStride,
                                   const int numModulesY, const int numModulesX, const int imgStride,
                                   /*const int numImgColors,*/ const int numFilterColors, const int numGroups,
                                   const float scaleTargets, const float scaleOutputs,
                                   const bool conv) {
    // Variant of filterActs_YxX_sparse where each filter group reads an arbitrary
    // (indirected) subset of image color channels, given by colorIndices.
    __shared__ float shFilters[B_Y*colorCache][B_Y * filtersPerThread]; // pre-load B_Y pixels from B_Y*filtersPerThread filters
    __shared__ float shImages[B_Y*colorCache][B_X * imgsPerThread]; // pre-load B_Y pixels from B_X*imgsPerThread images
    __shared__ int shColors[colorCache]; // precomputed channel base offsets for the current color slice
    const int imgPixels = imgSizeY * imgSizeX;
    const int filterPixels = filterSize * filterSize;
    // const int numFilterColors = numImgColors / numGroups;
    // blockIdx.y encodes both the output module and the filter batch within that module.
    const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
    const int moduleIdx = blockIdx.y / blocksPerModule;
    const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule);
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
    const int numModules = numModulesY * numModulesX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    // Top-left image coordinate of this module's receptive field; may be negative
    // because paddingStart <= 0 (padding handled by the bounds check below).
    const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride;
    const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
    // Flattened thread id remapped to a (row, col) position for the filter-tile load.
    const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
    const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
    const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x;
    // Advance base pointers so the loops below use small relative offsets only.
    // Note: unlike the dense-sparse kernel, images is NOT offset by a color base here;
    // channel offsets come from shColors instead.
    images += myImgIdx;
    filters +=blockFilterIdx
            + shFilterLoadY * numFilters + shFilterLoadX;
    if (!conv) {
        // Locally-connected (non-convolutional) layers store one filter set per module.
        filters += moduleIdx * numFilterColors * filterPixels * numFilters;
    }
    targets += moduleIdx * numImages
            + (blockFilterIdx + threadIdx.y) * numImages * numModules
            + myImgIdx;
    colorIndices += blockGroupIdx * numFilterColors; // this group's channel index list
    // Register accumulators: one partial sum per (filter, image) pair owned by this thread.
    float prod[filtersPerThread][imgsPerThread];
    #pragma unroll
    for(int f = 0; f < filtersPerThread; f++) {
        #pragma unroll
        for(int g = 0; g < imgsPerThread; g++) {
            prod[f][g] = 0;
        }
    }
    // __shared__ int imgPos[]
    for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop)
        // Kinda wasteful here but...shouldn't matter
        // Resolve the next colorCache indirect channel indices into flat image offsets.
        if (tidx < colorCache) {
            shColors[tidx] = colorIndices[oc + tidx] * imgStride * imgPixels;
        }
        __syncthreads(); // shColors must be visible to the whole block before loading images
        for (int p = 0; p < filterPixels; p += B_Y) {
            /*
             * Load B_Y pixels from B_Y*filtersPerThread filters
             * (out-of-range filter pixels are zero-filled so the compute loop needs no guard).
             */
            if (shFilterLoadY < B_Y) {
                #pragma unroll
                for (int p2 = 0; p2 < B_Y; p2 += B_X/filtersPerThread) {
                    if (p + p2 + shFilterLoadY < filterPixels) {
                        #pragma unroll
                        for (int c = 0; c < colorCache; c++) {
                            shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = filters[((oc+c) * filterPixels + p + p2) * numFilters];
                        }
                    } else {
                        #pragma unroll
                        for (int c = 0; c < colorCache; c++) {
                            shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = 0;
                        }
                    }
                }
            }
            /*
             * Load B_Y pixels from B_X*imgsPerThread images
             * (pixels falling in the padding region or past numImages are zero-filled).
             */
            const int pixIdx = p + threadIdx.y;
            if (pixIdx < filterPixels) {
                const int x = imgLoadModPosX + pixIdx % filterSize;
                const int y = imgLoadModPosY + pixIdx / filterSize;
                if (y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) {
                    float* m = &images[imgStride * (y * imgSizeX + x)];
                    #pragma unroll
                    for (int i = 0; i < imgsPerThread; i++) {
                        if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
                            #pragma unroll
                            for (int c = 0; c < colorCache; c++) {
                                shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = m[shColors[c] + i * B_X];
                            }
                        } else {
                            #pragma unroll
                            for (int c = 0; c < colorCache; c++) {
                                shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
                            }
                        }
                    }
                } else { // Padding
                    #pragma unroll
                    for (int i = 0; i < imgsPerThread; i++) {
                        #pragma unroll
                        for (int c = 0; c < colorCache; c++) {
                            shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
                        }
                    }
                }
            }
            __syncthreads(); // all tile loads must finish before any thread reads shared memory
            // Accumulate: dot product over the B_Y*colorCache staged (pixel, color) rows.
            #pragma unroll
            for (int i = 0; i < B_Y*colorCache; i++) {
                #pragma unroll
                for(int f = 0; f < filtersPerThread; f++) {
                    #pragma unroll
                    for(int g = 0; g < imgsPerThread; g++) {
                        prod[f][g] += shImages[i][g * B_X + threadIdx.x] * shFilters[i][threadIdx.y + f * B_Y];
                    }
                }
            }
            __syncthreads(); // tiles (and shColors) are reused/rewritten next iteration
        }
    }
    // Write back: either blend into existing targets (scale) or overwrite.
    if (scale) {
        #pragma unroll
        for (int g = 0; g < imgsPerThread; g++) {
            if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    targets[g * B_X + f * B_Y * numImages * numModules] = scaleTargets * targets[g * B_X + f * B_Y * numImages * numModules] + scaleOutputs * prod[f][g];
                }
            }
        }
    } else {
        #pragma unroll
        for (int g = 0; g < imgsPerThread; g++) {
            if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    targets[g * B_X + f * B_Y * numImages * numModules] = scaleOutputs * prod[f][g];
                }
            }
        }
    }
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModules, numImages)
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
void _filterActs(NVMatrix& images, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
int numImgColors, int numGroups,
float scaleTargets, float scaleOutput, bool conv) {
int numFilterColors = numImgColors / numGroups;
int numFilters = filters.getNumCols();
int numModules = numModulesY * numModulesX; // numModules is the size of the filter output (for one filter)
int numImages = images.getNumCols();
int imgPixels = images.getNumRows()/numImgColors;
int imgSizeX = imgPixels / imgSizeY;
int filterModuleMult = conv ? 1 : numModules;
assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
assert(numGroups == 1 || numFilterColors % 2 == 0);
assert(numFilters % (16 * numGroups) == 0);
assert(numImgColors % numGroups == 0);
assert(images.getNumRows() == imgPixels * numImgColors);
assert(imgSizeY * imgSizeX == imgPixels);
int numFiltersPerGroup = numFilters / numGroups;
int imgStride = images.getStride(); // images does not need to be a contiguous matrix
int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
int filterSize = int(sqrt(filterPixels));
assert(filterSize * filterSize == filterPixels);
assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0);
assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
assert(moduleStride <= filterSize);
assert(!images.isTrans());
assert(!filters.isTrans());
assert(!targets.isTrans());
assert(filters.isContiguous());
assert(targets.isContiguous());
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
dim3 blocks = numFiltersPerGroup % 32 == 0 ? dim3(DIVUP(numImages, 32 * imgsPerThread), (numModules * numFilters) / (4 * 8))
: dim3(DIVUP(numImages, 32 * imgsPerThread), (numModules * numFilters) / (4 * 4));
dim3 threads(32, 4);
bool checkImgBounds = numImages % (32*imgsPerThread) != 0;
if (scaleTargets == 0) {
targets.resize(numFilters * numModules, numImages);
} else {
assert(targets.getNumRows() == numFilters * numModules);
assert(targets.getNumCols() == numImages);
}
if (imgsPerThread == 4) {
if (numImgColors <= 3) {
assert(numGroups == 1); // It has to be based on above definitions, but just to be sure.
if (scaleTargets == 0) { // don't scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 1, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 1, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 1, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 1, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 2, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 2, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 2, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 2, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 3, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 3, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 3, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 3, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
} else { // do scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 1, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 1, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 1, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 1, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 2, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 2, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 2, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 2, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 3, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 3, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 3, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 3, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
}
} else {
if (scaleTargets == 0) { // don't scale
if (checkImgBounds) {
if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 8, 2, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 4, 2, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
} else {
if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 8, 2, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 4, 2, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
} else { // do scale
if (checkImgBounds) {
if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 8, 2, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 4, 2, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
} else {
if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 8, 2, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 4, 2, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
}
} else if (imgsPerThread == 2) {
if (numImgColors <= 3) {
assert(numGroups == 1); // It has to be based on above definitions, but just to be sure.
if (scaleTargets == 0) { // don't scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 1, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 1, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 1, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 1, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 1, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 1, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 1, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 1, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 2, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 2, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 2, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 2, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 3, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 3, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 3, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 3, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 3, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 3, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 3, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 3, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
} else { // do scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 1, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 1, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 1, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 1, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 1, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 1, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 1, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 1, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 2, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 2, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 2, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 2, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 2, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 2, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 2, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 2, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 3, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 3, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 3, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 3, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 3, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 3, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 3, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 3, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
}
} else {
if (scaleTargets == 0) { // don't scale
if (checkImgBounds) {
if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 8, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 2, 8, 2, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 4, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 2, 4, 2, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
} else {
if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 8, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 2, 8, 2, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 4, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 2, 4, 2, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
} else { // do scale
if (checkImgBounds) {
    if (numFiltersPerGroup % 32 == 0) {
        // BUGFIX: cache-config previously targeted the < ..., false, true >
        // instantiation while the launch below uses < ..., true, true >, so the
        // launched kernel never received the shared-memory preference.
        hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 8, 2, true, true >, hipFuncCachePreferShared);
        hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 2, 8, 2, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
            numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
    } else {
        hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 4, 2, true, true >, hipFuncCachePreferShared);
        hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 2, 4, 2, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
            numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
    }
} else {
    if (numFiltersPerGroup % 32 == 0) {
        // BUGFIX: match the launched < ..., true, false > instantiation.
        hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 8, 2, true, false >, hipFuncCachePreferShared);
        hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 2, 8, 2, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
            numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
    } else {
        hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 4, 2, true, false >, hipFuncCachePreferShared);
        hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 2, 4, 2, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
            numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
    }
}
}
}
} else {
if (numImgColors <= 3) {
assert(numGroups == 1); // It has to be based on above definitions, but just to be sure.
if (scaleTargets == 0) { // don't scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 1, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 1, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 1, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 1, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 1, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 1, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 1, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 1, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 2, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 2, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 2, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 2, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 3, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 3, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 3, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 3, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 3, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 3, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 3, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 3, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
} else { // do scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 1, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 1, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 1, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 1, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 1, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 1, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 1, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 1, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 2, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 2, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 2, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 2, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 2, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 2, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 2, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 2, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 3, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 3, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 3, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 3, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 3, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 3, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 3, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 3, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
}
} else {
if (scaleTargets == 0) { // don't scale
if (checkImgBounds) {
if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 8, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 1, 8, 2, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 4, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 1, 4, 2, false, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
} else {
if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 8, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 1, 8, 2, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 4, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 1, 4, 2, false, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
} else { // do scale
if (checkImgBounds) {
    if (numFiltersPerGroup % 32 == 0) {
        // BUGFIX: cache-config previously targeted the < ..., false, true >
        // instantiation while the launch below uses < ..., true, true >, so the
        // launched kernel never received the shared-memory preference.
        hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 8, 2, true, true >, hipFuncCachePreferShared);
        hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 1, 8, 2, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
            numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
    } else {
        hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 4, 2, true, true >, hipFuncCachePreferShared);
        hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 1, 4, 2, true, true >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
            numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
    }
} else {
    if (numFiltersPerGroup % 32 == 0) {
        // BUGFIX: match the launched < ..., true, false > instantiation.
        hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 8, 2, true, false >, hipFuncCachePreferShared);
        hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 1, 8, 2, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
            numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
    } else {
        hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 4, 2, true, false >, hipFuncCachePreferShared);
        hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 1, 4, 2, true, false >) , dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filters.getDevData(), targets.getDevData(),
            numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
    }
}
}
}
}
cutilCheckMsg("filterActs: kernel execution failed");
}
/*
 * Convolutional filter activations, default scaling.
 * Overwrites `targets` (scaleTargets = 0) with the raw filter responses
 * (scaleOutput = 1) by forwarding to the fully-parameterized overload.
 */
void convFilterActs(NVMatrix& images, NVMatrix& filters, NVMatrix& targets,
                    int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
                    int numImgColors, int numGroups) {
    convFilterActs(images, filters, targets,
                   imgSizeY, numModulesY, numModulesX,
                   paddingStart, moduleStride,
                   numImgColors, numGroups,
                   0, 1);
}
/*
 * Convolutional filter activations with explicit output scaling:
 *   targets = scaleTargets * targets + scaleOutput * (images (*) filters)
 * Forwards to _filterActs with conv = true (filters shared across modules).
 */
void convFilterActs(NVMatrix& images, NVMatrix& filters, NVMatrix& targets,
                    int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
                    int numImgColors, int numGroups,
                    float scaleTargets, float scaleOutput) {
    _filterActs(images, filters, targets,
                imgSizeY, numModulesY, numModulesX,
                paddingStart, moduleStride,
                numImgColors, numGroups,
                scaleTargets, scaleOutput,
                /* conv = */ true);
}
/*
 * Locally-connected filter activations, default scaling.
 * Overwrites `targets` (scaleTargets = 0) with the raw responses
 * (scaleOutput = 1) by forwarding to the fully-parameterized overload.
 */
void localFilterActs(NVMatrix& images, NVMatrix& filters, NVMatrix& targets,
                     int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
                     int numImgColors, int numGroups) {
    localFilterActs(images, filters, targets,
                    imgSizeY, numModulesY, numModulesX,
                    paddingStart, moduleStride,
                    numImgColors, numGroups,
                    0, 1);
}
/*
 * Locally-connected filter activations with explicit output scaling:
 *   targets = scaleTargets * targets + scaleOutput * response
 * Forwards to _filterActs with conv = false (each module has its own filters).
 */
void localFilterActs(NVMatrix& images, NVMatrix& filters, NVMatrix& targets,
                     int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
                     int numImgColors, int numGroups,
                     float scaleTargets, float scaleOutput) {
    _filterActs(images, filters, targets,
                imgSizeY, numModulesY, numModulesX,
                paddingStart, moduleStride,
                numImgColors, numGroups,
                scaleTargets, scaleOutput,
                /* conv = */ false);
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModulesY, numModulesX, numImages)
* colorIndices: (numGroups, numFilterColors)
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
/*
 * Dispatcher for the randomly-sparse filterActs kernels.
 *
 * images:  (numImgColors, imgSizeY, imgSizeX, numImages) with stride given
 * filters: (numFilterColors, filterPixels, numFilters)              if conv
 *          (numModules, numFilterColors, filterPixels, numFilters)  otherwise
 * targets: (numFilters, numModulesY, numModulesX, numImages)
 * dColorIndices: device pointer, (numGroups, numFilterColors)
 *
 * Selects the template instantiation of filterActs_YxX_sparse_random matching
 * the batch size (imgsPerThread: 4/2/1), the per-group filter count
 * (filtersPerThread: 8/4), whether targets are accumulated into (scale), and
 * whether image-bound checks are needed (checkImgBounds).
 *
 * FIX: the previous version called hipFuncSetCacheConfig on the scale=false
 * instantiation even when launching the scale=true one, so the shared-memory
 * cache preference was applied to the wrong kernel on every scaling path.
 * The launch macro below uses a single template-argument list for both calls.
 */
void _filterActsSparse(NVMatrix& images, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
                       int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
                       int numImgColors, int numFilterColors, int numGroups,
                       float scaleTargets, float scaleOutput, bool conv) {
    int numFilters = filters.getNumCols();
    int numModules = numModulesY * numModulesX;
    int numImages = images.getNumCols();
    int imgPixels = images.getNumRows() / numImgColors;
    int imgSizeX = imgPixels / imgSizeY;
    int filterModuleMult = conv ? 1 : numModules; // local layers store one filter bank per module
    assert(numGroups > 1); // the sparse path only makes sense with multiple groups
    assert(numImgColors % numFilterColors == 0);
    assert((numFilterColors * numGroups) % numImgColors == 0);
    assert(numFilters % (16 * numGroups) == 0);
    assert(numFilterColors % 2 == 0);

    assert(imgSizeY * imgSizeX == imgPixels);
    assert(images.getNumRows() == imgPixels * numImgColors);
    int numFiltersPerGroup = numFilters / numGroups;

    int imgStride = images.getStride(); // images does not need to be a contiguous matrix

    int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
    int filterSize = int(sqrt(filterPixels));
    assert(filterSize * filterSize == filterPixels); // filters must be square
    assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);

    // These routines don't handle the case when only part of the image is visited in the convolution
    assert(paddingStart <= 0);
    assert(paddingStart + (numModulesX-1) * moduleStride + filterSize >= imgSizeX);
    assert(paddingStart + (numModulesY-1) * moduleStride + filterSize >= imgSizeY);
    assert(moduleStride <= filterSize);

    assert(!images.isTrans());
    assert(!filters.isTrans());
    assert(!targets.isTrans());

    assert(filters.isContiguous());
    assert(targets.isContiguous());

    // Each thread handles 4/2/1 images depending on batch-size divisibility.
    int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
    // filtersPerThread is 8 when numFiltersPerGroup divides by 32, else 4;
    // the grid y-dimension must match that choice (threads.y == 4 either way).
    dim3 blocks = numFiltersPerGroup % 32 == 0 ? dim3(DIVUP(numImages, 32 * imgsPerThread), (numModules * numFilters) / (4 * 8))
                                               : dim3(DIVUP(numImages, 32 * imgsPerThread), (numModules * numFilters) / (4 * 4));
    dim3 threads(32, 4);
    bool checkImgBounds = numImages % (32*imgsPerThread) != 0;
    if (scaleTargets == 0) {
        targets.resize(numFilters * numModules, numImages); // overwriting: (re)shape targets
    } else {
        assert(targets.getNumRows() == numFilters * numModules);
        assert(targets.getNumCols() == numImages);
    }

// Configure the cache and launch ONE instantiation; the same template-argument
// list is used for both calls so the cache preference applies to the kernel
// actually being launched.
#define FILTER_ACTS_SPARSE_LAUNCH(IMGS, FILTERS, SCALE, CHECK)                                                    \
    do {                                                                                                          \
        hipFuncSetCacheConfig(filterActs_YxX_sparse_random< 4, 32, IMGS, FILTERS, 2, SCALE, CHECK >,              \
                              hipFuncCachePreferShared);                                                          \
        hipLaunchKernelGGL(( filterActs_YxX_sparse_random< 4, 32, IMGS, FILTERS, 2, SCALE, CHECK > ),             \
                           dim3(blocks), dim3(threads), 0, 0,                                                     \
                           images.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,        \
                           numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride,     \
                           numModulesY, numModulesX, imgStride, numFilterColors, numGroups,                       \
                           scaleTargets, scaleOutput, conv);                                                      \
    } while (0)

// Choose filtersPerThread (8 or 4) for a fixed imgsPerThread/scale/bounds combination.
#define FILTER_ACTS_SPARSE_SELECT(IMGS, SCALE, CHECK)                                                             \
    do {                                                                                                          \
        if (numFiltersPerGroup % 32 == 0) {                                                                       \
            FILTER_ACTS_SPARSE_LAUNCH(IMGS, 8, SCALE, CHECK);                                                     \
        } else {                                                                                                  \
            FILTER_ACTS_SPARSE_LAUNCH(IMGS, 4, SCALE, CHECK);                                                     \
        }                                                                                                         \
    } while (0)

// Resolve the runtime scale/bounds flags into compile-time template booleans.
#define FILTER_ACTS_SPARSE_DISPATCH(IMGS)                                                                         \
    do {                                                                                                          \
        if (scaleTargets == 0) { /* overwrite targets: no read-modify-write in the kernel */                      \
            if (checkImgBounds) {                                                                                 \
                FILTER_ACTS_SPARSE_SELECT(IMGS, false, true);                                                     \
            } else {                                                                                              \
                FILTER_ACTS_SPARSE_SELECT(IMGS, false, false);                                                    \
            }                                                                                                     \
        } else { /* accumulate: targets = scaleTargets*targets + scaleOutput*acts */                              \
            if (checkImgBounds) {                                                                                 \
                FILTER_ACTS_SPARSE_SELECT(IMGS, true, true);                                                      \
            } else {                                                                                              \
                FILTER_ACTS_SPARSE_SELECT(IMGS, true, false);                                                     \
            }                                                                                                     \
        }                                                                                                         \
    } while (0)

    if (imgsPerThread == 4) {
        FILTER_ACTS_SPARSE_DISPATCH(4);
    } else if (imgsPerThread == 2) {
        FILTER_ACTS_SPARSE_DISPATCH(2);
    } else {
        FILTER_ACTS_SPARSE_DISPATCH(1);
    }

#undef FILTER_ACTS_SPARSE_DISPATCH
#undef FILTER_ACTS_SPARSE_SELECT
#undef FILTER_ACTS_SPARSE_LAUNCH

    cutilCheckMsg("filterActsSparse: kernel execution failed");
}
/*
 * Convolutional sparse filterActs: all modules share one randomly-connected
 * filter bank. targets <- scaleTargets * targets + scaleOutput * acts.
 */
void convFilterActsSparse(NVMatrix& images, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
                          int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
                          int numImgColors, int numFilterColors, int numGroups,
                          float scaleTargets, float scaleOutput) {
    const bool isConvolutional = true; // one filter bank shared across all modules
    _filterActsSparse(images, filters, targets, dColorIndices, imgSizeY, numModulesY, numModulesX,
                      paddingStart, moduleStride, numImgColors, numFilterColors, numGroups,
                      scaleTargets, scaleOutput, isConvolutional);
}
/*
 * Convolutional sparse filterActs with default scaling: targets are overwritten
 * (scaleTargets = 0) and responses are taken unscaled (scaleOutput = 1).
 */
void convFilterActsSparse(NVMatrix& images, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
                          int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
                          int numImgColors, int numFilterColors, int numGroups) {
    const float overwriteTargets = 0;
    const float unscaledOutput = 1;
    convFilterActsSparse(images, filters, targets, dColorIndices, imgSizeY, numModulesY, numModulesX,
                         paddingStart, moduleStride, numImgColors, numFilterColors, numGroups,
                         overwriteTargets, unscaledOutput);
}
/*
 * Locally-connected sparse filterActs: each module owns its own randomly-connected
 * filter bank. targets <- scaleTargets * targets + scaleOutput * acts.
 */
void localFilterActsSparse(NVMatrix& images, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
                           int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
                           int numImgColors, int numFilterColors, int numGroups,
                           float scaleTargets, float scaleOutput) {
    const bool isConvolutional = false; // distinct filter bank per module
    _filterActsSparse(images, filters, targets, dColorIndices, imgSizeY, numModulesY, numModulesX,
                      paddingStart, moduleStride, numImgColors, numFilterColors, numGroups,
                      scaleTargets, scaleOutput, isConvolutional);
}
/*
 * Locally-connected sparse filterActs with default scaling: targets are
 * overwritten (scaleTargets = 0) and responses are taken unscaled (scaleOutput = 1).
 */
void localFilterActsSparse(NVMatrix& images, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
                           int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
                           int numImgColors, int numFilterColors, int numGroups) {
    const float overwriteTargets = 0;
    const float unscaledOutput = 1;
    localFilterActsSparse(images, filters, targets, dColorIndices, imgSizeY, numModulesY, numModulesX,
                          paddingStart, moduleStride, numImgColors, numFilterColors, numGroups,
                          overwriteTargets, unscaledOutput);
}
| 077644c302197438b727980a4a86d60d73bb8558.cu | /*
* Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cutil_inline.h>
#include <nvmatrix.cuh>
#include <cudaconv2.cuh>
/*
* Block size B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * imgsPerThread images.
* threadIdx.x determines image
* threadIdx.y determines filter
*
* blockIdx.x determines image batch of B_X * imgsPerThread
* blockIdx.y determines filter batch of module and B_Y * filtersPerThread
*
* images: (numColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numColors, filterPixels, numFilters) if conv
* (numModules, numColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModulesY, numModulesX, numImages)
*
* B_Y one of 4, 8, 16
* B_X one of 16, 32
* imgsPerThread one of 1, 2, 4
* filtersPerThread one of 1, 2, 4, 8
*
* Number of filters per module should be divisible by B_Y * filtersPerThread
* checkImgBounds indicates whether number of images is divisible by B_X * imgsPerThread
*
* The imgSize here is the size of the actual image without the padding.
*
*/
// One block computes B_Y*filtersPerThread filter responses for B_X*imgsPerThread
// images at one module (output position), accumulating over all numColors channels.
// Shared-memory tiles of B_Y filter pixels are staged per iteration; barrier
// placement and load order below are load-bearing — do not reorder.
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int numColors,
          bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_color(float* images, float* filters, float* targets,
                                     const int numImages, const int numFilters,
                                     const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
                                     const int moduleStride,
                                     const int numModulesY, const int numModulesX, const int imgStride,
                                     const float scaleTargets, const float scaleOutputs,
                                     const bool conv) {
    __shared__ float shFilters[B_Y*numColors][B_Y * filtersPerThread]; // pre-load B_Y pixels from B_Y*filtersPerThread filters
    __shared__ float shImages[B_Y*numColors][B_X * imgsPerThread]; // pre-load B_Y pixels from B_X*imgsPerThread images
    const int imgPixels = imgSizeY * imgSizeX;
    const int filterPixels = filterSize * filterSize;
    const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
    const int moduleIdx = blockIdx.y / blocksPerModule; // which position on the output image is it computing (2D position)
    const int blockFilterIdx = blockIdx.y % blocksPerModule; // indicating which filter batch the block is going to compute
    const int tidx = threadIdx.y * B_X + threadIdx.x; // 1D index of the thread in the block
    const int imgLoadModPosY = (moduleIdx / numModulesX) * moduleStride; // corresponding Y position on the original image plane for this module index
    const int imgLoadModPosX = (moduleIdx % numModulesX) * moduleStride; // corresponding X position on the original image plane for this module index
    const int shFilterLoadY = tidx / (B_Y * filtersPerThread); // which pixel(s) of the filter this thread loads
    const int shFilterLoadX = tidx % (B_Y * filtersPerThread); // which filter this thread loads
    const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; // first image this thread handles; subsequent ones are strided by B_X (threadIdx.x, threadIdx.x+B_X, ..., threadIdx.x+imgsPerThread*B_X)
    images += myImgIdx; // pick that image
    filters += filtersPerThread * B_Y * blockFilterIdx
             + shFilterLoadY * numFilters + shFilterLoadX; // pick that filter
    if (!conv) {
        // local (non-convolutional) layers keep a distinct filter bank per module
        filters += moduleIdx * numColors * filterPixels * numFilters;
    }
    targets += moduleIdx * numImages
             + (blockFilterIdx * B_Y * filtersPerThread + threadIdx.y) * numImages * numModulesY * numModulesX
             + myImgIdx;
    // Per-thread accumulators: one partial sum per (filter, image) pair.
    float prod[filtersPerThread][imgsPerThread];
    #pragma unroll
    for(int f = 0; f < filtersPerThread; f++) {
        #pragma unroll
        for(int g = 0; g < imgsPerThread; g++) {
            prod[f][g] = 0;
        }
    }
    // Iterate over the filter in vertical slabs of B_Y pixels.
    for (int p = 0; p < filterPixels; p += B_Y) {
        /*
         * Load B_Y pixels from B_Y*filtersPerThread filters
         */
        if (shFilterLoadY < B_Y) {
            #pragma unroll
            for (int p2 = 0; p2 < B_Y; p2 += B_X/filtersPerThread) {
                if (p + p2 + shFilterLoadY < filterPixels) {
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = filters[(c * filterPixels + p + p2) * numFilters];
                    }
                } else {
                    // zero-pad past the last filter pixel so the MAC loop needs no bounds checks
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = 0;
                    }
                }
            }
        }
        /*
         * Load B_Y pixels from B_X*imgsPerThread images
         */
        const int pixIdx = p + threadIdx.y;
        if (pixIdx < filterPixels) {
            const int x = paddingStart + imgLoadModPosX + pixIdx % filterSize;
            const int y = paddingStart + imgLoadModPosY + pixIdx / filterSize;
            if (y >= 0 && y< imgSizeY && x >= 0 && x < imgSizeX) {
                #pragma unroll
                for (int i = 0; i < imgsPerThread; i++) {
                    if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
                        #pragma unroll
                        for (int c = 0; c < numColors; c++) {
                            shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = images[imgStride * (c * imgPixels + y * imgSizeX + x) + i * B_X];
                        }
                    } else {
                        // image index out of range: contribute zeros
                        #pragma unroll
                        for (int c = 0; c < numColors; c++) {
                            shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
                        }
                    }
                }
            } else { // Padding: pixel falls outside the image plane
                #pragma unroll
                for (int i = 0; i < imgsPerThread; i++) {
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
                    }
                }
            }
        }
        __syncthreads(); // all tiles written before any thread reads them
        #pragma unroll
        for (int i = 0; i < B_Y*numColors; i++) {
            #pragma unroll
            for(int f = 0; f < filtersPerThread; f++) {
                #pragma unroll
                for(int g = 0; g < imgsPerThread; g++) {
                    prod[f][g] += shImages[i][g * B_X + threadIdx.x] * shFilters[i][threadIdx.y + f * B_Y]; // direct (time-domain) convolution; no FFT, which is appropriate for small kernels
                }
            }
        }
        __syncthreads(); // tiles fully consumed before the next slab overwrites them
    }
    if (scale) {
        // Accumulate: targets = scaleTargets*targets + scaleOutputs*acts
        #pragma unroll
        for (int g = 0; g < imgsPerThread; g++) {
            if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    targets[g * B_X + f * B_Y * numImages * numModulesY * numModulesX] = scaleTargets * targets[g * B_X + f * B_Y * numImages * numModulesY * numModulesX] + scaleOutputs * prod[f][g];
                }
            }
        }
    } else {
        // Overwrite: targets = scaleOutputs*acts
        #pragma unroll
        for (int g = 0; g < imgsPerThread; g++) {
            if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    targets[g * B_X + f * B_Y * numImages * numModulesY * numModulesX] = scaleOutputs * prod[f][g];
                }
            }
        }
    }
}
/*
* Block size B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * imgsPerThread images.
* threadIdx.x determines image
* threadIdx.y determines filter
*
* blockIdx.x determines image batch of B_X * imgsPerThread
* blockIdx.y determines filter batch of B_Y * filtersPerThread
*
* images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModulesY, numModulesX, numImages)
*
* B_Y one of 4, 8, 16
* B_X one of 16, 32
* imgsPerThread one of 1, 2, 4
* filtersPerThread one of 1, 2, 4, 8
* colorCache: how many colors to put into shmem
*
* numFilters should be divisible by B_Y * filtersPerThread
* numImages be divisible by B_X * imgsPerThread
* numFilterColors should be divisible by colorCache.
* numImgColors must be even.
* numFilters must be divisible by numGroups.
*
* The imgSize here is the size of the actual image without the padding.
*
*/
// Grouped-color variant: each filter group sees only numImgColors/numGroups input
// channels, processed colorCache channels at a time through shared memory.
// Padding is folded into imgLoadModPos{Y,X} here (unlike the _color kernel).
// Barrier placement and load order are load-bearing — do not reorder.
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache,
          bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_sparse(float* images, float* filters, float* targets,
                                      const int numImages, const int numFilters,
                                      const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
                                      const int moduleStride,
                                      const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors,
                                      const int numGroups,
                                      const float scaleTargets, const float scaleOutputs,
                                      const bool conv) {
    __shared__ float shFilters[B_Y*colorCache][B_Y * filtersPerThread]; // pre-load B_Y pixels from B_Y*filtersPerThread filters
    __shared__ float shImages[B_Y*colorCache][B_X * imgsPerThread]; // pre-load B_Y pixels from B_X*imgsPerThread images
    const int imgPixels = imgSizeY * imgSizeX;
    const int filterPixels = filterSize * filterSize;
    const int numFilterColors = numImgColors / numGroups; // channels visible to one group
    const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
    const int moduleIdx = blockIdx.y / blocksPerModule; // which output position this block computes
    const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); // first filter this block computes
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; // which group this block's filters belong to
    const int numModules = numModulesX * numModulesY;
    const int blockColorIdx = numFilterColors * blockGroupIdx; // first input channel of this group
    const int tidx = threadIdx.y * B_X + threadIdx.x; // 1D thread index within the block
    const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; // top-left Y of the receptive field (may be negative due to padding)
    const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; // top-left X of the receptive field (may be negative due to padding)
    const int shFilterLoadY = tidx / (B_Y * filtersPerThread); // which filter pixel(s) this thread loads
    const int shFilterLoadX = tidx % (B_Y * filtersPerThread); // which filter this thread loads
    const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; // first image this thread handles; subsequent ones strided by B_X
    images += blockColorIdx * imgPixels * imgStride + myImgIdx; // jump to this group's channels and image
    filters +=blockFilterIdx
            + shFilterLoadY * numFilters + shFilterLoadX; // pick this thread's filter slot
    if (!conv) {
        // local (non-convolutional) layers keep a distinct filter bank per module
        filters += moduleIdx * numFilterColors * filterPixels * numFilters;
    }
    targets += moduleIdx * numImages
            + (blockFilterIdx + threadIdx.y) * numImages * numModules
            + myImgIdx;
    // Per-thread accumulators: one partial sum per (filter, image) pair.
    float prod[filtersPerThread][imgsPerThread];
    #pragma unroll
    for(int f = 0; f < filtersPerThread; f++) {
        #pragma unroll
        for(int g = 0; g < imgsPerThread; g++) {
            prod[f][g] = 0;
        }
    }
    // __shared__ int imgPos[]
    for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop)
        for (int p = 0; p < filterPixels; p += B_Y) { // slab of B_Y filter pixels per iteration
            /*
             * Load B_Y pixels from B_Y*filtersPerThread filters
             */
            if (shFilterLoadY < B_Y) {
                #pragma unroll
                for (int p2 = 0; p2 < B_Y; p2 += B_X/filtersPerThread) {
                    if (p + p2 + shFilterLoadY < filterPixels) {
                        #pragma unroll
                        for (int c = 0; c < colorCache; c++) {
                            shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = filters[((oc+c) * filterPixels + p + p2) * numFilters];
                        }
                    } else {
                        // zero-pad past the last filter pixel so the MAC loop needs no bounds checks
                        #pragma unroll
                        for (int c = 0; c < colorCache; c++) {
                            shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = 0;
                        }
                    }
                }
            }
            /*
             * Load B_Y pixels from B_X*imgsPerThread images
             */
            const int pixIdx = p + threadIdx.y;
            if (pixIdx < filterPixels) {
                const int x = imgLoadModPosX + pixIdx % filterSize;
                const int y = imgLoadModPosY + pixIdx / filterSize;
                if (y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) {
                    // base pointer for this (channel slab, pixel); hoisted out of the unrolled loops
                    float* m = &images[imgStride * (oc * imgPixels + y * imgSizeX + x)];
                    #pragma unroll
                    for (int i = 0; i < imgsPerThread; i++) {
                        if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
                            #pragma unroll
                            for (int c = 0; c < colorCache; c++) {
                                shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = m[c * imgStride * imgPixels + i * B_X];
                            }
                        } else {
                            // image index out of range: contribute zeros
                            #pragma unroll
                            for (int c = 0; c < colorCache; c++) {
                                shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
                            }
                        }
                    }
                } else { // Padding: pixel falls outside the image plane
                    #pragma unroll
                    for (int i = 0; i < imgsPerThread; i++) {
                        #pragma unroll
                        for (int c = 0; c < colorCache; c++) {
                            shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
                        }
                    }
                }
            }
            __syncthreads(); // all tiles written before any thread reads them
            #pragma unroll
            for (int i = 0; i < B_Y*colorCache; i++) {
                #pragma unroll
                for(int f = 0; f < filtersPerThread; f++) {
                    #pragma unroll
                    for(int g = 0; g < imgsPerThread; g++) {
                        prod[f][g] += shImages[i][g * B_X + threadIdx.x] * shFilters[i][threadIdx.y + f * B_Y];
                    }
                }
            }
            __syncthreads(); // tiles fully consumed before the next slab overwrites them
        }
    }
    if (scale) {
        // Accumulate: targets = scaleTargets*targets + scaleOutputs*acts
        #pragma unroll
        for (int g = 0; g < imgsPerThread; g++) {
            if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    targets[g * B_X + f * B_Y * numImages * numModules] = scaleTargets * targets[g * B_X + f * B_Y * numImages * numModules] + scaleOutputs * prod[f][g];
                }
            }
        }
    } else {
        // Overwrite: targets = scaleOutputs*acts
        #pragma unroll
        for (int g = 0; g < imgsPerThread; g++) {
            if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    targets[g * B_X + f * B_Y * numImages * numModules] = scaleOutputs * prod[f][g];
                }
            }
        }
    }
}
/*
* Block size B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * imgsPerThread images.
* threadIdx.x determines image
* threadIdx.y determines filter
*
* blockIdx.x determines image batch of B_X * imgsPerThread
* blockIdx.y determines filter batch of B_Y * filtersPerThread
*
* images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModulesY, numModulesX, numImages)
 * colorIndices: (numGroups, numFilterColors)
*
* B_Y one of 4, 8, 16
* B_X one of 16, 32
* imgsPerThread one of 1, 2, 4
* filtersPerThread one of 1, 2, 4, 8
* colorCache: how many colors to put into shmem
*
* numFilters should be divisible by B_Y * filtersPerThread
* numImages be divisible by B_X * imgsPerThread
* numFilterColors should be divisible by colorCache.
* numImgColors must be even.
* numFilters must be divisible by numGroups.
*
* The imgSize here is the size of the actual image without the padding.
*/
/*
 * Applies filters whose color connectivity is sparse/"random": each filter
 * group reads numFilterColors image channels whose indices are taken from
 * colorIndices rather than a contiguous range. Grid/block layout, tensor
 * layouts, and divisibility constraints are described in the comment block
 * immediately above this template.
 */
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache, bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_sparse_random(float* images, float* filters, float* targets, int* colorIndices,
                                   const int numImages, const int numFilters,
                                   const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
                                   const int moduleStride,
                                   const int numModulesY, const int numModulesX, const int imgStride,
                                   /*const int numImgColors,*/ const int numFilterColors, const int numGroups,
                                   const float scaleTargets, const float scaleOutputs,
                                   const bool conv) {
    __shared__ float shFilters[B_Y*colorCache][B_Y * filtersPerThread]; // pre-load B_Y pixels from B_Y*filtersPerThread filters
    __shared__ float shImages[B_Y*colorCache][B_X * imgsPerThread]; // pre-load B_Y pixels from B_X*imgsPerThread images
    __shared__ int shColors[colorCache]; // precomputed global-memory channel offsets: colorIndices[oc+c] * imgStride * imgPixels
    const int imgPixels = imgSizeY * imgSizeX;
    const int filterPixels = filterSize * filterSize;
    // const int numFilterColors = numImgColors / numGroups;
    const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
    const int moduleIdx = blockIdx.y / blocksPerModule;          // which output module (filter position) this block computes
    const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); // first filter index handled by this block
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; // filter group -> which row of colorIndices to use
    const int numModules = numModulesY * numModulesX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;            // flat thread index within the block
    // Top-left image coordinate covered by this module; may be negative because of padding.
    const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride;
    const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
    // Coordinates this thread uses when cooperatively filling shFilters.
    const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
    const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
    const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; // first image column this thread handles
    images += myImgIdx;
    filters +=blockFilterIdx
            + shFilterLoadY * numFilters + shFilterLoadX;
    if (!conv) {
        // Locally-connected (non-convolutional) case: every module has its own filter bank.
        filters += moduleIdx * numFilterColors * filterPixels * numFilters;
    }
    targets += moduleIdx * numImages
            + (blockFilterIdx + threadIdx.y) * numImages * numModules
            + myImgIdx;
    colorIndices += blockGroupIdx * numFilterColors; // color index list for this block's group
    // Per-thread accumulators: one partial dot product per (filter, image) pair.
    float prod[filtersPerThread][imgsPerThread];
    #pragma unroll
    for(int f = 0; f < filtersPerThread; f++) {
        #pragma unroll
        for(int g = 0; g < imgsPerThread; g++) {
            prod[f][g] = 0;
        }
    }
    // __shared__ int imgPos[]
    for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop)
        // Kinda wasteful here but...shouldn't matter
        if (tidx < colorCache) {
            shColors[tidx] = colorIndices[oc + tidx] * imgStride * imgPixels;
        }
        __syncthreads(); // shColors must be fully written before any thread reads it below
        for (int p = 0; p < filterPixels; p += B_Y) {
            /*
             * Load B_Y pixels from B_Y*filtersPerThread filters
             */
            if (shFilterLoadY < B_Y) {
                #pragma unroll
                for (int p2 = 0; p2 < B_Y; p2 += B_X/filtersPerThread) {
                    if (p + p2 + shFilterLoadY < filterPixels) {
                        #pragma unroll
                        for (int c = 0; c < colorCache; c++) {
                            shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = filters[((oc+c) * filterPixels + p + p2) * numFilters];
                        }
                    } else {
                        // Past the last filter pixel: zero-fill so the unrolled
                        // accumulation loop below needs no bounds checks.
                        #pragma unroll
                        for (int c = 0; c < colorCache; c++) {
                            shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = 0;
                        }
                    }
                }
            }
            /*
             * Load B_Y pixels from B_X*imgsPerThread images
             */
            const int pixIdx = p + threadIdx.y;
            if (pixIdx < filterPixels) {
                const int x = imgLoadModPosX + pixIdx % filterSize;
                const int y = imgLoadModPosY + pixIdx / filterSize;
                if (y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) {
                    float* m = &images[imgStride * (y * imgSizeX + x)];
                    #pragma unroll
                    for (int i = 0; i < imgsPerThread; i++) {
                        if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
                            #pragma unroll
                            for (int c = 0; c < colorCache; c++) {
                                // shColors[c] selects the (sparse) image channel for cached color c.
                                shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = m[shColors[c] + i * B_X];
                            }
                        } else {
                            // Image column out of range: zero-fill the tile slot.
                            #pragma unroll
                            for (int c = 0; c < colorCache; c++) {
                                shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
                            }
                        }
                    }
                } else { // Padding
                    #pragma unroll
                    for (int i = 0; i < imgsPerThread; i++) {
                        #pragma unroll
                        for (int c = 0; c < colorCache; c++) {
                            shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
                        }
                    }
                }
            }
            __syncthreads(); // both shared tiles are ready; safe to accumulate
            #pragma unroll
            for (int i = 0; i < B_Y*colorCache; i++) {
                #pragma unroll
                for(int f = 0; f < filtersPerThread; f++) {
                    #pragma unroll
                    for(int g = 0; g < imgsPerThread; g++) {
                        prod[f][g] += shImages[i][g * B_X + threadIdx.x] * shFilters[i][threadIdx.y + f * B_Y];
                    }
                }
            }
            __syncthreads(); // done reading this tile; next iteration may overwrite it
        }
    }
    if (scale) {
        // Blend new activations into the existing targets:
        // targets = scaleTargets * targets + scaleOutputs * prod.
        #pragma unroll
        for (int g = 0; g < imgsPerThread; g++) {
            if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    targets[g * B_X + f * B_Y * numImages * numModules] = scaleTargets * targets[g * B_X + f * B_Y * numImages * numModules] + scaleOutputs * prod[f][g];
                }
            }
        }
    } else {
        // Overwrite targets with the scaled new activations.
        #pragma unroll
        for (int g = 0; g < imgsPerThread; g++) {
            if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    targets[g * B_X + f * B_Y * numImages * numModules] = scaleOutputs * prod[f][g];
                }
            }
        }
    }
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModules, numImages)
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
void _filterActs(NVMatrix& images, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
int numImgColors, int numGroups,
float scaleTargets, float scaleOutput, bool conv) {
int numFilterColors = numImgColors / numGroups;
int numFilters = filters.getNumCols();
int numModules = numModulesY * numModulesX; // numModules is the size of the filter output (for one filter)
int numImages = images.getNumCols();
int imgPixels = images.getNumRows()/numImgColors;
int imgSizeX = imgPixels / imgSizeY;
int filterModuleMult = conv ? 1 : numModules;
assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
assert(numGroups == 1 || numFilterColors % 2 == 0);
assert(numFilters % (16 * numGroups) == 0);
assert(numImgColors % numGroups == 0);
assert(images.getNumRows() == imgPixels * numImgColors);
assert(imgSizeY * imgSizeX == imgPixels);
int numFiltersPerGroup = numFilters / numGroups;
int imgStride = images.getStride(); // images does not need to be a contiguous matrix
int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
int filterSize = int(sqrt(filterPixels));
assert(filterSize * filterSize == filterPixels);
assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0);
assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
assert(moduleStride <= filterSize);
assert(!images.isTrans());
assert(!filters.isTrans());
assert(!targets.isTrans());
assert(filters.isContiguous());
assert(targets.isContiguous());
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
dim3 blocks = numFiltersPerGroup % 32 == 0 ? dim3(DIVUP(numImages, 32 * imgsPerThread), (numModules * numFilters) / (4 * 8))
: dim3(DIVUP(numImages, 32 * imgsPerThread), (numModules * numFilters) / (4 * 4));
dim3 threads(32, 4);
bool checkImgBounds = numImages % (32*imgsPerThread) != 0;
if (scaleTargets == 0) {
targets.resize(numFilters * numModules, numImages);
} else {
assert(targets.getNumRows() == numFilters * numModules);
assert(targets.getNumCols() == numImages);
}
if (imgsPerThread == 4) {
if (numImgColors <= 3) {
assert(numGroups == 1); // It has to be based on above definitions, but just to be sure.
if (scaleTargets == 0) { // don't scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 1, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 1, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 1, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 1, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 2, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 2, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 2, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 2, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 3, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 3, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 3, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 3, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
} else { // do scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 1, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 1, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 1, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 1, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 2, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 2, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 2, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 2, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 3, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 3, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 3, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 3, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
}
} else {
if (scaleTargets == 0) { // don't scale
if (checkImgBounds) {
if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 4, 8, 2, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 4, 4, 2, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
} else {
if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 4, 8, 2, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 4, 4, 2, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
} else { // do scale
if (checkImgBounds) {
if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 4, 8, 2, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 4, 4, 2, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
} else {
if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 4, 8, 2, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 4, 4, 2, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
}
} else if (imgsPerThread == 2) {
if (numImgColors <= 3) {
assert(numGroups == 1); // It has to be based on above definitions, but just to be sure.
if (scaleTargets == 0) { // don't scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 1, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 1, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 1, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 1, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 1, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 1, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 1, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 1, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 2, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 2, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 2, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 2, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 3, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 3, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 3, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 3, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 3, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 3, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 3, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 3, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
} else { // do scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 1, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 1, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 1, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 1, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 1, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 1, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 1, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 1, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 2, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 2, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 2, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 2, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 2, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 2, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 2, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 2, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 3, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 3, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 3, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 3, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 3, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 3, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 3, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 3, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
}
} else {
if (scaleTargets == 0) { // don't scale
if (checkImgBounds) {
if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 8, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 2, 8, 2, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 4, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 2, 4, 2, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
} else {
if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 8, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 2, 8, 2, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 4, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 2, 4, 2, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
} else { // do scale
if (checkImgBounds) {
if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 8, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 2, 8, 2, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 4, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 2, 4, 2, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
} else {
if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 8, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 2, 8, 2, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 4, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 2, 4, 2, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
}
} else {
if (numImgColors <= 3) {
assert(numGroups == 1); // It has to be based on above definitions, but just to be sure.
if (scaleTargets == 0) { // don't scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 1, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 1, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 1, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 1, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 1, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 1, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 1, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 1, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 2, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 2, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 2, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 2, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 3, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 3, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 3, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 3, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 3, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 3, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 3, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 3, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
} else { // do scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 1, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 1, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 1, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 1, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 1, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 1, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 1, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 1, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 2, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 2, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 2, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 2, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 2, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 2, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 2, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 2, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 3, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 3, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 3, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 3, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 3, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 3, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 3, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 3, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
}
} else {
if (scaleTargets == 0) { // don't scale
if (checkImgBounds) {
if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 8, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 1, 8, 2, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 4, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 1, 4, 2, false, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
} else {
if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 8, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 1, 8, 2, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 4, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 1, 4, 2, false, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
} else { // do scale
if (checkImgBounds) {
if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 8, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 1, 8, 2, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 4, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 1, 4, 2, true, true > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
} else {
if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 8, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 1, 8, 2, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 4, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 1, 4, 2, true, false > <<<blocks, threads>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
}
}
cutilCheckMsg("filterActs: kernel execution failed");
}
/*
 * Convolutional filter activations, overwrite variant: targets is overwritten
 * (scaleTargets = 0) and the convolution output is taken at unit scale
 * (scaleOutput = 1).
 */
void convFilterActs(NVMatrix& images, NVMatrix& filters, NVMatrix& targets,
                   int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
                   int numImgColors, int numGroups) {
    const float overwriteTargets = 0;  // discard any existing contents of targets
    const float unitOutputScale  = 1;  // use the raw convolution output
    convFilterActs(images, filters, targets, imgSizeY, numModulesY, numModulesX,
                   paddingStart, moduleStride, numImgColors, numGroups,
                   overwriteTargets, unitOutputScale);
}
/*
 * Convolutional filter activations: every output module applies the same
 * filter bank. Forwards to the shared dispatcher with conv = true.
 */
void convFilterActs(NVMatrix& images, NVMatrix& filters, NVMatrix& targets,
                   int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
                   int numImgColors, int numGroups,
                   float scaleTargets, float scaleOutput) {
    const bool sharedWeights = true;  // convolutional: one filter bank for all modules
    _filterActs(images, filters, targets, imgSizeY, numModulesY, numModulesX,
                paddingStart, moduleStride, numImgColors, numGroups,
                scaleTargets, scaleOutput, sharedWeights);
}
/*
 * Locally-connected filter activations, overwrite variant: targets is
 * overwritten (scaleTargets = 0) and the output is taken at unit scale
 * (scaleOutput = 1).
 */
void localFilterActs(NVMatrix& images, NVMatrix& filters, NVMatrix& targets,
                    int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
                    int numImgColors, int numGroups) {
    const float overwriteTargets = 0;  // discard any existing contents of targets
    const float unitOutputScale  = 1;  // use the raw output
    localFilterActs(images, filters, targets, imgSizeY, numModulesY, numModulesX,
                    paddingStart, moduleStride, numImgColors, numGroups,
                    overwriteTargets, unitOutputScale);
}
/*
 * Locally-connected filter activations: each output module has its own
 * filter weights. Forwards to the shared dispatcher with conv = false.
 */
void localFilterActs(NVMatrix& images, NVMatrix& filters, NVMatrix& targets,
                    int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
                    int numImgColors, int numGroups,
                    float scaleTargets, float scaleOutput) {
    const bool sharedWeights = false;  // locally-connected: per-module filter weights
    _filterActs(images, filters, targets, imgSizeY, numModulesY, numModulesX,
                paddingStart, moduleStride, numImgColors, numGroups,
                scaleTargets, scaleOutput, sharedWeights);
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModulesY, numModulesX, numImages)
* colorIndices: (numGroups, numFilterColors)
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
/*
 * Configures and launches one instantiation of filterActs_YxX_sparse_random.
 *
 * IPT   = imgsPerThread (compile-time 4/2/1)
 * FPT   = filtersPerThread (compile-time 8/4)
 * SCALE = whether existing target contents are blended in (scaleTargets != 0)
 * CHECK = whether an image-bounds check is required
 *
 * Fix vs. previous version: cudaFuncSetCacheConfig is applied to the exact
 * template instantiation that is launched. The old "do scale" branches
 * configured the scale=false instantiation but launched the scale=true one,
 * so the launched kernel never received the PreferShared cache configuration.
 * Generating both calls from the same macro arguments makes that mismatch
 * impossible.
 */
#define FILTER_ACTS_SPARSE_LAUNCH(IPT, FPT, SCALE, CHECK)                                                              \
    do {                                                                                                               \
        cudaFuncSetCacheConfig(filterActs_YxX_sparse_random< 4, 32, IPT, FPT, 2, SCALE, CHECK >,                       \
                               cudaFuncCachePreferShared);                                                             \
        filterActs_YxX_sparse_random< 4, 32, IPT, FPT, 2, SCALE, CHECK ><<<blocks, threads>>>(                         \
            images.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,                            \
            numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride,                         \
            numModulesY, numModulesX, imgStride, numFilterColors, numGroups, scaleTargets, scaleOutput, conv);         \
    } while (0)

/*
 * images:       (numImgColors, imgSizeY, imgSizeX, numImages) with stride given
 * filters:      (numFilterColors, filterPixels, numFilters) if conv
 *               (numModules, numFilterColors, filterPixels, numFilters) otherwise
 * targets:      (numFilters, numModulesY, numModulesX, numImages)
 * dColorIndices: device pointer to (numGroups, numFilterColors) color indices
 *
 * Selects compile-time kernel tuning parameters from the runtime configuration
 * (imgsPerThread from numImages divisibility by 128/64, filtersPerThread from
 * numFiltersPerGroup % 32, plus scale/bounds-check flags) and launches the
 * matching filterActs_YxX_sparse_random instantiation.
 *
 * Optimized for numImages being a multiple of 128; other batch sizes work
 * but were not tuned. When scaleTargets == 0, targets is resized and
 * overwritten; otherwise its existing shape must already match.
 */
void _filterActsSparse(NVMatrix& images, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
                       int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
                       int numImgColors, int numFilterColors, int numGroups,
                       float scaleTargets, float scaleOutput, bool conv) {
    int numFilters = filters.getNumCols();
    int numModules = numModulesY * numModulesX;
    int numImages = images.getNumCols();
    int imgPixels = images.getNumRows() / numImgColors;
    int imgSizeX = imgPixels / imgSizeY;
    int filterModuleMult = conv ? 1 : numModules; // local (non-conv) filters store one bank per module

    assert(numGroups > 1); // the sparse-random path only makes sense with multiple groups
    assert(numImgColors % numFilterColors == 0);
    assert((numFilterColors * numGroups) % numImgColors == 0);
    assert(numFilters % (16 * numGroups) == 0);
    assert(numFilterColors % 2 == 0);

    assert(imgSizeY * imgSizeX == imgPixels);
    assert(images.getNumRows() == imgPixels * numImgColors);
    int numFiltersPerGroup = numFilters / numGroups;

    int imgStride = images.getStride(); // images does not need to be a contiguous matrix

    int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
    int filterSize = int(sqrt(filterPixels));
    assert(filterSize * filterSize == filterPixels); // filters must be square
    assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);

    // These routines don't handle the case when only part of the image is visited in the convolution
    assert(paddingStart <= 0);
    assert(paddingStart + (numModulesX-1) * moduleStride + filterSize >= imgSizeX);
    assert(paddingStart + (numModulesY-1) * moduleStride + filterSize >= imgSizeY);
    assert(moduleStride <= filterSize);

    assert(!images.isTrans());
    assert(!filters.isTrans());
    assert(!targets.isTrans());

    assert(filters.isContiguous());
    assert(targets.isContiguous());

    int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
    dim3 blocks = numFiltersPerGroup % 32 == 0 ? dim3(DIVUP(numImages, 32 * imgsPerThread), (numModules * numFilters) / (4 * 8))
                                               : dim3(DIVUP(numImages, 32 * imgsPerThread), (numModules * numFilters) / (4 * 4));
    dim3 threads(32, 4);
    bool checkImgBounds = numImages % (32 * imgsPerThread) != 0;
    if (scaleTargets == 0) {
        targets.resize(numFilters * numModules, numImages);
    } else {
        assert(targets.getNumRows() == numFilters * numModules);
        assert(targets.getNumCols() == numImages);
    }

    if (imgsPerThread == 4) {
        if (scaleTargets == 0) { // don't scale
            if (checkImgBounds) {
                if (numFiltersPerGroup % 32 == 0) { FILTER_ACTS_SPARSE_LAUNCH(4, 8, false, true); }
                else                              { FILTER_ACTS_SPARSE_LAUNCH(4, 4, false, true); }
            } else {
                if (numFiltersPerGroup % 32 == 0) { FILTER_ACTS_SPARSE_LAUNCH(4, 8, false, false); }
                else                              { FILTER_ACTS_SPARSE_LAUNCH(4, 4, false, false); }
            }
        } else { // do scale
            if (checkImgBounds) {
                if (numFiltersPerGroup % 32 == 0) { FILTER_ACTS_SPARSE_LAUNCH(4, 8, true, true); }
                else                              { FILTER_ACTS_SPARSE_LAUNCH(4, 4, true, true); }
            } else {
                if (numFiltersPerGroup % 32 == 0) { FILTER_ACTS_SPARSE_LAUNCH(4, 8, true, false); }
                else                              { FILTER_ACTS_SPARSE_LAUNCH(4, 4, true, false); }
            }
        }
    } else if (imgsPerThread == 2) {
        if (scaleTargets == 0) { // don't scale
            if (checkImgBounds) {
                if (numFiltersPerGroup % 32 == 0) { FILTER_ACTS_SPARSE_LAUNCH(2, 8, false, true); }
                else                              { FILTER_ACTS_SPARSE_LAUNCH(2, 4, false, true); }
            } else {
                if (numFiltersPerGroup % 32 == 0) { FILTER_ACTS_SPARSE_LAUNCH(2, 8, false, false); }
                else                              { FILTER_ACTS_SPARSE_LAUNCH(2, 4, false, false); }
            }
        } else { // do scale
            if (checkImgBounds) {
                if (numFiltersPerGroup % 32 == 0) { FILTER_ACTS_SPARSE_LAUNCH(2, 8, true, true); }
                else                              { FILTER_ACTS_SPARSE_LAUNCH(2, 4, true, true); }
            } else {
                if (numFiltersPerGroup % 32 == 0) { FILTER_ACTS_SPARSE_LAUNCH(2, 8, true, false); }
                else                              { FILTER_ACTS_SPARSE_LAUNCH(2, 4, true, false); }
            }
        }
    } else {
        if (scaleTargets == 0) { // don't scale
            if (checkImgBounds) {
                if (numFiltersPerGroup % 32 == 0) { FILTER_ACTS_SPARSE_LAUNCH(1, 8, false, true); }
                else                              { FILTER_ACTS_SPARSE_LAUNCH(1, 4, false, true); }
            } else {
                if (numFiltersPerGroup % 32 == 0) { FILTER_ACTS_SPARSE_LAUNCH(1, 8, false, false); }
                else                              { FILTER_ACTS_SPARSE_LAUNCH(1, 4, false, false); }
            }
        } else { // do scale
            if (checkImgBounds) {
                if (numFiltersPerGroup % 32 == 0) { FILTER_ACTS_SPARSE_LAUNCH(1, 8, true, true); }
                else                              { FILTER_ACTS_SPARSE_LAUNCH(1, 4, true, true); }
            } else {
                if (numFiltersPerGroup % 32 == 0) { FILTER_ACTS_SPARSE_LAUNCH(1, 8, true, false); }
                else                              { FILTER_ACTS_SPARSE_LAUNCH(1, 4, true, false); }
            }
        }
    }

    cutilCheckMsg("filterActsSparse: kernel execution failed");
}

#undef FILTER_ACTS_SPARSE_LAUNCH
// Sparse-color filter activations, convolutional variant (filter weights
// shared across all module positions). Thin forwarding wrapper: passes every
// geometry/scale parameter through to _filterActsSparse with conv = true.
void convFilterActsSparse(NVMatrix& images, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
                       int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
                       int numImgColors, int numFilterColors, int numGroups,
                       float scaleTargets, float scaleOutput) {
     _filterActsSparse(images, filters, targets, dColorIndices, imgSizeY, numModulesY, numModulesX, paddingStart, moduleStride,
                       numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput, true);
}
// Convenience overload: convolutional sparse filterActs with
// scaleTargets = 0 (overwrite targets) and scaleOutput = 1.
void convFilterActsSparse(NVMatrix& images, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
                          int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
                          int numImgColors, int numFilterColors, int numGroups) {
    convFilterActsSparse(images, filters, targets, dColorIndices, imgSizeY, numModulesY, numModulesX, paddingStart,
                         moduleStride, numImgColors, numFilterColors, numGroups, 0, 1);
}
// Sparse-color filter activations, locally-connected variant (each module
// position has its own filter weights). Forwards to _filterActsSparse with
// conv = false.
void localFilterActsSparse(NVMatrix& images, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
                       int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
                       int numImgColors, int numFilterColors, int numGroups,
                       float scaleTargets, float scaleOutput) {
    _filterActsSparse(images, filters, targets, dColorIndices, imgSizeY, numModulesY, numModulesX, paddingStart, moduleStride,
                      numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput, false);
}
// Convenience overload: locally-connected sparse filterActs with
// scaleTargets = 0 (overwrite targets) and scaleOutput = 1.
void localFilterActsSparse(NVMatrix& images, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
                       int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
                       int numImgColors, int numFilterColors, int numGroups) {
    localFilterActsSparse(images, filters, targets, dColorIndices, imgSizeY, numModulesY, numModulesX, paddingStart,
                          moduleStride, numImgColors, numFilterColors, numGroups, 0, 1);
}
|
006e9945f2050dfab98805c5c73ae10e2f547ab9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <omp.h>
#define BLOCK_SIZE 64
#define HALF_BLOCK_SIZE 32
using namespace std;
const int INF = ((1 << 30) - 1);
// const int V = 50010;
void input(char* infile);
void output(char *outFileName);
void block_FW(int B);
int ceil(int a, int b);
__global__ void Phase1(int *dist, int Round, int n);
__global__ void Phase2(int *dist, int Round, int n);
__global__ void Phase3(int *dist, int Round, int n, int yOffset);
int original_n, n, m;
int* Dist = NULL;
// Entry point. argv[1] = binary input graph, argv[2] = output file.
// Runs blocked Floyd-Warshall on the GPU(s) and writes the result matrix.
int main(int argc, char* argv[]) {
    // Guard against missing arguments (the original dereferenced argv blindly).
    if (argc < 3) {
        fprintf(stderr, "usage: %s <input file> <output file>\n", argv[0]);
        return 1;
    }
    input(argv[1]);
    block_FW(BLOCK_SIZE);
    output(argv[2]);
    // BUG FIX: Dist is allocated with plain malloc() in input() and only
    // page-locked via hipHostRegister() in block_FW(). hipHostFree() is valid
    // only for hipHostMalloc allocations; unregister and free() instead.
    hipHostUnregister(Dist);
    free(Dist);
    return 0;
}
void input(char* infile) {
cout << "input" << endl;
FILE* file = fopen(infile, "rb");
fread(&original_n, sizeof(int), 1, file);
fread(&m, sizeof(int), 1, file);
// make n % BLOCK_SIZE == 0
n = original_n + (BLOCK_SIZE - (original_n%BLOCK_SIZE));
Dist = (int*) malloc(sizeof(int)*n*n);
for (int i = 0; i < n; ++ i) {
for (int j = 0; j < n; ++ j) {
if (i == j) {
Dist[i*n+j] = 0;
} else {
Dist[i*n+j] = INF;
}
}
}
int pair[3];
for (int i = 0; i < m; ++ i) {
fread(pair, sizeof(int), 3, file);
Dist[pair[0]*n+pair[1]] = pair[2];
}
fclose(file);
}
// Writes the top-left original_n x original_n corner of Dist row by row,
// clamping any accumulated distance at INF first (path sums can exceed INF
// without overflowing int, since INF = 2^30 - 1).
void output(char *outFileName) {
    FILE *outfile = fopen(outFileName, "w");
    for (int row = 0; row < original_n; ++row) {
        int *rowStart = &Dist[row * n];
        for (int col = 0; col < original_n; ++col) {
            if (rowStart[col] >= INF) rowStart[col] = INF;
        }
        fwrite(rowStart, sizeof(int), original_n, outfile);
    }
    fclose(outfile);
}
int ceil(int a, int b) { return (a + b - 1) / b; }
// Blocked Floyd-Warshall across two GPUs. Each OpenMP thread drives one
// device; GPU 0 owns the first blocks1 block-rows for phase 3 and GPU 1 the
// remaining blocks2, and after every round the halves are exchanged with a
// peer-to-peer copy. B is the tile width and must equal BLOCK_SIZE (the
// kernels hard-code it).
void block_FW(int B) {
    int* dst1 = NULL;   // device 0 copy of the matrix (shared across OMP threads)
    int* dst2 = NULL;   // device 1 copy of the matrix
    // size_t arithmetic: n*n*sizeof(int) overflows 32-bit int for large n.
    const size_t matrixSize = (size_t)n * (size_t)n * sizeof(int);
    hipHostRegister(Dist, matrixSize, hipHostRegisterDefault);
    const int blocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 block_dim(32, 32, 1); // each thread computes a 2x2 sub-tile of a 64x64 block
    const int blocks1 = blocks%2 != 0 ? (blocks/2) + 1 : (blocks/2); // block-rows owned by GPU 0
    const int blocks2 = blocks/2;                                    // block-rows owned by GPU 1
    dim3 grid_dim1(blocks, blocks1, 1);
    dim3 grid_dim2(blocks, blocks2, 1);
    int round = ceil(n, B);
    #pragma omp parallel num_threads(2)
    {
        int cpuThreadId = omp_get_thread_num();
        hipSetDevice(cpuThreadId);
        // BUG FIX: the original hipMalloc'd into a *local copy* of dst1/dst2,
        // leaving the shared pointers NULL for every hipMemcpyPeer/hipFree
        // below. Allocate through the shared slot instead.
        int** dstSlot = (cpuThreadId == 0) ? &dst1 : &dst2;
        hipMalloc(dstSlot, matrixSize);
        int* dst = *dstSlot;
        hipMemcpy(dst, Dist, matrixSize, hipMemcpyHostToDevice);
        // Both allocations must be published before either thread peer-copies.
        #pragma omp barrier
        for (int r = 0; r < round; ++r) {
            /* Phase 1: pivot block */
            hipLaunchKernelGGL(( Phase1), dim3(1), dim3(block_dim), 0, 0, dst, r, n);
            /* Phase 2: pivot row + pivot column */
            hipLaunchKernelGGL(( Phase2), dim3(blocks), dim3(block_dim), 0, 0, dst, r, n);
            /* Phase 3: this GPU's share of the remaining blocks */
            if (cpuThreadId == 0) {
                hipLaunchKernelGGL(( Phase3), dim3(grid_dim1), dim3(block_dim), 0, 0, dst, r, n, 0);
            } else {
                hipLaunchKernelGGL(( Phase3), dim3(grid_dim2), dim3(block_dim), 0, 0, dst, r, n, blocks1);
            }
            hipDeviceSynchronize();
            // Both halves must be finished before either side reads the peer's.
            #pragma omp barrier
            if (cpuThreadId == 0) {
                // BUG FIX: the original copied from the *base* of dst2, but
                // GPU 1 computes block-rows [blocks1, blocks); copy that region.
                hipMemcpyPeer(dst1 + (size_t)blocks1*BLOCK_SIZE*n, 0,
                              dst2 + (size_t)blocks1*BLOCK_SIZE*n, 1,
                              sizeof(int) * (size_t)blocks2*BLOCK_SIZE*n);
            } else {
                // GPU 0 computes block-rows [0, blocks1).
                hipMemcpyPeer(dst2, 1, dst1, 0,
                              sizeof(int) * (size_t)blocks1*BLOCK_SIZE*n);
            }
            // The next round must not start until the exchange is complete.
            #pragma omp barrier
        }
        // After the last round's barrier dst1 holds the full result.
        if (cpuThreadId == 0) hipMemcpy(Dist, dst, matrixSize, hipMemcpyDeviceToHost);
    }
    hipFree(dst1);
    hipFree(dst2);
}
// Phase 1 of blocked Floyd-Warshall: relax the pivot (diagonal) block of the
// current Round against itself in shared memory.
// Launch: one 32x32 thread block; each thread owns a 2x2 sub-tile of the
// 64x64 pivot block, hence the HALF_BLOCK_SIZE offsets on every access.
__global__ void Phase1(int *dist, int Round, int n) {
    const int innerI = threadIdx.y;
    const int innerJ = threadIdx.x;
    // offset*(n+1) == (Round*B)*n + Round*B: top-left corner of the pivot block.
    const int offset = BLOCK_SIZE * Round;
    __shared__ int C[BLOCK_SIZE][BLOCK_SIZE]; // pivot block staged in shared memory
    // Every thread reads its own 4 values:
    // blockIndex (to the diagonal block) + innerBlockIndex (per-thread index)
    C[innerI][innerJ] = dist[offset*(n+1) + innerI*n + innerJ];
    C[innerI+HALF_BLOCK_SIZE][innerJ] = dist[offset*(n+1) + (innerI+HALF_BLOCK_SIZE)*n + innerJ];
    C[innerI][innerJ+HALF_BLOCK_SIZE] = dist[offset*(n+1) + innerI*n + innerJ + HALF_BLOCK_SIZE];
    C[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE] = dist[offset*(n+1) + (innerI+HALF_BLOCK_SIZE)*n + innerJ + HALF_BLOCK_SIZE];
    __syncthreads();
    for (int k = 0; k < BLOCK_SIZE; k++) {
        C[innerI][innerJ] = (C[innerI][k] + C[k][innerJ]) < C[innerI][innerJ] ? (C[innerI][k] + C[k][innerJ]) : C[innerI][innerJ];
        C[innerI+HALF_BLOCK_SIZE][innerJ] = (C[innerI+HALF_BLOCK_SIZE][k] + C[k][innerJ]) < C[innerI+HALF_BLOCK_SIZE][innerJ] ? (C[innerI+HALF_BLOCK_SIZE][k] + C[k][innerJ]) : C[innerI+HALF_BLOCK_SIZE][innerJ];
        C[innerI][innerJ+HALF_BLOCK_SIZE] = (C[innerI][k] + C[k][innerJ+HALF_BLOCK_SIZE]) < C[innerI][innerJ+HALF_BLOCK_SIZE] ? (C[innerI][k] + C[k][innerJ+HALF_BLOCK_SIZE]) : C[innerI][innerJ+HALF_BLOCK_SIZE];
        C[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE] = (C[innerI+HALF_BLOCK_SIZE][k] + C[k][innerJ+HALF_BLOCK_SIZE]) < C[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE] ? (C[innerI+HALF_BLOCK_SIZE][k] + C[k][innerJ+HALF_BLOCK_SIZE]) : C[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE];
        // Step k+1 reads row/column k updated by other threads in step k, so
        // the whole block must sync on every iteration (only phase 1 needs this).
        __syncthreads();
    }
    dist[offset*(n+1) + innerI*n + innerJ] = C[innerI][innerJ];
    dist[offset*(n+1) + (innerI+HALF_BLOCK_SIZE)*n + innerJ] = C[innerI+HALF_BLOCK_SIZE][innerJ];
    dist[offset*(n+1) + innerI*n + innerJ + HALF_BLOCK_SIZE] = C[innerI][innerJ+HALF_BLOCK_SIZE];
    dist[offset*(n+1) + (innerI+HALF_BLOCK_SIZE)*n + innerJ + HALF_BLOCK_SIZE] = C[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE];
}
// Phase 2 of blocked Floyd-Warshall: relax every block in the pivot column
// (tile A = block (i, Round)) and pivot row (tile B = block (Round, i))
// against the already-finished pivot block (Diagonal).
// Launch: <blocks> thread blocks of 32x32; blockIdx.x selects the block index
// i along the pivot row/column; the pivot block itself is skipped (phase 1).
// NOTE(review): there is no __syncthreads() inside the k-loop, so reads of
// A[innerI][k] / B[k][innerJ] can race with updates of those entries by
// sibling threads — confirm this is the intended tolerance of the algorithm.
__global__ void Phase2(int *dist, int Round, int n) {
    const int i = blockIdx.x; // "i" in n block in one row
    if (i == Round) return;   // pivot block already done in phase 1
    const int innerI = threadIdx.y;
    const int innerJ = threadIdx.x;
    // diagonalOffset*(n+1): top-left corner of the pivot block.
    const int diagonalOffset = BLOCK_SIZE * Round;
    __shared__ int Diagonal[BLOCK_SIZE][BLOCK_SIZE]; // pivot block (read-only here)
    __shared__ int A[BLOCK_SIZE][BLOCK_SIZE];        // block (i, Round): pivot column
    __shared__ int B[BLOCK_SIZE][BLOCK_SIZE];        // block (Round, i): pivot row
    // Stage the three 64x64 tiles; each thread loads 4 elements per tile.
    A[innerI][innerJ] = dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + innerI*n + innerJ];
    A[innerI+HALF_BLOCK_SIZE][innerJ] = dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ];
    A[innerI][innerJ+HALF_BLOCK_SIZE] = dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + innerI*n + innerJ + HALF_BLOCK_SIZE];
    A[innerI + HALF_BLOCK_SIZE][innerJ + HALF_BLOCK_SIZE] = dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ+HALF_BLOCK_SIZE];
    B[innerI][innerJ] = dist[Round*BLOCK_SIZE*n + i*BLOCK_SIZE + innerI*n + innerJ];
    B[innerI+HALF_BLOCK_SIZE][innerJ] = dist[Round*BLOCK_SIZE*n + i*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ];
    B[innerI][innerJ+HALF_BLOCK_SIZE] = dist[Round*BLOCK_SIZE*n + i*BLOCK_SIZE + innerI*n + innerJ+HALF_BLOCK_SIZE];
    B[innerI + HALF_BLOCK_SIZE][innerJ + HALF_BLOCK_SIZE] = dist[Round*BLOCK_SIZE*n + i*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ+HALF_BLOCK_SIZE];
    Diagonal[innerI][innerJ] = dist[diagonalOffset*(n+1) + innerI*n + innerJ]; // diagonalValue
    Diagonal[innerI+HALF_BLOCK_SIZE][innerJ] = dist[diagonalOffset*(n+1) + (innerI+HALF_BLOCK_SIZE)*n + innerJ]; // diagonalValue
    Diagonal[innerI][innerJ+HALF_BLOCK_SIZE] = dist[diagonalOffset*(n+1) + innerI*n + innerJ+HALF_BLOCK_SIZE]; // diagonalValue
    Diagonal[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE] = dist[diagonalOffset*(n+1) + (innerI+HALF_BLOCK_SIZE)*n + innerJ+HALF_BLOCK_SIZE]; // diagonalValue
    __syncthreads();
    #pragma unroll 32
    for (int k = 0; k < BLOCK_SIZE; k++) {
        // A (pivot column) goes through the pivot block on the right,
        // B (pivot row) goes through it on the left.
        A[innerI][innerJ] = (A[innerI][k] + Diagonal[k][innerJ]) < A[innerI][innerJ] ? (A[innerI][k] + Diagonal[k][innerJ]) : A[innerI][innerJ];
        A[innerI+HALF_BLOCK_SIZE][innerJ] = (A[innerI+HALF_BLOCK_SIZE][k] + Diagonal[k][innerJ]) < A[innerI+HALF_BLOCK_SIZE][innerJ] ? (A[innerI+HALF_BLOCK_SIZE][k] + Diagonal[k][innerJ]) : A[innerI+HALF_BLOCK_SIZE][innerJ];
        A[innerI][innerJ+HALF_BLOCK_SIZE] = (A[innerI][k] + Diagonal[k][innerJ+HALF_BLOCK_SIZE]) < A[innerI][innerJ+HALF_BLOCK_SIZE] ? (A[innerI][k] + Diagonal[k][innerJ+HALF_BLOCK_SIZE]) : A[innerI][innerJ+HALF_BLOCK_SIZE];
        A[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE] = (A[innerI+HALF_BLOCK_SIZE][k] + Diagonal[k][innerJ+HALF_BLOCK_SIZE]) < A[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE] ? (A[innerI+HALF_BLOCK_SIZE][k] + Diagonal[k][innerJ+HALF_BLOCK_SIZE]) : A[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE];
        B[innerI][innerJ] = (Diagonal[innerI][k] + B[k][innerJ]) < B[innerI][innerJ] ? (Diagonal[innerI][k] + B[k][innerJ]) : B[innerI][innerJ];
        B[innerI+HALF_BLOCK_SIZE][innerJ] = (Diagonal[innerI+HALF_BLOCK_SIZE][k] + B[k][innerJ]) < B[innerI+HALF_BLOCK_SIZE][innerJ] ? (Diagonal[innerI+HALF_BLOCK_SIZE][k] + B[k][innerJ]) : B[innerI+HALF_BLOCK_SIZE][innerJ];
        B[innerI][innerJ+HALF_BLOCK_SIZE] = (Diagonal[innerI][k] + B[k][innerJ+HALF_BLOCK_SIZE]) < B[innerI][innerJ+HALF_BLOCK_SIZE] ? (Diagonal[innerI][k] + B[k][innerJ+HALF_BLOCK_SIZE]) : B[innerI][innerJ+HALF_BLOCK_SIZE];
        B[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE] = (Diagonal[innerI+HALF_BLOCK_SIZE][k] + B[k][innerJ+HALF_BLOCK_SIZE]) < B[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE] ? (Diagonal[innerI+HALF_BLOCK_SIZE][k] + B[k][innerJ+HALF_BLOCK_SIZE]) : B[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE];
    }
    // Write both relaxed tiles back to global memory.
    dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + innerI*n + innerJ] = A[innerI][innerJ];
    dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ] = A[innerI+HALF_BLOCK_SIZE][innerJ];
    dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + innerI*n + innerJ + HALF_BLOCK_SIZE] = A[innerI][innerJ+HALF_BLOCK_SIZE];
    dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ+HALF_BLOCK_SIZE] = A[innerI + HALF_BLOCK_SIZE][innerJ + HALF_BLOCK_SIZE];
    dist[Round*BLOCK_SIZE*n + i*BLOCK_SIZE + innerI*n + innerJ] = B[innerI][innerJ];
    dist[Round*BLOCK_SIZE*n + i*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ] = B[innerI+HALF_BLOCK_SIZE][innerJ];
    dist[Round*BLOCK_SIZE*n + i*BLOCK_SIZE + innerI*n + innerJ+HALF_BLOCK_SIZE] = B[innerI][innerJ+HALF_BLOCK_SIZE];
    dist[Round*BLOCK_SIZE*n + i*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ+HALF_BLOCK_SIZE] = B[innerI + HALF_BLOCK_SIZE][innerJ + HALF_BLOCK_SIZE];
}
// Phase 3 of blocked Floyd-Warshall: relax every remaining block (i, j) with
// C = min(C, A + B), where A = block (i, Round) (pivot column) and
// B = block (Round, j) (pivot row), both finalized by phase 2.
// A and B are only read inside the k-loop and each thread writes only its own
// C entries, so no __syncthreads() is needed in the loop.
// yOffset shifts blockIdx.y so each GPU covers its own share of block-rows.
__global__ void Phase3(int *dist, int Round, int n, int yOffset) {
    const int j = blockIdx.x;
    const int i = blockIdx.y + yOffset;
    if (i == Round && j == Round) return; // pivot block handled in phase 1
    const int innerI = threadIdx.y;
    const int innerJ = threadIdx.x;
    __shared__ int A[BLOCK_SIZE][BLOCK_SIZE]; // block (i, Round), read-only
    __shared__ int B[BLOCK_SIZE][BLOCK_SIZE]; // block (Round, j), read-only
    __shared__ int C[BLOCK_SIZE][BLOCK_SIZE]; // block (i, j), updated in place
    // Stage the three tiles; each thread loads a 2x2 sub-tile of each.
    C[innerI][innerJ] = dist[i*BLOCK_SIZE*n + j*BLOCK_SIZE + innerI*n + innerJ];
    C[innerI+HALF_BLOCK_SIZE][innerJ] = dist[i*BLOCK_SIZE*n + j*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ];
    C[innerI][innerJ+HALF_BLOCK_SIZE] = dist[i*BLOCK_SIZE*n + j*BLOCK_SIZE + innerI*n + innerJ+HALF_BLOCK_SIZE];
    C[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE] = dist[i*BLOCK_SIZE*n + j*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ+HALF_BLOCK_SIZE];
    A[innerI][innerJ] = dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + innerI*n + innerJ];
    A[innerI+HALF_BLOCK_SIZE][innerJ] = dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ];
    A[innerI][innerJ+HALF_BLOCK_SIZE] = dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + innerI*n + innerJ + HALF_BLOCK_SIZE];
    A[innerI + HALF_BLOCK_SIZE][innerJ + HALF_BLOCK_SIZE] = dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ + HALF_BLOCK_SIZE];
    B[innerI][innerJ] = dist[Round*BLOCK_SIZE*n + j*BLOCK_SIZE + innerI*n + innerJ];
    B[innerI+HALF_BLOCK_SIZE][innerJ] = dist[Round*BLOCK_SIZE*n + j*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ];
    B[innerI][innerJ+HALF_BLOCK_SIZE] = dist[Round*BLOCK_SIZE*n + j*BLOCK_SIZE + innerI*n + innerJ+HALF_BLOCK_SIZE];
    B[innerI + HALF_BLOCK_SIZE][innerJ + HALF_BLOCK_SIZE] = dist[Round*BLOCK_SIZE*n + j*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ+HALF_BLOCK_SIZE];
    __syncthreads();
    #pragma unroll 32
    for (int k = 0; k < BLOCK_SIZE; k++) {
        C[innerI][innerJ] = (A[innerI][k] + B[k][innerJ]) < C[innerI][innerJ] ? (A[innerI][k] + B[k][innerJ]) : C[innerI][innerJ];
        C[innerI+HALF_BLOCK_SIZE][innerJ] = (A[innerI+HALF_BLOCK_SIZE][k] + B[k][innerJ]) < C[innerI+HALF_BLOCK_SIZE][innerJ] ? (A[innerI+HALF_BLOCK_SIZE][k] + B[k][innerJ]) : C[innerI+HALF_BLOCK_SIZE][innerJ];
        C[innerI][innerJ+HALF_BLOCK_SIZE] = (A[innerI][k] + B[k][innerJ+HALF_BLOCK_SIZE]) < C[innerI][innerJ+HALF_BLOCK_SIZE] ? (A[innerI][k] + B[k][innerJ+HALF_BLOCK_SIZE]) : C[innerI][innerJ+HALF_BLOCK_SIZE];
        C[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE] = (A[innerI+HALF_BLOCK_SIZE][k] + B[k][innerJ+HALF_BLOCK_SIZE]) < C[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE] ? (A[innerI+HALF_BLOCK_SIZE][k] + B[k][innerJ+HALF_BLOCK_SIZE]) : C[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE];
    }
    dist[i*BLOCK_SIZE*n + j*BLOCK_SIZE + innerI*n + innerJ] = C[innerI][innerJ];
    dist[i*BLOCK_SIZE*n + j*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ] = C[innerI+HALF_BLOCK_SIZE][innerJ];
    dist[i*BLOCK_SIZE*n + j*BLOCK_SIZE + innerI*n + innerJ+HALF_BLOCK_SIZE] = C[innerI][innerJ+HALF_BLOCK_SIZE];
    dist[i*BLOCK_SIZE*n + j*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ+HALF_BLOCK_SIZE] = C[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE];
} | 006e9945f2050dfab98805c5c73ae10e2f547ab9.cu | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cuda_runtime.h>
#include <omp.h>
#define BLOCK_SIZE 64
#define HALF_BLOCK_SIZE 32
using namespace std;
const int INF = ((1 << 30) - 1);
// const int V = 50010;
void input(char* infile);
void output(char *outFileName);
void block_FW(int B);
int ceil(int a, int b);
__global__ void Phase1(int *dist, int Round, int n);
__global__ void Phase2(int *dist, int Round, int n);
__global__ void Phase3(int *dist, int Round, int n, int yOffset);
int original_n, n, m;
int* Dist = NULL;
// Entry point. argv[1] = binary input graph, argv[2] = output file.
// Runs blocked Floyd-Warshall on the GPU(s) and writes the result matrix.
int main(int argc, char* argv[]) {
    // Guard against missing arguments (the original dereferenced argv blindly).
    if (argc < 3) {
        fprintf(stderr, "usage: %s <input file> <output file>\n", argv[0]);
        return 1;
    }
    input(argv[1]);
    block_FW(BLOCK_SIZE);
    output(argv[2]);
    // BUG FIX: Dist is allocated with plain malloc() in input() and only
    // page-locked via cudaHostRegister() in block_FW(). cudaFreeHost() is
    // valid only for cudaMallocHost allocations; unregister and free() instead.
    cudaHostUnregister(Dist);
    free(Dist);
    return 0;
}
void input(char* infile) {
cout << "input" << endl;
FILE* file = fopen(infile, "rb");
fread(&original_n, sizeof(int), 1, file);
fread(&m, sizeof(int), 1, file);
// make n % BLOCK_SIZE == 0
n = original_n + (BLOCK_SIZE - (original_n%BLOCK_SIZE));
Dist = (int*) malloc(sizeof(int)*n*n);
for (int i = 0; i < n; ++ i) {
for (int j = 0; j < n; ++ j) {
if (i == j) {
Dist[i*n+j] = 0;
} else {
Dist[i*n+j] = INF;
}
}
}
int pair[3];
for (int i = 0; i < m; ++ i) {
fread(pair, sizeof(int), 3, file);
Dist[pair[0]*n+pair[1]] = pair[2];
}
fclose(file);
}
// Writes the top-left original_n x original_n corner of Dist row by row,
// clamping any accumulated distance at INF first (path sums can exceed INF
// without overflowing int, since INF = 2^30 - 1).
void output(char *outFileName) {
    FILE *outfile = fopen(outFileName, "w");
    for (int row = 0; row < original_n; ++row) {
        int *rowStart = &Dist[row * n];
        for (int col = 0; col < original_n; ++col) {
            if (rowStart[col] >= INF) rowStart[col] = INF;
        }
        fwrite(rowStart, sizeof(int), original_n, outfile);
    }
    fclose(outfile);
}
int ceil(int a, int b) { return (a + b - 1) / b; }
// Blocked Floyd-Warshall across two GPUs. Each OpenMP thread drives one
// device; GPU 0 owns the first blocks1 block-rows for phase 3 and GPU 1 the
// remaining blocks2, and after every round the halves are exchanged with a
// peer-to-peer copy. B is the tile width and must equal BLOCK_SIZE (the
// kernels hard-code it).
void block_FW(int B) {
    int* dst1 = NULL;   // device 0 copy of the matrix (shared across OMP threads)
    int* dst2 = NULL;   // device 1 copy of the matrix
    // size_t arithmetic: n*n*sizeof(int) overflows 32-bit int for large n.
    const size_t matrixSize = (size_t)n * (size_t)n * sizeof(int);
    cudaHostRegister(Dist, matrixSize, cudaHostRegisterDefault);
    const int blocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 block_dim(32, 32, 1); // must stay 32x32: each thread computes a 2x2 sub-tile of a 64x64 block
    const int blocks1 = blocks%2 != 0 ? (blocks/2) + 1 : (blocks/2); // block-rows owned by GPU 0
    const int blocks2 = blocks/2;                                    // block-rows owned by GPU 1
    dim3 grid_dim1(blocks, blocks1, 1);
    dim3 grid_dim2(blocks, blocks2, 1);
    int round = ceil(n, B);
    #pragma omp parallel num_threads(2)
    {
        int cpuThreadId = omp_get_thread_num();
        cudaSetDevice(cpuThreadId);
        // BUG FIX: the original cudaMalloc'd into a *local copy* of dst1/dst2,
        // leaving the shared pointers NULL for every cudaMemcpyPeer/cudaFree
        // below. Allocate through the shared slot instead.
        int** dstSlot = (cpuThreadId == 0) ? &dst1 : &dst2;
        cudaMalloc(dstSlot, matrixSize);
        int* dst = *dstSlot;
        cudaMemcpy(dst, Dist, matrixSize, cudaMemcpyHostToDevice);
        // Both allocations must be published before either thread peer-copies.
        #pragma omp barrier
        for (int r = 0; r < round; ++r) {
            /* Phase 1: pivot block */
            Phase1<<<1, block_dim>>>(dst, r, n);
            /* Phase 2: pivot row + pivot column */
            Phase2<<<blocks, block_dim>>>(dst, r, n);
            /* Phase 3: this GPU's share of the remaining blocks */
            if (cpuThreadId == 0) {
                Phase3<<<grid_dim1, block_dim>>>(dst, r, n, 0);
            } else {
                Phase3<<<grid_dim2, block_dim>>>(dst, r, n, blocks1);
            }
            cudaDeviceSynchronize();
            // Both halves must be finished before either side reads the peer's.
            #pragma omp barrier
            if (cpuThreadId == 0) {
                // BUG FIX: the original copied from the *base* of dst2, but
                // GPU 1 computes block-rows [blocks1, blocks); copy that region.
                cudaMemcpyPeer(dst1 + (size_t)blocks1*BLOCK_SIZE*n, 0,
                               dst2 + (size_t)blocks1*BLOCK_SIZE*n, 1,
                               sizeof(int) * (size_t)blocks2*BLOCK_SIZE*n);
            } else {
                // GPU 0 computes block-rows [0, blocks1).
                cudaMemcpyPeer(dst2, 1, dst1, 0,
                               sizeof(int) * (size_t)blocks1*BLOCK_SIZE*n);
            }
            // The next round must not start until the exchange is complete.
            #pragma omp barrier
        }
        // After the last round's barrier dst1 holds the full result.
        if (cpuThreadId == 0) cudaMemcpy(Dist, dst, matrixSize, cudaMemcpyDeviceToHost);
    }
    cudaFree(dst1);
    cudaFree(dst2);
}
// Phase 1 of blocked Floyd-Warshall: relax the pivot (diagonal) block of the
// current Round against itself in shared memory.
// Launch: one 32x32 thread block; each thread owns a 2x2 sub-tile of the
// 64x64 pivot block, hence the HALF_BLOCK_SIZE offsets on every access.
__global__ void Phase1(int *dist, int Round, int n) {
    const int innerI = threadIdx.y;
    const int innerJ = threadIdx.x;
    // offset*(n+1) == (Round*B)*n + Round*B: top-left corner of the pivot block.
    const int offset = BLOCK_SIZE * Round;
    __shared__ int C[BLOCK_SIZE][BLOCK_SIZE]; // pivot block staged in shared memory
    // Every thread reads its own 4 values:
    // blockIndex (to the diagonal block) + innerBlockIndex (per-thread index)
    C[innerI][innerJ] = dist[offset*(n+1) + innerI*n + innerJ];
    C[innerI+HALF_BLOCK_SIZE][innerJ] = dist[offset*(n+1) + (innerI+HALF_BLOCK_SIZE)*n + innerJ];
    C[innerI][innerJ+HALF_BLOCK_SIZE] = dist[offset*(n+1) + innerI*n + innerJ + HALF_BLOCK_SIZE];
    C[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE] = dist[offset*(n+1) + (innerI+HALF_BLOCK_SIZE)*n + innerJ + HALF_BLOCK_SIZE];
    __syncthreads();
    for (int k = 0; k < BLOCK_SIZE; k++) {
        C[innerI][innerJ] = (C[innerI][k] + C[k][innerJ]) < C[innerI][innerJ] ? (C[innerI][k] + C[k][innerJ]) : C[innerI][innerJ];
        C[innerI+HALF_BLOCK_SIZE][innerJ] = (C[innerI+HALF_BLOCK_SIZE][k] + C[k][innerJ]) < C[innerI+HALF_BLOCK_SIZE][innerJ] ? (C[innerI+HALF_BLOCK_SIZE][k] + C[k][innerJ]) : C[innerI+HALF_BLOCK_SIZE][innerJ];
        C[innerI][innerJ+HALF_BLOCK_SIZE] = (C[innerI][k] + C[k][innerJ+HALF_BLOCK_SIZE]) < C[innerI][innerJ+HALF_BLOCK_SIZE] ? (C[innerI][k] + C[k][innerJ+HALF_BLOCK_SIZE]) : C[innerI][innerJ+HALF_BLOCK_SIZE];
        C[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE] = (C[innerI+HALF_BLOCK_SIZE][k] + C[k][innerJ+HALF_BLOCK_SIZE]) < C[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE] ? (C[innerI+HALF_BLOCK_SIZE][k] + C[k][innerJ+HALF_BLOCK_SIZE]) : C[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE];
        // Step k+1 reads row/column k updated by other threads in step k, so
        // the whole block must sync on every iteration (only phase 1 needs this).
        __syncthreads();
    }
    dist[offset*(n+1) + innerI*n + innerJ] = C[innerI][innerJ];
    dist[offset*(n+1) + (innerI+HALF_BLOCK_SIZE)*n + innerJ] = C[innerI+HALF_BLOCK_SIZE][innerJ];
    dist[offset*(n+1) + innerI*n + innerJ + HALF_BLOCK_SIZE] = C[innerI][innerJ+HALF_BLOCK_SIZE];
    dist[offset*(n+1) + (innerI+HALF_BLOCK_SIZE)*n + innerJ + HALF_BLOCK_SIZE] = C[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE];
}
// Phase 2 of blocked Floyd-Warshall: relax every block in the pivot column
// (tile A = block (i, Round)) and pivot row (tile B = block (Round, i))
// against the already-finished pivot block (Diagonal).
// Launch: <blocks> thread blocks of 32x32; blockIdx.x selects the block index
// i along the pivot row/column; the pivot block itself is skipped (phase 1).
// NOTE(review): there is no __syncthreads() inside the k-loop, so reads of
// A[innerI][k] / B[k][innerJ] can race with updates of those entries by
// sibling threads — confirm this is the intended tolerance of the algorithm.
__global__ void Phase2(int *dist, int Round, int n) {
    const int i = blockIdx.x; // "i" in n block in one row
    if (i == Round) return;   // pivot block already done in phase 1
    const int innerI = threadIdx.y;
    const int innerJ = threadIdx.x;
    // diagonalOffset*(n+1): top-left corner of the pivot block.
    const int diagonalOffset = BLOCK_SIZE * Round;
    __shared__ int Diagonal[BLOCK_SIZE][BLOCK_SIZE]; // pivot block (read-only here)
    __shared__ int A[BLOCK_SIZE][BLOCK_SIZE];        // block (i, Round): pivot column
    __shared__ int B[BLOCK_SIZE][BLOCK_SIZE];        // block (Round, i): pivot row
    // Stage the three 64x64 tiles; each thread loads 4 elements per tile.
    A[innerI][innerJ] = dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + innerI*n + innerJ];
    A[innerI+HALF_BLOCK_SIZE][innerJ] = dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ];
    A[innerI][innerJ+HALF_BLOCK_SIZE] = dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + innerI*n + innerJ + HALF_BLOCK_SIZE];
    A[innerI + HALF_BLOCK_SIZE][innerJ + HALF_BLOCK_SIZE] = dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ+HALF_BLOCK_SIZE];
    B[innerI][innerJ] = dist[Round*BLOCK_SIZE*n + i*BLOCK_SIZE + innerI*n + innerJ];
    B[innerI+HALF_BLOCK_SIZE][innerJ] = dist[Round*BLOCK_SIZE*n + i*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ];
    B[innerI][innerJ+HALF_BLOCK_SIZE] = dist[Round*BLOCK_SIZE*n + i*BLOCK_SIZE + innerI*n + innerJ+HALF_BLOCK_SIZE];
    B[innerI + HALF_BLOCK_SIZE][innerJ + HALF_BLOCK_SIZE] = dist[Round*BLOCK_SIZE*n + i*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ+HALF_BLOCK_SIZE];
    Diagonal[innerI][innerJ] = dist[diagonalOffset*(n+1) + innerI*n + innerJ]; // diagonalValue
    Diagonal[innerI+HALF_BLOCK_SIZE][innerJ] = dist[diagonalOffset*(n+1) + (innerI+HALF_BLOCK_SIZE)*n + innerJ]; // diagonalValue
    Diagonal[innerI][innerJ+HALF_BLOCK_SIZE] = dist[diagonalOffset*(n+1) + innerI*n + innerJ+HALF_BLOCK_SIZE]; // diagonalValue
    Diagonal[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE] = dist[diagonalOffset*(n+1) + (innerI+HALF_BLOCK_SIZE)*n + innerJ+HALF_BLOCK_SIZE]; // diagonalValue
    __syncthreads();
    #pragma unroll 32
    for (int k = 0; k < BLOCK_SIZE; k++) {
        // A (pivot column) goes through the pivot block on the right,
        // B (pivot row) goes through it on the left.
        A[innerI][innerJ] = (A[innerI][k] + Diagonal[k][innerJ]) < A[innerI][innerJ] ? (A[innerI][k] + Diagonal[k][innerJ]) : A[innerI][innerJ];
        A[innerI+HALF_BLOCK_SIZE][innerJ] = (A[innerI+HALF_BLOCK_SIZE][k] + Diagonal[k][innerJ]) < A[innerI+HALF_BLOCK_SIZE][innerJ] ? (A[innerI+HALF_BLOCK_SIZE][k] + Diagonal[k][innerJ]) : A[innerI+HALF_BLOCK_SIZE][innerJ];
        A[innerI][innerJ+HALF_BLOCK_SIZE] = (A[innerI][k] + Diagonal[k][innerJ+HALF_BLOCK_SIZE]) < A[innerI][innerJ+HALF_BLOCK_SIZE] ? (A[innerI][k] + Diagonal[k][innerJ+HALF_BLOCK_SIZE]) : A[innerI][innerJ+HALF_BLOCK_SIZE];
        A[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE] = (A[innerI+HALF_BLOCK_SIZE][k] + Diagonal[k][innerJ+HALF_BLOCK_SIZE]) < A[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE] ? (A[innerI+HALF_BLOCK_SIZE][k] + Diagonal[k][innerJ+HALF_BLOCK_SIZE]) : A[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE];
        B[innerI][innerJ] = (Diagonal[innerI][k] + B[k][innerJ]) < B[innerI][innerJ] ? (Diagonal[innerI][k] + B[k][innerJ]) : B[innerI][innerJ];
        B[innerI+HALF_BLOCK_SIZE][innerJ] = (Diagonal[innerI+HALF_BLOCK_SIZE][k] + B[k][innerJ]) < B[innerI+HALF_BLOCK_SIZE][innerJ] ? (Diagonal[innerI+HALF_BLOCK_SIZE][k] + B[k][innerJ]) : B[innerI+HALF_BLOCK_SIZE][innerJ];
        B[innerI][innerJ+HALF_BLOCK_SIZE] = (Diagonal[innerI][k] + B[k][innerJ+HALF_BLOCK_SIZE]) < B[innerI][innerJ+HALF_BLOCK_SIZE] ? (Diagonal[innerI][k] + B[k][innerJ+HALF_BLOCK_SIZE]) : B[innerI][innerJ+HALF_BLOCK_SIZE];
        B[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE] = (Diagonal[innerI+HALF_BLOCK_SIZE][k] + B[k][innerJ+HALF_BLOCK_SIZE]) < B[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE] ? (Diagonal[innerI+HALF_BLOCK_SIZE][k] + B[k][innerJ+HALF_BLOCK_SIZE]) : B[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE];
    }
    // Write both relaxed tiles back to global memory.
    dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + innerI*n + innerJ] = A[innerI][innerJ];
    dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ] = A[innerI+HALF_BLOCK_SIZE][innerJ];
    dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + innerI*n + innerJ + HALF_BLOCK_SIZE] = A[innerI][innerJ+HALF_BLOCK_SIZE];
    dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ+HALF_BLOCK_SIZE] = A[innerI + HALF_BLOCK_SIZE][innerJ + HALF_BLOCK_SIZE];
    dist[Round*BLOCK_SIZE*n + i*BLOCK_SIZE + innerI*n + innerJ] = B[innerI][innerJ];
    dist[Round*BLOCK_SIZE*n + i*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ] = B[innerI+HALF_BLOCK_SIZE][innerJ];
    dist[Round*BLOCK_SIZE*n + i*BLOCK_SIZE + innerI*n + innerJ+HALF_BLOCK_SIZE] = B[innerI][innerJ+HALF_BLOCK_SIZE];
    dist[Round*BLOCK_SIZE*n + i*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ+HALF_BLOCK_SIZE] = B[innerI + HALF_BLOCK_SIZE][innerJ + HALF_BLOCK_SIZE];
}
// Phase 3 of blocked Floyd-Warshall: relax every remaining block (i, j) with
// C = min(C, A + B), where A = block (i, Round) (pivot column) and
// B = block (Round, j) (pivot row), both finalized by phase 2.
// A and B are only read inside the k-loop and each thread writes only its own
// C entries, so no __syncthreads() is needed in the loop.
// yOffset shifts blockIdx.y so each GPU covers its own share of block-rows.
__global__ void Phase3(int *dist, int Round, int n, int yOffset) {
    const int j = blockIdx.x;
    const int i = blockIdx.y + yOffset;
    if (i == Round && j == Round) return; // pivot block handled in phase 1
    const int innerI = threadIdx.y;
    const int innerJ = threadIdx.x;
    __shared__ int A[BLOCK_SIZE][BLOCK_SIZE]; // block (i, Round), read-only
    __shared__ int B[BLOCK_SIZE][BLOCK_SIZE]; // block (Round, j), read-only
    __shared__ int C[BLOCK_SIZE][BLOCK_SIZE]; // block (i, j), updated in place
    // Stage the three tiles; each thread loads a 2x2 sub-tile of each.
    C[innerI][innerJ] = dist[i*BLOCK_SIZE*n + j*BLOCK_SIZE + innerI*n + innerJ];
    C[innerI+HALF_BLOCK_SIZE][innerJ] = dist[i*BLOCK_SIZE*n + j*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ];
    C[innerI][innerJ+HALF_BLOCK_SIZE] = dist[i*BLOCK_SIZE*n + j*BLOCK_SIZE + innerI*n + innerJ+HALF_BLOCK_SIZE];
    C[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE] = dist[i*BLOCK_SIZE*n + j*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ+HALF_BLOCK_SIZE];
    A[innerI][innerJ] = dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + innerI*n + innerJ];
    A[innerI+HALF_BLOCK_SIZE][innerJ] = dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ];
    A[innerI][innerJ+HALF_BLOCK_SIZE] = dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + innerI*n + innerJ + HALF_BLOCK_SIZE];
    A[innerI + HALF_BLOCK_SIZE][innerJ + HALF_BLOCK_SIZE] = dist[i*BLOCK_SIZE*n + Round*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ + HALF_BLOCK_SIZE];
    B[innerI][innerJ] = dist[Round*BLOCK_SIZE*n + j*BLOCK_SIZE + innerI*n + innerJ];
    B[innerI+HALF_BLOCK_SIZE][innerJ] = dist[Round*BLOCK_SIZE*n + j*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ];
    B[innerI][innerJ+HALF_BLOCK_SIZE] = dist[Round*BLOCK_SIZE*n + j*BLOCK_SIZE + innerI*n + innerJ+HALF_BLOCK_SIZE];
    B[innerI + HALF_BLOCK_SIZE][innerJ + HALF_BLOCK_SIZE] = dist[Round*BLOCK_SIZE*n + j*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ+HALF_BLOCK_SIZE];
    __syncthreads();
    #pragma unroll 32
    for (int k = 0; k < BLOCK_SIZE; k++) {
        C[innerI][innerJ] = (A[innerI][k] + B[k][innerJ]) < C[innerI][innerJ] ? (A[innerI][k] + B[k][innerJ]) : C[innerI][innerJ];
        C[innerI+HALF_BLOCK_SIZE][innerJ] = (A[innerI+HALF_BLOCK_SIZE][k] + B[k][innerJ]) < C[innerI+HALF_BLOCK_SIZE][innerJ] ? (A[innerI+HALF_BLOCK_SIZE][k] + B[k][innerJ]) : C[innerI+HALF_BLOCK_SIZE][innerJ];
        C[innerI][innerJ+HALF_BLOCK_SIZE] = (A[innerI][k] + B[k][innerJ+HALF_BLOCK_SIZE]) < C[innerI][innerJ+HALF_BLOCK_SIZE] ? (A[innerI][k] + B[k][innerJ+HALF_BLOCK_SIZE]) : C[innerI][innerJ+HALF_BLOCK_SIZE];
        C[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE] = (A[innerI+HALF_BLOCK_SIZE][k] + B[k][innerJ+HALF_BLOCK_SIZE]) < C[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE] ? (A[innerI+HALF_BLOCK_SIZE][k] + B[k][innerJ+HALF_BLOCK_SIZE]) : C[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE];
    }
    dist[i*BLOCK_SIZE*n + j*BLOCK_SIZE + innerI*n + innerJ] = C[innerI][innerJ];
    dist[i*BLOCK_SIZE*n + j*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ] = C[innerI+HALF_BLOCK_SIZE][innerJ];
    dist[i*BLOCK_SIZE*n + j*BLOCK_SIZE + innerI*n + innerJ+HALF_BLOCK_SIZE] = C[innerI][innerJ+HALF_BLOCK_SIZE];
    dist[i*BLOCK_SIZE*n + j*BLOCK_SIZE + (innerI+HALF_BLOCK_SIZE)*n + innerJ+HALF_BLOCK_SIZE] = C[innerI+HALF_BLOCK_SIZE][innerJ+HALF_BLOCK_SIZE];
} |
90a5f8ba2515aa953c636cb7dcb68ccfe3457e58.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/hip_runtime_api.h"
#include <stdio.h>
hipError_t median5Cuda(unsigned char* input, unsigned char* output, int xSize, int ySize);
void median5C(unsigned char* input, unsigned char* output, int xSize, int ySize);
int verify(unsigned char* input, unsigned char* output, int xSize, int ySize);
// Compares two xSize*ySize byte buffers element by element.
// Returns 0 when they are identical; on the first mismatch prints the
// differing index and values, then returns 1.
int verify(unsigned char* GoldInput, unsigned char* CudaInput, int xSize, int ySize) {
    const int total = xSize * ySize;
    int idx = 0;
    while (idx < total) {
        if (GoldInput[idx] != CudaInput[idx]) {
            printf("GoldInput[%d] = %d CInput[%d]=%d \n", idx, GoldInput[idx], idx, CudaInput[idx]);
            return 1;
        }
        ++idx;
    }
    return 0;
}
// CPU gold reference: 1-D median-of-5 filter over the image treated as a flat
// buffer (the window deliberately runs across row boundaries, matching
// kernelmedian5Cuda). output[i] = median(input[i..i+4]) for every i with
// i + 4 < xSize*ySize; the last 4 output elements are left untouched, exactly
// like the GPU kernel's `index + 4 < size` guard.
//
// BUG FIX: the original looped y < ySize-4 with a *horizontal* window, so it
// never computed flat indices [(ySize-4)*xSize, xSize*ySize-5] that the CUDA
// kernel does compute, making verify() compare uninitialized bytes. It also
// carried several unused locals (temp0..temp9, xi, xj), removed here.
void median5C(unsigned char* input, unsigned char* output, int xSize, int ySize)
{
    const int size = xSize * ySize;
    for (int idx = 0; idx + 4 < size; idx++)
    {
        // Copy the 5-wide window and sort it (bubble sort is fine for 5).
        int window[5];
        for (int w = 0; w < 5; w++)
            window[w] = input[idx + w];
        for (int pass = 0; pass < 5; pass++)
        {
            for (int k = 0; k < 5 - pass - 1; k++)
            {
                if (window[k] > window[k + 1])
                {
                    int tmp = window[k];
                    window[k] = window[k + 1];
                    window[k + 1] = tmp;
                }
            }
        }
        output[idx] = (unsigned char)window[2]; // median = middle of 5 after sort
    }
}
// GPU 1-D median-of-5 filter over the flattened image: for each flat index,
// output[index] = median(input[index..index+4]). The `index + 4 < size` guard
// leaves the last 4 output elements untouched and prevents out-of-bounds reads.
// The median is computed with a branchless min/max network instead of a sort.
__global__ void kernelmedian5Cuda(unsigned char* input, unsigned char* output, int size)
{
    // Flatten the 2-D launch configuration into a single linear index.
    int xWidth = blockDim.x * gridDim.x;
    int yWidth = blockDim.y * gridDim.y;
    //printf("blockDim.x = %d gridDim.x=%d \n", blockDim.x, gridDim.x);
    int xLoc = (blockIdx.x * blockDim.x + threadIdx.x);
    int yLoc = blockIdx.y * blockDim.y + threadIdx.y;
    int index = xLoc + yLoc * xWidth;
    unsigned char value0, value1, value2, value3, value4;
    // share memory?
    //__shared__ unsigned char value[5];
    if (index + 4 < size)
    {
        // Load the 5-wide window (may straddle a row boundary by design).
        value0 = input[index + 0];
        value1 = input[index + 1];
        value2 = input[index + 2];
        value3 = input[index + 3];
        value4 = input[index + 4];
        // Median-of-5 selection network: pairwise min/max, then merge with
        // the fifth element; temp9 ends up as the median.
        unsigned char temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9;
        temp0 = (value0 < value1) ? value0 : value1;
        temp1 = (value2 < value3) ? value2 : value3;
        temp2 = (temp0 > temp1) ? temp0 : temp1;
        temp3 = (value0 > value1) ? value0 : value1;
        temp4 = (value2 > value3) ? value2 : value3;
        temp5 = (temp3 < temp4) ? temp3 : temp4;
        temp6 = (value4 < temp2) ? value4 : temp2;
        temp7 = (value4 > temp2) ? value4 : temp2;
        temp8 = (temp5 < temp7) ? temp5 : temp7;
        temp9 = (temp6 > temp8) ? temp6 : temp8;
        output[index] = temp9;
    }
}
// Host driver: loads a 512x512 8-bit grayscale image, runs the median-5
// filter on the CPU (gold) and on the GPU, verifies the two results match,
// and writes both output images to disk.
int main()
{
    unsigned char* input, * CudaOutput, * GoldOutput;
    int xSize = 512;
    int ySize = 512;
    input = new unsigned char[xSize * ySize];
    CudaOutput = new unsigned char[xSize * ySize];
    GoldOutput = new unsigned char[xSize * ySize];
    printf("xSize=%d ySize=%d \n", xSize, ySize);
    FILE* fp = fopen("barbara_gray.raw", "rb");
    if (fp == NULL) {
        // BUG FIX: the original passed a NULL FILE* straight to fread.
        fprintf(stderr, "cannot open barbara_gray.raw\n");
        return 1;
    }
    fread(input, xSize, ySize, fp);
    fclose(fp); // BUG FIX: the original never closed the input stream
    median5C(input, GoldOutput, xSize, ySize);
    // Run the GPU filter in parallel over the same input.
    hipError_t cudaStatus = median5Cuda(input, CudaOutput, xSize, ySize);
    if (cudaStatus != hipSuccess) {
        // BUG FIX: stale copy-pasted message referred to "invert8WithCuda".
        fprintf(stderr, "median5Cuda failed!");
        return 1;
    }
    int error = verify(GoldOutput, CudaOutput, xSize, ySize);
    if (error != 0)
        printf("Verify Failed \n");
    else
        printf("Verify Successful \n");
    fp = fopen("COutput.raw", "wb");
    if (fp != NULL) {
        fwrite(GoldOutput, xSize, ySize, fp);
        fclose(fp);
    }
    // BUG FIX: was opened "w" (text mode corrupts binary data on Windows);
    // use "wb" like the gold output above.
    fp = fopen("CudaOutput.raw", "wb");
    if (fp != NULL) {
        fwrite(CudaOutput, xSize, ySize, fp);
        fclose(fp);
    }
    // Release host buffers before the device reset so an early return below
    // cannot leak them.
    delete[] CudaOutput;
    delete[] GoldOutput;
    delete[] input;
    // hipDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = hipDeviceReset();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceReset failed!");
        return 1;
    }
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Run the 5-tap median kernel on the GPU.
// Allocates device buffers, copies the input image in, launches the kernel
// over a (1 x ySize) grid of 512x1 thread blocks, copies the result back,
// and prints the elapsed time measured with HIP events.
// Returns the first HIP error encountered (hipSuccess on success).
hipError_t median5Cuda(unsigned char* input, unsigned char* output, int xSize, int ySize)
{
unsigned char* dev_input = 0;
unsigned char* dev_output = 0;
unsigned int xysize = xSize * ySize;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipError_t cudaStatus;
int count;
dim3 blocks, threads;
// One row of 512 threads per block; the grid covers the full image.
threads.x = 512;
threads.y = 1;
blocks.x = (xSize + threads.x - 1) / (threads.x); // ceil-div: 1 for 512
blocks.y = (ySize + threads.y - 1) / (threads.y); // 512
printf("blocks.x = %d blocks.y=%d \n", blocks.x, blocks.y);
printf("threads.x = %d threads.y=%d \n", threads.x, threads.y);
hipGetDeviceCount(&count);
printf("Count = %d\n", count);
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
    fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
    goto Error;
}
// Timing starts here, so it includes allocation and transfers.
hipEventRecord(start, 0);
// Allocate GPU buffers for the input and output images.
cudaStatus = hipMalloc((void**)&dev_input, xysize * sizeof(char));
if (cudaStatus != hipSuccess) {
    fprintf(stderr, "hipMalloc failed!");
    goto Error;
}
cudaStatus = hipMalloc((void**)&dev_output, xysize * sizeof(char));
if (cudaStatus != hipSuccess) {
    fprintf(stderr, "hipMalloc failed!");
    goto Error;
}
// Copy the input image from host memory to the GPU buffer.
cudaStatus = hipMemcpy(dev_input, input, xysize, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
    fprintf(stderr, "hipMemcpy failed!");
    goto Error;
}
hipProfilerStart();
// Launch one thread per output pixel.
kernelmedian5Cuda << <blocks, threads >> > (dev_input, dev_output, xysize);
hipProfilerStop();
// Kernel launches do not return a status directly; check for launch
// (configuration) errors explicitly.
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
    fprintf(stderr, "kernelmedian5Cuda launch failed: %s\n", hipGetErrorString(cudaStatus));
    goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during execution.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
    fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching kernelmedian5Cuda!\n", cudaStatus);
    goto Error;
}
// Copy the filtered image from the GPU buffer back to host memory.
cudaStatus = hipMemcpy(output, dev_output, xysize * sizeof(char), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
    fprintf(stderr, "hipMemcpy failed!");
    goto Error;
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float cudaElapsedTime;
hipEventElapsedTime(&cudaElapsedTime, start, stop);
printf("Time for execution = %3.1f ms \n", cudaElapsedTime);
Error:
// Shared cleanup path (also reached on success): release device memory and
// the timing events so error exits no longer leak them.
hipFree(dev_input);
hipFree(dev_output);
hipEventDestroy(start);
hipEventDestroy(stop);
return cudaStatus;
}
| 90a5f8ba2515aa953c636cb7dcb68ccfe3457e58.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_profiler_api.h"
#include <stdio.h>
cudaError_t median5Cuda(unsigned char* input, unsigned char* output, int xSize, int ySize);
void median5C(unsigned char* input, unsigned char* output, int xSize, int ySize);
int verify(unsigned char* input, unsigned char* output, int xSize, int ySize);
// Compare the CPU (gold) result against the CUDA result byte by byte.
// Prints the first mismatch and returns 1; returns 0 when both buffers match.
int verify(unsigned char* GoldInput, unsigned char* CudaInput, int xSize, int ySize) {
    const int total = xSize * ySize;
    for (int idx = 0; idx < total; idx++) {
        if (GoldInput[idx] == CudaInput[idx])
            continue;
        printf("GoldInput[%d] = %d CInput[%d]=%d \n", idx, GoldInput[idx], idx, CudaInput[idx]);
        return 1;
    }
    return 0;
}
// CPU reference implementation of the 1-D, 5-tap median filter.
// For each output pixel, gathers the 5 consecutive bytes starting at that
// pixel (linear addressing), bubble-sorts them, and writes the middle value.
// Only rows [0, ySize-5] are written; the last 4 rows are left untouched.
// NOTE(review): near the right edge the window wraps into the next row
// because indexing is linear; this mirrors the GPU kernel's behavior so
// verify() passes -- confirm the wrap is intentional.
void median5C(unsigned char* input, unsigned char* output, int xSize, int ySize)
{
    int n[5]; // the 5-sample window, sorted in place
    int x_count;
    int y_count;
    for (y_count = 0; y_count < ySize - 4; y_count++)
    {
        for (x_count = 0; x_count < xSize; x_count++)
        {
            // Gather 5 consecutive samples at linear offsets 0..4.
            n[0] = *(input + (y_count)*xSize + x_count + 0);
            n[1] = *(input + (y_count)*xSize + x_count + 1);
            n[2] = *(input + (y_count)*xSize + x_count + 2);
            n[3] = *(input + (y_count)*xSize + x_count + 3);
            n[4] = *(input + (y_count)*xSize + x_count + 4);
            // Bubble sort the window; n[2] is then the median.
            unsigned char temp = 0;
            int j, k;
            for (j = 0; j < 5; j++)
            {
                for (k = 0; k < 5 - j - 1; k++)
                {
                    if (n[k] > n[k + 1])
                    {
                        temp = n[k];
                        n[k] = n[k + 1];
                        n[k + 1] = temp;
                    }
                }
            }
            *(output + y_count * xSize + x_count) = n[2];
        }
    }
}
//
// GPU 5-tap median filter: each thread computes the median of the 5
// consecutive bytes starting at its flat index, using a branch-free
// min/max exchange network instead of a sort loop.
// NOTE(review): the window is addressed linearly, so near the right image
// edge it wraps into the next row -- this matches median5C() on the CPU;
// confirm the wrap is intentional.
//
__global__ void kernelmedian5Cuda(unsigned char* input, unsigned char* output, int size)
{
// Flatten the 2-D launch ((1 x ySize) grid of 512x1 blocks) to one index.
int xWidth = blockDim.x * gridDim.x;
int yWidth = blockDim.y * gridDim.y; // currently unused
//printf("blockDim.x = %d gridDim.x=%d \n", blockDim.x, gridDim.x);
int xLoc = (blockIdx.x * blockDim.x + threadIdx.x);
int yLoc = blockIdx.y * blockDim.y + threadIdx.y;
int index = xLoc + yLoc * xWidth;
unsigned char value0, value1, value2, value3, value4;
// share memory?
//__shared__ unsigned char value[5];
// Guard: the last 4 pixels of the buffer have no full 5-wide window and
// are skipped (their output bytes are never written).
if (index + 4 < size)
{
value0 = input[index + 0];
value1 = input[index + 1];
value2 = input[index + 2];
value3 = input[index + 3];
value4 = input[index + 4];
//sort
// Median-of-5 comparison network: temp9 ends up as the middle value.
unsigned char temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9;
temp0 = (value0 < value1) ? value0 : value1;
temp1 = (value2 < value3) ? value2 : value3;
temp2 = (temp0 > temp1) ? temp0 : temp1;
temp3 = (value0 > value1) ? value0 : value1;
temp4 = (value2 > value3) ? value2 : value3;
temp5 = (temp3 < temp4) ? temp3 : temp4;
temp6 = (value4 < temp2) ? value4 : temp2;
temp7 = (value4 > temp2) ? value4 : temp2;
temp8 = (temp5 < temp7) ? temp5 : temp7;
temp9 = (temp6 > temp8) ? temp6 : temp8;
output[index] = temp9;
}
}
//
// Driver: load a 512x512 8-bit grayscale image, run the 5-tap median filter
// on the CPU (gold reference) and on the GPU, compare the two results, and
// dump both outputs to disk.
// Returns 0 on success, 1 on any failure.
//
int main()
{
unsigned char* input, * CudaOutput, * GoldOutput;
int xSize, ySize;
xSize = 512;
ySize = 512;
input = new unsigned char[xSize * ySize];
CudaOutput = new unsigned char[xSize * ySize];
GoldOutput = new unsigned char[xSize * ySize];
printf("xSize=%d ySize=%d \n", xSize, ySize);
FILE* fp;
fp = fopen("barbara_gray.raw", "rb");
// Check the open before reading: fread on a NULL stream crashes.
if (fp == NULL) {
    fprintf(stderr, "failed to open barbara_gray.raw\n");
    return 1;
}
// fread returns the number of ySize-byte items read.
if (fread(input, xSize, ySize, fp) != (size_t)ySize) {
    fprintf(stderr, "short read from barbara_gray.raw\n");
    fclose(fp);
    return 1;
}
fclose(fp); // previously leaked: the handle was reused without closing
// CPU gold reference.
median5C(input, GoldOutput, xSize, ySize);
// GPU version of the same filter.
cudaError_t cudaStatus = median5Cuda(input, CudaOutput, xSize, ySize);
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "median5Cuda failed!"); // was a copy-paste "invert8WithCuda"
    return 1;
}
int error = verify(GoldOutput, CudaOutput, xSize, ySize);
if (error != 0)
    printf("Verify Failed \n");
else
    printf("Verify Successful \n");
// Write both outputs in binary mode ("wb"); the CUDA output previously used
// "w", which corrupts binary data on platforms with text-mode translation.
fp = fopen("COutput.raw", "wb");
if (fp != NULL) {
    fwrite(GoldOutput, xSize, ySize, fp);
    fclose(fp);
}
fp = fopen("CudaOutput.raw", "wb");
if (fp != NULL) {
    fwrite(CudaOutput, xSize, ySize, fp);
    fclose(fp);
}
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaDeviceReset failed!");
    return 1;
}
delete[] CudaOutput;
delete[] GoldOutput;
delete[] input;
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Run the 5-tap median kernel on the GPU.
// Allocates device buffers, copies the input image in, launches the kernel
// over a (1 x ySize) grid of 512x1 thread blocks, copies the result back,
// and prints the elapsed time measured with CUDA events.
// Returns the first CUDA error encountered (cudaSuccess on success).
cudaError_t median5Cuda(unsigned char* input, unsigned char* output, int xSize, int ySize)
{
unsigned char* dev_input = 0;
unsigned char* dev_output = 0;
unsigned int xysize = xSize * ySize;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaError_t cudaStatus;
int count;
dim3 blocks, threads;
// One row of 512 threads per block; the grid covers the full image.
threads.x = 512;
threads.y = 1;
blocks.x = (xSize + threads.x - 1) / (threads.x); // ceil-div: 1 for 512
blocks.y = (ySize + threads.y - 1) / (threads.y); // 512
printf("blocks.x = %d blocks.y=%d \n", blocks.x, blocks.y);
printf("threads.x = %d threads.y=%d \n", threads.x, threads.y);
cudaGetDeviceCount(&count);
printf("Count = %d\n", count);
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
    goto Error;
}
// Timing starts here, so it includes allocation and transfers.
cudaEventRecord(start, 0);
// Allocate GPU buffers for the input and output images.
cudaStatus = cudaMalloc((void**)&dev_input, xysize * sizeof(char));
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMalloc failed!");
    goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_output, xysize * sizeof(char));
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMalloc failed!");
    goto Error;
}
// Copy the input image from host memory to the GPU buffer.
cudaStatus = cudaMemcpy(dev_input, input, xysize, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMemcpy failed!");
    goto Error;
}
cudaProfilerStart();
// Launch one thread per output pixel.
kernelmedian5Cuda << <blocks, threads >> > (dev_input, dev_output, xysize);
cudaProfilerStop();
// Kernel launches do not return a status directly; check for launch
// (configuration) errors explicitly.
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "kernelmedian5Cuda launch failed: %s\n", cudaGetErrorString(cudaStatus));
    goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during execution.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching kernelmedian5Cuda!\n", cudaStatus);
    goto Error;
}
// Copy the filtered image from the GPU buffer back to host memory.
cudaStatus = cudaMemcpy(output, dev_output, xysize * sizeof(char), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMemcpy failed!");
    goto Error;
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float cudaElapsedTime;
cudaEventElapsedTime(&cudaElapsedTime, start, stop);
printf("Time for execution = %3.1f ms \n", cudaElapsedTime);
Error:
// Shared cleanup path (also reached on success): release device memory and
// the timing events so error exits no longer leak them.
cudaFree(dev_input);
cudaFree(dev_output);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return cudaStatus;
}
|
f810703618bd861d24bf8979f4a770f4339d903f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <hip/hip_runtime.h>
#include <fenv.h>
#include <getopt.h>
#include <cassert>
#include <functional>
#include <iostream>
#include <random>
#include <vector>
#include "fbgemm_gpu/bench_utils.cuh"
#include "fbgemm_gpu/cuda_utils.cuh"
#include "fbgemm_gpu/embedding_wrappers.cuh"
void generate_auxiliary_tensors(
int batch_size,
std::vector<int>& hash_sizes,
std::vector<long>& table_offsets,
std::vector<long>& lengths,
std::vector<long>& offsets,
std::vector<long>& indices) {
// generate lengths and indices
std::default_random_engine generator;
std::uniform_real_distribution<float> distribution(0.0, 1.0);
fesetround(FE_TONEAREST);
for (int h = 0; h < hash_sizes.size(); h++) {
for (int i = 0; i < batch_size; i++) {
long n_indices = 1;
indices.push_back(
std::lrintf(distribution(generator) * (hash_sizes[h] - 1)));
lengths.push_back(n_indices);
}
}
// generate offsets
offsets.push_back(0);
long inc_sum = 0;
for (auto const& item : lengths) {
offsets.push_back(inc_sum += item);
}
// generate table_offsets
long inc_table_hash_sum = 0;
table_offsets.push_back(0);
for (auto const& item : hash_sizes) {
table_offsets.push_back(inc_table_hash_sum += item);
}
}
void parse_commandline(
int argc,
char* argv[],
int* batch_size,
int* num_tables,
int* num_tasks,
int* iters) {
static struct option longopts[] = {
{"batch-size", required_argument, NULL, 'b'},
{"num_tables", required_argument, NULL, 't'},
{"num_tasks", required_argument, NULL, 'p'},
{"iters", required_argument, NULL, 'i'}};
int opt;
while ((opt = getopt_long(argc, argv, "b:t:p:i", longopts, NULL)) != -1) {
switch (opt) {
case 'b':
*batch_size = atoi(optarg);
break;
case 't':
*num_tables = atoi(optarg);
break;
case 'p':
*num_tasks = atoi(optarg);
break;
case 'i':
*iters = atoi(optarg);
break;
}
}
std::cout << "batch size: " << *batch_size << std::endl;
std::cout << "number of tables: " << *num_tables << std::endl;
std::cout << "number of tasks: " << *num_tasks << std::endl;
std::cout << "iteration: " << *iters << std::endl;
}
int main(int argc, char* argv[]) {
int batch_size = 512;
int num_tables = 2;
int num_tasks = 3;
int iters = 100;
parse_commandline(argc, argv, &batch_size, &num_tables, &num_tasks, &iters);
// generate hash_sizes
std::vector<int> hash_sizes;
std::default_random_engine generator;
std::uniform_int_distribution<int> distribution(50, 250);
for (int i = 0; i < num_tables; i++) {
hash_sizes.push_back(distribution(generator));
}
std::cout << "table rows: ";
for (auto const& hash_size : hash_sizes) {
std::cout << hash_size << ",";
}
std::cout << std::endl;
// the auxilary tensors
std::vector<long> table_offsets;
std::vector<long> lengths;
std::vector<long> offsets;
std::vector<long> indices;
generate_auxiliary_tensors(
batch_size, hash_sizes, table_offsets, lengths, offsets, indices);
// cache flush utility
// gpu ptrs
float* embedding_table_ptr;
long* table_offsets_ptr;
long* offsets_ptr;
long* indices_ptr;
float* output_ptr;
float* grad_ptr;
float* grad_weight_ptr;
int embedding_rows = 0;
for (auto const& h : hash_sizes) {
embedding_rows += h;
}
CUDA_CHECK(hipMalloc(
&embedding_table_ptr, embedding_rows * num_tasks * sizeof(float)));
// generate embedding table random numbers
generate_random_table(embedding_table_ptr, embedding_rows * num_tasks);
CUDA_CHECK(hipDeviceSynchronize());
CUDA_CHECK(hipGetLastError());
CUDA_CHECK(
hipMalloc(&table_offsets_ptr, table_offsets.size() * sizeof(long)));
CUDA_CHECK(hipMalloc(&offsets_ptr, offsets.size() * sizeof(long)));
CUDA_CHECK(hipMalloc(&indices_ptr, indices.size() * sizeof(long)));
CUDA_CHECK(hipMalloc(
&output_ptr, batch_size * num_tables * num_tasks * sizeof(float)));
CUDA_CHECK(hipGetLastError());
// memcpy
CUDA_CHECK(hipMemcpy(
table_offsets_ptr,
table_offsets.data(),
table_offsets.size() * sizeof(long),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(
offsets_ptr,
offsets.data(),
offsets.size() * sizeof(long),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(
indices_ptr,
indices.data(),
indices.size() * sizeof(long),
hipMemcpyHostToDevice));
// forward
float forward_time = benchmark_function(iters, [&]() {
fbgemm_gpu_test::batched_unary_embeddings_forward(
num_tasks,
batch_size,
num_tables,
embedding_table_ptr,
table_offsets_ptr,
offsets_ptr,
indices_ptr,
output_ptr);
});
// free forward-only gpu ptrs
hipFree(output_ptr);
// backward
hipMalloc(&grad_ptr, batch_size * num_tables * num_tasks * sizeof(float));
generate_random_table(grad_ptr, batch_size * num_tables * num_tasks);
CUDA_CHECK(hipDeviceSynchronize());
hipMalloc(&grad_weight_ptr, embedding_rows * num_tasks * sizeof(float));
float backward_time = benchmark_function(iters, [&]() {
fbgemm_gpu_test::batched_unary_embeddings_backward(
num_tasks,
batch_size,
num_tables,
grad_ptr,
table_offsets_ptr,
offsets_ptr,
indices_ptr,
grad_weight_ptr);
});
// free backward-only gpu ptrs
hipFree(grad_ptr);
hipFree(grad_weight_ptr);
// free other gpu ptrs;
hipFree(embedding_table_ptr);
hipFree(table_offsets_ptr);
hipFree(offsets_ptr);
hipFree(indices_ptr);
hipFree(table_offsets_ptr);
hipFree(table_offsets_ptr);
std::cout << "Average Forward Pass Execution time per iteration: "
<< forward_time << " ms" << std::endl;
std::cout << "Forward Pass Memory Bandwidth: "
<< (num_tasks * num_tables * batch_size *
(5 * sizeof(long) + 2 * sizeof(float))) /
(forward_time * 1e-3) / 1e9
<< " GB/s" << std::endl;
std::cout << "Average Backward Pass Execution time per iteration: "
<< backward_time << " ms" << std::endl;
std::cout << "Backward Pass Memory Bandwidth: "
<< (num_tasks * num_tables * batch_size *
(5 * sizeof(long) + 2 * sizeof(float))) /
(backward_time * 1e-3) / 1e9
<< " GB/s" << std::endl;
}
| f810703618bd861d24bf8979f4a770f4339d903f.cu | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <cuda.h>
#include <fenv.h>
#include <getopt.h>
#include <cassert>
#include <functional>
#include <iostream>
#include <random>
#include <vector>
#include "fbgemm_gpu/bench_utils.cuh"
#include "fbgemm_gpu/cuda_utils.cuh"
#include "fbgemm_gpu/embedding_wrappers.cuh"
void generate_auxiliary_tensors(
int batch_size,
std::vector<int>& hash_sizes,
std::vector<long>& table_offsets,
std::vector<long>& lengths,
std::vector<long>& offsets,
std::vector<long>& indices) {
// generate lengths and indices
std::default_random_engine generator;
std::uniform_real_distribution<float> distribution(0.0, 1.0);
fesetround(FE_TONEAREST);
for (int h = 0; h < hash_sizes.size(); h++) {
for (int i = 0; i < batch_size; i++) {
long n_indices = 1;
indices.push_back(
std::lrintf(distribution(generator) * (hash_sizes[h] - 1)));
lengths.push_back(n_indices);
}
}
// generate offsets
offsets.push_back(0);
long inc_sum = 0;
for (auto const& item : lengths) {
offsets.push_back(inc_sum += item);
}
// generate table_offsets
long inc_table_hash_sum = 0;
table_offsets.push_back(0);
for (auto const& item : hash_sizes) {
table_offsets.push_back(inc_table_hash_sum += item);
}
}
void parse_commandline(
int argc,
char* argv[],
int* batch_size,
int* num_tables,
int* num_tasks,
int* iters) {
static struct option longopts[] = {
{"batch-size", required_argument, NULL, 'b'},
{"num_tables", required_argument, NULL, 't'},
{"num_tasks", required_argument, NULL, 'p'},
{"iters", required_argument, NULL, 'i'}};
int opt;
while ((opt = getopt_long(argc, argv, "b:t:p:i", longopts, NULL)) != -1) {
switch (opt) {
case 'b':
*batch_size = atoi(optarg);
break;
case 't':
*num_tables = atoi(optarg);
break;
case 'p':
*num_tasks = atoi(optarg);
break;
case 'i':
*iters = atoi(optarg);
break;
}
}
std::cout << "batch size: " << *batch_size << std::endl;
std::cout << "number of tables: " << *num_tables << std::endl;
std::cout << "number of tasks: " << *num_tasks << std::endl;
std::cout << "iteration: " << *iters << std::endl;
}
int main(int argc, char* argv[]) {
int batch_size = 512;
int num_tables = 2;
int num_tasks = 3;
int iters = 100;
parse_commandline(argc, argv, &batch_size, &num_tables, &num_tasks, &iters);
// generate hash_sizes
std::vector<int> hash_sizes;
std::default_random_engine generator;
std::uniform_int_distribution<int> distribution(50, 250);
for (int i = 0; i < num_tables; i++) {
hash_sizes.push_back(distribution(generator));
}
std::cout << "table rows: ";
for (auto const& hash_size : hash_sizes) {
std::cout << hash_size << ",";
}
std::cout << std::endl;
// the auxilary tensors
std::vector<long> table_offsets;
std::vector<long> lengths;
std::vector<long> offsets;
std::vector<long> indices;
generate_auxiliary_tensors(
batch_size, hash_sizes, table_offsets, lengths, offsets, indices);
// cache flush utility
// gpu ptrs
float* embedding_table_ptr;
long* table_offsets_ptr;
long* offsets_ptr;
long* indices_ptr;
float* output_ptr;
float* grad_ptr;
float* grad_weight_ptr;
int embedding_rows = 0;
for (auto const& h : hash_sizes) {
embedding_rows += h;
}
CUDA_CHECK(cudaMalloc(
&embedding_table_ptr, embedding_rows * num_tasks * sizeof(float)));
// generate embedding table random numbers
generate_random_table(embedding_table_ptr, embedding_rows * num_tasks);
CUDA_CHECK(cudaDeviceSynchronize());
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(
cudaMalloc(&table_offsets_ptr, table_offsets.size() * sizeof(long)));
CUDA_CHECK(cudaMalloc(&offsets_ptr, offsets.size() * sizeof(long)));
CUDA_CHECK(cudaMalloc(&indices_ptr, indices.size() * sizeof(long)));
CUDA_CHECK(cudaMalloc(
&output_ptr, batch_size * num_tables * num_tasks * sizeof(float)));
CUDA_CHECK(cudaGetLastError());
// memcpy
CUDA_CHECK(cudaMemcpy(
table_offsets_ptr,
table_offsets.data(),
table_offsets.size() * sizeof(long),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(
offsets_ptr,
offsets.data(),
offsets.size() * sizeof(long),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(
indices_ptr,
indices.data(),
indices.size() * sizeof(long),
cudaMemcpyHostToDevice));
// forward
float forward_time = benchmark_function(iters, [&]() {
fbgemm_gpu_test::batched_unary_embeddings_forward(
num_tasks,
batch_size,
num_tables,
embedding_table_ptr,
table_offsets_ptr,
offsets_ptr,
indices_ptr,
output_ptr);
});
// free forward-only gpu ptrs
cudaFree(output_ptr);
// backward
cudaMalloc(&grad_ptr, batch_size * num_tables * num_tasks * sizeof(float));
generate_random_table(grad_ptr, batch_size * num_tables * num_tasks);
CUDA_CHECK(cudaDeviceSynchronize());
cudaMalloc(&grad_weight_ptr, embedding_rows * num_tasks * sizeof(float));
float backward_time = benchmark_function(iters, [&]() {
fbgemm_gpu_test::batched_unary_embeddings_backward(
num_tasks,
batch_size,
num_tables,
grad_ptr,
table_offsets_ptr,
offsets_ptr,
indices_ptr,
grad_weight_ptr);
});
// free backward-only gpu ptrs
cudaFree(grad_ptr);
cudaFree(grad_weight_ptr);
// free other gpu ptrs;
cudaFree(embedding_table_ptr);
cudaFree(table_offsets_ptr);
cudaFree(offsets_ptr);
cudaFree(indices_ptr);
cudaFree(table_offsets_ptr);
cudaFree(table_offsets_ptr);
std::cout << "Average Forward Pass Execution time per iteration: "
<< forward_time << " ms" << std::endl;
std::cout << "Forward Pass Memory Bandwidth: "
<< (num_tasks * num_tables * batch_size *
(5 * sizeof(long) + 2 * sizeof(float))) /
(forward_time * 1e-3) / 1e9
<< " GB/s" << std::endl;
std::cout << "Average Backward Pass Execution time per iteration: "
<< backward_time << " ms" << std::endl;
std::cout << "Backward Pass Memory Bandwidth: "
<< (num_tasks * num_tables * batch_size *
(5 * sizeof(long) + 2 * sizeof(float))) /
(backward_time * 1e-3) / 1e9
<< " GB/s" << std::endl;
}
|
d5a19702a82ed2ee861d6582e097fab17b77ad47.hip | // !!! This is a file automatically generated by hipify!!!
//
// For intellisense
//
#ifdef __INTELLISENSE__
#include <hip/hip_runtime.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
#endif
#include <stdio.h>
#include <iostream>
#include <assert.h>
#include <vector>
#include <cmath>
#include <omp.h>
#include <fstream>
using prec = double;
using cu_prec = float;
//
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
//
// Pass-through error check for HIP runtime calls.
// In debug builds (DEBUG/_DEBUG defined) it prints the error string and
// asserts; in release builds it is a no-op wrapper. Either way the original
// status is returned so callers can still inspect it.
inline hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess)
{
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
// Always-on error check (unlike checkCuda, which is debug-only): wrap a HIP
// call in gpuErrchk(...) to print file/line on failure and, by default, exit.
#define gpuErrchk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
// Report a failed HIP status with its source location; exits the process
// with the error code unless abort == false.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
//
// Test function in 2D
// cos(x) * exp( y );
//
// Evaluate the test function f(x,y) = cos(x) * exp(y) at every sample point.
// x, y, and f must all have the same length; f is overwritten.
void f_xy(std::vector<prec> &x, std::vector<prec> &y, std::vector<prec> &f)
{
    const std::size_t count = x.size();
    for (std::size_t idx = 0; idx < count; ++idx)
        f[idx] = std::cos(x[idx]) * std::exp(y[idx]);
    return;
}
// Exact Laplacian of the test function f(x,y) = cos(x)*exp(y):
// f_xx = -cos(x)exp(y) and f_yy = +cos(x)exp(y), so f_xx + f_yy == 0
// everywhere; the finite-difference result is compared against this.
prec f_laplacian(prec x, prec y)
{
return 0.0;
}
// Single-precision overload: evaluate f(x,y) = cos(x) * exp(y) elementwise.
// x, y, and f must all have the same length; f is overwritten.
void f_xy(std::vector<float> &x, std::vector<float> &y, std::vector<float> &f)
{
    const std::size_t count = x.size();
    for (std::size_t idx = 0; idx < count; ++idx)
        f[idx] = std::cos(x[idx]) * ::exp(y[idx]);
    return;
}
// Single-precision overload of the exact Laplacian of cos(x)*exp(y),
// which is identically zero (f_xx and f_yy cancel).
float f_laplacian(float x, float y)
{
return (float)0.0;
}
//
// Finite Difference in the CPU
//
void fd_2d_cpu(int nx, int ny)
{
std::vector<prec> x(nx * ny);
std::vector<prec> y(nx * ny);
std::vector<prec> f(nx * ny);
std::vector<prec> f_d2(nx * ny);
prec dx2 = ::pow(2.0 / (nx - 1.0), 2);
prec dy2 = ::pow(2.0 / (ny - 1.0), 2);
//
// Initialize the values of x and y
//
for (int i = 0; i < nx; i++)
{
for (int j = 0; j < ny; j++)
{
int ell = i * ny + j;
x[ell] = -1.0 + 2.0 * i / (nx - 1.0);
y[ell] = -1.0 + 2.0 * j / (ny - 1.0);
}
}
f_xy(x, y, f);
double t1 = omp_get_wtime();
for (int i = 1; i < nx - 1; i++)
{
for (int j = 1; j < ny - 1; j++)
{
int ell = i * ny + j;
int ell_i_p1 = (i + 1) * ny + j; // i+1
int ell_i_m1 = (i - 1) * ny + j; // i-1
int ell_j_p1 = i * ny + (j + 1); // i+1
int ell_j_m1 = i * ny + (j - 1); // i-1
f_d2[ell] = (f[ell_i_p1] + f[ell_i_m1]) / dx2 +
(f[ell_j_p1] + f[ell_j_m1]) / dy2 -
(2.0 / dx2 + 2.0 / dy2) * f[ell];
}
}
double t2 = omp_get_wtime();
std::cout << "Time for 2D Laplacian in CPU = " << t2 - t1 << " sec" << std::endl;
//
// Check for the maximum error
//
prec max_err = 0.0;
for (int i = 1; i < nx - 1; i++)
{
for (int j = 1; j < ny - 1; j++)
{
int ell = i * ny + j;
prec lap_exact = f_laplacian(x[ell], y[ell]);
prec err_tmp = std::abs(lap_exact - f_d2[ell]);
// std::cout << f_d2[ell] << std::endl;
if ((i == 1) && (j == 1))
{
max_err = err_tmp;
}
else if (err_tmp > max_err)
{
max_err = err_tmp;
}
}
}
std::cout << "Maximum error in CPU = " << max_err << std::endl;
return;
}
//
// GPU Kernel the "silly" way
//
__constant__ int nx_cu, ny_cu;
//
// Naive 2D Laplacian kernel (1-D launch): each thread owns one grid point
// addressed by its flat index. Boundary points and out-of-range threads
// return early. The stencil is written unscaled; the host divides by dx2
// afterwards, which is valid only because the grid spacing is uniform
// (dx == dy for the square [-1,1]^2 domain used by the callers).
//
__global__ void laplacian_silly(cu_prec *f,
cu_prec *f_d2)
{
    const int ell = blockIdx.x * blockDim.x + threadIdx.x;
    // ell is a 0-based index, so ell == nx*ny is already out of range: the
    // original `>` comparison let extra tail threads write out of bounds.
    if (ell >= nx_cu * ny_cu)
        return;
    int i = ell / ny_cu;
    int j = ell - i * ny_cu;
    // Skip the boundary ring; only interior points are computed.
    if (i == 0 || i == nx_cu - 1 || j == 0 || j == ny_cu - 1)
        return;
    int ell_i_p1 = (i + 1) * ny_cu + j; // i+1
    int ell_i_m1 = (i - 1) * ny_cu + j; // i-1
    int ell_j_p1 = i * ny_cu + (j + 1); // j+1
    int ell_j_m1 = i * ny_cu + (j - 1); // j-1
    // 4.0f keeps the arithmetic in single precision (a bare 4.0 promotes the
    // whole expression to double in this float kernel).
    f_d2[ell] = (f[ell_i_p1] + f[ell_i_m1]) +
                (f[ell_j_p1] + f[ell_j_m1]) -
                4.0f * f[ell];
}
//
// GPU Kernel using shared memory
//
// stencil coefficients
__constant__ cu_prec c_a, c_b, c_c;
const int n_per_b = 32;
//
// Tiled 2D Laplacian kernel: each 32x32 block stages its tile of f into
// shared memory with a one-cell halo, then applies the unscaled 5-point
// stencil (the host divides by dx2 afterwards). Assumes blockDim == (32,32)
// == n_per_b and that nx, ny are multiples of n_per_b (the launch uses
// grid = (nx/n_per_b, ny/n_per_b)).
// NOTE(review): threads on the image boundary (i==0, i==nx-1, j==0, j==ny-1)
// still compute and write f_d2 using halo cells that were never loaded
// (uninitialized shared memory); the callers only check interior points, so
// this goes unnoticed -- confirm whether boundary output should be skipped.
//
__global__ void laplacian_shared(cu_prec *f,
cu_prec *f_d2)
{
// Tile plus one-cell halo on every side. The inner dimension is 34 floats,
// so a 32-thread column access does not all map to one bank.
__shared__ cu_prec f_shared[n_per_b + 2][n_per_b + 2];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int ell = i * ny_cu + j;
// printf("Blk: (%d,%d) Thread: (%d,%d) -> Row/Col = (%d,%d)\n",
// blockIdx.x, blockIdx.y,
// threadIdx.x, threadIdx.y,
// i, j);
// if (ell > nx_cu * ny_cu)
// return;
//
// Fill the local memory data
//
// Interior of the tile: one element per thread, shifted by the halo.
f_shared[threadIdx.x + 1][threadIdx.y + 1] = f[ell];
// If we are on an edge on the block we add the values
// (edge threads also fetch the neighboring halo cell; corner halo cells
// are never loaded, which is fine for the 5-point stencil).
if (threadIdx.x == 0 && i != 0)
{
int ell_tmp = (i - 1) * ny_cu + j;
f_shared[0][threadIdx.y + 1] = f[ell_tmp];
}
if (threadIdx.x == (blockDim.x - 1) && i != (nx_cu - 1))
{
int ell_tmp = (i + 1) * ny_cu + j;
f_shared[n_per_b + 1][threadIdx.y + 1] = f[ell_tmp];
}
if (threadIdx.y == 0 && j != 0)
{
int ell_tmp = i * ny_cu + (j - 1);
f_shared[threadIdx.x + 1][0] = f[ell_tmp];
}
if (threadIdx.y == (blockDim.y - 1) && j != (ny_cu - 1))
{
int ell_tmp = i * ny_cu + (j + 1);
f_shared[threadIdx.x + 1][n_per_b + 1] = f[ell_tmp];
}
// Barrier outside all branches: every thread reaches it before the tile
// is read back.
__syncthreads();
// int i_m = threadIdx.x;
// int i_0 = threadIdx.x + 1;
// int i_p = threadIdx.x + 2;
// int j_m = threadIdx.y;
// int j_0 = threadIdx.y + 1;
// int j_p = threadIdx.y + 2;
// f_d2[ell] = (f_shared[i_p][j_0] + f_shared[i_m][j_0]) +
// (f_shared[i_0][j_p] + f_shared[i_0][j_m]) -
// 4.0 * f_shared[i_0][j_0];
// Unscaled 5-point stencil from shared memory.
// NOTE(review): the 4.0 literal promotes this float expression to double.
f_d2[ell] = (f_shared[threadIdx.x + 2][threadIdx.y + 1] + f_shared[threadIdx.x][threadIdx.y + 1]) +
(f_shared[threadIdx.x + 1][threadIdx.y + 2] + f_shared[threadIdx.x + 1][threadIdx.y]) -
4.0 * f_shared[threadIdx.x + 1][threadIdx.y + 1];
}
//
// Finite Difference in the GPU
//
//
// GPU benchmark of the naive (flat-index) Laplacian kernel.
// Builds the grid and f on the host, uploads everything, launches
// laplacian_silly once as a warm-up plus nReps timed-in-aggregate repeats,
// downloads the result, rescales by 1/dx2 (valid since dx == dy here), and
// prints wall time and maximum error vs. the analytic Laplacian (0).
// Note: the reported time covers allocation, transfers, and all 21 launches,
// not a single kernel.
//
void fd_2d_gpu_v0(int nx, int ny)
{
std::vector<cu_prec> x(nx * ny);
std::vector<cu_prec> y(nx * ny);
std::vector<cu_prec> f(nx * ny);
std::vector<cu_prec> f_d2(nx * ny);
cu_prec dx2 = (cu_prec)::pow(2.0 / (nx - 1.0), 2);
cu_prec dy2 = (cu_prec)::pow(2.0 / (ny - 1.0), 2);
//
// Initialize the values of x and y
//
for (int i = 0; i < nx; i++)
{
for (int j = 0; j < ny; j++)
{
int ell = i * ny + j;
x[ell] = (cu_prec)(-1.0 + 2.0 * i / (nx - 1.0));
y[ell] = (cu_prec)(-1.0 + 2.0 * j / (ny - 1.0));
}
}
f_xy(x, y, f);
//
// Set the CUDA variables
//
double t1 = omp_get_wtime();
cu_prec *f_cu, *f_d2_cu;
int bytes = nx * ny * sizeof(cu_prec);
// Grid dimensions go to __constant__ memory so the kernel can read them.
checkCuda(hipMemcpyToSymbol(nx_cu, &nx, sizeof(int), 0, hipMemcpyHostToDevice));
checkCuda(hipMemcpyToSymbol(ny_cu, &ny, sizeof(int), 0, hipMemcpyHostToDevice));
checkCuda(hipMalloc(&f_cu, bytes));
checkCuda(hipMalloc(&f_d2_cu, bytes));
checkCuda(hipMemset(f_d2_cu, 0, bytes));
checkCuda(hipMemcpy(f_cu, &(f[0]), bytes, hipMemcpyHostToDevice));
// 1-D launch: ceil(nx*ny / 1024) blocks of 1024 threads.
int n_th_per_block = 1024;
int n_blocks = ::ceil(((prec)(nx * ny)) /
((prec)n_th_per_block));
const int nReps = 20;
// NOTE(review): no hipGetLastError() after these launches; a launch
// failure would surface only as garbage output.
hipLaunchKernelGGL(( laplacian_silly), dim3(n_blocks), dim3(n_th_per_block), 0, 0, f_cu, f_d2_cu); // Warm up
for (int i = 0; i < nReps; i++)
hipLaunchKernelGGL(( laplacian_silly), dim3(n_blocks), dim3(n_th_per_block), 0, 0, f_cu, f_d2_cu);
// The blocking device-to-host copy also synchronizes with the kernels.
checkCuda(hipMemcpy(&(f_d2[0]), f_d2_cu, bytes, hipMemcpyDeviceToHost));
checkCuda(hipFree(f_cu));
checkCuda(hipFree(f_d2_cu));
// Add dx2 factor
// (the kernel computes the unscaled stencil; dy2 is unused since dx == dy)
for (int i = 0; i < nx * ny; i++)
{
f_d2[i] /= dx2;
}
double t2 = omp_get_wtime();
std::cout << "Time for 2D Laplacian in GPU (v0) = " << t2 - t1 << " sec" << std::endl;
//
// Check for the maximum error
// (interior points only; the boundary ring is never computed)
//
cu_prec max_err = 0.0;
for (int i = 1; i < nx - 1; i++)
{
for (int j = 1; j < ny - 1; j++)
{
int ell = i * ny + j;
cu_prec lap_exact = f_laplacian(x[ell], y[ell]);
cu_prec err_tmp = std::abs(lap_exact - f_d2[ell]);
// std::cout << f_d2[ell] << std::endl;
if ((i == 1) && (j == 1))
{
max_err = err_tmp;
}
else if (err_tmp > max_err)
{
max_err = err_tmp;
}
}
}
std::cout << "Maximum error in GPU (v0) = " << max_err << std::endl;
return;
}
//
// Finite Difference in the GPU
//
//
// GPU benchmark of the shared-memory (tiled) Laplacian kernel.
// Same flow as fd_2d_gpu_v0 but with a 2-D launch of 32x32 blocks; assumes
// nx and ny are multiples of n_per_b (the grid is nx/n_per_b x ny/n_per_b
// with no remainder handling). The reported wall time again covers
// allocation, transfers, and all 21 launches.
//
void fd_2d_gpu_v1(int nx, int ny)
{
std::vector<cu_prec> x(nx * ny);
std::vector<cu_prec> y(nx * ny);
std::vector<cu_prec> f(nx * ny);
std::vector<cu_prec> f_d2(nx * ny);
cu_prec dx2 = ::pow(2.0 / (nx - 1.0), 2);
cu_prec dy2 = ::pow(2.0 / (ny - 1.0), 2);
//
// Initialize the values of x and y
//
for (int i = 0; i < nx; i++)
{
for (int j = 0; j < ny; j++)
{
int ell = i * ny + j;
x[ell] = -1.0 + 2.0 * i / (nx - 1.0);
y[ell] = -1.0 + 2.0 * j / (ny - 1.0);
}
}
f_xy(x, y, f);
//
// Set the CUDA variables
//
double t1 = omp_get_wtime();
cu_prec *f_cu, *f_d2_cu;
int bytes = nx * ny * sizeof(cu_prec);
checkCuda(hipMemcpyToSymbol(nx_cu, &nx, sizeof(int), 0, hipMemcpyHostToDevice));
checkCuda(hipMemcpyToSymbol(ny_cu, &ny, sizeof(int), 0, hipMemcpyHostToDevice));
checkCuda(hipMalloc(&f_cu, bytes));
checkCuda(hipMalloc(&f_d2_cu, bytes));
checkCuda(hipMemset(f_d2_cu, 0, bytes));
checkCuda(hipMemcpy(f_cu, &(f[0]), bytes, hipMemcpyHostToDevice));
// 2-D launch: one 32x32 block per tile; no tail blocks are launched, so
// nx and ny must divide evenly by n_per_b.
dim3 block = dim3(n_per_b, n_per_b);
dim3 grid = dim3(nx / n_per_b, ny / n_per_b);
// int n_th_per_block = 64;
// int n_blocks = ::ceil(((prec)(nx * ny)) /
// ((prec)n_th_per_block));
// a/b/c were meant for the commented-out __constant__ stencil
// coefficients below; currently computed but unused.
cu_prec a = 1.0 / dx2;
cu_prec b = 1.0 / dy2;
cu_prec c = 4.0 / dx2;
// checkCuda(hipMemcpyToSymbol(c_a, &a, sizeof(cu_prec), 0, hipMemcpyHostToDevice));
// checkCuda(hipMemcpyToSymbol(c_b, &b, sizeof(cu_prec), 0, hipMemcpyHostToDevice));
// checkCuda(hipMemcpyToSymbol(c_c, &c, sizeof(cu_prec), 0, hipMemcpyHostToDevice));
const int nReps = 20;
hipLaunchKernelGGL(( laplacian_shared), dim3(grid), dim3(block), 0, 0, f_cu, f_d2_cu); // Warm up
for (int i = 0; i < nReps; i++)
hipLaunchKernelGGL(( laplacian_shared), dim3(grid), dim3(block), 0, 0, f_cu, f_d2_cu);
// Unlike v0, this version does check for launch and execution errors.
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
checkCuda(hipMemcpy(&(f_d2[0]), f_d2_cu, bytes, hipMemcpyDeviceToHost));
checkCuda(hipFree(f_cu));
checkCuda(hipFree(f_d2_cu));
// Add dx2 factor
// (the kernel computes the unscaled stencil; dy2 is unused since dx == dy)
for (int i = 0; i < nx * ny; i++)
{
f_d2[i] /= dx2;
}
double t2 = omp_get_wtime();
std::cout << "Time for 2D Laplacian in GPU (v1) = " << t2 - t1 << " sec" << std::endl;
//
// Check for the maximum error
// (interior points only; the boundary ring is not validated)
//
cu_prec max_err = 0.0;
for (int i = 1; i < nx - 1; i++)
{
for (int j = 1; j < ny - 1; j++)
{
int ell = i * ny + j;
cu_prec lap_exact = f_laplacian(x[ell], y[ell]);
cu_prec err_tmp = std::abs(lap_exact - f_d2[ell]);
// std::cout << f_d2[ell] << std::endl;
if ((i == 1) && (j == 1))
{
max_err = err_tmp;
}
else if (err_tmp > max_err)
{
max_err = err_tmp;
}
}
}
std::cout << "Maximum error in GPU (v1) = " << max_err << std::endl;
return;
}
//
// Wave equation
//
__constant__ cu_prec alpha2;
//
// One leapfrog time step of the 2D wave equation on interior points:
//   u^{n+1} = 2 u^n - u^{n-1} + alpha2 * (5-point unscaled Laplacian of u^n)
// where alpha2 = (c*dt/dx)^2 lives in __constant__ memory. After computing
// u^{n+1}, u^n is copied into u_n_m1 in place, so after the companion
// set_un() kernel the buffers are ready for the next step.
// NOTE(review): the range guard uses `>` where `>=` is needed (ell == nx*ny
// is already out of range); with the callers' exact-multiple launch sizes
// this is harmless, but confirm. The 2.0/4.0 double literals also promote
// this float expression to double.
//
__global__ void wave_eq_propagate(cu_prec *u_n,
cu_prec *u_n_p1,
cu_prec *u_n_m1)
{
const int ell = blockIdx.x * blockDim.x + threadIdx.x;
if (ell > nx_cu * ny_cu)
return;
int i = ell / ny_cu;
int j = ell - i * ny_cu;
// Boundary ring is not propagated here (set_un zeroes it each step).
if (i == 0 || i == nx_cu - 1 || j == 0 || j == ny_cu - 1)
return;
int ell_i_p1 = (i + 1) * ny_cu + j; // i+1
int ell_i_m1 = (i - 1) * ny_cu + j; // i-1
int ell_j_p1 = i * ny_cu + (j + 1); // j+1
int ell_j_m1 = i * ny_cu + (j - 1); // j-1
//
// Time propagation
//
cu_prec u = u_n[ell];
u_n_p1[ell] = 2.0 * u - u_n_m1[ell] +
alpha2 * ((u_n[ell_i_p1] + u_n[ell_i_m1]) +
(u_n[ell_j_p1] + u_n[ell_j_m1]) -
4.0 * u);
// Set u_n_m1
u_n_m1[ell] = u;
}
// Copy the freshly computed time level into u_n and enforce the
// homogeneous Dirichlet boundary condition (u = 0 on the domain edge).
__global__ void set_un(cu_prec *u_n,
                       cu_prec *u_n_p1)
{
    const int ell = blockIdx.x * blockDim.x + threadIdx.x;
    // Off-by-one fix: with the original `>` test, the thread at
    // ell == nx_cu * ny_cu decoded to (i == nx_cu, j == 0), took the
    // boundary branch and performed an out-of-bounds write
    // u_n[nx_cu * ny_cu] = 0.
    if (ell >= nx_cu * ny_cu)
        return;
    int i = ell / ny_cu;
    int j = ell - i * ny_cu;
    // Boundary condition
    if (i == 0 || i == nx_cu - 1 || j == 0 || j == ny_cu - 1)
    {
        u_n[ell] = 0.0;
        return;
    }
    u_n[ell] = u_n_p1[ell];
}
//
// Solve the Wave Equation
//
// GPU driver for the 2D wave equation u_tt = c^2 (u_xx + u_yy) on
// [-1,1]^2 with u = 0 on the boundary and a Gaussian initial profile.
//   n         : grid points per dimension (n x n grid)
//   n_steps   : number of explicit time steps
//   n_animate : number of animation snapshots (files data/results<k>.m)
// Writes data/results.m with X, Y and the final F for MATLAB plotting.
void solve_wave_eq_gpu(int n, int n_steps, int n_animate)
{
    cu_prec c_wv = 1.0;
    cu_prec dx = 2.0 / (n - 1.0);
    cu_prec dt = 0.5 * dx / c_wv;          // To meet the CFL condition
    cu_prec aa = ::pow(c_wv * dt / dx, 2); // alpha^2 = (c*dt/dx)^2
    int nx = n;
    int ny = n;
    std::vector<cu_prec> x(n * n);
    std::vector<cu_prec> y(n * n);
    std::vector<cu_prec> u_cpu(n * n);
    int n_th_per_block = 1024;
    int n_blocks = ::ceil(((prec)(nx * ny)) /
                          ((prec)n_th_per_block));
    //
    // Set the initial derivative: u_0 = dt * exp(-(x^2 + y^2) / sigma^2)
    //
    cu_prec sigma2 = ::pow(0.2, 2);
    for (int i = 0; i < nx; i++)
    {
        for (int j = 0; j < ny; j++)
        {
            int ell = i * ny + j;
            x[ell] = -1.0 + 2.0 * i / (nx - 1.0);
            y[ell] = -1.0 + 2.0 * j / (ny - 1.0);
            u_cpu[ell] = ::exp(-(::pow(x[ell], 2) + ::pow(y[ell], 2)) / sigma2);
            u_cpu[ell] *= dt;
        }
    }
    int bytes = nx * ny * sizeof(cu_prec);
    checkCuda(hipMemcpyToSymbol(nx_cu, &nx, sizeof(int), 0, hipMemcpyHostToDevice));
    checkCuda(hipMemcpyToSymbol(ny_cu, &ny, sizeof(int), 0, hipMemcpyHostToDevice));
    checkCuda(hipMemcpyToSymbol(alpha2, &aa, sizeof(cu_prec), 0, hipMemcpyHostToDevice));
    cu_prec *u_n, *u_n_p1, *u_n_m1;
    checkCuda(hipMalloc(&u_n, bytes));
    checkCuda(hipMalloc(&u_n_p1, bytes));
    checkCuda(hipMalloc(&u_n_m1, bytes));
    checkCuda(hipMemset(u_n, 0, bytes));
    checkCuda(hipMemset(u_n_p1, 0, bytes));
    // Bug fix: u_n_m1 is read by the very first wave_eq_propagate launch,
    // so it must be zero-initialized too (it was left uninitialized).
    checkCuda(hipMemset(u_n_m1, 0, bytes));
    checkCuda(hipMemcpy(u_n, &(u_cpu[0]), bytes, hipMemcpyHostToDevice));
    // Snapshot cadence. Guard against aux == 0 (t % 0 is undefined
    // behavior), which happened when n_animate was 0 or > n_steps.
    int aux = (n_animate > 0) ? n_steps / n_animate : 0;
    if (aux <= 0)
        aux = 1;
    int c = 0;
    int skip = 10; // subsample the grid when writing snapshot files
    for (int t = 0; t < n_steps; t++)
    {
        hipLaunchKernelGGL(( wave_eq_propagate), dim3(n_blocks), dim3(n_th_per_block), 0, 0, u_n, u_n_p1, u_n_m1);
        hipLaunchKernelGGL(( set_un), dim3(n_blocks), dim3(n_th_per_block), 0, 0, u_n, u_n_p1);
        if (t % aux == 0)
        {
            // Blocking copy; also synchronizes with the launches above.
            checkCuda(hipMemcpy(&(u_cpu[0]), u_n, bytes, hipMemcpyDeviceToHost));
            std::string filename = "data/results";
            c++;
            filename.append(std::to_string(c));
            filename.append(".m");
            std::ofstream file;
            file.open(filename, std::ios::out);
            file << "F = [";
            for (int i = 0; i < nx; i += skip)
            {
                if (i != 0)
                    file << ";" << std::endl;
                for (int j = 0; j < ny; j += skip)
                {
                    int ell = i * ny + j;
                    if (j != 0)
                        file << ", ";
                    file << u_cpu[ell];
                }
            }
            file << "];" << std::endl;
        }
    }
    checkCuda(hipMemcpy(&(u_cpu[0]), u_n, bytes, hipMemcpyDeviceToHost));
    checkCuda(hipFree(u_n));
    checkCuda(hipFree(u_n_p1));
    checkCuda(hipFree(u_n_m1));
    //
    // Save text file for visualization on matlab
    //
    std::string filename = "data/results.m";
    std::ofstream file;
    file.open(filename, std::ios::out);
    file << "X = [";
    for (int i = 0; i < nx; i += skip)
    {
        if (i != 0)
            file << ";" << std::endl;
        for (int j = 0; j < ny; j += skip)
        {
            int ell = i * ny + j;
            if (j != 0)
                file << ", ";
            file << x[ell];
        }
    }
    file << "];" << std::endl;
    file << "Y = [";
    for (int i = 0; i < nx; i += skip)
    {
        if (i != 0)
            file << ";" << std::endl;
        for (int j = 0; j < ny; j += skip)
        {
            int ell = i * ny + j;
            if (j != 0)
                file << ", ";
            file << y[ell];
        }
    }
    file << "];" << std::endl;
    file << "F = [";
    for (int i = 0; i < nx; i += skip)
    {
        if (i != 0)
            file << ";" << std::endl;
        for (int j = 0; j < ny; j += skip)
        {
            int ell = i * ny + j;
            if (j != 0)
                file << ", ";
            file << u_cpu[ell];
        }
    }
    file << "];" << std::endl;
    return;
}
//
// Wave equation on CPU
//
// CPU reference for solve_wave_eq_gpu: identical leapfrog scheme,
// snapshot cadence and output files. Parameters as in the GPU version.
void solve_wave_eq_cpu(int n, int n_steps, int n_animate)
{
    cu_prec c_wv = 1.0;
    cu_prec dx = 2.0 / (n - 1.0);
    cu_prec dt = 0.5 * dx / c_wv; // To meet the CFL condition
    cu_prec aa = ::pow(c_wv * dt / dx, 2);
    int nx = n;
    int ny = n;
    std::vector<cu_prec> x(n * n);
    std::vector<cu_prec> y(n * n);
    std::vector<cu_prec> u_n(n * n);
    std::vector<cu_prec> u_n_p1(n * n, 0.0);
    std::vector<cu_prec> u_n_m1(n * n, 0.0);
    //
    // Set the initial derivative
    //
    cu_prec sigma2 = ::pow(0.2, 2);
    for (int i = 0; i < nx; i++)
    {
        for (int j = 0; j < ny; j++)
        {
            int ell = i * ny + j;
            x[ell] = -1.0 + 2.0 * i / (nx - 1.0);
            y[ell] = -1.0 + 2.0 * j / (ny - 1.0);
            u_n[ell] = ::exp(-(::pow(x[ell], 2) + ::pow(y[ell], 2)) / sigma2);
            u_n[ell] *= dt;
        }
    }
    // Snapshot cadence; guard against t % 0 (undefined behavior) when
    // n_animate is 0 or larger than n_steps.
    int aux = (n_animate > 0) ? n_steps / n_animate : 0;
    if (aux <= 0)
        aux = 1;
    int c = 0;
    int skip = 10;
    for (int t = 0; t < n_steps; t++)
    {
        for (int ell = 0; ell < n * n; ell++)
        {
            int i = ell / ny;
            int j = ell - i * ny;
            // Bug fix: the stencil below reads (i±1, j±1) neighbors, which
            // ran out of bounds on boundary cells (e.g. ell == 0 read
            // u_n[-ny]). Boundary cells only need the time-level shift;
            // their u_n_p1 value is never read (the pass below zeroes the
            // boundary and next step's stencil only touches the interior).
            if (i == 0 || i == nx - 1 || j == 0 || j == ny - 1)
            {
                u_n_m1[ell] = u_n[ell];
                continue;
            }
            int ell_i_p1 = (i + 1) * ny + j; // i+1
            int ell_i_m1 = (i - 1) * ny + j; // i-1
            int ell_j_p1 = i * ny + (j + 1); // j+1
            int ell_j_m1 = i * ny + (j - 1); // j-1
            //
            // Time propagation (leapfrog)
            //
            cu_prec u = u_n[ell];
            u_n_p1[ell] = 2.0 * u - u_n_m1[ell] +
                          aa * ((u_n[ell_i_p1] + u_n[ell_i_m1]) +
                                (u_n[ell_j_p1] + u_n[ell_j_m1]) -
                                4.0 * u);
            u_n_m1[ell] = u;
        }
        // Set updated result and enforce u = 0 on the boundary
        for (int ell = 0; ell < n * n; ell++)
        {
            int i = ell / ny;
            int j = ell - i * ny;
            if (i == 0 || i == nx - 1 || j == 0 || j == ny - 1)
            {
                u_n[ell] = 0.0;
                continue;
            }
            u_n[ell] = u_n_p1[ell];
        }
        if (t % aux == 0)
        {
            std::string filename = "data/results";
            c++;
            filename.append(std::to_string(c));
            filename.append(".m");
            std::ofstream file;
            file.open(filename, std::ios::out);
            file << "F = [";
            for (int i = 0; i < nx; i += skip)
            {
                if (i != 0)
                    file << ";" << std::endl;
                for (int j = 0; j < ny; j += skip)
                {
                    int ell = i * ny + j;
                    if (j != 0)
                        file << ", ";
                    file << u_n[ell];
                }
            }
            file << "];" << std::endl;
        }
    }
    //
    // Save text file for visualization on matlab
    //
    std::string filename = "data/results.m";
    std::ofstream file;
    file.open(filename, std::ios::out);
    file << "X = [";
    for (int i = 0; i < nx; i += skip)
    {
        if (i != 0)
            file << ";" << std::endl;
        for (int j = 0; j < ny; j += skip)
        {
            int ell = i * ny + j;
            if (j != 0)
                file << ", ";
            file << x[ell];
        }
    }
    file << "];" << std::endl;
    file << "Y = [";
    for (int i = 0; i < nx; i += skip)
    {
        if (i != 0)
            file << ";" << std::endl;
        for (int j = 0; j < ny; j += skip)
        {
            int ell = i * ny + j;
            if (j != 0)
                file << ", ";
            file << y[ell];
        }
    }
    file << "];" << std::endl;
    file << "F = [";
    for (int i = 0; i < nx; i += skip)
    {
        if (i != 0)
            file << ";" << std::endl;
        for (int j = 0; j < ny; j += skip)
        {
            int ell = i * ny + j;
            if (j != 0)
                file << ", ";
            file << u_n[ell];
        }
    }
    file << "];" << std::endl;
    return;
}
// Entry point: runs the CPU reference and both GPU Laplacian variants.
// The grid edge is a multiple of n_per_b, which fd_2d_gpu_v1's tiled
// decomposition requires.
int main(void)
{
    int nx = n_per_b * 400;
    int ny = n_per_b * 400;
    int n_steps = 500;  // used only by the disabled wave-equation drivers
    int n_animate = 1;  // idem
    double t1, t2;      // timing scratch for the disabled drivers below
    // t1 = omp_get_wtime();
    // solve_wave_eq_cpu(nx, n_steps, n_animate);
    // t2 = omp_get_wtime();
    // std::cout << "Time for CPU Wave Equation = " << t2-t1 << std::endl;
    // t1 = omp_get_wtime();
    // solve_wave_eq_gpu(nx, n_steps, n_animate);
    // t2 = omp_get_wtime();
    // std::cout << "Time for GPU Wave Equation = " << t2-t1 << std::endl;
    // solve_wave_eq(nx, n_steps);
    fd_2d_cpu(nx, ny);
    fd_2d_gpu_v0(nx, ny);
    fd_2d_gpu_v1(nx, ny);
    return 0;
} | d5a19702a82ed2ee861d6582e097fab17b77ad47.cu | //
// For intellisense
//
#ifdef __INTELLISENSE__
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <device_functions.h>
#include <cuda_runtime_api.h>
#endif
#include <stdio.h>
#include <iostream>
#include <assert.h>
#include <vector>
#include <cmath>
#include <omp.h>
#include <fstream>
using prec = double;
using cu_prec = float;
//
//
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
//
// NOTE(review): because the check compiles out unless DEBUG/_DEBUG is
// defined, release builds silently ignore failures; the status is still
// returned so callers could test it themselves.
inline cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
    if (result != cudaSuccess)
    {
        // Print the human-readable error, then trip the assert.
        fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
        assert(result == cudaSuccess);
    }
#endif
    return result;
}
// Error-checking macro: wraps a CUDA call (or cudaGetLastError /
// cudaDeviceSynchronize) and reports failures with file and line.
#define gpuErrchk(ans)                        \
    {                                         \
        gpuAssert((ans), __FILE__, __LINE__); \
    }
// Prints the error string with its location and, when `abort` is true
// (the default), terminates the process with the error code.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort)
            exit(code);
    }
}
//
// Test function in 2D: f(x, y) = cos(x) * exp(y), evaluated element-wise
// over parallel coordinate vectors. `f` must be at least as long as `x`;
// `x` and `y` are expected to have equal length.
//
void f_xy(std::vector<prec> &x, std::vector<prec> &y, std::vector<prec> &f)
{
    const std::size_t count = x.size();
    for (std::size_t k = 0; k < count; ++k)
    {
        f[k] = std::cos(x[k]) * std::exp(y[k]);
    }
}
// Analytic Laplacian of the test function f(x,y) = cos(x) * exp(y):
// f_xx + f_yy = -cos(x)exp(y) + cos(x)exp(y) = 0 everywhere, so the
// exact reference value is identically zero.
prec f_laplacian(prec x, prec y)
{
    return 0.0;
}
// Single-precision overload of the 2D test function:
// f[i] = cos(x[i]) * exp(y[i]) for every element of the input vectors.
void f_xy(std::vector<float> &x, std::vector<float> &y, std::vector<float> &f)
{
    const std::size_t count = x.size();
    for (std::size_t k = 0; k < count; ++k)
        f[k] = std::cos(x[k]) * std::exp(y[k]);
}
// Single-precision analytic Laplacian of cos(x) * exp(y); the two second
// derivatives cancel exactly, so the result is identically zero.
float f_laplacian(float x, float y)
{
    return 0.0f; // same value as the original (float)0.0
}
//
// Finite Difference in the CPU
//
void fd_2d_cpu(int nx, int ny)
{
std::vector<prec> x(nx * ny);
std::vector<prec> y(nx * ny);
std::vector<prec> f(nx * ny);
std::vector<prec> f_d2(nx * ny);
prec dx2 = std::pow(2.0 / (nx - 1.0), 2);
prec dy2 = std::pow(2.0 / (ny - 1.0), 2);
//
// Initialize the values of x and y
//
for (int i = 0; i < nx; i++)
{
for (int j = 0; j < ny; j++)
{
int ell = i * ny + j;
x[ell] = -1.0 + 2.0 * i / (nx - 1.0);
y[ell] = -1.0 + 2.0 * j / (ny - 1.0);
}
}
f_xy(x, y, f);
double t1 = omp_get_wtime();
for (int i = 1; i < nx - 1; i++)
{
for (int j = 1; j < ny - 1; j++)
{
int ell = i * ny + j;
int ell_i_p1 = (i + 1) * ny + j; // i+1
int ell_i_m1 = (i - 1) * ny + j; // i-1
int ell_j_p1 = i * ny + (j + 1); // i+1
int ell_j_m1 = i * ny + (j - 1); // i-1
f_d2[ell] = (f[ell_i_p1] + f[ell_i_m1]) / dx2 +
(f[ell_j_p1] + f[ell_j_m1]) / dy2 -
(2.0 / dx2 + 2.0 / dy2) * f[ell];
}
}
double t2 = omp_get_wtime();
std::cout << "Time for 2D Laplacian in CPU = " << t2 - t1 << " sec" << std::endl;
//
// Check for the maximum error
//
prec max_err = 0.0;
for (int i = 1; i < nx - 1; i++)
{
for (int j = 1; j < ny - 1; j++)
{
int ell = i * ny + j;
prec lap_exact = f_laplacian(x[ell], y[ell]);
prec err_tmp = std::abs(lap_exact - f_d2[ell]);
// std::cout << f_d2[ell] << std::endl;
if ((i == 1) && (j == 1))
{
max_err = err_tmp;
}
else if (err_tmp > max_err)
{
max_err = err_tmp;
}
}
}
std::cout << "Maximum error in CPU = " << max_err << std::endl;
return;
}
//
// GPU Kernel the "silly" way
//
// One thread per grid point; every stencil read comes straight from
// global memory (no tiling). Computes the un-scaled 5-point stencil
// (f_{i+1,j} + f_{i-1,j} + f_{i,j+1} + f_{i,j-1} - 4 f_{i,j}); the host
// divides by dx2 afterwards (which assumes dx == dy).
__constant__ int nx_cu, ny_cu;
__global__ void laplacian_silly(cu_prec *f,
                                cu_prec *f_d2)
{
    const int ell = blockIdx.x * blockDim.x + threadIdx.x;
    // Off-by-one fix: the original `>` test let ell == nx_cu * ny_cu
    // (one past the end of the arrays) through the guard.
    if (ell >= nx_cu * ny_cu)
        return;
    int i = ell / ny_cu;
    int j = ell - i * ny_cu;
    if (i == 0 || i == nx_cu - 1 || j == 0 || j == ny_cu - 1)
        return; // stencil only defined on interior points
    int ell_i_p1 = (i + 1) * ny_cu + j; // i+1
    int ell_i_m1 = (i - 1) * ny_cu + j; // i-1
    int ell_j_p1 = i * ny_cu + (j + 1); // j+1
    int ell_j_m1 = i * ny_cu + (j - 1); // j-1
    f_d2[ell] = (f[ell_i_p1] + f[ell_i_m1]) +
                (f[ell_j_p1] + f[ell_j_m1]) -
                4.0 * f[ell];
}
//
// GPU Kernel using shared memory
//
// Stencil coefficients in constant memory. NOTE(review): currently unused —
// the cudaMemcpyToSymbol calls that would fill them are commented out in
// fd_2d_gpu_v1, and laplacian_shared hard-codes its coefficients.
__constant__ cu_prec c_a, c_b, c_c;
// Tile edge: thread blocks are n_per_b x n_per_b; fd_2d_gpu_v1 launches a
// grid of exactly nx/n_per_b x ny/n_per_b blocks (no remainder handling).
const int n_per_b = 32;
// Tiled Laplacian kernel: each block stages its n_per_b x n_per_b tile of
// `f`, plus a one-cell halo, into shared memory, then applies the
// un-scaled 5-point stencil. Assumes blockDim == (n_per_b, n_per_b) and
// that nx, ny are exact multiples of n_per_b, as launched by
// fd_2d_gpu_v1 (no out-of-range guard on i, j — TODO confirm for other
// launch configurations).
// NOTE(review): tiles that touch the physical boundary never load the
// corresponding halo row/column (the `i != 0`-style guards skip them),
// yet f_d2 is still written for boundary points, using uninitialized
// shared memory. fd_2d_gpu_v1's error check only inspects interior
// points, so this goes unnoticed there.
__global__ void laplacian_shared(cu_prec *f,
                                 cu_prec *f_d2)
{
    // Tile plus one-cell halo on every side.
    __shared__ cu_prec f_shared[n_per_b + 2][n_per_b + 2];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int ell = i * ny_cu + j; // row-major flat index
    // printf("Blk: (%d,%d) Thread: (%d,%d) -> Row/Col = (%d,%d)\n",
    //        blockIdx.x, blockIdx.y,
    //        threadIdx.x, threadIdx.y,
    //        i, j);
    // if (ell > nx_cu * ny_cu)
    //     return;
    //
    // Fill the local memory data
    //
    f_shared[threadIdx.x + 1][threadIdx.y + 1] = f[ell];
    // If we are on an edge on the block we add the values
    // (halo loads: first/last thread in each direction fetches the
    //  neighboring tile's edge row/column, if one exists).
    if (threadIdx.x == 0 && i != 0)
    {
        int ell_tmp = (i - 1) * ny_cu + j;
        f_shared[0][threadIdx.y + 1] = f[ell_tmp];
    }
    if (threadIdx.x == (blockDim.x - 1) && i != (nx_cu - 1))
    {
        int ell_tmp = (i + 1) * ny_cu + j;
        f_shared[n_per_b + 1][threadIdx.y + 1] = f[ell_tmp];
    }
    if (threadIdx.y == 0 && j != 0)
    {
        int ell_tmp = i * ny_cu + (j - 1);
        f_shared[threadIdx.x + 1][0] = f[ell_tmp];
    }
    if (threadIdx.y == (blockDim.y - 1) && j != (ny_cu - 1))
    {
        int ell_tmp = i * ny_cu + (j + 1);
        f_shared[threadIdx.x + 1][n_per_b + 1] = f[ell_tmp];
    }
    // Barrier between the shared-memory writes above and the reads below.
    __syncthreads();
    // int i_m = threadIdx.x;
    // int i_0 = threadIdx.x + 1;
    // int i_p = threadIdx.x + 2;
    // int j_m = threadIdx.y;
    // int j_0 = threadIdx.y + 1;
    // int j_p = threadIdx.y + 2;
    // f_d2[ell] = (f_shared[i_p][j_0] + f_shared[i_m][j_0]) +
    //             (f_shared[i_0][j_p] + f_shared[i_0][j_m]) -
    //             4.0 * f_shared[i_0][j_0];
    // Un-scaled 5-point stencil from the staged tile.
    f_d2[ell] = (f_shared[threadIdx.x + 2][threadIdx.y + 1] + f_shared[threadIdx.x][threadIdx.y + 1]) +
                (f_shared[threadIdx.x + 1][threadIdx.y + 2] + f_shared[threadIdx.x + 1][threadIdx.y]) -
                4.0 * f_shared[threadIdx.x + 1][threadIdx.y + 1];
}
//
// Finite Difference in the GPU
//
// v0: one thread per grid point, all stencil reads from global memory
// (laplacian_silly). Computes the un-scaled stencil on the device, copies
// the result back, applies the 1/dx2 scale on the host (assumes dx == dy),
// and reports timing plus the maximum interior error.
void fd_2d_gpu_v0(int nx, int ny)
{
    std::vector<cu_prec> x(nx * ny);
    std::vector<cu_prec> y(nx * ny);
    std::vector<cu_prec> f(nx * ny);
    std::vector<cu_prec> f_d2(nx * ny);
    cu_prec dx2 = (cu_prec)std::pow(2.0 / (nx - 1.0), 2);
    cu_prec dy2 = (cu_prec)std::pow(2.0 / (ny - 1.0), 2); // kept for symmetry (dx == dy assumed)
    //
    // Initialize the values of x and y (uniform grid over [-1,1]^2)
    //
    for (int i = 0; i < nx; i++)
    {
        for (int j = 0; j < ny; j++)
        {
            int ell = i * ny + j;
            x[ell] = (cu_prec)(-1.0 + 2.0 * i / (nx - 1.0));
            y[ell] = (cu_prec)(-1.0 + 2.0 * j / (ny - 1.0));
        }
    }
    f_xy(x, y, f);
    //
    // Set the CUDA variables
    //
    double t1 = omp_get_wtime();
    cu_prec *f_cu, *f_d2_cu;
    int bytes = nx * ny * sizeof(cu_prec);
    checkCuda(cudaMemcpyToSymbol(nx_cu, &nx, sizeof(int), 0, cudaMemcpyHostToDevice));
    checkCuda(cudaMemcpyToSymbol(ny_cu, &ny, sizeof(int), 0, cudaMemcpyHostToDevice));
    checkCuda(cudaMalloc(&f_cu, bytes));
    checkCuda(cudaMalloc(&f_d2_cu, bytes));
    checkCuda(cudaMemset(f_d2_cu, 0, bytes));
    checkCuda(cudaMemcpy(f_cu, &(f[0]), bytes, cudaMemcpyHostToDevice));
    int n_th_per_block = 1024;
    int n_blocks = std::ceil(((prec)(nx * ny)) /
                             ((prec)n_th_per_block));
    const int nReps = 20;
    laplacian_silly<<<n_blocks, n_th_per_block>>>(f_cu, f_d2_cu); // Warm up
    for (int i = 0; i < nReps; i++)
        laplacian_silly<<<n_blocks, n_th_per_block>>>(f_cu, f_d2_cu);
    // Consistency fix with v1: surface launch-configuration and execution
    // errors instead of letting them be masked by the blocking memcpy.
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    checkCuda(cudaMemcpy(&(f_d2[0]), f_d2_cu, bytes, cudaMemcpyDeviceToHost));
    checkCuda(cudaFree(f_cu));
    checkCuda(cudaFree(f_d2_cu));
    // Add dx2 factor (stencil was computed un-scaled on the device)
    for (int i = 0; i < nx * ny; i++)
    {
        f_d2[i] /= dx2;
    }
    double t2 = omp_get_wtime();
    std::cout << "Time for 2D Laplacian in GPU (v0) = " << t2 - t1 << " sec" << std::endl;
    //
    // Check for the maximum error (interior points only; errors are
    // non-negative so a running max from zero is sufficient)
    //
    cu_prec max_err = 0.0;
    for (int i = 1; i < nx - 1; i++)
    {
        for (int j = 1; j < ny - 1; j++)
        {
            int ell = i * ny + j;
            cu_prec lap_exact = f_laplacian(x[ell], y[ell]);
            cu_prec err_tmp = std::abs(lap_exact - f_d2[ell]);
            if (err_tmp > max_err)
            {
                max_err = err_tmp;
            }
        }
    }
    std::cout << "Maximum error in GPU (v0) = " << max_err << std::endl;
    return;
}
//
// Finite Difference in the GPU
//
// v1: tiled kernel (laplacian_shared). Each n_per_b x n_per_b block stages
// its tile plus a one-cell halo in shared memory before applying the
// stencil. Requires nx and ny to be exact multiples of n_per_b, because
// the grid is computed by integer division with no remainder blocks.
// Host applies the 1/dx2 scale afterwards (assumes dx == dy).
void fd_2d_gpu_v1(int nx, int ny)
{
    std::vector<cu_prec> x(nx * ny);
    std::vector<cu_prec> y(nx * ny);
    std::vector<cu_prec> f(nx * ny);
    std::vector<cu_prec> f_d2(nx * ny);
    cu_prec dx2 = (cu_prec)std::pow(2.0 / (nx - 1.0), 2); // explicit cast for consistency with v0
    cu_prec dy2 = (cu_prec)std::pow(2.0 / (ny - 1.0), 2); // kept for symmetry (dx == dy assumed)
    //
    // Initialize the values of x and y
    //
    for (int i = 0; i < nx; i++)
    {
        for (int j = 0; j < ny; j++)
        {
            int ell = i * ny + j;
            x[ell] = (cu_prec)(-1.0 + 2.0 * i / (nx - 1.0));
            y[ell] = (cu_prec)(-1.0 + 2.0 * j / (ny - 1.0));
        }
    }
    f_xy(x, y, f);
    //
    // Set the CUDA variables
    //
    double t1 = omp_get_wtime();
    cu_prec *f_cu, *f_d2_cu;
    int bytes = nx * ny * sizeof(cu_prec);
    checkCuda(cudaMemcpyToSymbol(nx_cu, &nx, sizeof(int), 0, cudaMemcpyHostToDevice));
    checkCuda(cudaMemcpyToSymbol(ny_cu, &ny, sizeof(int), 0, cudaMemcpyHostToDevice));
    checkCuda(cudaMalloc(&f_cu, bytes));
    checkCuda(cudaMalloc(&f_d2_cu, bytes));
    checkCuda(cudaMemset(f_d2_cu, 0, bytes));
    checkCuda(cudaMemcpy(f_cu, &(f[0]), bytes, cudaMemcpyHostToDevice));
    dim3 block = dim3(n_per_b, n_per_b);
    dim3 grid = dim3(nx / n_per_b, ny / n_per_b); // exact division assumed
    // (removed: unused stencil-coefficient locals a, b, c and the
    //  commented-out cudaMemcpyToSymbol calls that would have used them)
    const int nReps = 20;
    laplacian_shared<<<grid, block>>>(f_cu, f_d2_cu); // Warm up
    for (int i = 0; i < nReps; i++)
        laplacian_shared<<<grid, block>>>(f_cu, f_d2_cu);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    checkCuda(cudaMemcpy(&(f_d2[0]), f_d2_cu, bytes, cudaMemcpyDeviceToHost));
    checkCuda(cudaFree(f_cu));
    checkCuda(cudaFree(f_d2_cu));
    // Add dx2 factor
    for (int i = 0; i < nx * ny; i++)
    {
        f_d2[i] /= dx2;
    }
    double t2 = omp_get_wtime();
    std::cout << "Time for 2D Laplacian in GPU (v1) = " << t2 - t1 << " sec" << std::endl;
    //
    // Check for the maximum error (interior points only; errors are
    // non-negative so a running max from zero is sufficient)
    //
    cu_prec max_err = 0.0;
    for (int i = 1; i < nx - 1; i++)
    {
        for (int j = 1; j < ny - 1; j++)
        {
            int ell = i * ny + j;
            cu_prec lap_exact = f_laplacian(x[ell], y[ell]);
            cu_prec err_tmp = std::abs(lap_exact - f_d2[ell]);
            if (err_tmp > max_err)
            {
                max_err = err_tmp;
            }
        }
    }
    std::cout << "Maximum error in GPU (v1) = " << max_err << std::endl;
    return;
}
//
// Wave equation
//
// One explicit leapfrog step of the 2D wave equation on interior points:
//   u^{n+1} = 2 u^n - u^{n-1} + alpha2 * Lap5(u^n)
// Launched 1D with (at least) one thread per grid point. Boundary points
// are left untouched here; set_un enforces the Dirichlet condition.
// Also shifts the time levels by storing u^n into u_n_m1.
__constant__ cu_prec alpha2;
__global__ void wave_eq_propagate(cu_prec *u_n,
                                  cu_prec *u_n_p1,
                                  cu_prec *u_n_m1)
{
    const int ell = blockIdx.x * blockDim.x + threadIdx.x;
    // Off-by-one fix: the original `>` test let the thread with
    // ell == nx_cu * ny_cu fall through (one element past the end).
    if (ell >= nx_cu * ny_cu)
        return;
    int i = ell / ny_cu;     // row index
    int j = ell - i * ny_cu; // column index
    if (i == 0 || i == nx_cu - 1 || j == 0 || j == ny_cu - 1)
        return; // boundaries are handled by set_un
    int ell_i_p1 = (i + 1) * ny_cu + j; // (i+1, j)
    int ell_i_m1 = (i - 1) * ny_cu + j; // (i-1, j)
    int ell_j_p1 = i * ny_cu + (j + 1); // (i, j+1)
    int ell_j_m1 = i * ny_cu + (j - 1); // (i, j-1)
    //
    // Time propagation; alpha2 = (c*dt/dx)^2 lives in constant memory.
    //
    cu_prec u = u_n[ell];
    u_n_p1[ell] = 2.0 * u - u_n_m1[ell] +
                  alpha2 * ((u_n[ell_i_p1] + u_n[ell_i_m1]) +
                            (u_n[ell_j_p1] + u_n[ell_j_m1]) -
                            4.0 * u);
    // Shift time levels: current solution becomes the previous one.
    u_n_m1[ell] = u;
}
// Copy the freshly computed time level into u_n and enforce the
// homogeneous Dirichlet boundary condition (u = 0 on the domain edge).
__global__ void set_un(cu_prec *u_n,
                       cu_prec *u_n_p1)
{
    const int ell = blockIdx.x * blockDim.x + threadIdx.x;
    // Off-by-one fix: with the original `>` test, the thread at
    // ell == nx_cu * ny_cu decoded to (i == nx_cu, j == 0), took the
    // boundary branch and performed an out-of-bounds write
    // u_n[nx_cu * ny_cu] = 0.
    if (ell >= nx_cu * ny_cu)
        return;
    int i = ell / ny_cu;
    int j = ell - i * ny_cu;
    // Boundary condition
    if (i == 0 || i == nx_cu - 1 || j == 0 || j == ny_cu - 1)
    {
        u_n[ell] = 0.0;
        return;
    }
    u_n[ell] = u_n_p1[ell];
}
//
// Solve the Wave Equation
//
// GPU driver for the 2D wave equation u_tt = c^2 (u_xx + u_yy) on
// [-1,1]^2 with u = 0 on the boundary and a Gaussian initial profile.
//   n         : grid points per dimension (n x n grid)
//   n_steps   : number of explicit time steps
//   n_animate : number of animation snapshots (files data/results<k>.m)
// Writes data/results.m with X, Y and the final F for MATLAB plotting.
void solve_wave_eq_gpu(int n, int n_steps, int n_animate)
{
    cu_prec c_wv = 1.0;
    cu_prec dx = 2.0 / (n - 1.0);
    cu_prec dt = 0.5 * dx / c_wv;             // To meet the CFL condition
    cu_prec aa = std::pow(c_wv * dt / dx, 2); // alpha^2 = (c*dt/dx)^2
    int nx = n;
    int ny = n;
    std::vector<cu_prec> x(n * n);
    std::vector<cu_prec> y(n * n);
    std::vector<cu_prec> u_cpu(n * n);
    int n_th_per_block = 1024;
    int n_blocks = std::ceil(((prec)(nx * ny)) /
                             ((prec)n_th_per_block));
    //
    // Set the initial derivative: u_0 = dt * exp(-(x^2 + y^2) / sigma^2)
    //
    cu_prec sigma2 = std::pow(0.2, 2);
    for (int i = 0; i < nx; i++)
    {
        for (int j = 0; j < ny; j++)
        {
            int ell = i * ny + j;
            x[ell] = -1.0 + 2.0 * i / (nx - 1.0);
            y[ell] = -1.0 + 2.0 * j / (ny - 1.0);
            u_cpu[ell] = std::exp(-(std::pow(x[ell], 2) + std::pow(y[ell], 2)) / sigma2);
            u_cpu[ell] *= dt;
        }
    }
    int bytes = nx * ny * sizeof(cu_prec);
    checkCuda(cudaMemcpyToSymbol(nx_cu, &nx, sizeof(int), 0, cudaMemcpyHostToDevice));
    checkCuda(cudaMemcpyToSymbol(ny_cu, &ny, sizeof(int), 0, cudaMemcpyHostToDevice));
    checkCuda(cudaMemcpyToSymbol(alpha2, &aa, sizeof(cu_prec), 0, cudaMemcpyHostToDevice));
    cu_prec *u_n, *u_n_p1, *u_n_m1;
    checkCuda(cudaMalloc(&u_n, bytes));
    checkCuda(cudaMalloc(&u_n_p1, bytes));
    checkCuda(cudaMalloc(&u_n_m1, bytes));
    checkCuda(cudaMemset(u_n, 0, bytes));
    checkCuda(cudaMemset(u_n_p1, 0, bytes));
    // Bug fix: u_n_m1 is read by the very first wave_eq_propagate launch,
    // so it must be zero-initialized too (it was left uninitialized).
    checkCuda(cudaMemset(u_n_m1, 0, bytes));
    checkCuda(cudaMemcpy(u_n, &(u_cpu[0]), bytes, cudaMemcpyHostToDevice));
    // Snapshot cadence. Guard against aux == 0 (t % 0 is undefined
    // behavior), which happened when n_animate was 0 or > n_steps.
    int aux = (n_animate > 0) ? n_steps / n_animate : 0;
    if (aux <= 0)
        aux = 1;
    int c = 0;
    int skip = 10; // subsample the grid when writing snapshot files
    for (int t = 0; t < n_steps; t++)
    {
        wave_eq_propagate<<<n_blocks, n_th_per_block>>>(u_n, u_n_p1, u_n_m1);
        set_un<<<n_blocks, n_th_per_block>>>(u_n, u_n_p1);
        if (t % aux == 0)
        {
            // Blocking copy; also synchronizes with the launches above.
            checkCuda(cudaMemcpy(&(u_cpu[0]), u_n, bytes, cudaMemcpyDeviceToHost));
            std::string filename = "data/results";
            c++;
            filename.append(std::to_string(c));
            filename.append(".m");
            std::ofstream file;
            file.open(filename, std::ios::out);
            file << "F = [";
            for (int i = 0; i < nx; i += skip)
            {
                if (i != 0)
                    file << ";" << std::endl;
                for (int j = 0; j < ny; j += skip)
                {
                    int ell = i * ny + j;
                    if (j != 0)
                        file << ", ";
                    file << u_cpu[ell];
                }
            }
            file << "];" << std::endl;
        }
    }
    checkCuda(cudaMemcpy(&(u_cpu[0]), u_n, bytes, cudaMemcpyDeviceToHost));
    checkCuda(cudaFree(u_n));
    checkCuda(cudaFree(u_n_p1));
    checkCuda(cudaFree(u_n_m1));
    //
    // Save text file for visualization on matlab
    //
    std::string filename = "data/results.m";
    std::ofstream file;
    file.open(filename, std::ios::out);
    file << "X = [";
    for (int i = 0; i < nx; i += skip)
    {
        if (i != 0)
            file << ";" << std::endl;
        for (int j = 0; j < ny; j += skip)
        {
            int ell = i * ny + j;
            if (j != 0)
                file << ", ";
            file << x[ell];
        }
    }
    file << "];" << std::endl;
    file << "Y = [";
    for (int i = 0; i < nx; i += skip)
    {
        if (i != 0)
            file << ";" << std::endl;
        for (int j = 0; j < ny; j += skip)
        {
            int ell = i * ny + j;
            if (j != 0)
                file << ", ";
            file << y[ell];
        }
    }
    file << "];" << std::endl;
    file << "F = [";
    for (int i = 0; i < nx; i += skip)
    {
        if (i != 0)
            file << ";" << std::endl;
        for (int j = 0; j < ny; j += skip)
        {
            int ell = i * ny + j;
            if (j != 0)
                file << ", ";
            file << u_cpu[ell];
        }
    }
    file << "];" << std::endl;
    return;
}
//
// Wave equation on CPU
//
// CPU reference for solve_wave_eq_gpu: identical leapfrog scheme,
// snapshot cadence and output files. Parameters as in the GPU version.
void solve_wave_eq_cpu(int n, int n_steps, int n_animate)
{
    cu_prec c_wv = 1.0;
    cu_prec dx = 2.0 / (n - 1.0);
    cu_prec dt = 0.5 * dx / c_wv; // To meet the CFL condition
    cu_prec aa = std::pow(c_wv * dt / dx, 2);
    int nx = n;
    int ny = n;
    std::vector<cu_prec> x(n * n);
    std::vector<cu_prec> y(n * n);
    std::vector<cu_prec> u_n(n * n);
    std::vector<cu_prec> u_n_p1(n * n, 0.0);
    std::vector<cu_prec> u_n_m1(n * n, 0.0);
    //
    // Set the initial derivative
    //
    cu_prec sigma2 = std::pow(0.2, 2);
    for (int i = 0; i < nx; i++)
    {
        for (int j = 0; j < ny; j++)
        {
            int ell = i * ny + j;
            x[ell] = -1.0 + 2.0 * i / (nx - 1.0);
            y[ell] = -1.0 + 2.0 * j / (ny - 1.0);
            u_n[ell] = std::exp(-(std::pow(x[ell], 2) + std::pow(y[ell], 2)) / sigma2);
            u_n[ell] *= dt;
        }
    }
    // Snapshot cadence; guard against t % 0 (undefined behavior) when
    // n_animate is 0 or larger than n_steps.
    int aux = (n_animate > 0) ? n_steps / n_animate : 0;
    if (aux <= 0)
        aux = 1;
    int c = 0;
    int skip = 10;
    for (int t = 0; t < n_steps; t++)
    {
        for (int ell = 0; ell < n * n; ell++)
        {
            int i = ell / ny;
            int j = ell - i * ny;
            // Bug fix: the stencil below reads (i±1, j±1) neighbors, which
            // ran out of bounds on boundary cells (e.g. ell == 0 read
            // u_n[-ny]). Boundary cells only need the time-level shift;
            // their u_n_p1 value is never read (the pass below zeroes the
            // boundary and next step's stencil only touches the interior).
            if (i == 0 || i == nx - 1 || j == 0 || j == ny - 1)
            {
                u_n_m1[ell] = u_n[ell];
                continue;
            }
            int ell_i_p1 = (i + 1) * ny + j; // i+1
            int ell_i_m1 = (i - 1) * ny + j; // i-1
            int ell_j_p1 = i * ny + (j + 1); // j+1
            int ell_j_m1 = i * ny + (j - 1); // j-1
            //
            // Time propagation (leapfrog)
            //
            cu_prec u = u_n[ell];
            u_n_p1[ell] = 2.0 * u - u_n_m1[ell] +
                          aa * ((u_n[ell_i_p1] + u_n[ell_i_m1]) +
                                (u_n[ell_j_p1] + u_n[ell_j_m1]) -
                                4.0 * u);
            u_n_m1[ell] = u;
        }
        // Set updated result and enforce u = 0 on the boundary
        for (int ell = 0; ell < n * n; ell++)
        {
            int i = ell / ny;
            int j = ell - i * ny;
            if (i == 0 || i == nx - 1 || j == 0 || j == ny - 1)
            {
                u_n[ell] = 0.0;
                continue;
            }
            u_n[ell] = u_n_p1[ell];
        }
        if (t % aux == 0)
        {
            std::string filename = "data/results";
            c++;
            filename.append(std::to_string(c));
            filename.append(".m");
            std::ofstream file;
            file.open(filename, std::ios::out);
            file << "F = [";
            for (int i = 0; i < nx; i += skip)
            {
                if (i != 0)
                    file << ";" << std::endl;
                for (int j = 0; j < ny; j += skip)
                {
                    int ell = i * ny + j;
                    if (j != 0)
                        file << ", ";
                    file << u_n[ell];
                }
            }
            file << "];" << std::endl;
        }
    }
    //
    // Save text file for visualization on matlab
    //
    std::string filename = "data/results.m";
    std::ofstream file;
    file.open(filename, std::ios::out);
    file << "X = [";
    for (int i = 0; i < nx; i += skip)
    {
        if (i != 0)
            file << ";" << std::endl;
        for (int j = 0; j < ny; j += skip)
        {
            int ell = i * ny + j;
            if (j != 0)
                file << ", ";
            file << x[ell];
        }
    }
    file << "];" << std::endl;
    file << "Y = [";
    for (int i = 0; i < nx; i += skip)
    {
        if (i != 0)
            file << ";" << std::endl;
        for (int j = 0; j < ny; j += skip)
        {
            int ell = i * ny + j;
            if (j != 0)
                file << ", ";
            file << y[ell];
        }
    }
    file << "];" << std::endl;
    file << "F = [";
    for (int i = 0; i < nx; i += skip)
    {
        if (i != 0)
            file << ";" << std::endl;
        for (int j = 0; j < ny; j += skip)
        {
            int ell = i * ny + j;
            if (j != 0)
                file << ", ";
            file << u_n[ell];
        }
    }
    file << "];" << std::endl;
    return;
}
// Entry point: runs the CPU reference and both GPU Laplacian variants on a
// square grid whose edge is a multiple of the tile size n_per_b (required
// by fd_2d_gpu_v1's block decomposition).
int main(void)
{
    const int edge = n_per_b * 400; // multiple of the shared-memory tile size
    int nx = edge;
    int ny = edge;
    int n_steps = 500;  // used only by the disabled wave-equation drivers
    int n_animate = 1;  // idem
    double t1, t2;      // timing scratch for the disabled drivers below
    // t1 = omp_get_wtime();
    // solve_wave_eq_cpu(nx, n_steps, n_animate);
    // t2 = omp_get_wtime();
    // std::cout << "Time for CPU Wave Equation = " << t2-t1 << std::endl;
    // t1 = omp_get_wtime();
    // solve_wave_eq_gpu(nx, n_steps, n_animate);
    // t2 = omp_get_wtime();
    // std::cout << "Time for GPU Wave Equation = " << t2-t1 << std::endl;
    // solve_wave_eq(nx, n_steps);
    fd_2d_cpu(nx, ny);
    fd_2d_gpu_v0(nx, ny);
    fd_2d_gpu_v1(nx, ny);
    return 0;
}
85f57372276e503bdbe29ea51c13697e5c1206d3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "mult_f32.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Block-shape and matrix-size sweep tables for the benchmark below.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for the first argv[1] matrix sizes and every block
// shape, time 1000 launches of mult_f32 and print [usecs,(BX,BY),(X,Y)].
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // Bug fix: the buffers hold floats, so the allocation size must
            // be scaled by sizeof(float) (it was XSIZE*YSIZE bytes before).
            size_t bytes = (size_t)XSIZE * YSIZE * sizeof(float);
            float *left_op = NULL;
            hipMalloc(&left_op, bytes);
            float *right_op = NULL;
            hipMalloc(&right_op, bytes);
            float *output = NULL;
            hipMalloc(&output, bytes);
            int len = 1;
            // Round the launch grid up so it covers the whole matrix.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0)
            {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0); // forces context/runtime initialization
            hipLaunchKernelGGL((mult_f32), dim3(gridBlock), dim3(threadBlock), 0, 0, left_op, right_op, output, len);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL((mult_f32), dim3(gridBlock), dim3(threadBlock), 0, 0, left_op, right_op, output, len);
            }
            // Bug fix: drain the queued warm-up launches so they are not
            // charged to the timed region below.
            hipDeviceSynchronize();
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((mult_f32), dim3(gridBlock), dim3(threadBlock), 0, 0, left_op, right_op, output, len);
            }
            // Bug fix: kernel launches are asynchronous; synchronize before
            // stopping the clock or only launch overhead is measured.
            hipDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            // Bug fix: release the per-configuration buffers (the sweep
            // previously leaked three allocations per iteration).
            hipFree(left_op);
            hipFree(right_op);
            hipFree(output);
        }
    }
} | 85f57372276e503bdbe29ea51c13697e5c1206d3.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "mult_f32.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Block-shape and matrix-size sweep tables for the benchmark below.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for the first argv[1] matrix sizes and every block
// shape, time 1000 launches of mult_f32 and print [usecs,(BX,BY),(X,Y)].
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // Bug fix: the buffers hold floats, so the allocation size must
            // be scaled by sizeof(float) (it was XSIZE*YSIZE bytes before).
            size_t bytes = (size_t)XSIZE * YSIZE * sizeof(float);
            float *left_op = NULL;
            cudaMalloc(&left_op, bytes);
            float *right_op = NULL;
            cudaMalloc(&right_op, bytes);
            float *output = NULL;
            cudaMalloc(&output, bytes);
            int len = 1;
            // Round the launch grid up so it covers the whole matrix.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0)
            {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0); // forces context/runtime initialization
            mult_f32<<<gridBlock, threadBlock>>>(left_op, right_op, output, len);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                mult_f32<<<gridBlock, threadBlock>>>(left_op, right_op, output, len);
            }
            // Bug fix: drain the queued warm-up launches so they are not
            // charged to the timed region below.
            cudaDeviceSynchronize();
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                mult_f32<<<gridBlock, threadBlock>>>(left_op, right_op, output, len);
            }
            // Bug fix: kernel launches are asynchronous; synchronize before
            // stopping the clock or only launch overhead is measured.
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            // Bug fix: release the per-configuration buffers (the sweep
            // previously leaked three allocations per iteration).
            cudaFree(left_op);
            cudaFree(right_op);
            cudaFree(output);
        }
    }
}
084c40c15be70966369df3dc775f2511c375efc5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) Chris Choy (chrischoy@ai.stanford.edu).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#include "gpu.cuh"
#include "pruning.cuh"
namespace minkowski {
// Element-wise copy executed by a single thread: moves `num_elements`
// values of type Dtype from `src` to `dst`. No vectorization and no
// bounds checking; callers guarantee both ranges are valid.
template <typename Dtype>
__device__ void device_memcpy(Dtype *dst, const Dtype *src, int num_elements) {
  int remaining = num_elements;
  while (remaining-- > 0) {
    *dst++ = *src++;
  }
}
// Gather/scatter of feature rows: for each k < n, copies the
// nchannel-wide row in_feat[in_map[k] * nchannel .. ] into
// out_feat[out_map[k] * nchannel .. ].
// CUDA_KERNEL_LOOP comes from gpu.cuh — presumably a grid-stride loop
// over [0, n); TODO confirm against that header.
template <typename Dtype, typename Itype>
__global__ void copy_in_out_map(const int n, const Dtype *in_feat,
                                Dtype *out_feat, const int nchannel,
                                const Itype *in_map, const Itype *out_map) {
  CUDA_KERNEL_LOOP(index, n) {
    // One thread copies one full feature vector (nchannel values).
    device_memcpy(&out_feat[out_map[index] * nchannel],
                  &in_feat[in_map[index] * nchannel], nchannel);
  }
}
// Host-side launcher for the pruning forward pass: gathers the rows
// listed in in_maps[0] from d_in_feat and scatters them to the rows
// listed in out_maps[0] of d_out_feat (nchannel values per row), enqueued
// on `stream`. Only the first kernel map is used.
// NOTE(review): no error check after the launch; failures surface at the
// next synchronizing runtime call.
template <typename Dtype, typename Itype>
void PruningForwardKernelGPU(const Dtype *d_in_feat, Dtype *d_out_feat,
                             const int nchannel,
                             const pInOutMaps<Itype> &in_maps,
                             const pInOutMaps<Itype> &out_maps,
                             hipStream_t stream) {
  const int nnz = in_maps[0].size();
  hipLaunchKernelGGL(( copy_in_out_map<Dtype, Itype>)
      , dim3(GET_BLOCKS(nnz)), dim3(CUDA_NUM_THREADS), 0, stream,
      nnz, d_in_feat, d_out_feat, nchannel, in_maps[0].data(),
      out_maps[0].data());
}
// Host-side launcher for the pruning backward pass: the copy direction is
// reversed relative to the forward pass — gradient rows are read from
// d_grad_out_feat at the out_maps[0] indices and written into
// d_grad_in_feat at the in_maps[0] indices (nchannel values per row),
// enqueued on `stream`.
// NOTE(review): nnz is taken from in_maps[0]; assumes in_maps[0] and
// out_maps[0] have equal length.
template <typename Dtype, typename Itype>
void PruningBackwardKernelGPU(Dtype *d_grad_in_feat,
                              const Dtype *d_grad_out_feat, int nchannel,
                              const pInOutMaps<Itype> &in_maps,
                              const pInOutMaps<Itype> &out_maps,
                              hipStream_t stream) {
  const int nnz = in_maps[0].size();
  hipLaunchKernelGGL(( copy_in_out_map<Dtype, Itype>)
      , dim3(GET_BLOCKS(nnz)), dim3(CUDA_NUM_THREADS), 0, stream,
      nnz, d_grad_out_feat, d_grad_in_feat, nchannel, out_maps[0].data(),
      in_maps[0].data());
}
template void PruningForwardKernelGPU<float, int32_t>(
const float *d_in_feat, float *d_out_feat, int nchannel,
const pInOutMaps<int32_t> &in_maps, const pInOutMaps<int32_t> &out_maps,
hipStream_t stream);
template void PruningBackwardKernelGPU<float, int32_t>(
float *d_grad_in_feat, const float *d_grad_out_feat, int nchannel,
const pInOutMaps<int32_t> &in_maps, const pInOutMaps<int32_t> &out_maps,
hipStream_t stream);
template void PruningForwardKernelGPU<double, int32_t>(
const double *d_in_feat, double *d_out_feat, int nchannel,
const pInOutMaps<int32_t> &in_maps, const pInOutMaps<int32_t> &out_maps,
hipStream_t stream);
template void PruningBackwardKernelGPU<double, int32_t>(
double *d_grad_in_feat, const double *d_grad_out_feat, int nchannel,
const pInOutMaps<int32_t> &in_maps, const pInOutMaps<int32_t> &out_maps,
hipStream_t stream);
} // end namespace minkowski
| 084c40c15be70966369df3dc775f2511c375efc5.cu | /* Copyright (c) Chris Choy (chrischoy@ai.stanford.edu).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#include "gpu.cuh"
#include "pruning.cuh"
namespace minkowski {
template <typename Dtype>
__device__ void device_memcpy(Dtype *dst, const Dtype *src, int num_elements) {
for (int i = 0; i < num_elements; ++i)
dst[i] = src[i];
}
template <typename Dtype, typename Itype>
__global__ void copy_in_out_map(const int n, const Dtype *in_feat,
Dtype *out_feat, const int nchannel,
const Itype *in_map, const Itype *out_map) {
CUDA_KERNEL_LOOP(index, n) {
device_memcpy(&out_feat[out_map[index] * nchannel],
&in_feat[in_map[index] * nchannel], nchannel);
}
}
template <typename Dtype, typename Itype>
void PruningForwardKernelGPU(const Dtype *d_in_feat, Dtype *d_out_feat,
const int nchannel,
const pInOutMaps<Itype> &in_maps,
const pInOutMaps<Itype> &out_maps,
cudaStream_t stream) {
const int nnz = in_maps[0].size();
copy_in_out_map<Dtype, Itype>
<<<GET_BLOCKS(nnz), CUDA_NUM_THREADS, 0, stream>>>(
nnz, d_in_feat, d_out_feat, nchannel, in_maps[0].data(),
out_maps[0].data());
}
template <typename Dtype, typename Itype>
void PruningBackwardKernelGPU(Dtype *d_grad_in_feat,
const Dtype *d_grad_out_feat, int nchannel,
const pInOutMaps<Itype> &in_maps,
const pInOutMaps<Itype> &out_maps,
cudaStream_t stream) {
const int nnz = in_maps[0].size();
copy_in_out_map<Dtype, Itype>
<<<GET_BLOCKS(nnz), CUDA_NUM_THREADS, 0, stream>>>(
nnz, d_grad_out_feat, d_grad_in_feat, nchannel, out_maps[0].data(),
in_maps[0].data());
}
template void PruningForwardKernelGPU<float, int32_t>(
const float *d_in_feat, float *d_out_feat, int nchannel,
const pInOutMaps<int32_t> &in_maps, const pInOutMaps<int32_t> &out_maps,
cudaStream_t stream);
template void PruningBackwardKernelGPU<float, int32_t>(
float *d_grad_in_feat, const float *d_grad_out_feat, int nchannel,
const pInOutMaps<int32_t> &in_maps, const pInOutMaps<int32_t> &out_maps,
cudaStream_t stream);
template void PruningForwardKernelGPU<double, int32_t>(
const double *d_in_feat, double *d_out_feat, int nchannel,
const pInOutMaps<int32_t> &in_maps, const pInOutMaps<int32_t> &out_maps,
cudaStream_t stream);
template void PruningBackwardKernelGPU<double, int32_t>(
double *d_grad_in_feat, const double *d_grad_out_feat, int nchannel,
const pInOutMaps<int32_t> &in_maps, const pInOutMaps<int32_t> &out_maps,
cudaStream_t stream);
} // end namespace minkowski
|
91283f0650ca45e394a135a17a3354fabc1a03e1.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#include <ATen/ATen.h>
#include <ATen/core/TensorAccessor.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <vector>
__constant__ const float kEpsilon = 1e-9;
// TODO(gkioxari) support all data types once AtomicAdd supports doubles.
// Currently, support is for floats only.
__global__ void alphaCompositeCudaForwardKernel(
// clang-format off
at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> result,
const at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> features,
const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> alphas,
const at::PackedTensorAccessor64<int64_t, 4, at::RestrictPtrTraits> points_idx) {
// clang-format on
const int64_t batch_size = result.size(0);
const int64_t C = features.size(0);
const int64_t H = points_idx.size(2);
const int64_t W = points_idx.size(3);
// Get the batch and index
const int batch = blockIdx.x;
const int num_pixels = C * W * H;
const int num_threads = gridDim.y * blockDim.x;
const int tid = blockIdx.y * blockDim.x + threadIdx.x;
// Iterate over each feature in each pixel
for (int pid = tid; pid < num_pixels; pid += num_threads) {
int ch = pid / (W * H);
int j = (pid % (W * H)) / H;
int i = (pid % (W * H)) % H;
// alphacomposite the different values
float cum_alpha = 1.;
// Iterate through the closest K points for this pixel
for (int k = 0; k < points_idx.size(1); ++k) {
int n_idx = points_idx[batch][k][j][i];
// Sentinel value is -1 indicating no point overlaps the pixel
if (n_idx < 0) {
continue;
}
float alpha = alphas[batch][k][j][i];
// TODO(gkioxari) It might be more efficient to have threads write in a
// local variable, and move atomicAdd outside of the loop such that
// atomicAdd is executed once per thread.
atomicAdd(
&result[batch][ch][j][i], features[ch][n_idx] * cum_alpha * alpha);
cum_alpha = cum_alpha * (1 - alpha);
}
}
}
// TODO(gkioxari) support all data types once AtomicAdd supports doubles.
// Currently, support is for floats only.
__global__ void alphaCompositeCudaBackwardKernel(
// clang-format off
at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> grad_features,
at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> grad_alphas,
const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> grad_outputs,
const at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> features,
const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> alphas,
const at::PackedTensorAccessor64<int64_t, 4, at::RestrictPtrTraits> points_idx) {
// clang-format on
const int64_t batch_size = points_idx.size(0);
const int64_t C = features.size(0);
const int64_t H = points_idx.size(2);
const int64_t W = points_idx.size(3);
// Get the batch and index
const int batch = blockIdx.x;
const int num_pixels = C * W * H;
const int num_threads = gridDim.y * blockDim.x;
const int tid = blockIdx.y * blockDim.x + threadIdx.x;
// Parallelize over each feature in each pixel in images of size H * W,
// for each image in the batch of size batch_size
for (int pid = tid; pid < num_pixels; pid += num_threads) {
int ch = pid / (W * H);
int j = (pid % (W * H)) / H;
int i = (pid % (W * H)) % H;
// alphacomposite the different values
float cum_alpha = 1.;
// Iterate through the closest K points for this pixel
for (int k = 0; k < points_idx.size(1); ++k) {
int n_idx = points_idx[batch][k][j][i];
// Sentinel value is -1 indicating no point overlaps the pixel
if (n_idx < 0) {
continue;
}
float alpha = alphas[batch][k][j][i];
// TODO(gkioxari) It might be more efficient to have threads write in a
// local variable, and move atomicAdd outside of the loop such that
// atomicAdd is executed once per thread.
atomicAdd(
&grad_alphas[batch][k][j][i],
cum_alpha * features[ch][n_idx] * grad_outputs[batch][ch][j][i]);
atomicAdd(
&grad_features[ch][n_idx],
cum_alpha * alpha * grad_outputs[batch][ch][j][i]);
// Iterate over all (K-1) nearest points to update gradient
for (int t = 0; t < k; ++t) {
int t_idx = points_idx[batch][t][j][i];
// Sentinel value is -1, indicating no point overlaps this pixel
if (t_idx < 0) {
continue;
}
float alpha_tvalue = alphas[batch][t][j][i];
// TODO(gkioxari) It might be more efficient to have threads write in a
// local variable, and move atomicAdd outside of the loop such that
// atomicAdd is executed once per thread.
atomicAdd(
&grad_alphas[batch][t][j][i],
-grad_outputs[batch][ch][j][i] * features[ch][n_idx] * cum_alpha *
alpha / (1 - alpha_tvalue + kEpsilon));
}
cum_alpha = cum_alpha * (1 - alphas[batch][k][j][i]);
}
}
}
at::Tensor alphaCompositeCudaForward(
const at::Tensor& features,
const at::Tensor& alphas,
const at::Tensor& points_idx) {
// Check inputs are on the same device
at::TensorArg features_t{features, "features", 1},
alphas_t{alphas, "alphas", 2}, points_idx_t{points_idx, "points_idx", 3};
at::CheckedFrom c = "alphaCompositeCudaForward";
at::checkAllSameGPU(c, {features_t, alphas_t, points_idx_t});
at::checkAllSameType(c, {features_t, alphas_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(features.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int64_t batch_size = points_idx.size(0);
const int64_t C = features.size(0);
const int64_t H = points_idx.size(2);
const int64_t W = points_idx.size(3);
auto result = at::zeros({batch_size, C, H, W}, features.options());
if (result.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return result;
}
const dim3 threadsPerBlock(64);
const dim3 numBlocks(batch_size, 1024 / batch_size + 1);
// TODO(gkioxari) add AT_DISPATCH_FLOATING_TYPES once atomicAdd supports
// doubles. Currently, support is for floats only.
hipLaunchKernelGGL(( alphaCompositeCudaForwardKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, stream,
// clang-format off
// As we are using packed accessors here the tensors
// do not need to be made contiguous.
result.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
features.packed_accessor64<float, 2, at::RestrictPtrTraits>(),
alphas.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
points_idx.packed_accessor64<int64_t, 4, at::RestrictPtrTraits>());
// clang-format on
AT_CUDA_CHECK(hipGetLastError());
return result;
}
std::tuple<at::Tensor, at::Tensor> alphaCompositeCudaBackward(
const at::Tensor& grad_outputs,
const at::Tensor& features,
const at::Tensor& alphas,
const at::Tensor& points_idx) {
// Check inputs are on the same device
at::TensorArg grad_outputs_t{grad_outputs, "grad_outputs", 1},
features_t{features, "features", 2}, alphas_t{alphas, "alphas", 3},
points_idx_t{points_idx, "points_idx", 4};
at::CheckedFrom c = "alphaCompositeCudaBackward";
at::checkAllSameGPU(c, {grad_outputs_t, features_t, alphas_t, points_idx_t});
at::checkAllSameType(c, {grad_outputs_t, features_t, alphas_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(features.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto grad_features = at::zeros_like(features);
auto grad_alphas = at::zeros_like(alphas);
if (grad_features.numel() == 0 || grad_alphas.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(grad_features, grad_alphas);
}
const int64_t bs = alphas.size(0);
const dim3 threadsPerBlock(64);
const dim3 numBlocks(bs, 1024 / bs + 1);
// TODO(gkioxari) add AT_DISPATCH_FLOATING_TYPES once atomicAdd supports
// doubles. Currently, support is for floats only.
hipLaunchKernelGGL(( alphaCompositeCudaBackwardKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, stream,
// clang-format off
// As we are using packed accessors here the tensors
// do not need to be made contiguous.
grad_features.packed_accessor64<float, 2, at::RestrictPtrTraits>(),
grad_alphas.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
grad_outputs.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
features.packed_accessor64<float, 2, at::RestrictPtrTraits>(),
alphas.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
points_idx.packed_accessor64<int64_t, 4, at::RestrictPtrTraits>());
// clang-format on
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(grad_features, grad_alphas);
}
| 91283f0650ca45e394a135a17a3354fabc1a03e1.cu | // Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#include <ATen/ATen.h>
#include <ATen/core/TensorAccessor.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <vector>
__constant__ const float kEpsilon = 1e-9;
// TODO(gkioxari) support all data types once AtomicAdd supports doubles.
// Currently, support is for floats only.
__global__ void alphaCompositeCudaForwardKernel(
// clang-format off
at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> result,
const at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> features,
const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> alphas,
const at::PackedTensorAccessor64<int64_t, 4, at::RestrictPtrTraits> points_idx) {
// clang-format on
const int64_t batch_size = result.size(0);
const int64_t C = features.size(0);
const int64_t H = points_idx.size(2);
const int64_t W = points_idx.size(3);
// Get the batch and index
const int batch = blockIdx.x;
const int num_pixels = C * W * H;
const int num_threads = gridDim.y * blockDim.x;
const int tid = blockIdx.y * blockDim.x + threadIdx.x;
// Iterate over each feature in each pixel
for (int pid = tid; pid < num_pixels; pid += num_threads) {
int ch = pid / (W * H);
int j = (pid % (W * H)) / H;
int i = (pid % (W * H)) % H;
// alphacomposite the different values
float cum_alpha = 1.;
// Iterate through the closest K points for this pixel
for (int k = 0; k < points_idx.size(1); ++k) {
int n_idx = points_idx[batch][k][j][i];
// Sentinel value is -1 indicating no point overlaps the pixel
if (n_idx < 0) {
continue;
}
float alpha = alphas[batch][k][j][i];
// TODO(gkioxari) It might be more efficient to have threads write in a
// local variable, and move atomicAdd outside of the loop such that
// atomicAdd is executed once per thread.
atomicAdd(
&result[batch][ch][j][i], features[ch][n_idx] * cum_alpha * alpha);
cum_alpha = cum_alpha * (1 - alpha);
}
}
}
// TODO(gkioxari) support all data types once AtomicAdd supports doubles.
// Currently, support is for floats only.
__global__ void alphaCompositeCudaBackwardKernel(
// clang-format off
at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> grad_features,
at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> grad_alphas,
const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> grad_outputs,
const at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> features,
const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> alphas,
const at::PackedTensorAccessor64<int64_t, 4, at::RestrictPtrTraits> points_idx) {
// clang-format on
const int64_t batch_size = points_idx.size(0);
const int64_t C = features.size(0);
const int64_t H = points_idx.size(2);
const int64_t W = points_idx.size(3);
// Get the batch and index
const int batch = blockIdx.x;
const int num_pixels = C * W * H;
const int num_threads = gridDim.y * blockDim.x;
const int tid = blockIdx.y * blockDim.x + threadIdx.x;
// Parallelize over each feature in each pixel in images of size H * W,
// for each image in the batch of size batch_size
for (int pid = tid; pid < num_pixels; pid += num_threads) {
int ch = pid / (W * H);
int j = (pid % (W * H)) / H;
int i = (pid % (W * H)) % H;
// alphacomposite the different values
float cum_alpha = 1.;
// Iterate through the closest K points for this pixel
for (int k = 0; k < points_idx.size(1); ++k) {
int n_idx = points_idx[batch][k][j][i];
// Sentinel value is -1 indicating no point overlaps the pixel
if (n_idx < 0) {
continue;
}
float alpha = alphas[batch][k][j][i];
// TODO(gkioxari) It might be more efficient to have threads write in a
// local variable, and move atomicAdd outside of the loop such that
// atomicAdd is executed once per thread.
atomicAdd(
&grad_alphas[batch][k][j][i],
cum_alpha * features[ch][n_idx] * grad_outputs[batch][ch][j][i]);
atomicAdd(
&grad_features[ch][n_idx],
cum_alpha * alpha * grad_outputs[batch][ch][j][i]);
// Iterate over all (K-1) nearest points to update gradient
for (int t = 0; t < k; ++t) {
int t_idx = points_idx[batch][t][j][i];
// Sentinel value is -1, indicating no point overlaps this pixel
if (t_idx < 0) {
continue;
}
float alpha_tvalue = alphas[batch][t][j][i];
// TODO(gkioxari) It might be more efficient to have threads write in a
// local variable, and move atomicAdd outside of the loop such that
// atomicAdd is executed once per thread.
atomicAdd(
&grad_alphas[batch][t][j][i],
-grad_outputs[batch][ch][j][i] * features[ch][n_idx] * cum_alpha *
alpha / (1 - alpha_tvalue + kEpsilon));
}
cum_alpha = cum_alpha * (1 - alphas[batch][k][j][i]);
}
}
}
at::Tensor alphaCompositeCudaForward(
const at::Tensor& features,
const at::Tensor& alphas,
const at::Tensor& points_idx) {
// Check inputs are on the same device
at::TensorArg features_t{features, "features", 1},
alphas_t{alphas, "alphas", 2}, points_idx_t{points_idx, "points_idx", 3};
at::CheckedFrom c = "alphaCompositeCudaForward";
at::checkAllSameGPU(c, {features_t, alphas_t, points_idx_t});
at::checkAllSameType(c, {features_t, alphas_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(features.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int64_t batch_size = points_idx.size(0);
const int64_t C = features.size(0);
const int64_t H = points_idx.size(2);
const int64_t W = points_idx.size(3);
auto result = at::zeros({batch_size, C, H, W}, features.options());
if (result.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return result;
}
const dim3 threadsPerBlock(64);
const dim3 numBlocks(batch_size, 1024 / batch_size + 1);
// TODO(gkioxari) add AT_DISPATCH_FLOATING_TYPES once atomicAdd supports
// doubles. Currently, support is for floats only.
alphaCompositeCudaForwardKernel<<<numBlocks, threadsPerBlock, 0, stream>>>(
// clang-format off
// As we are using packed accessors here the tensors
// do not need to be made contiguous.
result.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
features.packed_accessor64<float, 2, at::RestrictPtrTraits>(),
alphas.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
points_idx.packed_accessor64<int64_t, 4, at::RestrictPtrTraits>());
// clang-format on
AT_CUDA_CHECK(cudaGetLastError());
return result;
}
std::tuple<at::Tensor, at::Tensor> alphaCompositeCudaBackward(
const at::Tensor& grad_outputs,
const at::Tensor& features,
const at::Tensor& alphas,
const at::Tensor& points_idx) {
// Check inputs are on the same device
at::TensorArg grad_outputs_t{grad_outputs, "grad_outputs", 1},
features_t{features, "features", 2}, alphas_t{alphas, "alphas", 3},
points_idx_t{points_idx, "points_idx", 4};
at::CheckedFrom c = "alphaCompositeCudaBackward";
at::checkAllSameGPU(c, {grad_outputs_t, features_t, alphas_t, points_idx_t});
at::checkAllSameType(c, {grad_outputs_t, features_t, alphas_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(features.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto grad_features = at::zeros_like(features);
auto grad_alphas = at::zeros_like(alphas);
if (grad_features.numel() == 0 || grad_alphas.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(grad_features, grad_alphas);
}
const int64_t bs = alphas.size(0);
const dim3 threadsPerBlock(64);
const dim3 numBlocks(bs, 1024 / bs + 1);
// TODO(gkioxari) add AT_DISPATCH_FLOATING_TYPES once atomicAdd supports
// doubles. Currently, support is for floats only.
alphaCompositeCudaBackwardKernel<<<numBlocks, threadsPerBlock, 0, stream>>>(
// clang-format off
// As we are using packed accessors here the tensors
// do not need to be made contiguous.
grad_features.packed_accessor64<float, 2, at::RestrictPtrTraits>(),
grad_alphas.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
grad_outputs.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
features.packed_accessor64<float, 2, at::RestrictPtrTraits>(),
alphas.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
points_idx.packed_accessor64<int64_t, 4, at::RestrictPtrTraits>());
// clang-format on
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(grad_features, grad_alphas);
}
|
e03bde0b39d2dcad651482992770242f1a034fd9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "setupKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
hiprandState_t *state = NULL;
hipMalloc(&state, XSIZE*YSIZE);
unsigned long long seed = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
setupKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, state,seed);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
setupKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, state,seed);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
setupKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, state,seed);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | e03bde0b39d2dcad651482992770242f1a034fd9.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "setupKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
curandState *state = NULL;
cudaMalloc(&state, XSIZE*YSIZE);
unsigned long long seed = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
setupKernel<<<gridBlock,threadBlock>>>(state,seed);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
setupKernel<<<gridBlock,threadBlock>>>(state,seed);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
setupKernel<<<gridBlock,threadBlock>>>(state,seed);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
adc9d83e97b72e0e76022b5960ead26778d50aef.hip | // !!! This is a file automatically generated by hipify!!!
/*-------------------------------------------------------------------------
*
* CUDA function for backrpojection for parallel beam
*
*
* CODE by Ander Biguri
* Optimized and modified by RB
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: tigre.toolbox@gmail.com
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define PI_2 1.57079632679489661923
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include "voxel_backprojection.hpp"
#include "voxel_backprojection_parallel.hpp"
#include "mex.h"
#include <math.h>
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:Atb",hipGetErrorString(__err));\
} \
} while (0)
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* *--->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
void CreateTextureParallel( float* projectiondata,Geometry geo,hipArray** d_cuArrTex,unsigned int nangles, hipTextureObject_t *texImage,hipStream_t* stream, bool allocate);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// The optimal values of two constants obtained by RB on NVIDIA Quadro K2200 (4 GB RAM, 640 CUDA cores) for 512^3 volume and 512^3 projections (512 proj, each 512 x 512) were:
// PROJ_PER_KERNEL = 32 or 16 (very similar times)
// VOXELS_PER_THREAD = 8
// Speedup of the entire FDK backprojection (not only kernel run, also memcpy etc.) was nearly 4x relative to the original (single projection, single voxel per thread) code.
// (e.g. 16.2 s vs. ~62 s).
const int PROJ_PER_KERNEL = 32; // Number of 2D projections to be analyzed by a single thread. This can be tweaked to see what works best. 32 was the optimal value in the paper by Zinsser and Keck.
const int VOXELS_PER_THREAD = 8; // Number of voxels to be computed by s single thread. Can be tweaked to see what works best. 4 was the optimal value in the paper by Zinsser and Keck.
// We have PROJ_PER_KERNEL projections and we need 6 parameters for each projection:
// deltaX, deltaY, deltaZ, xyzOrigin, offOrig, offDetec
// So we need to keep PROJ_PER_KERNEL*6 values in our deltas array FOR EACH CALL to our main kernel
// (they will be updated in the main loop before each kernel call).
__constant__ Point3D projParamsArrayDevParallel[6*PROJ_PER_KERNEL]; // Dev means it is on device
// We also need a corresponding array on the host side to be filled before each kernel call, then copied to the device (array in constant memory above)
// Point3D projParamsArrayHostParallel[6*PROJ_PER_KERNEL]; // Host means it is host memory
// Now we also need to store sinAlpha and cosAlpha for each projection (two floats per projection)
__constant__ float projSinCosArrayDevParallel[3*PROJ_PER_KERNEL];
// float projSinCosArrayHostParallel[3*PROJ_PER_KERNEL];
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// END RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//______________________________________________________________________________
//
// Function: kernelPixelBackprojectionFDK
//
// Description: Main FDK backprojection kernel
//______________________________________________________________________________
__global__ void kernelPixelBackprojection_parallel(const Geometry geo, float* image,const int currProjSetNumber, const int totalNoOfProjections,hipTextureObject_t tex)
{
    // Parallel-beam backprojection kernel.
    //
    // Each thread owns a whole Z column of VOXELS_PER_THREAD voxels (the launch
    // uses a flat 2D thread tile with blockDim.z == 1, see the host code), and a
    // single kernel call accumulates up to PROJ_PER_KERNEL projections into it.
    // Per-projection geometry is read from the constant-memory arrays
    // projParamsArrayDevParallel (6 Point3D per projection) and
    // projSinCosArrayDevParallel (3 floats per projection: DSD, DSO, COR),
    // which the host fills and copies before every launch.
    //
    //   image              : device volume, nVoxelX*nVoxelY*nVoxelZ floats (x fastest)
    //   currProjSetNumber  : index of the PROJ_PER_KERNEL-sized projection set
    //   totalNoOfProjections : projection count in the texture (bounds the last, partial set)
    //   tex                : 3D texture with the projections, sampled as (V, U, angle)
    unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
    // STARTING z index of the column of voxels this thread handles.
    unsigned long startIndZ = blockIdx.z * VOXELS_PER_THREAD + threadIdx.z;

    // Make sure we don't go out of bounds.
    // FIX: logical || instead of the original bitwise | — same value on these
    // boolean operands, but || states the intent and short-circuits.
    if (indX >= geo.nVoxelX || indY >= geo.nVoxelY || startIndZ >= geo.nVoxelZ)
        return;

    // Local (register) copy of the voxel column. All global-memory reads happen
    // once here and all writes once at the end, so the per-projection loop below
    // touches only registers (read/write split per Papenhausen et al.).
    float voxelColumn[VOXELS_PER_THREAD];
    int colIdx;
    for (colIdx = 0; colIdx < VOXELS_PER_THREAD; colIdx++)
    {
        unsigned long indZ = startIndZ + colIdx;
        // Out of bounds: leave the tail of voxelColumn unfilled. That is OK —
        // the write-back loop repeats this bounds check and never reads it.
        if (indZ >= geo.nVoxelZ)
            break;
        unsigned long long idx = indZ*geo.nVoxelX*geo.nVoxelY + indY*geo.nVoxelX + indX;
        voxelColumn[colIdx] = image[idx];
    } // END copy 3D volume voxels to local array

    // Iterate through the projections assigned to this kernel call.
    for (int projNumber = 0; projNumber < PROJ_PER_KERNEL; projNumber++)
    {
        // ABSOLUTE projection number within this texture chunk.
        int indAlpha = currProjSetNumber*PROJ_PER_KERNEL + projNumber;
        // The last set may be a partial "remainder" — stop at the real count.
        if (indAlpha >= totalNoOfProjections)
            break;

        // 6 Point3D values per projection:
        Point3D deltaX    = projParamsArrayDevParallel[6*projNumber];
        Point3D deltaY    = projParamsArrayDevParallel[6*projNumber+1];
        Point3D deltaZ    = projParamsArrayDevParallel[6*projNumber+2];
        Point3D xyzOrigin = projParamsArrayDevParallel[6*projNumber+3];
        Point3D xyzOffset = projParamsArrayDevParallel[6*projNumber+4]; // NOTE: not used below; kept for layout parity with the host fill code
        Point3D S         = projParamsArrayDevParallel[6*projNumber+5];
        // ...and 3 floats per projection (NOT 2 as an older comment claimed):
        float DSD = projSinCosArrayDevParallel[3*projNumber];
        float DSO = projSinCosArrayDevParallel[3*projNumber+1];
        float COR = projSinCosArrayDevParallel[3*projNumber+2];

        // Iterate through Z in our voxel column FOR A GIVEN PROJECTION.
        for (colIdx = 0; colIdx < VOXELS_PER_THREAD; colIdx++)
        {
            unsigned long indZ = startIndZ + colIdx;
            if (indZ >= geo.nVoxelZ)
                break;

            // P: current voxel in the scaled coordinate system (the image is
            // rotated with the projection angles). For a parallel beam the ray
            // through P runs along X, so the "source" S shares P's y and z.
            Point3D P;
            S.x = DSO;
            P.x = (xyzOrigin.x + indX*deltaX.x + indY*deltaY.x + indZ*deltaZ.x);
            P.y = (xyzOrigin.y + indX*deltaX.y + indY*deltaY.y + indZ*deltaZ.y) - COR/geo.dDetecU;
            P.z = (xyzOrigin.z + indX*deltaX.z + indY*deltaY.z + indZ*deltaZ.z);
            S.y = P.y; S.z = P.z;

            // Vector defining the line from the source to the voxel.
            float vectX, vectY, vectZ;
            vectX = (P.x - S.x);
            vectY = (P.y - S.y);
            vectZ = (P.z - S.z);

            // Ray parameter at the detector plane, then the detector (y,z)
            // where the voxel mid point projects.
            float t = (DSO - DSD /*-DOD*/ - S.x)/vectX;
            float y, z;
            y = vectY*t + S.y;
            z = vectZ*t + S.z;
            // Detector UV coordinates, detector-centered, in pixel units.
            float u, v;
            u = y + geo.nDetecU/2.0f - 0.5f;
            v = z + geo.nDetecV/2.0f - 0.5f;

            // Accumulate the (linearly filtered) texture sample at (U,V) for
            // projection indAlpha. indAlpha is the ABSOLUTE projection number
            // in this chunk, not the index inside the current set.
            voxelColumn[colIdx] += tex3D<float>(tex, v+0.5f, u+0.5f, indAlpha+0.5f);
        } // END iterating through column of voxels
    } // END iterating through multiple projections

    // Copy the updated local voxelColumn back to the 3D volume (main memory).
    // Plain '=' instead of '+=' on purpose: all reads were done at the start of
    // the kernel, so this is a pure write — better for memory congestion
    // (Papenhausen).
    for (colIdx = 0; colIdx < VOXELS_PER_THREAD; colIdx++)
    {
        unsigned long indZ = startIndZ + colIdx;
        if (indZ >= geo.nVoxelZ)
            break;
        unsigned long long idx = indZ*geo.nVoxelX*geo.nVoxelY + indY*geo.nVoxelX + indX;
        image[idx] = voxelColumn[colIdx];
    } // END copy updated voxels from local array to our 3D volume
} // END kernelPixelBackprojection_parallel
//______________________________________________________________________________
//
// Function: voxel_backprojection_parallel
//
// Description: Main host function for FDK backprojection (invokes the kernel)
//______________________________________________________________________________
int voxel_backprojection_parallel(float * projections, Geometry geo, float* result,float const * const alphas, int nalpha)
{
    // Host driver for the parallel-beam backprojection.
    //
    //   projections : host buffer, nalpha projections of nDetecU x nDetecV floats
    //   geo         : acquisition geometry (per-projection arrays indexed 0..nalpha-1)
    //   result      : host buffer receiving the nVoxelX*nVoxelY*nVoxelZ volume
    //   alphas      : 3 Euler angles per projection (alpha, theta, psi)
    //   nalpha      : number of projections
    // Returns 0; runtime errors abort through cudaCheckErrors (mexErrMsgIdAndTxt).
    //
    // Fixes vs. previous revision: sizeof(size_t*) -> sizeof(size_t) in the
    // proj_split_size allocation; num_bytes computed in size_t (the int product
    // overflowed for very large volumes); stray ';;' removed; noOfKernelCalls
    // made unsigned to match its loop variable; host-side arrays (stream,
    // texProj, d_cuArrTex) are now freed; unused locals removed.

    // Two streams: stream[0] runs constant-memory uploads + kernels, stream[1]
    // uploads projection data into texture memory, so copy and compute overlap.
    int nStreamDevice=2;
    int nStreams=nStreamDevice;
    hipStream_t* stream=(hipStream_t*)malloc(nStreams*sizeof(hipStream_t));
    for (int i = 0; i < nStreamDevice; ++i){
        hipStreamCreate(&stream[i]);
    }

    // Pin the projection host memory (when supported) so the asynchronous
    // texture uploads are truly asynchronous and faster.
    int isHostRegisterSupported;
    hipDeviceGetAttribute(&isHostRegisterSupported,hipDeviceAttributeHostRegisterSupported,0);
    if (isHostRegisterSupported){
        hipHostRegister(projections, (size_t)geo.nDetecU*(size_t)geo.nDetecV*(size_t)nalpha*(size_t)sizeof(float),hipHostRegisterPortable);
    }
    cudaCheckErrors("Error pinning memory");

    // Allocate and zero the result image on the device.
    // size_t casts: avoid 32-bit overflow of the voxel-count product.
    size_t num_bytes = (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*sizeof(float);
    float* dimage;
    hipMalloc((void**)&dimage, num_bytes);
    hipMemset(dimage,0,num_bytes);
    cudaCheckErrors("hipMalloc fail");

    // Pinned host staging arrays mirroring the constant-memory parameter arrays
    // (6 Point3D + 3 floats per projection, PROJ_PER_KERNEL projections per call).
    Point3D* projParamsArrayHostParallel;
    hipHostMalloc((void**)&projParamsArrayHostParallel,6*PROJ_PER_KERNEL*sizeof(Point3D));
    float* projSinCosArrayHostParallel;
    hipHostMalloc((void**)&projSinCosArrayHostParallel,3*PROJ_PER_KERNEL*sizeof(float));

    // Double-buffered texture objects: while the kernel consumes one buffer,
    // the next chunk of projections is uploaded into the other.
    hipTextureObject_t *texProj;
    hipArray **d_cuArrTex;
    texProj   =(hipTextureObject_t*)malloc(2*sizeof(hipTextureObject_t));
    d_cuArrTex=(hipArray**)malloc(2*sizeof(hipArray*));

    unsigned int proj_split_overlap_number;
    unsigned int split_projections=1;

    // The projection stack is processed in chunks ("overlap splits"). cudaArrays
    // can not be reused with a different extent, so each chunk gets its own
    // allocation/upload; the cost is masked by the double buffering above.
    unsigned long long proj_linear_idx_start;
    unsigned int current_proj_split_size,current_proj_overlap_split_size;
    current_proj_split_size=nalpha;
    // We split into the same number of chunks as kernel calls we need to execute.
    proj_split_overlap_number=(current_proj_split_size+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL;

    // Precompute location and size of every chunk.
    float ** partial_projection=(float**)malloc(current_proj_split_size*sizeof(float*));
    size_t * proj_split_size=(size_t*)malloc(current_proj_split_size*sizeof(size_t)); // FIX: was sizeof(size_t*)
    for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){
        // Full chunks hold PROJ_PER_KERNEL angles (to maximize kernel work);
        // the last chunk takes whatever remains.
        current_proj_overlap_split_size=max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL);
        current_proj_overlap_split_size=(proj_block_split<proj_split_overlap_number-1)?current_proj_overlap_split_size:current_proj_split_size-(proj_split_overlap_number-1)*current_proj_overlap_split_size;
        // Linear index into 'projections' where this chunk starts.
        proj_linear_idx_start=proj_block_split*max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL)*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV;
        // Store result.
        proj_split_size[proj_block_split]=current_proj_overlap_split_size;
        partial_projection[proj_block_split]=&projections[proj_linear_idx_start];
    }

    for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){
        // Upload this chunk of projections to texture memory (issued on stream[1]).
        CreateTextureParallel(partial_projection[proj_block_split],geo,
                              &d_cuArrTex[(proj_block_split%2)],
                              proj_split_size[proj_block_split],
                              &texProj   [(proj_block_split%2)],
                              stream,
                              (proj_block_split<2)); // only allocate on the first 2 calls
        hipStreamSynchronize(stream[1]); // wait for the texture upload to finish

        // RB: optimal (in their tests) block size from Zinsser & Keck: a flat
        // 16 x 32 thread tile. Each thread works on a Z column of
        // VOXELS_PER_THREAD voxels, so blockDim.z is 1 and the grid's Z
        // dimension shrinks accordingly.
        int divx,divy,divz;
        divx=16;
        divy=32;
        divz=VOXELS_PER_THREAD;
        dim3 grid((geo.nVoxelX+divx-1)/divx,
                  (geo.nVoxelY+divy-1)/divy,
                  (geo.nVoxelZ+divz-1)/divz);
        dim3 block(divx,divy,1);

        //////////////////////////////////////////////////////////////////////
        // Main reconstruction loop: each kernel call backprojects up to
        // PROJ_PER_KERNEL projections; the kernel re-checks bounds for the
        // partial last set.
        //////////////////////////////////////////////////////////////////////
        unsigned int noOfKernelCalls = (proj_split_size[proj_block_split]+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL; // FIX: unsigned, matches loop var
        for (unsigned int i=0; i<noOfKernelCalls; i++)
        {
            // Generate and stage all parameters for PROJ_PER_KERNEL projections.
            int j;
            for(j=0; j<PROJ_PER_KERNEL; j++)
            {
                unsigned int currProjNumber_slice=i*PROJ_PER_KERNEL+j;  // index within this chunk
                unsigned int currProjNumber_global=i*PROJ_PER_KERNEL+j  // index within kernel
                        +proj_block_split*max(current_proj_split_size/proj_split_overlap_number,PROJ_PER_KERNEL); // offset of the current chunk
                if(currProjNumber_slice>=proj_split_size[proj_block_split])
                    break; // Partially filled arrays are fine: the kernel checks bounds.
                if(currProjNumber_global>=nalpha)
                    break; // Same — the kernel checks bounds anyway.

                Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, source;
                geo.alpha=-alphas[currProjNumber_global*3];
                geo.theta=-alphas[currProjNumber_global*3+1];
                geo.psi  =-alphas[currProjNumber_global*3+2];

                // 3 floats per projection: DSD, DSO, COR.
                projSinCosArrayHostParallel[3*j]  =geo.DSD[currProjNumber_global];
                projSinCosArrayHostParallel[3*j+1]=geo.DSO[currProjNumber_global];
                projSinCosArrayHostParallel[3*j+2]=geo.COR[currProjNumber_global];

                computeDeltasCubeParallel(geo,currProjNumber_global,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source);
                offOrig.x=geo.offOrigX[currProjNumber_global];
                offOrig.y=geo.offOrigY[currProjNumber_global];

                // 6 Point3D values per projection.
                projParamsArrayHostParallel[6*j]  =deltaX;
                projParamsArrayHostParallel[6*j+1]=deltaY;
                projParamsArrayHostParallel[6*j+2]=deltaZ;
                projParamsArrayHostParallel[6*j+3]=xyzOrigin;
                projParamsArrayHostParallel[6*j+4]=offOrig;
                projParamsArrayHostParallel[6*j+5]=source;
            } // END for (preparing params for kernel call)

            // Copy the prepared parameter arrays to constant memory, then launch.
            hipMemcpyToSymbolAsync(projSinCosArrayDevParallel, projSinCosArrayHostParallel, sizeof(float)*3*PROJ_PER_KERNEL,0,hipMemcpyHostToDevice,stream[0]);
            hipMemcpyToSymbolAsync(projParamsArrayDevParallel, projParamsArrayHostParallel, sizeof(Point3D)*6*PROJ_PER_KERNEL,0,hipMemcpyHostToDevice,stream[0]);
            hipStreamSynchronize(stream[0]);
            hipLaunchKernelGGL(( kernelPixelBackprojection_parallel), dim3(grid),dim3(block),0,stream[0], geo,dimage,i,proj_split_size[proj_block_split],texProj[(proj_block_split%2)]);
        } // END kernel-call loop
    } // END chunk loop

    hipDeviceSynchronize();
    hipMemcpy(result, dimage, num_bytes, hipMemcpyDeviceToHost);
    cudaCheckErrors("hipMemcpy result fail");

    // Cleanup.
    free(partial_projection);
    free(proj_split_size);
    // Destroy only the texture buffers that were actually created (1 or 2).
    bool two_buffers_used=((((nalpha+split_projections-1)/split_projections)+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL)>1;
    for(unsigned int i=0; i<2;i++){ // 2 buffers (maybe only 1 was used)
        if (!two_buffers_used && i==1)
            break;
        hipDestroyTextureObject(texProj[i]);
        hipFreeArray(d_cuArrTex[i]);
    }
    hipHostFree(projSinCosArrayHostParallel);
    hipHostFree(projParamsArrayHostParallel);
    hipFree(dimage);
    if (isHostRegisterSupported){
        hipHostUnregister(projections);
    }
    for (int i = 0; i < nStreams; ++i)
        hipStreamDestroy(stream[i]);
    free(stream);     // FIX: host-side arrays were leaked
    free(texProj);    // FIX: leaked
    free(d_cuArrTex); // FIX: leaked
    // hipDeviceReset();
    return 0;
} // END voxel_backprojection_parallel
void computeDeltasCubeParallel(Geometry geo, int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D *S)
{
    // Computes, for projection i, the rotated & detector-scaled coordinates of
    // the image origin voxel, the per-axis voxel step vectors, and the (virtual)
    // source point, all expressed in detector pixel units.

    // Center of voxel (0,0,0), shifted by the per-projection image offset.
    Point3D p0;
    p0.x = -(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
    p0.y = -(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
    p0.z = -(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];

    // Neighbouring voxel centers, one step along each axis.
    Point3D pX = p0; pX.x += geo.dVoxelX;
    Point3D pY = p0; pY.y += geo.dVoxelY;
    Point3D pZ = p0; pZ.z += geo.dVoxelZ;

    // Apply the same transform chain to all four points:
    //  1) rotate the image by the Euler ZYZ angles (equivalent to rotating the
    //     source/detector pair),
    //  2) subtract the detector offsets (U maps to y, V maps to z),
    //  3) rescale into detector pixel units.
    Point3D* pts[4] = { &p0, &pX, &pY, &pZ };
    for (int k = 0; k < 4; ++k) {
        eulerZYZT(geo, pts[k]);
        pts[k]->y -= geo.offDetecU[i];
        pts[k]->z -= geo.offDetecV[i];
        pts[k]->y /= geo.dDetecU;
        pts[k]->z /= geo.dDetecV;
    }

    // The source sits at DSO along x (already offset for the rotation), with
    // its y/z components canceling the detector offsets, scaled the same way.
    Point3D src;
    src.x = geo.DSO[i];
    src.y = -geo.offDetecU[i]/geo.dDetecU;
    src.z = -geo.offDetecV[i]/geo.dDetecV;

    // Per-axis increments of the transformed coordinates between voxels.
    deltaX->x = pX.x-p0.x;  deltaX->y = pX.y-p0.y;  deltaX->z = pX.z-p0.z;
    deltaY->x = pY.x-p0.x;  deltaY->y = pY.y-p0.y;  deltaY->z = pY.z-p0.z;
    deltaZ->x = pZ.x-p0.x;  deltaZ->y = pZ.y-p0.y;  deltaZ->z = pZ.z-p0.z;

    *xyzorigin = p0;
    *S = src;
} // END computeDeltasCubeParallel
void CreateTextureParallel(float* projectiondata,Geometry geo,hipArray** d_cuArrTex,unsigned int nangles, hipTextureObject_t *texImage,hipStream_t* stream, bool alloc)
{
    // Uploads 'nangles' projections into a 3D hipArray and wraps it in a texture
    // object with hardware linear filtering and border addressing (out-of-range
    // reads return 0). Array layout is (V, U, angle), matching the kernel's
    // tex3D<float>(tex, v, u, angle) sampling order.
    //
    //   d_cuArrTex : in/out — the (double-buffered) array slot to fill
    //   texImage   : out    — the texture object bound to that array
    //   stream     : the copy is issued asynchronously on stream[1] so it can
    //                overlap with kernels running on stream[0]; the caller
    //                synchronizes stream[1] before using the texture
    //   alloc      : allocate the hipArray only on the first two calls
    //                (afterwards the two buffers are reused)

    //hipArray Descriptor
    const hipExtent extent = make_hipExtent(geo.nDetecV, geo.nDetecU, nangles);
    hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
    //cuda Array
    if (alloc){
        hipMalloc3DArray(&d_cuArrTex[0], &channelDesc, extent);
        cudaCheckErrors("Texture memory allocation fail");
    }
    hipMemcpy3DParms copyParams = {0};
    //Array creation
    copyParams.srcPtr   = make_hipPitchedPtr((void *)projectiondata, extent.width*sizeof(float), extent.width, extent.height);
    copyParams.dstArray = d_cuArrTex[0];
    copyParams.extent   = extent;
    copyParams.kind     = hipMemcpyHostToDevice;
    // FIX: the previous revision read "hipMemcpy3DAsync(©Params, ...)" — an
    // HTML-entity corruption of "&copyParams" that does not compile.
    hipMemcpy3DAsync(&copyParams, stream[0+1]);
    cudaCheckErrors("Texture memory data copy fail");
    //Array creation End

    hipResourceDesc texRes;
    memset(&texRes, 0, sizeof(hipResourceDesc));
    texRes.resType = hipResourceTypeArray;
    texRes.res.array.array = d_cuArrTex[0];
    hipTextureDesc texDescr;
    memset(&texDescr, 0, sizeof(hipTextureDesc));
    texDescr.normalizedCoords = false;              // coordinates in element units
    texDescr.filterMode = hipFilterModeLinear;      // hardware trilinear interpolation
    texDescr.addressMode[0] = hipAddressModeBorder; // reads outside the array yield 0
    texDescr.addressMode[1] = hipAddressModeBorder;
    texDescr.addressMode[2] = hipAddressModeBorder;
    texDescr.readMode = hipReadModeElementType;
    hipCreateTextureObject(&texImage[0], &texRes, &texDescr, NULL);
    cudaCheckErrors("Texture object creation fail");
} | adc9d83e97b72e0e76022b5960ead26778d50aef.cu | /*-------------------------------------------------------------------------
*
* CUDA function for backrpojection for parallel beam
*
*
* CODE by Ander Biguri
* Optimized and modified by RB
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: tigre.toolbox@gmail.com
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define PI_2 1.57079632679489661923
#include <algorithm>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include "voxel_backprojection.hpp"
#include "voxel_backprojection_parallel.hpp"
#include "mex.h"
#include <math.h>
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:Atb",cudaGetErrorString(__err));\
} \
} while (0)
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* *--->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
void CreateTextureParallel( float* projectiondata,Geometry geo,cudaArray** d_cuArrTex,unsigned int nangles, cudaTextureObject_t *texImage,cudaStream_t* stream, bool allocate);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// The optimal values of two constants obtained by RB on NVIDIA Quadro K2200 (4 GB RAM, 640 CUDA cores) for 512^3 volume and 512^3 projections (512 proj, each 512 x 512) were:
// PROJ_PER_KERNEL = 32 or 16 (very similar times)
// VOXELS_PER_THREAD = 8
// Speedup of the entire FDK backprojection (not only kernel run, also memcpy etc.) was nearly 4x relative to the original (single projection, single voxel per thread) code.
// (e.g. 16.2 s vs. ~62 s).
const int PROJ_PER_KERNEL = 32; // Number of 2D projections to be analyzed by a single thread. This can be tweaked to see what works best. 32 was the optimal value in the paper by Zinsser and Keck.
const int VOXELS_PER_THREAD = 8; // Number of voxels to be computed by s single thread. Can be tweaked to see what works best. 4 was the optimal value in the paper by Zinsser and Keck.
// We have PROJ_PER_KERNEL projections and we need 6 parameters for each projection:
// deltaX, deltaY, deltaZ, xyzOrigin, offOrig, offDetec
// So we need to keep PROJ_PER_KERNEL*6 values in our deltas array FOR EACH CALL to our main kernel
// (they will be updated in the main loop before each kernel call).
__constant__ Point3D projParamsArrayDevParallel[6*PROJ_PER_KERNEL]; // Dev means it is on device
// We also need a corresponding array on the host side to be filled before each kernel call, then copied to the device (array in constant memory above)
// Point3D projParamsArrayHostParallel[6*PROJ_PER_KERNEL]; // Host means it is host memory
// Now we also need to store sinAlpha and cosAlpha for each projection (two floats per projection)
__constant__ float projSinCosArrayDevParallel[3*PROJ_PER_KERNEL];
// float projSinCosArrayHostParallel[3*PROJ_PER_KERNEL];
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// END RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//______________________________________________________________________________
//
// Function: kernelPixelBackprojectionFDK
//
// Description: Main FDK backprojection kernel
//______________________________________________________________________________
__global__ void kernelPixelBackprojection_parallel(const Geometry geo, float* image,const int currProjSetNumber, const int totalNoOfProjections,cudaTextureObject_t tex)
{
    // Parallel-beam backprojection kernel.
    //
    // Each thread owns a whole Z column of VOXELS_PER_THREAD voxels (the launch
    // uses a flat 2D thread tile with blockDim.z == 1, see the host code), and a
    // single kernel call accumulates up to PROJ_PER_KERNEL projections into it.
    // Per-projection geometry is read from the constant-memory arrays
    // projParamsArrayDevParallel (6 Point3D per projection) and
    // projSinCosArrayDevParallel (3 floats per projection: DSD, DSO, COR),
    // which the host fills and copies before every launch.
    //
    //   image              : device volume, nVoxelX*nVoxelY*nVoxelZ floats (x fastest)
    //   currProjSetNumber  : index of the PROJ_PER_KERNEL-sized projection set
    //   totalNoOfProjections : projection count in the texture (bounds the last, partial set)
    //   tex                : 3D texture with the projections, sampled as (V, U, angle)
    unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
    // STARTING z index of the column of voxels this thread handles.
    unsigned long startIndZ = blockIdx.z * VOXELS_PER_THREAD + threadIdx.z;

    // Make sure we don't go out of bounds.
    // FIX: logical || instead of the original bitwise | — same value on these
    // boolean operands, but || states the intent and short-circuits.
    if (indX >= geo.nVoxelX || indY >= geo.nVoxelY || startIndZ >= geo.nVoxelZ)
        return;

    // Local (register) copy of the voxel column. All global-memory reads happen
    // once here and all writes once at the end, so the per-projection loop below
    // touches only registers (read/write split per Papenhausen et al.).
    float voxelColumn[VOXELS_PER_THREAD];
    int colIdx;
    for (colIdx = 0; colIdx < VOXELS_PER_THREAD; colIdx++)
    {
        unsigned long indZ = startIndZ + colIdx;
        // Out of bounds: leave the tail of voxelColumn unfilled. That is OK —
        // the write-back loop repeats this bounds check and never reads it.
        if (indZ >= geo.nVoxelZ)
            break;
        unsigned long long idx = indZ*geo.nVoxelX*geo.nVoxelY + indY*geo.nVoxelX + indX;
        voxelColumn[colIdx] = image[idx];
    } // END copy 3D volume voxels to local array

    // Iterate through the projections assigned to this kernel call.
    for (int projNumber = 0; projNumber < PROJ_PER_KERNEL; projNumber++)
    {
        // ABSOLUTE projection number within this texture chunk.
        int indAlpha = currProjSetNumber*PROJ_PER_KERNEL + projNumber;
        // The last set may be a partial "remainder" — stop at the real count.
        if (indAlpha >= totalNoOfProjections)
            break;

        // 6 Point3D values per projection:
        Point3D deltaX    = projParamsArrayDevParallel[6*projNumber];
        Point3D deltaY    = projParamsArrayDevParallel[6*projNumber+1];
        Point3D deltaZ    = projParamsArrayDevParallel[6*projNumber+2];
        Point3D xyzOrigin = projParamsArrayDevParallel[6*projNumber+3];
        Point3D xyzOffset = projParamsArrayDevParallel[6*projNumber+4]; // NOTE: not used below; kept for layout parity with the host fill code
        Point3D S         = projParamsArrayDevParallel[6*projNumber+5];
        // ...and 3 floats per projection (NOT 2 as an older comment claimed):
        float DSD = projSinCosArrayDevParallel[3*projNumber];
        float DSO = projSinCosArrayDevParallel[3*projNumber+1];
        float COR = projSinCosArrayDevParallel[3*projNumber+2];

        // Iterate through Z in our voxel column FOR A GIVEN PROJECTION.
        for (colIdx = 0; colIdx < VOXELS_PER_THREAD; colIdx++)
        {
            unsigned long indZ = startIndZ + colIdx;
            if (indZ >= geo.nVoxelZ)
                break;

            // P: current voxel in the scaled coordinate system (the image is
            // rotated with the projection angles). For a parallel beam the ray
            // through P runs along X, so the "source" S shares P's y and z.
            Point3D P;
            S.x = DSO;
            P.x = (xyzOrigin.x + indX*deltaX.x + indY*deltaY.x + indZ*deltaZ.x);
            P.y = (xyzOrigin.y + indX*deltaX.y + indY*deltaY.y + indZ*deltaZ.y) - COR/geo.dDetecU;
            P.z = (xyzOrigin.z + indX*deltaX.z + indY*deltaY.z + indZ*deltaZ.z);
            S.y = P.y; S.z = P.z;

            // Vector defining the line from the source to the voxel.
            float vectX, vectY, vectZ;
            vectX = (P.x - S.x);
            vectY = (P.y - S.y);
            vectZ = (P.z - S.z);

            // Ray parameter at the detector plane, then the detector (y,z)
            // where the voxel mid point projects.
            float t = (DSO - DSD /*-DOD*/ - S.x)/vectX;
            float y, z;
            y = vectY*t + S.y;
            z = vectZ*t + S.z;
            // Detector UV coordinates, detector-centered, in pixel units.
            float u, v;
            u = y + geo.nDetecU/2.0f - 0.5f;
            v = z + geo.nDetecV/2.0f - 0.5f;

            // Accumulate the (linearly filtered) texture sample at (U,V) for
            // projection indAlpha. indAlpha is the ABSOLUTE projection number
            // in this chunk, not the index inside the current set.
            voxelColumn[colIdx] += tex3D<float>(tex, v+0.5f, u+0.5f, indAlpha+0.5f);
        } // END iterating through column of voxels
    } // END iterating through multiple projections

    // Copy the updated local voxelColumn back to the 3D volume (main memory).
    // Plain '=' instead of '+=' on purpose: all reads were done at the start of
    // the kernel, so this is a pure write — better for memory congestion
    // (Papenhausen).
    for (colIdx = 0; colIdx < VOXELS_PER_THREAD; colIdx++)
    {
        unsigned long indZ = startIndZ + colIdx;
        if (indZ >= geo.nVoxelZ)
            break;
        unsigned long long idx = indZ*geo.nVoxelX*geo.nVoxelY + indY*geo.nVoxelX + indX;
        image[idx] = voxelColumn[colIdx];
    } // END copy updated voxels from local array to our 3D volume
} // END kernelPixelBackprojection_parallel
//______________________________________________________________________________
//
//      Function:       voxel_backprojection_parallel
//
//      Description:    Main host function for parallel-beam FDK backprojection.
//                      Uploads projection chunks into ping-pong 3D cudaArrays
//                      (on stream[1]), stages per-projection geometry into
//                      constant memory in batches of PROJ_PER_KERNEL (on
//                      stream[0]) and launches the backprojection kernel per
//                      batch; finally copies the accumulated volume to host.
//
//      Parameters:
//        projections : host buffer of nDetecU*nDetecV*nalpha floats
//        geo         : acquisition geometry descriptor
//        result      : host output volume of nVoxelX*nVoxelY*nVoxelZ floats
//        alphas      : 3 angles (alpha, theta, psi) per projection
//        nalpha      : number of projections
//
//      Returns 0 on completion.
//______________________________________________________________________________
int voxel_backprojection_parallel(float * projections, Geometry geo, float* result,float const * const alphas, int nalpha)
{
    // Two CUDA streams: stream[0] carries constant-memory uploads + kernels,
    // stream[1] carries the asynchronous projection uploads (double buffering).
    int nStreamDevice=2;
    int nStreams=nStreamDevice;
    cudaStream_t* stream=(cudaStream_t*)malloc(nStreams*sizeof(cudaStream_t));
    for (int i = 0; i < nStreamDevice; ++i){
        cudaStreamCreate(&stream[i]);
    }

    // Page-lock the host projection memory (when the device supports it) so
    // the asynchronous uploads below can run as true async copies.
    int isHostRegisterSupported;
    cudaDeviceGetAttribute(&isHostRegisterSupported,cudaDevAttrHostRegisterSupported,0);
    if (isHostRegisterSupported){
        cudaHostRegister(projections, (size_t)geo.nDetecU*(size_t)geo.nDetecV*(size_t)nalpha*(size_t)sizeof(float),cudaHostRegisterPortable);
    }
    cudaCheckErrors("Error pinning memory");

    // Allocate and zero the device volume the kernel accumulates into.
    // Cast the first factor so the product is computed in size_t (avoids int
    // overflow for large volumes).
    size_t num_bytes = (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ * sizeof(float);
    float* dimage;
    cudaMalloc((void**)&dimage, num_bytes);
    cudaMemset(dimage,0,num_bytes);
    cudaCheckErrors("cudaMalloc fail");

    // Pinned host staging arrays for the per-batch constant-memory payload:
    // 6 Point3D (deltaX/Y/Z, origin, origin-offset, source) and 3 floats
    // (DSD, DSO, COR) per projection.
    Point3D* projParamsArrayHostParallel;
    cudaMallocHost((void**)&projParamsArrayHostParallel,6*PROJ_PER_KERNEL*sizeof(Point3D));
    float* projSinCosArrayHostParallel;
    cudaMallocHost((void**)&projSinCosArrayHostParallel,3*PROJ_PER_KERNEL*sizeof(float));

    // Ping-pong pair of texture objects / cudaArrays holding projection chunks,
    // so one can be filled while the other is consumed by the kernel.
    cudaTextureObject_t *texProj;
    cudaArray **d_cuArrTex;
    texProj    =(cudaTextureObject_t*)malloc(2*sizeof(cudaTextureObject_t));
    d_cuArrTex =(cudaArray**)malloc(2*sizeof(cudaArray*));

    unsigned int proj_split_overlap_number;
    unsigned int split_projections=1;   // single memory split; kept for the buffer-count check at cleanup
    unsigned long long proj_linear_idx_start;
    unsigned int current_proj_split_size,current_proj_overlap_split_size;

    current_proj_split_size=nalpha;
    // Split the work into as many overlap chunks as kernel calls we will need.
    proj_split_overlap_number=(current_proj_split_size+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL;

    // Precompute, per overlap chunk, where it starts inside `projections` and
    // how many projections it holds (the last chunk is cropped).
    float ** partial_projection=(float**)malloc(current_proj_split_size*sizeof(float*));
    // BUGFIX: element size was sizeof(size_t*) — same width on LP64 but wrong.
    size_t * proj_split_size=(size_t*)malloc(current_proj_split_size*sizeof(size_t));
    for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){
        // Chunk size: at least PROJ_PER_KERNEL angles, last chunk takes the remainder.
        current_proj_overlap_split_size=max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL);
        current_proj_overlap_split_size=(proj_block_split<proj_split_overlap_number-1)?current_proj_overlap_split_size:current_proj_split_size-(proj_split_overlap_number-1)*current_proj_overlap_split_size;
        // Linear index (in floats) where this chunk starts in the host buffer.
        proj_linear_idx_start=proj_block_split*max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL)*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV;
        proj_split_size[proj_block_split]=current_proj_overlap_split_size;
        partial_projection[proj_block_split]=&projections[proj_linear_idx_start];
    }

    for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){
        // Upload this chunk into the ping-pong texture buffer; only allocate
        // the cudaArray on the first two calls, reuse it afterwards.
        CreateTextureParallel(partial_projection[proj_block_split],geo,
                              &d_cuArrTex[(proj_block_split%2)],
                              proj_split_size[proj_block_split],
                              &texProj   [(proj_block_split%2)],
                              stream,
                              (proj_block_split<2));
        // The upload runs on stream[1]; wait for it before launching kernels.
        cudaStreamSynchronize(stream[1]);

        // RB: optimal (per Zinsser & Keck) flat 16x32 tile; each thread works a
        // Z-column of VOXELS_PER_THREAD voxels, so grid depth is divided by it.
        int divx=16;
        int divy=32;
        int divz=VOXELS_PER_THREAD;
        dim3 grid((geo.nVoxelX+divx-1)/divx,
                  (geo.nVoxelY+divy-1)/divy,
                  (geo.nVoxelZ+divz-1)/divz);
        dim3 block(divx,divy,1);

        // One kernel call per PROJ_PER_KERNEL projections of this chunk;
        // partial last batches are bounds-checked inside the kernel.
        unsigned int noOfKernelCalls = (proj_split_size[proj_block_split]+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL;
        for (unsigned int i=0; i<noOfKernelCalls; i++)
        {
            // Stage geometry parameters for up to PROJ_PER_KERNEL projections.
            for(int j=0; j<PROJ_PER_KERNEL; j++)
            {
                unsigned int currProjNumber_slice=i*PROJ_PER_KERNEL+j;   // index within this chunk
                unsigned int currProjNumber_global=i*PROJ_PER_KERNEL+j   // index within the whole dataset
                        +proj_block_split*max(current_proj_split_size/proj_split_overlap_number,PROJ_PER_KERNEL);
                if(currProjNumber_slice>=proj_split_size[proj_block_split])
                    break;  // Partially-filled param arrays are fine; the kernel checks bounds.
                if(currProjNumber_global>=(unsigned int)nalpha)
                    break;

                Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, source;
                // Euler angles for this projection (negated; consumed by
                // eulerZYZT via computeDeltasCubeParallel below).
                geo.alpha=-alphas[currProjNumber_global*3];
                geo.theta=-alphas[currProjNumber_global*3+1];
                geo.psi  =-alphas[currProjNumber_global*3+2];

                // 3 floats per projection: DSD, DSO, COR.
                projSinCosArrayHostParallel[3*j]  =geo.DSD[currProjNumber_global];
                projSinCosArrayHostParallel[3*j+1]=geo.DSO[currProjNumber_global];
                projSinCosArrayHostParallel[3*j+2]=geo.COR[currProjNumber_global];

                computeDeltasCubeParallel(geo,currProjNumber_global,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source);
                offOrig.x=geo.offOrigX[currProjNumber_global];
                offOrig.y=geo.offOrigY[currProjNumber_global];

                // 6 Point3D values per projection, in the order the kernel expects.
                projParamsArrayHostParallel[6*j]  =deltaX;
                projParamsArrayHostParallel[6*j+1]=deltaY;
                projParamsArrayHostParallel[6*j+2]=deltaZ;
                projParamsArrayHostParallel[6*j+3]=xyzOrigin;
                projParamsArrayHostParallel[6*j+4]=offOrig;
                projParamsArrayHostParallel[6*j+5]=source;
            } // END for (preparing params for kernel call)

            // Push the parameter batch to constant memory, then launch.
            cudaMemcpyToSymbolAsync(projSinCosArrayDevParallel, projSinCosArrayHostParallel, sizeof(float)*3*PROJ_PER_KERNEL,0,cudaMemcpyHostToDevice,stream[0]);
            cudaMemcpyToSymbolAsync(projParamsArrayDevParallel, projParamsArrayHostParallel, sizeof(Point3D)*6*PROJ_PER_KERNEL,0,cudaMemcpyHostToDevice,stream[0]);
            cudaStreamSynchronize(stream[0]);
            kernelPixelBackprojection_parallel<<<grid,block,0,stream[0]>>>(geo,dimage,i,proj_split_size[proj_block_split],texProj[(proj_block_split%2)]);
        } // END per-batch kernel loop
    } // END per-chunk loop

    cudaDeviceSynchronize();
    cudaMemcpy(result, dimage, num_bytes, cudaMemcpyDeviceToHost);
    cudaCheckErrors("cudaMemcpy result fail");

    // Cleanup: host bookkeeping, texture buffers (second one only if it was
    // ever used), pinned staging memory, device image, registration, streams.
    free(partial_projection);
    free(proj_split_size);
    bool two_buffers_used=((((nalpha+split_projections-1)/split_projections)+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL)>1;
    for(unsigned int i=0; i<2;i++){
        if (!two_buffers_used && i==1)
            break;
        cudaDestroyTextureObject(texProj[i]);
        cudaFreeArray(d_cuArrTex[i]);
    }
    // BUGFIX: these three host allocations were previously leaked.
    free(texProj);
    free(d_cuArrTex);
    cudaFreeHost(projSinCosArrayHostParallel);
    cudaFreeHost(projParamsArrayHostParallel);
    cudaFree(dimage);
    if (isHostRegisterSupported){
        cudaHostUnregister(projections);
    }
    for (int i = 0; i < nStreams; ++i)
        cudaStreamDestroy(stream[i]);
    free(stream);
    return 0;
}   // END voxel_backprojection_parallel
//______________________________________________________________________________
//
//      Function:       computeDeltasCubeParallel
//
//      Description:    For projection i, expresses the image origin voxel, the
//                      per-axis voxel steps and the source position in the
//                      rotated, detector-aligned coordinate frame (scaled to
//                      detector-pixel units) consumed by the kernel.
//______________________________________________________________________________
void computeDeltasCubeParallel(Geometry geo, int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D *S)
{
    // Centre of voxel (0,0,0), including this projection's origin offsets.
    Point3D orig;
    orig.x = -(geo.sVoxelX/2-geo.dVoxelX/2) + geo.offOrigX[i];
    orig.y = -(geo.sVoxelY/2-geo.dVoxelY/2) + geo.offOrigY[i];
    orig.z = -(geo.sVoxelZ/2-geo.dVoxelZ/2) + geo.offOrigZ[i];

    // Centres of the neighbouring voxels, one step along each axis.
    Point3D nextX = orig, nextY = orig, nextZ = orig;
    nextX.x += geo.dVoxelX;
    nextY.y += geo.dVoxelY;
    nextZ.z += geo.dVoxelZ;

    // Rotate the image (equivalent to rotating source and detector): RZ RY RZ.
    eulerZYZT(geo, &orig);
    eulerZYZT(geo, &nextX);
    eulerZYZT(geo, &nextY);
    eulerZYZT(geo, &nextZ);

    // Apply this projection's detector offsets (v maps to z, u maps to y).
    orig.z  -= geo.offDetecV[i];   orig.y  -= geo.offDetecU[i];
    nextX.z -= geo.offDetecV[i];   nextX.y -= geo.offDetecU[i];
    nextY.z -= geo.offDetecV[i];   nextY.y -= geo.offDetecU[i];
    nextZ.z -= geo.offDetecV[i];   nextZ.y -= geo.offDetecU[i];

    // Source location: x already carries the rotation offset; y/z mirror the
    // detector offsets.
    Point3D src;
    src.x = geo.DSO[i];
    src.y = -geo.offDetecU[i];
    src.z = -geo.offDetecV[i];

    // Scale the v/u components into detector-pixel units.
    orig.z  /= geo.dDetecV;   orig.y  /= geo.dDetecU;
    nextX.z /= geo.dDetecV;   nextX.y /= geo.dDetecU;
    nextY.z /= geo.dDetecV;   nextY.y /= geo.dDetecU;
    nextZ.z /= geo.dDetecV;   nextZ.y /= geo.dDetecU;
    src.z   /= geo.dDetecV;   src.y   /= geo.dDetecU;

    // Per-axis voxel deltas relative to the transformed origin.
    deltaX->x = nextX.x - orig.x;  deltaX->y = nextX.y - orig.y;  deltaX->z = nextX.z - orig.z;
    deltaY->x = nextY.x - orig.x;  deltaY->y = nextY.y - orig.y;  deltaY->z = nextY.z - orig.z;
    deltaZ->x = nextZ.x - orig.x;  deltaZ->y = nextZ.y - orig.y;  deltaZ->z = nextZ.z - orig.z;

    *xyzorigin = orig;
    *S = src;
}   // END computeDeltasCubeParallel
//______________________________________________________________________________
//
//      Function:       CreateTextureParallel
//
//      Description:    Uploads `nangles` projections into a 3D cudaArray
//                      (asynchronously on stream[1]) and wraps the array in a
//                      linearly-filtered, border-addressed texture object.
//                      The array itself is only allocated when `alloc` is true
//                      so the two ping-pong buffers can be reused across calls.
//______________________________________________________________________________
void CreateTextureParallel(float* projectiondata,Geometry geo,cudaArray** d_cuArrTex,unsigned int nangles, cudaTextureObject_t *texImage,cudaStream_t* stream, bool alloc)
{
    // Each projection is a nDetecV x nDetecU slice; nangles slices are stacked.
    const cudaExtent extent = make_cudaExtent(geo.nDetecV, geo.nDetecU, nangles);
    cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
    if (alloc){
        // First (two) call(s) only: allocate the 3D array backing the texture.
        cudaMalloc3DArray(&d_cuArrTex[0], &channelDesc, extent);
        cudaCheckErrors("Texture memory allocation fail");
    }

    // Describe the host->array copy of the projection chunk.
    cudaMemcpy3DParms copyParams = {0};
    copyParams.srcPtr   = make_cudaPitchedPtr((void *)projectiondata, extent.width*sizeof(float), extent.width, extent.height);
    copyParams.dstArray = d_cuArrTex[0];
    copyParams.extent   = extent;
    copyParams.kind     = cudaMemcpyHostToDevice;
    // BUGFIX: the argument had been garbled to "©Params" (an HTML-escaped
    // "&copy"); it must be the address of the params struct. The copy runs on
    // stream[1] so it overlaps with kernel work queued on stream[0].
    cudaMemcpy3DAsync(&copyParams, stream[1]);
    cudaCheckErrors("Texture memory data copy fail");

    // Bind the array to a texture object: element reads, hardware trilinear
    // interpolation, zero (border) outside the projection data.
    cudaResourceDesc texRes;
    memset(&texRes, 0, sizeof(cudaResourceDesc));
    texRes.resType = cudaResourceTypeArray;
    texRes.res.array.array = d_cuArrTex[0];
    cudaTextureDesc texDescr;
    memset(&texDescr, 0, sizeof(cudaTextureDesc));
    texDescr.normalizedCoords = false;
    texDescr.filterMode = cudaFilterModeLinear;
    texDescr.addressMode[0] = cudaAddressModeBorder;
    texDescr.addressMode[1] = cudaAddressModeBorder;
    texDescr.addressMode[2] = cudaAddressModeBorder;
    texDescr.readMode = cudaReadModeElementType;
    cudaCreateTextureObject(&texImage[0], &texRes, &texDescr, NULL);
    cudaCheckErrors("Texture object creation fail");
}
d54ca247978400881b9828b88e803b09875f709d.hip | // !!! This is a file automatically generated by hipify!!!
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#include <hip/hip_runtime.h>
#include <exceptions/cuda_exception.h>
#include <exceptions/datatype_exception.h>
#include <execution/AffinityManager.h>
#include <graph/GraphExecutioner.h>
#include <graph/GraphHolder.h>
#include <helpers/BlasHelper.h>
#include <helpers/CudaLaunchHelper.h>
#include <helpers/DebugHelper.h>
#include <helpers/PointersManager.h>
#include <helpers/threshold.h>
#include <legacy/NativeOpExecutioner.h>
#include <legacy/NativeOps.h>
#include <loops/reduce_bool.h>
#include <loops/reduce_long.h>
#include <loops/scalar.h>
#include <loops/transform_any.h>
#include <ops/declarable/CustomOperations.h>
#include <ops/specials_cuda.h>
#include <system/buffer.h>
//#include <sys/time.h>
#include <hiprand/hiprand.h>
#include <helpers/DebugHelper.h>
using namespace sd;
#include <loops/special_kernels.h>
// Cached per-device properties; consulted by the occupancy helpers below
// (getDeviceBlockThreshold / getDeviceSharedThreshold).
hipDeviceProp_t *deviceProperties;
// Pre-sized kernel-attribute cache (allocated with new; no matching delete is
// visible in this file chunk — presumably lives for the process lifetime).
hipFuncAttributes *funcAttributes = new hipFuncAttributes[64];
// Default launch-configuration limits used across the ops in this file.
int blockLimit = 128;
int maxThreads = 512;
// Peer-to-peer flags: whether P2P access is allowed / supported.
bool allowedP2P = false;
bool supportedP2P = false;
#ifdef SD_EXPERIMENTAL_ENABLED
bool experimentalSupport = true;
#else
bool experimentalSupport = false;
#endif
// Lower bound on threads-per-block.
int minThreads = 32;
// 48KB of device constant memory reserved as a staging area.
__constant__ char deviceConstantMemory[49152];
// Interprets an opaque pointer-sized handle as a device ordinal: the caller
// smuggles the device id through the generic sd::Pointer channel, so this is
// just a narrowing conversion.
int getDeviceId(sd::Pointer ptrToDeviceId) {
  auto wide = (sd::LongType)ptrToDeviceId;
  return (int)wide;
}
/*
* Basic CUDA constants here: number of blocks per MP
*/
// Returns the resident-blocks-per-multiprocessor limit for the given device,
// keyed on its compute-capability MAJOR version only (the minor version does
// not influence this limit — the previous unused read of `minor` was removed).
int getDeviceBlockThreshold(int deviceId) {
  int ccMajor = deviceProperties[deviceId].major;

  int blockThreshold = 8;  // pre-Kepler default (ccMajor < 3)
  if (ccMajor >= 5)
    blockThreshold = 32;   // Maxwell and newer
  else if (ccMajor == 3)
    blockThreshold = 16;   // Kepler
  // No compute capability 4.x exists, so the chain above is exhaustive.

  return blockThreshold;
}
/*
 * This method returns the shared-memory threshold value. The default overflow ratio is 0.3.
 */
int getDeviceSharedThreshold(int deviceId) {
  // Look up the compute capability of the requested device.
  int ccMinor = deviceProperties[deviceId].minor;
  int ccMajor = deviceProperties[deviceId].major;

  // Per-SM shared-memory budget for each architecture family.
  // Please note: these thresholds are deliberately NOT multiples of 32.
  int shmemThreshold;
  if (ccMajor == 6) {
    // Pascal: GP100 (6.0) has 64KB, the rest fall back to 48KB.
    shmemThreshold = (ccMinor == 0) ? 65536 : 49152;
  } else if (ccMajor == 5) {
    // Maxwell: 5.2 carries 96KB, other 5.x devices 64KB.
    shmemThreshold = (ccMinor == 2) ? 98304 : 65536;
  } else if (ccMajor == 3 && ccMinor == 7) {
    // Kepler GK210.
    shmemThreshold = 114688;
  } else {
    shmemThreshold = 49152;
  }

  // Scale by the 0.3 overflow ratio; the double result truncates back to int
  // on return, exactly as before.
  return shmemThreshold / 0.3;
}
// Builds a rank-0 (scalar) shape descriptor, wraps it in a managed buffer and
// pushes it to the device on the given stream. Caller owns the returned buffer.
sd::buffer::Buffer<sd::LongType> *createScalarBuffer(hipStream_t stream) {
  auto shapeDescriptor = shape::createScalarShapeInfo();
  auto wrapped = sd::buffer::createBuffer(shapeDescriptor, shape::shapeInfoLength(2), stream);
  sd::buffer::copyDataToGpu(&wrapped, stream);
  return wrapped;
}
// Owns the host+device buffers describing a scalar: its shape info and the
// single "dimension" value (SD_MAX_DIMENSION) some legacy ops expect.
class ScalarShapeInformation {
 private:
  sd::buffer::Buffer<sd::LongType> *scalarDimension;
  sd::buffer::Buffer<sd::LongType> *scalarShapeInfo;

 public:
  ScalarShapeInformation(hipStream_t stream) {
    // Host-side storage for the dimension value; ownership passes to the
    // buffer wrapper created from it.
    auto hostDim = reinterpret_cast<sd::LongType *>(malloc(sizeof(sd::LongType)));
    CHECK_ALLOC(hostDim, "Failed to allocate ShapeInfoBuffer", sizeof(sd::LongType));
    hostDim[0] = SD_MAX_DIMENSION;
    scalarDimension = sd::buffer::createBuffer(hostDim, 1, stream);
    scalarShapeInfo = createScalarBuffer(stream);
  }

  ~ScalarShapeInformation() {
    sd::buffer::freeBuffer(&scalarShapeInfo);
    sd::buffer::freeBuffer(&scalarDimension);
  }

  // Accessors for the host/device sides of both buffers.
  sd::LongType *getShapeInfoHostPointer() { return scalarShapeInfo->data; }
  sd::LongType *getShapeInfoGpuPointer() { return scalarShapeInfo->gData; }
  sd::LongType *getDimensionHostPointer() { return scalarDimension->data; }
  sd::LongType *getDimensionGpuPointer() { return scalarDimension->gData; }
};
// RAII helper bundling a single-element device buffer of type T with the
// scalar shape/dimension descriptors required to run a reduction into it.
template <typename T>
class ScalarInfo {
  // One-element buffer holding the scalar value on host and device.
  sd::buffer::Buffer<T> *scalarData;
  // Scalar shape + dimension descriptors (owned; freed in the destructor).
  ScalarShapeInformation *shapeInfo;
  // NOTE(review): appears unused within this chunk — possibly vestigial.
  T finalResult;
  // Stream all transfers for this scalar are issued on.
  hipStream_t streamRef;
 public:
  ScalarInfo(hipStream_t stream) {
    // Host-side storage for the scalar; ownership passes to the buffer wrapper.
    T *scalarResult = reinterpret_cast<T *>(malloc(sizeof(T)));
    CHECK_ALLOC(scalarResult, "Failed to allocate new scalar buffer", sizeof(T));
    shapeInfo = new ScalarShapeInformation(stream);
    scalarData = sd::buffer::createBuffer(scalarResult, 1, stream);
    streamRef = stream;
    sd::buffer::copyDataToGpu(&scalarData, stream);
  }
  // Synchronously pulls the scalar back from the device and returns it.
  T getFinalResultFromDevice() {
    sd::buffer::copyDataFromGpu(&scalarData, streamRef);
    return scalarData->data[0];
  }
  /**
   * Get the device shape information
   * representing a scalar
   */
  sd::LongType *getDeviceShapeInfo() { return shapeInfo->getShapeInfoGpuPointer(); }
  /**
   * Get the device pointer to the scalar value (dZ)
   */
  T *getDevicePointer() { return scalarData->gData; }
  /**
   * Get the device pointer to the dimension descriptor
   */
  sd::LongType *getDimensionDevicePointer() { return shapeInfo->getDimensionGpuPointer(); }
  ~ScalarInfo() {
    // Free the data buffer first, then the shape bundle it was described by.
    sd::buffer::freeBuffer(&scalarData);
    delete shapeInfo;
  }
};
// Element-wise pairwise transform: Z = op(X, Y). Thin interop wrapper that
// resolves cached device shape descriptors, builds a launch context from the
// extraPointers slots and delegates to NativeOpExecutioner.
void execPairwiseTransform(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX,
                           sd::LongType const *hXShapeInfo, sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbY,
                           sd::LongType const *hYShapeInfo, sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ,
                           sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, void *extraParams) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});

    // Resolve the cached device-side shape descriptors once up front.
    auto &shapeHelper = ConstantShapeHelper::getInstance();
    auto dXShape = shapeHelper.bufferForShapeInfo(hXShapeInfo).special();
    auto dYShape = shapeHelper.bufferForShapeInfo(hYShapeInfo).special();
    auto dZShape = shapeHelper.bufferForShapeInfo(hZShapeInfo).special();

    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execPairwiseTransform(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), dXShape,
                                               dbY->primary(), hYShapeInfo, dbY->special(), dYShape, dbZ->primary(),
                                               hZShapeInfo, dbZ->special(), dZShape, extraParams);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
  } catch (std::exception &e) {
    // Report via the shared error reference; never throw across the C boundary.
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Boolean pairwise transform: Z = boolOp(X, Y). Same interop shape as
// execPairwiseTransform, delegating to the boolean executioner entry point.
void execPairwiseTransformBool(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX,
                               sd::LongType const *hXShapeInfo, sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbY,
                               sd::LongType const *hYShapeInfo, sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ,
                               sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, void *extraParams) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});

    // Cached device-side shape descriptors.
    auto &shapeHelper = ConstantShapeHelper::getInstance();
    auto dXShape = shapeHelper.bufferForShapeInfo(hXShapeInfo).special();
    auto dYShape = shapeHelper.bufferForShapeInfo(hYShapeInfo).special();
    auto dZShape = shapeHelper.bufferForShapeInfo(hZShapeInfo).special();

    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execPairwiseBoolTransform(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), dXShape,
                                                   dbY->primary(), hYShapeInfo, dbY->special(), dYShape,
                                                   dbZ->primary(), hZShapeInfo, dbZ->special(), dZShape, extraParams);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
  } catch (std::exception &e) {
    // Report via the shared error reference; never throw across the C boundary.
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Summary-statistics reduction of the whole of X into the scalar Z;
// biasCorrected selects the corrected estimator variant.
void execSummaryStatsScalar(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX,
                            sd::LongType const *hXShapeInfo, sd::LongType const *dXShapeInfo, void *extraParams,
                            OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo,
                            bool biasCorrected) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});

    // Cached device-side shape descriptors.
    auto &shapeHelper = ConstantShapeHelper::getInstance();
    auto dXShape = shapeHelper.bufferForShapeInfo(hXShapeInfo).special();
    auto dZShape = shapeHelper.bufferForShapeInfo(hZShapeInfo).special();

    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execSummaryStatsScalar(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), dXShape,
                                                extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(), dZShape,
                                                biasCorrected);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    // Report via the shared error reference; never throw across the C boundary.
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Boolean broadcast op along the given dimensions; boolean results go into Z.
// TAD shape/offset buffers for X (in) and Z (out) arrive pre-computed in
// extraPointers[10..13]. Dimensions are read from the host side of dbDimension.
void execBroadcastBool(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                       sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbY, sd::LongType const *hYShapeInfo,
                       sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                       sd::LongType const *dZShapeInfo, void *extraParams, OpaqueDataBuffer *dbDimension,
                       sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});

    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    // Pre-computed TAD descriptors for input (X) and output (Z).
    // (Removed: an unused read of extraPointers[9] into hTADShapeInfo.)
    auto tadOnlyShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers[10]);
    auto tadOffsets = reinterpret_cast<sd::LongType *>(extraPointers[11]);
    auto tadOnlyShapeInfoZ = reinterpret_cast<sd::LongType *>(extraPointers[12]);
    auto tadOffsetsZ = reinterpret_cast<sd::LongType *>(extraPointers[13]);

    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execBroadcastBool(
        &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbY->primary(), hYShapeInfo,
        dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(),
        hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
        extraParams, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
  } catch (std::exception &e) {
    // Report via the shared error reference; never throw across the C boundary.
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
 * Broadcast op of Y onto X along the given dimensions, writing into Z.
 *
 * @param extraPointers interop pointer table; [1]=stream, [3..5]=context
 *        buffers, [10..13]=pre-computed TAD shape/offset buffers for X and Z
 * @param opNum broadcast op number
 * @param dbX / hXShapeInfo / dXShapeInfo input buffer and its shapes
 * @param dbY / hYShapeInfo / dYShapeInfo broadcast operand and its shapes
 * @param dbZ / hZShapeInfo / dZShapeInfo output buffer and its shapes
 * @param dbDimension dimensions to broadcast along (host side is read)
 */
void execBroadcast(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                   sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbY, sd::LongType const *hYShapeInfo,
                   sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                   sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension, sd::LongType const *hDimensionShape,
                   sd::LongType const *dDimensionShape) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});

    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    // Pre-computed TAD descriptors for input (X) and output (Z).
    // (Removed unused locals: stream, hTADShapeInfo, xType, yType, zType.)
    auto tadOnlyShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers[10]);
    auto tadOffsets = reinterpret_cast<sd::LongType *>(extraPointers[11]);
    auto tadOnlyShapeInfoZ = reinterpret_cast<sd::LongType *>(extraPointers[12]);
    auto tadOffsetsZ = reinterpret_cast<sd::LongType *>(extraPointers[13]);

    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execBroadcast(
        &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbY->primary(), hYShapeInfo,
        dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(),
        hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
        dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
  } catch (std::exception &e) {
    // Report via the shared error reference; never throw across the C boundary.
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Float-typed reduction of the whole of X into the scalar Z.
void execReduceFloat(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                     sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                     sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});

    // Cached device-side shape descriptors.
    auto &shapeHelper = ConstantShapeHelper::getInstance();
    auto dXShape = shapeHelper.bufferForShapeInfo(hXShapeInfo).special();
    auto dZShape = shapeHelper.bufferForShapeInfo(hZShapeInfo).special();

    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execReduceFloatScalar(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), dXShape,
                                               extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(), dZShape);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    // Report via the shared error reference; never throw across the C boundary.
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Same-typed reduction of the whole of X into the scalar Z (result keeps X's
// data type).
void execReduceSame(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                    sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                    sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});

    // Cached device-side shape descriptors.
    auto &shapeHelper = ConstantShapeHelper::getInstance();
    auto dXShape = shapeHelper.bufferForShapeInfo(hXShapeInfo).special();
    auto dZShape = shapeHelper.bufferForShapeInfo(hZShapeInfo).special();

    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execReduceSameScalar(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), dXShape,
                                              extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(), dZShape);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    // Report via the shared error reference; never throw across the C boundary.
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Dimensional same-typed reduction: reduces X along the dimensions listed in
// dbDimension, writing a result of X's data type into Z.
void execReduceSame2(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                     sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                     sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension,
                     sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});

    // Dimensions to reduce over are read from the host side of dbDimension.
    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    const auto zLen = shape::length(hZShapeInfo);
    std::vector<int> dimensions(dimension, dimension + dimensionLength);

    // If Z still carries unit dimensions (keepDims-style shape) and is not a
    // scalar, substitute a squeezed shape descriptor for the executioner.
    const sd::LongType *zShapeInfoH = hZShapeInfo;
    if (shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) {
      auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions);
      // NOTE(review): zShapeInfoH is used after zPack leaves this scope —
      // presumably primary() points into the shared constant-shape cache
      // rather than into zPack itself; verify ConstantShapeHelper ownership.
      zShapeInfoH = reinterpret_cast<sd::LongType const *>(zPack.primary());
    }

    // A scalar output means "reduce everything": pass an empty dims list.
    std::vector<int> dims =
        (zLen != 1) ? ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>();

    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execReduceSame(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
                                        extraParams, dbZ->primary(), zShapeInfoH, dbZ->special(),
                                        ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(),
                                        dims.data(), dims.size());

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    // Report via the shared error reference; never throw across the C boundary.
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Dimensional long-typed reduction: reduces X along the dimensions listed in
// dbDimension, writing INT64-family results into Z.
void execReduceLong2(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                     sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                     sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension,
                     sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});

    // Dimensions to reduce over are read from the host side of dbDimension.
    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    const auto zLen = shape::length(hZShapeInfo);
    std::vector<int> dimensions(dimension, dimension + dimensionLength);

    // If Z still carries unit dimensions (keepDims-style shape) and is not a
    // scalar, substitute a squeezed shape descriptor for the executioner.
    const sd::LongType *zShapeInfoH = hZShapeInfo;
    if (shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) {
      auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions);
      // NOTE(review): zShapeInfoH is used after zPack leaves this scope —
      // presumably primary() points into the shared constant-shape cache
      // rather than into zPack itself; verify ConstantShapeHelper ownership.
      zShapeInfoH = reinterpret_cast<sd::LongType const *>(zPack.primary());
    }

    // A scalar output means "reduce everything": pass an empty dims list.
    std::vector<int> dims =
        (zLen != 1) ? ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>();

    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execReduceLong(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
                                        extraParams, dbZ->primary(), zShapeInfoH, dbZ->special(),
                                        ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(),
                                        dims.data(), dims.size());

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    // Report via the shared error reference; never throw across the C boundary.
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Scalar long-typed reduction: reduces the whole of X into the single INT64
// element of Z via a direct kernel-selector launch (no executioner detour).
void execReduceLong(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                    sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                    sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});

    auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
    // (Removed: an unused read of extraPointers[9] into hTADShapeInfo.)
    auto dTADShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers[10]);
    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
    auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
    // A "long" reduction must produce an INT64 result.
    if (zType != sd::DataType::INT64)
      throw datatype_exception::build("execReduceLong wrong Z data type", sd::DataType::INT64, zType);

    auto xLength = shape::length(hXShapeInfo);
    auto blockWidth = 256;
    auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
    dim3 launchDims(numBlocks, blockWidth, 32768);

    // BUGFIX: the host shape info accompanying the Z device shape was being
    // passed as hXShapeInfo; it must be hZShapeInfo to match the output.
    BUILD_DOUBLE_SELECTOR(
        xType, zType, functions::reduce::ReduceLongFunction,
        ::execReduceScalar(launchDims, stream, opNum, dbX->special(),
                           ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), hXShapeInfo,
                           extraParams, dbZ->special(),
                           ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), hZShapeInfo,
                           nullptr, 0, reductionPointer, dTADShapeInfo),
        SD_COMMON_TYPES, SD_LONG_TYPES);
    sd::DebugHelper::checkErrorCode(stream, "execReduceLong(...) failed");

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    // Report via the shared error reference; never throw across the C boundary.
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Boolean reduction of X along the dimensions stored in dbDimension, writing
// into Z. Host-side Z shape info may be rewritten to a unit-free variant when
// ranks do not line up. Errors land on the default LaunchContext.
void execReduceBool2(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                     sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                     sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension,
                     sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape) {
  try {
    // Mark buffers for device (X/Z) and host (dimension) access before launch.
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
    const auto zLen = shape::length(hZShapeInfo);
    std::vector<int> dimensions(dimension, dimension + dimensionLength);
    const sd::LongType *zShapeInfoH = hZShapeInfo;
    // If Z still carries unit dimensions relative to the reduction, switch to a
    // cached unit-free shape descriptor (zPack.primary() points into the
    // ConstantShapeHelper cache, so it outlives this scope).
    if (shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) {
      auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions);
      zShapeInfoH = reinterpret_cast<sd::LongType const *>(zPack.primary());
    }
    // Scalar output (zLen == 1) means a full reduction: no explicit dims.
    std::vector<int> dims =
        (zLen != 1) ? ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>();
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execReduceBool(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
                                        extraParams, dbZ->primary(), zShapeInfoH, dbZ->special(),
                                        ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(),
                                        dims.data(), dims.size());
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Scalar BOOL reduction (X -> single BOOL scalar Z) on the device.
// extraPointers slots used: [1]=stream, [4]=reduction buffer, [9]/[10]=TAD
// shape info (hTADShapeInfo is currently unused).
void execReduceBool(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                    sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                    sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
    auto hTADShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers[9]);
    auto dTADShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers[10]);
    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
    auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
    auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
    // Z must be BOOL for a boolean reduction.
    if (zType != sd::DataType::BOOL) throw std::runtime_error("execReduceBool requires Z operand to have BOOL type");
    auto xLength = shape::length(hXShapeInfo);
    auto blockWidth = 256;
    auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
    dim3 launchDims(numBlocks, blockWidth, 32768);
    // Dispatch on (xType, zType) to the matching template instantiation.
    BUILD_DOUBLE_SELECTOR(
        xType, zType, functions::reduce::ReduceBoolFunction,
        ::execReduceScalar(launchDims, stream, opNum, dbX->special(),
                           ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), hXShapeInfo,
                           extraParams, dbZ->special(),
                           ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), hZShapeInfo,
                           nullptr, 0, reductionPointer, dTADShapeInfo),
        SD_COMMON_TYPES, SD_BOOL_TYPES);
    sd::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed");
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
* @param dimension
* @param dimensionLength
*/
////////////////////////////////////////////////////////////////////////
// Index reduction (e.g. argmax-style ops) of X along dbDimension, writing
// indices into Z. TADs of X along the dimensions are resolved via the
// ConstantTadHelper cache. Errors land on the default LaunchContext.
void execIndexReduce(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                     sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                     sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension,
                     sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
    // Dimension indices are read from the host (primary) side of the buffer.
    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
    auto tadPack =
        sd::ConstantTadHelper::getInstance().tadForDimensions(hXShapeInfo, dimension, shape::length(hDimensionShape));
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    // NOTE(review): the executioner receives the *device* dimension pointer
    // (dbDimension->special()), while TADs were built from the host copy —
    // both are assumed to hold the same values.
    NativeOpExecutioner::execIndexReduce(
        &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(),
        hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
        (int *)dbDimension->special(), dimensionLength, tadPack.specialShapeInfo(), tadPack.specialOffsets());
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
*/
////////////////////////////////////////////////////////////////////////
// Floating-point reduction of X along the dimensions stored in dbDimension,
// writing into Z. Mirrors execReduceBool2 / execReduceLong2 structurally.
void execReduceFloat2(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                      sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                      sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension,
                      sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
    const auto zLen = shape::length(hZShapeInfo);
    std::vector<int> dimensions(dimension, dimension + dimensionLength);
    const sd::LongType *zShapeInfoH = hZShapeInfo;
    // Use a unit-free cached Z descriptor when ranks do not line up with the
    // reduction (zPack.primary() points into the helper's cache).
    if (shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) {
      auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions);
      zShapeInfoH = reinterpret_cast<sd::LongType const *>(zPack.primary());
    }
    // Scalar output means full reduction: pass no explicit dimensions.
    std::vector<int> dims =
        (zLen != 1) ? ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>();
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execReduceFloat(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                         ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
                                         extraParams, dbZ->primary(), zShapeInfoH, dbZ->special(),
                                         ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(),
                                         dims.data(), dims.size());
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
*/
////////////////////////////////////////////////////////////////////////
// Full index reduction of X down to a scalar index written into Z.
// Buffers are flagged for device use before the launch and registered after;
// any exception is recorded on the default LaunchContext's error reference.
void execIndexReduceScalar(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX,
                           sd::LongType const *hXShapeInfo, sd::LongType const *dXShapeInfo, void *extraParams,
                           OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    auto &shapeHelper = ConstantShapeHelper::getInstance();
    NativeOpExecutioner::execIndexReduceScalar(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                               shapeHelper.bufferForShapeInfo(hXShapeInfo).special(), extraParams,
                                               dbZ->primary(), hZShapeInfo, dbZ->special(),
                                               shapeHelper.bufferForShapeInfo(hZShapeInfo).special());
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Element-wise "same type" transform: Z = op(X), X and Z share a data type.
// NOTE(review): tadOffsets is read from extraPointers[1], which is also passed
// to LaunchContext as the stream slot — confirm the intended slot layout
// (sibling execTransformStrict/Float use slots [10]/[11] instead).
void execTransformSame(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                       sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                       sd::LongType const *dZShapeInfo, void *extraParams) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    auto tadShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers != nullptr ? extraPointers[0] : nullptr);
    auto tadOffsets = reinterpret_cast<sd::LongType *>(extraPointers != nullptr ? extraPointers[1] : nullptr);
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execTransformSame(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                           ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
                                           dbZ->primary(), hZShapeInfo, dbZ->special(),
                                           ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
                                           extraParams, tadShapeInfo, tadOffsets);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Element-wise boolean transform: Z = op(X) with a BOOL result buffer.
// NOTE(review): same slot-layout question as execTransformSame — tadOffsets is
// taken from extraPointers[1], which is also used as the stream.
void execTransformBool(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                       sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                       sd::LongType const *dZShapeInfo, void *extraParams) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    auto tadShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers != nullptr ? extraPointers[0] : nullptr);
    auto tadOffsets = reinterpret_cast<sd::LongType *>(extraPointers != nullptr ? extraPointers[1] : nullptr);
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execTransformBool(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                           ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
                                           dbZ->primary(), hZShapeInfo, dbZ->special(),
                                           ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
                                           extraParams, tadShapeInfo, tadOffsets);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Element-wise transform where X and Z may have arbitrary (different) types.
// NOTE(review): `reinterpret_cast<hipStream_t &>(extraPointers[4])` reinterprets
// the pointer slot itself as a stream object rather than dereferencing it —
// looks intentional for the LaunchContext ctor overload used; confirm.
void execTransformAny(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                      sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                      sd::LongType const *dZShapeInfo, void *extraParams) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
    auto streamSpecial = reinterpret_cast<hipStream_t &>(extraPointers[4]);
    LaunchContext lc(stream, streamSpecial, extraPointers[5], extraPointers[3],
                     reinterpret_cast<int *>(extraPointers[6]));
    // No TAD descriptors are used for the "any" transform (nullptr, nullptr).
    NativeOpExecutioner::execTransformAny(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                          ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
                                          dbZ->primary(), hZShapeInfo, dbZ->special(),
                                          ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
                                          extraParams, nullptr, nullptr);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Element-wise "strict" transform (float-only ops where X and Z types match).
// TAD descriptors come from extraPointers slots [10]/[11] here.
void execTransformStrict(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                         sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                         sd::LongType const *dZShapeInfo, void *extraParams) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    auto tadShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers != nullptr ? extraPointers[10] : nullptr);
    auto tadOffsets = reinterpret_cast<sd::LongType *>(extraPointers != nullptr ? extraPointers[11] : nullptr);
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execTransformStrict(
        &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo,
        dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraParams,
        tadShapeInfo, tadOffsets);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Element-wise transform with a floating-point result buffer.
// TAD descriptors come from extraPointers slots [10]/[11], as in
// execTransformStrict.
void execTransformFloat(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                        sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                        sd::LongType const *dZShapeInfo, void *extraParams) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    auto tadShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers != nullptr ? extraPointers[10] : nullptr);
    auto tadOffsets = reinterpret_cast<sd::LongType *>(extraPointers != nullptr ? extraPointers[11] : nullptr);
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execTransformFloat(
        &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo,
        dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraParams,
        tadShapeInfo, tadOffsets);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Probes every ordered device pair for peer-to-peer access capability and
// stores the aggregate result in the global `supportedP2P` flag. Restores the
// originally selected device afterwards.
void checkP2P() {
  int curDevice = 0;
  hipGetDevice(&curDevice);
  int devCnt = 0;
  hipGetDeviceCount(&devCnt);
  // BUG FIX: was `curDevice < 0 && curDevice > devCnt`, which can never be
  // true; clamp an out-of-range device index to 0 (valid range: [0, devCnt)).
  if (curDevice < 0 || curDevice >= devCnt) curDevice = 0;
  bool tempSupport = true;
  if (devCnt > 1) {
    // BUG FIX: the inner `break` previously only left the inner loop; stop the
    // whole scan once any pair lacks P2P support (the answer cannot change).
    for (int dX = 0; dX < devCnt && tempSupport; dX++) {
      for (int dY = 0; dY < devCnt; dY++) {
        if (dX == dY) continue;
        int canAccess = 0;
        hipSetDevice(dX);
        hipDeviceCanAccessPeer(&canAccess, dX, dY);
        if (!canAccess) {
          tempSupport = false;
          break;
        }
      }
    }
    supportedP2P = tempSupport;
    hipSetDevice(curDevice);
  } else {
    // if we have only 1 device - we say that we support P2P, since all data will be on 1 device
    supportedP2P = true;
  }
}
// Enables or disables peer access between every device pair that supports it,
// updating the global `allowedP2P` flag. No-op when the flag already matches.
void enableP2P(bool enable) {
  if (enable == allowedP2P) return;
  int curDevice = 0;
  hipGetDevice(&curDevice);
  int devCnt = 0;
  hipGetDeviceCount(&devCnt);
  // BUG FIX: was `curDevice < 0 && curDevice > devCnt`, which can never be
  // true; clamp an out-of-range device index to 0 (valid range: [0, devCnt)).
  if (curDevice < 0 || curDevice >= devCnt) curDevice = 0;
  if (devCnt > 1) {
    for (int dX = 0; dX < devCnt; dX++) {
      for (int dY = 0; dY < devCnt; dY++) {
        if (dX == dY) continue;
        int canAccess = 0;
        hipSetDevice(dX);
        hipDeviceCanAccessPeer(&canAccess, dX, dY);
        if (canAccess) {
          if (enable) {
            hipDeviceEnablePeerAccess(dY, 0);
          } else {
            hipDeviceDisablePeerAccess(dY);
          }
        } else {
          if (sd::Environment::getInstance().isVerbose()) printf("Peer access [%i] -> [%i] isn't possible\n", dX, dY);
        }
      }
    }
  }
  allowedP2P = enable;
  // Restore the originally selected device (was duplicated inside the loop
  // branch before; one restore at the end is sufficient).
  hipSetDevice(curDevice);
}
// Reports the P2P capability detected by checkP2P().
bool isP2PAvailable() { return supportedP2P; }
// One-time device setup: caches per-device properties into the global
// `deviceProperties` array, raises the per-thread stack limit, probes P2P
// support, and enables peer access when available. Leaves device 0 selected.
void initializeDevicesAndFunctions() {
  try {
    int devCnt = 0;
    hipGetDeviceCount(&devCnt);
    deviceProperties = new hipDeviceProp_t[devCnt];
    for (int i = 0; i < devCnt; i++) {
      hipSetDevice(i);
      hipGetDeviceProperties(&deviceProperties[i], i);
      // 4 KB per-thread device stack (default is smaller on most runtimes).
      hipDeviceSetLimit(hipLimitStackSize, 4096);
    }
    hipSetDevice(0);
    checkP2P();
    // enabling p2p gpu access if it's supported
    if (supportedP2P && devCnt > 1) enableP2P(allowedP2P);
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Forwards externally supplied BLAS function pointers to the BlasHelper
// singleton; the commented-out block documents the historical slot layout.
void initializeFunctions(sd::Pointer *functions) {
  sd::BlasHelper::getInstance().initializeDeviceFunctions(functions);
  /*
  hipblasSgemv = (CublasSgemv)functions[0];
  hipblasDgemv = (CublasDgemv)functions[1];
  hipblasHgemm = (CublasHgemm)functions[2];
  hipblasSgemm = (CublasSgemm)functions[3];
  hipblasDgemm = (CublasDgemm)functions[4];
  cublasSgemmEx = (CublasSgemmEx)functions[5];
  hipblasHgemmBatched = (CublasHgemmBatched)functions[6];
  hipblasSgemmBatched = (CublasSgemmBatched)functions[7];
  hipblasDgemmBatched = (CublasDgemmBatched)functions[8];
  */
}
/**
* This method acquires memory chunk of requested size on host side
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
* @param flags optional parameter
*/
// Allocates pinned (page-locked) host memory of `memorySize` bytes (plus 8
// bytes of slack, presumably for alignment/overrun headroom — TODO confirm).
// On failure the error is recorded on the default LaunchContext and nullptr is
// returned. `flags` is currently unused.
sd::Pointer mallocHost(sd::LongType memorySize, int flags) {
  // BUG FIX: `pointer` was uninitialized, so a failed allocation returned an
  // indeterminate value to the caller; initialize it so failure yields nullptr.
  sd::Pointer pointer = nullptr;
  // hipHostMallocMapped |hipHostMallocPortable
  auto res = hipHostMalloc(reinterpret_cast<void **>(&pointer), memorySize + 8, hipHostMallocDefault);
  if (res != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipHostMalloc failed");
  }
  return reinterpret_cast<int8_t *>(pointer);
}
/**
* This method acquires memory chunk of requested size on specified device
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
* @param ptrToDeviceId pointer to deviceId. For cuda that's just and int, for OpenCL that's pointer to device_id, etc
* @param flags optional parameter
*/
// Allocates `memorySize` bytes (plus 8 bytes of slack) on the current device.
// NOTE(review): `deviceId` is accepted but not used to switch devices —
// callers appear to select the device beforehand; confirm. `flags` is unused.
// On failure the error is recorded on the default LaunchContext and nullptr is
// returned.
sd::Pointer mallocDevice(sd::LongType memorySize, int deviceId, int flags) {
  // BUG FIX: `pointer` was uninitialized, so a failed allocation returned an
  // indeterminate value to the caller; initialize it so failure yields nullptr.
  sd::Pointer pointer = nullptr;
  auto res = hipMalloc(reinterpret_cast<void **>(&pointer), memorySize + 8);
  if (res != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMalloc failed");
  }
  return reinterpret_cast<int8_t *>(pointer);
}
/**
* This method releases previously allocated host memory space
*
* @param pointer pointer that'll be freed
*/
// Releases pinned host memory allocated by mallocHost. Always returns 1;
// failures are only reported through the default LaunchContext error reference.
int freeHost(sd::Pointer pointer) {
  auto res = hipHostFree(reinterpret_cast<void *>(pointer));
  if (res != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipHostFree failed");
  }
  return 1L;
}
/**
* This method releases previously allocated memory space on device
*
* @param pointer pointer that'll be freed
* @param ptrToDeviceId pointer to deviceId.
*/
// Releases device memory allocated by mallocDevice. Returns 1 on success,
// 0 otherwise. `deviceId` is currently unused.
int freeDevice(sd::Pointer pointer, int deviceId) {
  auto res = hipFree(reinterpret_cast<void *>(pointer));
  // we're intentionally skipping
  // (error code 1 is deliberately not reported — presumably a benign/expected
  // status during shutdown; TODO confirm which code this corresponds to)
  if (res != 0 && res != 1) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipFree failed");
  }
  return res == 0 ? 1L : 0L;
}
// No-op on this backend: contexts are managed elsewhere, so a null handle is returned.
sd::Pointer createContext() { return 0L; }
// Heap-allocates a HIP stream and returns it as an opaque pointer; the caller
// owns the handle. Creation failures are recorded on the default
// LaunchContext's error reference (the pointer is still returned).
sd::Pointer createStream() {
  auto nativeStream = new hipStream_t();
  auto status = hipStreamCreate(nativeStream);
  if (status != 0) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(status);
    errRef->setErrorMessage("hipStreamCreate failed");
  }
  return nativeStream;
}
// Creates a timing-disabled HIP event and returns its handle as the pointer
// value itself.
// NOTE(review): the event handle is written over the local `nativeEvent`
// variable (&nativeEvent), so the malloc'ed buffer is never used and appears
// to leak — confirm whether the allocation is intentional.
sd::Pointer createEvent() {
  sd::Pointer nativeEvent = (sd::Pointer)malloc(sizeof(hipEvent_t));
  CHECK_ALLOC(nativeEvent, "Failed to allocate new CUDA event buffer", sizeof(hipEvent_t));
  auto dZ = hipEventCreateWithFlags(reinterpret_cast<hipEvent_t *>(&nativeEvent), hipEventDisableTiming);
  if (dZ != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventCreateWithFlags failed");
  }
  return nativeEvent;
}
// Records `event` on `stream`. The event handle is the pointer value itself
// (see createEvent), hence the cast of &event rather than event.
int registerEvent(sd::Pointer event, sd::Pointer stream) {
  auto pEvent = reinterpret_cast<hipEvent_t *>(&event);
  auto pStream = reinterpret_cast<hipStream_t *>(stream);
  auto dZ = hipEventRecord(*pEvent, *pStream);
  if (dZ != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventRecord failed");
  }
  return 1;
}
// Switches the calling thread's active device via the AffinityManager.
int setDevice(int deviceId) {
  AffinityManager::setCurrentDevice(deviceId);
  return 1;
}
// Free memory (bytes) on the currently selected device.
sd::LongType getDeviceFreeMemoryDefault() {
  size_t freeBytes = 0;
  size_t totalBytes = 0;
  hipMemGetInfo(&freeBytes, &totalBytes);
  return static_cast<sd::LongType>(freeBytes);
}
// Free memory (bytes) on `device`. Temporarily switches to that device when it
// is valid and differs from the current one, then restores the original.
sd::LongType getDeviceFreeMemory(int device) {
  int previous = -1;
  hipGetDevice(&previous);
  const bool mustSwitch = (device >= 0 && device != previous);
  if (mustSwitch) hipSetDevice(device);
  size_t freeBytes = 0;
  size_t totalBytes = 0;
  hipMemGetInfo(&freeBytes, &totalBytes);
  if (mustSwitch) hipSetDevice(previous);
  return static_cast<sd::LongType>(freeBytes);
}
// Total memory (bytes) on `device`. Temporarily switches to that device when
// it is valid and differs from the current one, then restores the original.
sd::LongType getDeviceTotalMemory(int device) {
  int previous = -1;
  hipGetDevice(&previous);
  const bool mustSwitch = (device >= 0 && device != previous);
  if (mustSwitch) hipSetDevice(device);
  size_t freeBytes = 0;
  size_t totalBytes = 0;
  hipMemGetInfo(&freeBytes, &totalBytes);
  if (mustSwitch) hipSetDevice(previous);
  return static_cast<sd::LongType>(totalBytes);
}
// Synchronous memcpy. `flags` selects the direction: 0=H2H, 1=H2D, 2=D2H,
// 3=D2D; any other value records an error and returns 0. Returns 1 on
// success, 0 on failure (error details go to the default LaunchContext).
int memcpySync(sd::Pointer dst, sd::Pointer src, sd::LongType size, int flags, sd::Pointer reserved) {
  hipMemcpyKind kind;
  switch (flags) {
    case 0: {
      kind = hipMemcpyHostToHost;
    } break;
    case 1: {
      kind = hipMemcpyHostToDevice;
    } break;
    case 2: {
      kind = hipMemcpyDeviceToHost;
    } break;
    case 3: {
      kind = hipMemcpyDeviceToDevice;
    } break;
    default: {
      sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
      // BUG FIX: message typo ("UNDEFNED" -> "UNDEFINED").
      sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFINED MEMCPY");
      return 0;
    }
  }
  auto dZ = hipMemcpy(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)),
                      static_cast<size_t>(size), kind);
  if (dZ != 0) {
    // BUG FIX: `size` is sd::LongType (64-bit) but was printed with %i, which
    // is undefined behavior; print it as long long.
    printf("Failed on [%p] -> [%p], size: [%lld], direction: [%i], dZ: [%i]\n", src, dst,
           static_cast<long long>(size), flags, static_cast<int>(dZ));
    fflush(stdout);
    fflush(stderr);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemcpy failed");
    return 0;
  }
  return 1;
}
// Asynchronous memcpy on the stream passed in `reserved`. `flags` selects the
// direction: 0=H2H, 1=H2D, 2=D2H, 3=D2D; any other value records an error and
// returns 0. Returns 1 on success, 0 on failure.
int memcpyAsync(sd::Pointer dst, sd::Pointer src, sd::LongType size, int flags, sd::Pointer reserved) {
  auto pStream = reinterpret_cast<hipStream_t *>(reserved);
  hipMemcpyKind kind;
  // sd::DebugHelper::checkErrorCode(pStream, "Preliminary sync failed");
  switch (flags) {
    case 0: {
      kind = hipMemcpyHostToHost;
    } break;
    case 1: {
      kind = hipMemcpyHostToDevice;
    } break;
    case 2: {
      kind = hipMemcpyDeviceToHost;
    } break;
    case 3: {
      kind = hipMemcpyDeviceToDevice;
    } break;
    default: {
      sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
      // BUG FIX: message typo ("UNDEFNED" -> "UNDEFINED").
      sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFINED MEMCPY");
      return 0;
    }
  }
  auto dZ = hipMemcpyAsync(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)),
                           static_cast<size_t>(size), kind, *pStream);
  if (dZ != 0) {
    // BUG FIX: `size` is sd::LongType (64-bit) but was printed with %i, which
    // is undefined behavior; print it as long long.
    printf("Failed on [%p] -> [%p], size: [%lld], direction: [%i], dZ: [%i]\n", src, dst,
           static_cast<long long>(size), flags, static_cast<int>(dZ));
    fflush(stdout);
    fflush(stderr);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemcpyAsync failed");
    return 0;
  }
  return 1;
}
// Synchronous byte-wise memset of `size` bytes at `dst`. `flags` and
// `reserved` are unused. Always returns 1; failures are recorded on the
// default LaunchContext's error reference.
int memsetSync(sd::Pointer dst, int value, sd::LongType size, int flags, sd::Pointer reserved) {
  auto status = hipMemset(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size));
  if (status != 0) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(status);
    errRef->setErrorMessage("hipMemset failed");
  }
  return 1;
}
// Asynchronous byte-wise memset on the stream passed in `reserved`. `flags`
// is unused. Always returns 1; failures are recorded on the default
// LaunchContext's error reference.
int memsetAsync(sd::Pointer dst, int value, sd::LongType size, int flags, sd::Pointer reserved) {
  auto nativeStream = reinterpret_cast<hipStream_t *>(reserved);
  auto status = hipMemsetAsync(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size), *nativeStream);
  if (status != 0) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(status);
    errRef->setErrorMessage("hipMemsetAsync failed");
  }
  return 1;
}
// Destroys an event created by createEvent. The handle is the pointer value
// itself, hence the cast of &event.
int destroyEvent(sd::Pointer event) {
  auto pEvent = reinterpret_cast<hipEvent_t *>(&event);
  auto dZ = hipEventDestroy(*pEvent);
  if (dZ != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventDestroy failed");
  }
  return 1;
}
// Blocks the host until every operation queued on `stream` has completed.
// Always returns 1; failures are recorded on the default LaunchContext.
int streamSynchronize(sd::Pointer stream) {
  auto nativeStream = reinterpret_cast<hipStream_t *>(stream);
  auto status = hipStreamSynchronize(*nativeStream);
  if (status != 0) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(status);
    errRef->setErrorMessage("hipStreamSynchronize failed");
  }
  return 1L;
}
// Blocks the host until `event` has completed. The handle is the pointer value
// itself (see createEvent). Always returns 1; failures are recorded on the
// default LaunchContext.
int eventSynchronize(sd::Pointer event) {
  auto pEvent = reinterpret_cast<hipEvent_t *>(&event);
  auto dZ = hipEventSynchronize(*pEvent);
  if (dZ != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventSynchronize failed");
  }
  return 1L;
}
// Number of GPU devices visible to the runtime.
int getAvailableDevices() {
  int deviceCount = 0;
  hipGetDeviceCount(&deviceCount);
  return deviceCount;
}
// Toggles the global debug flag on the Environment singleton.
void enableDebugMode(bool reallyEnable) { sd::Environment::getInstance().setDebug(reallyEnable); }
// Sets the global grid-size limit, clamped to [1, 8192].
void setGridLimit(int gridSize) {
  if (gridSize > 8192) gridSize = 8192;
  if (gridSize < 1) gridSize = 1;
  blockLimit = gridSize;
}
// Both getters report the same global `maxThreads` value.
int ompGetMaxThreads() { return maxThreads; }
int ompGetNumThreads() { return maxThreads; }
// Sets the global thread count, clamped to [32, 1024].
void setOmpNumThreads(int threads) {
  if (threads > 1024) threads = 1024;
  if (threads < 32) threads = 32;
  maxThreads = threads;
}
// Toggles the global verbose flag on the Environment singleton.
void enableVerboseMode(bool reallyEnable) { sd::Environment::getInstance().setVerbose(reallyEnable); }
// Device property accessors backed by the cached `deviceProperties` array,
// populated by initializeDevicesAndFunctions(); `device` is not range-checked.
int getDeviceMajor(int device) { return deviceProperties[device].major; }
int getDeviceMinor(int device) { return deviceProperties[device].minor; }
const char *getDeviceName(int device) { return deviceProperties[device].name; }
// Concatenates `numArrays` arrays along `dimension` into dZ, dispatching on
// the output data type.
// NOTE(review): this delegates to concatCpuGeneric, so the inputs/output are
// presumably host-side buffers despite living in the GPU backend — confirm.
// tadPointers/offsetPointers are accepted but unused here.
void specialConcat(sd::Pointer *extraPointers, int dimension, int numArrays, sd::Pointer *data,
                   sd::Pointer *inputShapeInfo, void *dZ, sd::LongType const *dZShapeInfo, sd::Pointer *tadPointers,
                   sd::Pointer *offsetPointers) {
  try {
    BUILD_SINGLE_SELECTOR(ArrayOptions::dataType(dZShapeInfo), sd::SpecialMethods,
                          ::concatCpuGeneric(dimension, numArrays, data, inputShapeInfo, dZ, dZShapeInfo),
                          SD_COMMON_TYPES);
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
 * Builds a heap-allocated TadPack describing the TADs of dXShapeInfo along the
 * given dimensions; the caller owns the returned pointer.
 */
// Copies the cached TadPack for (shape, dimensions) into a heap object so it
// can cross the interop boundary; returns nullptr on failure (error recorded
// on the default LaunchContext). Caller is responsible for deleting the pack.
sd::TadPack *tadOnlyShapeInfo(sd::LongType const *dXShapeInfo, int *dimension, int dimensionLength) {
  try {
    auto pack = new TadPack();
    *pack = sd::ConstantTadHelper::getInstance().tadForDimensions(dXShapeInfo, dimension, dimensionLength);
    return pack;
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    return nullptr;
  }
}
// Thin accessors exposing TadPack internals across the C interop boundary.
sd::LongType const *getPrimaryShapeInfo(sd::TadPack *pack) { return pack->primaryShapeInfo(); }
sd::LongType const *getPrimaryOffsets(sd::TadPack *pack) { return pack->primaryOffsets(); }
sd::LongType const *getSpecialShapeInfo(sd::TadPack *pack) { return pack->specialShapeInfo(); }
sd::LongType const *getSpecialOffsets(sd::TadPack *pack) { return pack->specialOffsets(); }
sd::LongType getNumberOfTads(sd::TadPack *pack) { return pack->numberOfTads(); }
int getShapeInfoLength(sd::TadPack *pack) { return pack->shapeInfoLength(); }
// Asynchronously copies `size` bytes from `src` into the device constant
// memory symbol at offset `dst`, on the stream passed in `reserved`. `flags`
// selects the direction (0=H2H, 1=H2D, 2=D2H, 3=D2D). Returns 1 on success,
// 0 for an unknown direction.
int memcpyConstantAsync(sd::LongType dst, sd::Pointer src, sd::LongType size, int flags, sd::Pointer reserved) {
  hipStream_t *pStream = reinterpret_cast<hipStream_t *>(reserved);
  hipMemcpyKind kind;
  DEBUG_KERNEL(pStream, -1);
  switch (flags) {
    case 0: {
      kind = hipMemcpyHostToHost;
    } break;
    case 1: {
      kind = hipMemcpyHostToDevice;
    } break;
    case 2: {
      kind = hipMemcpyDeviceToHost;
    } break;  // BUG FIX: missing break made DeviceToHost fall through to DeviceToDevice
    case 3: {
      kind = hipMemcpyDeviceToDevice;
    } break;
    default: {
      // BUG FIX: there was no default, so an unknown flag left `kind`
      // uninitialized (UB). Mirror the handling in memcpySync/memcpyAsync.
      sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
      sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFINED MEMCPY");
      return 0;
    }
  }
  auto dZ = hipMemcpyToSymbolAsync(deviceConstantMemory, const_cast<const void *>(src), size, dst, kind, *pStream);
  if (dZ != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemcpyToSymbolAsync failed");
  }
  return 1;
}
// Returns the device address of the global constant-memory symbol
// `deviceConstantMemory`. On failure the error is recorded on the default
// LaunchContext and the (then-uninitialized) pointer is still returned.
sd::Pointer getConstantSpace() {
  sd::Pointer dConstAddr;
  hipError_t dZ = hipGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);
  if (dZ != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipGetSymbolAddress failed");
  }
  return dConstAddr;
}
// Gathers `n` TADs ("rows") of X selected by `indexes` into Z on the device,
// using precomputed TAD descriptors for both input and output.
// extraPointers[1] is the stream.
void pullRows(sd::Pointer *extraPointers, OpaqueDataBuffer *dbX, sd::LongType const *xShapeInfo,
              sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *zShapeInfo,
              sd::LongType const *dZShapeInfo, sd::LongType n, sd::LongType *indexes, sd::LongType const *tadShapeInfo,
              sd::LongType const *tadOffsets, sd::LongType const *zTadShapeInfo, sd::LongType const *zTadOffsets) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
    dim3 launchDims(64, 256, 1024);
    auto xType = sd::ArrayOptions::dataType(xShapeInfo);
    // Dispatch the kernel launch on the input data type.
    BUILD_SINGLE_SELECTOR(xType, pullRowsKernelGeneric,
                          (launchDims, stream, dbX->special(), dbZ->special(), n, indexes, tadShapeInfo, tadOffsets,
                           zTadShapeInfo, zTadOffsets),
                          SD_COMMON_TYPES);
    DEBUG_KERNEL(stream, -1);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Averages `n` arrays of `length` elements into z (optionally propagating the
// result back — see `propagate`). mode == 0 runs the GPU kernel; any other
// mode falls back to the host implementation in sd::SpecialMethods.
// extras[1] is assumed to be the hipStream_t*, extras[3] the device/mode id.
void average(sd::Pointer *extras, sd::Pointer *x, sd::LongType const *xShapeInfo, sd::Pointer *dx,
sd::LongType const *dXShapeInfo, void *z, sd::LongType const *zShapeInfo, void *dz,
sd::LongType const *dzShapeInfo, int n, sd::LongType length, bool propagate) {
try {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extras[1]);
int mode = getDeviceId(extras[3]);
auto dX = reinterpret_cast<void **>(dx);
if (sd::Environment::getInstance().isDebugAndVerbose()) printf("averageFloat called\n");
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
// launching on gpu
if (mode == 0) {
dim3 launchDims(256, 256, 4096);
BUILD_SINGLE_SELECTOR(xType, averagingKernelGeneric, (launchDims, stream, dX, dz, n, length, propagate),
SD_COMMON_TYPES);
sd::DebugHelper::checkErrorCode(stream, "AverageFloat(...) failed");
} else {
// launching on host memory
BUILD_SINGLE_SELECTOR(xType, sd::SpecialMethods, ::averageGeneric(x, z, zShapeInfo, n, length, propagate),
SD_COMMON_TYPES);
}
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
// Accumulates (sums) `n` arrays of `length` elements into z.
// mode == 0 runs the GPU kernel; any other mode uses the host implementation.
// extras[1] is assumed to be the hipStream_t*, extras[3] the device/mode id.
void accumulate(sd::Pointer *extras, sd::Pointer *x, sd::LongType const *xShapeInfo, sd::Pointer *dx,
sd::LongType const *dXShapeInfo, void *z, sd::LongType const *zShapeInfo, void *dz,
sd::LongType const *dzShapeInfo, int n, sd::LongType length) {
try {
auto stream = reinterpret_cast<hipStream_t *>(extras[1]);
int mode = getDeviceId(extras[3]);
auto dX = reinterpret_cast<void **>(dx);
if (sd::Environment::getInstance().isDebugAndVerbose()) printf("accumulateFloat called\n");
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
// launching on gpu
if (mode == 0) {
dim3 launchDims(n, 256, 16384);
BUILD_SINGLE_SELECTOR(xType, accumulateKernelGeneric, (launchDims, stream, dX, dz, n, length), SD_COMMON_TYPES);
sd::DebugHelper::checkErrorCode(stream, "AccumulateFloat(...) failed");
} else {
// launching on host memory
BUILD_SINGLE_SELECTOR(xType, sd::SpecialMethods, ::accumulateGeneric(x, z, zShapeInfo, n, length),
SD_COMMON_TYPES);
}
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
// Shuffles N arrays in-place on device according to shuffleMap, TAD-wise.
// The element type is taken from the first host shape-info (xShape[0]);
// all N arrays are presumably of the same type — TODO confirm with callers.
void shuffle(sd::Pointer *extras, sd::Pointer *x, sd::Pointer *xShapeInfo, sd::Pointer *dx, sd::Pointer *dXShapeInfo,
sd::Pointer *z, sd::Pointer *zShapeInfo, sd::Pointer *dz, sd::Pointer *dZShapeInfo, int N, int *shuffleMap,
sd::Pointer *tadShapeInfo, sd::Pointer *tadOffsets) {
try {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extras[1]);
auto dX = reinterpret_cast<void **>(dx);
auto dZ = reinterpret_cast<void **>(dz);
auto xShape = reinterpret_cast<sd::LongType **>(xShapeInfo);
auto dxShape = reinterpret_cast<sd::LongType **>(dXShapeInfo);
auto tadOnlyShapeInfo = reinterpret_cast<sd::LongType **>(tadShapeInfo);
auto tadOffset = reinterpret_cast<sd::LongType **>(tadOffsets);
auto xType = sd::ArrayOptions::dataType(xShape[0]);
dim3 launchDims(256, 512, 8192);
BUILD_SINGLE_SELECTOR(xType, shuffleKernelGeneric,
(launchDims, stream, dX, dxShape, dZ, N, shuffleMap, tadOnlyShapeInfo, tadOffset),
SD_COMMON_TYPES);
sd::DebugHelper::checkErrorCode(stream, "shuffle(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
// Whether this binary was built with experimental (mixed-type) ops enabled.
bool isExperimentalEnabled() {
  auto &environment = sd::Environment::getInstance();
  return environment.isExperimentalBuild();
}
// Clamps the requested minimum OMP thread count into the range [32, maxThreads]
// and stores it in the global `minThreads`.
void setOmpMinThreads(int threads) {
  auto atLeast32 = sd::math::sd_max<int>(32, threads);
  minThreads = sd::math::sd_min<int>(maxThreads, atLeast32);
}
// Returns the device id currently bound to the calling thread (affinity-managed).
int getDevice() { return sd::AffinityManager::currentDeviceId(); }
// Intentionally empty: the element threshold only affects the CPU backend.
void setElementThreshold(int num) {
// this is no-op for CUDA
}
// Intentionally empty: the TAD threshold only affects the CPU backend.
void setTADThreshold(int num) {
// this is no-op for CUDA
}
////////////////////////////////////////////////////////////////////////
// Executes a summary-statistics op (variance family) over the whole array.
// LaunchContext is rebuilt from extraPointers[1]/[4]/[5]/[3] (stream plus
// special memory pointers — order assumed from the ctor; TODO confirm).
void execSummaryStats(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, bool biasCorrected) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
// device shape-infos are re-derived from the cached constant-shape buffers
NativeOpExecutioner::execSummaryStats(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
biasCorrected);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
// Executes a summary-statistics op (variance family) along the given
// dimensions (TAD-wise). The dimension indices travel on the device side
// (dbDimension->special()); only their count is taken from the host shape.
// Errors are captured into the default context's error reference.
void execSummaryStatsTad(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                         sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                         sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo,
                         OpaqueDataBuffer *dbDimension, sd::LongType const *hDimensionShape,
                         sd::LongType const *dDimensionShape, bool biasCorrected, sd::LongType const *tadShapeInfo,
                         sd::LongType const *tadOffsets) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbDimension});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
    // number of dimensions comes from the host-side shape of the dimension array
    // (the unused host-side cast of dbDimension->primary() was removed)
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
    // extraPointers layout assumed: [1]=stream, [4]/[5]/[3]=special memory — TODO confirm
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execSummaryStats(
        &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(),
        hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
        reinterpret_cast<int *>(dbDimension->special()), dimensionLength, tadShapeInfo, tadOffsets, biasCorrected);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbDimension});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Executes a reduce3 op (pairwise reduction over X and Y, e.g. distance
// measures) producing a full-reduction result in Z.
void execReduce3(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbY,
sd::LongType const *hYShapeInfo, sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ,
sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
// extraPointers layout assumed: [1]=stream, [4]/[5]/[3]=special memory — TODO confirm
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduce3(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams, dbY->primary(), hYShapeInfo, dbY->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
// Executes a reduce3 op (pairwise reduction over X and Y) along dimensions.
// When the TAD length equals the full length of either operand, the op
// degenerates to a plain reduce3 and is dispatched through that path instead.
void execReduce3Tad(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                    sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbY,
                    sd::LongType const *hYShapeInfo, sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ,
                    sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension,
                    sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape,
                    sd::LongType const *tadOnlyShapeInfo, sd::LongType const *tadOffsets,
                    sd::LongType const *yTadOnlyShapeInfo, sd::LongType const *yTadOffsets) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
    auto tadPack =
        sd::ConstantTadHelper::getInstance().tadForDimensions(hXShapeInfo, dimension, shape::length(hDimensionShape));
    auto tadLength = shape::length(tadPack.primaryShapeInfo());
    auto yLength = shape::length(hYShapeInfo);
    auto xLength = shape::length(hXShapeInfo);
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    if (tadLength == yLength || tadLength == xLength) {
      // degenerate case: the TAD covers a whole operand -> plain reduce3
      NativeOpExecutioner::execReduce3(
          &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
          ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbY->primary(),
          hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
          dbZ->primary(), hZShapeInfo, dbZ->special(),
          ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dimension, dimensionLength,
          tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets);
    } else {
      // BUGFIX: the x-TAD offsets were previously passed as `yTadOffsets`,
      // mispairing the x TAD shape-info with the y TAD offsets; pass the
      // same (shapeInfo, offsets) pairs as the branch above.
      NativeOpExecutioner::execReduce3TAD(
          &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
          ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbY->primary(),
          hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
          dbZ->primary(), hZShapeInfo, dbZ->special(),
          ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dimension, dimensionLength,
          tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets);
    }
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Executes a reduce3 op over X and Y producing a scalar result in Z.
void execReduce3Scalar(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbY,
sd::LongType const *hYShapeInfo, sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ,
sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduce3Scalar(
&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbY->primary(),
hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
// Executes a boolean scalar op: Z = op(X, scalar), element-wise, with a
// boolean-typed result.
void execScalarBool(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbScalar, sd::LongType const *hScalarShapeInfo,
sd::LongType const *dScalarShapeInfo, void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalar});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execScalarBool(
&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo,
dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
dbScalar->primary(), hScalarShapeInfo, dbScalar->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hScalarShapeInfo).special(), extraParams);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalar});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
// Executes a boolean scalar op TAD-wise: each TAD of X is combined with its
// own scalar from dbScalars.
// NOTE(review): `extraParams` is passed between the X and Z argument groups
// here, unlike execScalarBool above where it comes last — presumably this
// matches a TAD-specific execScalarBool overload; confirm against the
// NativeOpExecutioner declaration.
void execScalarBoolTad(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbScalars,
sd::LongType const *hScalarShapeInfo, sd::LongType const *dScalarShapeInfo, void *extraParams,
OpaqueDataBuffer *dbDimension, sd::LongType const *hDimensionShape,
sd::LongType const *dDimensionShape, sd::LongType const *tadShapeInfo,
sd::LongType const *tadOffsets, sd::LongType const *tadShapeInfoZ,
sd::LongType const *tadOffsetsZ) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalars});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execScalarBool(
&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(),
hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
dbScalars->primary(), hScalarShapeInfo, dbScalars->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hScalarShapeInfo).special(), dimension, dimensionLength,
tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalars});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
// Executes a scalar op: Z = op(X, scalar), element-wise.
void execScalar(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbScalar, sd::LongType const *hScalarShapeInfo,
sd::LongType const *dScalarShapeInfo, void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalar});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execScalar(
&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo,
dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
dbScalar->primary(), hScalarShapeInfo, dbScalar->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hScalarShapeInfo).special(), extraParams);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalar});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
// Executes a scalar op TAD-wise: each TAD of X is combined with its own
// scalar from dbScalars. Dispatches the templated ScalarTransform launcher
// directly (bypassing NativeOpExecutioner), either pairwise-typed when the
// experimental build is enabled, or single-typed otherwise.
void execScalarTad(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbScalars, sd::LongType const *hScalarShapeInfo,
sd::LongType const *dScalarShapeInfo, void *extraParams, OpaqueDataBuffer *dbDimension,
sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape,
sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets, sd::LongType const *tadShapeInfoZ,
sd::LongType const *tadOffsetsZ) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalars});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
// non-experimental builds only allow same-type operands (or a BOOL scalar)
if (yType != xType && yType != sd::DataType::BOOL && !isExperimentalEnabled())
throw sd::datatype_exception::build("execScalar both operands must have same data type", xType, yType);
dim3 launchDims(256, 256, 16384);
#ifdef SD_EXPERIMENTAL_ENABLED
BUILD_PAIRWISE_SELECTOR(
xType, yType, zType, functions::scalar::ScalarTransform,
::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams,
dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ),
SD_COMMON_TYPES, SD_COMMON_TYPES);
#else
BUILD_SINGLE_SELECTOR_THRICE(
xType, functions::scalar::ScalarTransform,
::executeCudaAlongDimension(
launchDims, stream, opNum, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dbScalars->special(),
extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ),
SD_COMMON_TYPES);
#endif
DEBUG_KERNEL(stream, opNum);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalars});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
// Intentionally empty: aggregate ops are not implemented on this backend;
// kept only to satisfy the interop ABI.
void execAggregate(sd::Pointer *extraPointers, int opNum, void **arguments, int numArguments, sd::LongType **shapes,
int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays,
void *realArguments, int numRealArguments, sd::DataType dtype) {}
// Intentionally empty: batched aggregate execution is not implemented on this
// backend; kept only to satisfy the interop ABI.
void batchExecutor(sd::Pointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes,
int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments,
sd::DataType dtype) {}
// Intentionally empty: see batchExecutor above; kept for ABI compatibility.
void execAggregateBatch(sd::Pointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes,
int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments,
sd::DataType dtype) {}
////////////////////////////////////////////////////////////////////////
// Executes a random op writing into Z only (no input operands), using the
// RNG state passed in `stateHost`.
void execRandom(sd::Pointer *extraPointers, int opNum, sd::Pointer stateHost, OpaqueDataBuffer *dbZ,
sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, void *extraArguments) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execRandom(&lc, opNum, stateHost, dbZ->primary(), hZShapeInfo, dbZ->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraArguments);
InteropDataBuffer::registerSpecialUse({dbZ}, {});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
// Executes a random op with one input operand: Z = op(rng, X).
void execRandom2(sd::Pointer *extraPointers, int opNum, sd::Pointer stateHost, OpaqueDataBuffer *dbX,
sd::LongType const *hXShapeInfo, sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ,
sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, void *extraArguments) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execRandom(
&lc, opNum, stateHost, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo,
dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraArguments);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
// Executes a random op with two input operands: Z = op(rng, X, Y).
void execRandom3(sd::Pointer *extraPointers, int opNum, sd::Pointer stateHost, OpaqueDataBuffer *dbX,
sd::LongType const *hXShapeInfo, sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbY,
sd::LongType const *hYShapeInfo, sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ,
sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, void *extraArguments) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execRandom(
&lc, opNum, stateHost, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbY->primary(), hYShapeInfo,
dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(),
hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraArguments);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
// Creates a RandomBuffer backed by the given host (extraPointers[0]) and
// device (ptrToBuffer) memory, seeds it, generates the initial sequence on
// the host and copies it to the device asynchronously on extraPointers[1].
// Ownership of the returned buffer passes to the caller (see destroyRandom).
// NOTE(review): the `* 8` assumes 8-byte (uint64) buffer elements — matches
// the uint64_t casts below.
sd::Pointer initRandom(sd::Pointer *extraPointers, long seed, long bufferSize, sd::Pointer ptrToBuffer) {
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
// we don't synchronize at random initialization, it's safe to go unsync here
// hipStreamSynchronize(*stream);
auto ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer);
auto buffer = new sd::random::RandomBuffer(seed, bufferSize, reinterpret_cast<uint64_t *>(ptrHost),
reinterpret_cast<uint64_t *>(ptrDev));
buffer->propagateToDevice(buffer, *stream);
sd::DebugHelper::checkErrorCode(stream, "initRandom(...) failed A");
// we generate sequence in the host memory
sd::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// and copy it to gpu
hipMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, hipMemcpyHostToDevice, *stream);
sd::DebugHelper::checkErrorCode(stream, "initRandom(...) failed B");
return buffer;
}
// Releases a RandomBuffer previously created by initRandom().
// FIXME: it's bad thing, but we can't know in advance, which stream(s) were
// using this generator, so a full device sync is done before deletion.
void destroyRandom(sd::Pointer ptrBuffer) {
  auto buffer = reinterpret_cast<sd::random::RandomBuffer *>(ptrBuffer);
  hipDeviceSynchronize();
  delete buffer;
}
// Re-seeds an existing RandomBuffer, regenerates its host-side sequence and
// copies it back to the device buffer asynchronously on extraPointers[1].
// NOTE(review): the `* 8` assumes 8-byte (uint64) buffer elements.
void refreshBuffer(sd::Pointer *extraPointers, long seed, sd::Pointer ptrRandom) {
sd::random::RandomBuffer *buffer = reinterpret_cast<sd::random::RandomBuffer *>(ptrRandom);
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
hipStreamSynchronize(*stream);
uint64_t *ptrDev = buffer->getDeviceBuffer();
// update rng state
buffer->setSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
// refresh buffer on host size
sd::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// copy back to gpu
hipMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, hipMemcpyHostToDevice, *stream);
}
// Re-seeds an existing RandomBuffer and pushes the refreshed RNG state to the
// device, without regenerating the host-side sequence (contrast refreshBuffer).
// extraPointers[1] is assumed to hold the hipStream_t* — TODO confirm layout.
void reSeedBuffer(sd::Pointer *extraPointers, long seed, sd::Pointer ptrRandom) {
  auto buffer = reinterpret_cast<sd::random::RandomBuffer *>(ptrRandom);
  auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
  hipStreamSynchronize(*stream);
  // update rng state on the host copy, then propagate it to the device copy
  buffer->reSeed(seed);
  buffer->setOffset(0);
  buffer->propagateToDevice(buffer, *stream);
}
/**
 * Return the length (in sd::LongType elements) of a shape-info buffer,
 * derived from the rank stored inside the buffer itself.
 *
 * @param buffer the shape-info buffer pointer to inspect
 * @return the number of elements that shape-info occupies
 */
int lengthForShapeBufferPointer(sd::Pointer buffer) {
  auto shapeInfo = reinterpret_cast<sd::LongType *>(buffer);
  auto rank = shape::rank(shapeInfo);
  return shape::shapeInfoLength(rank);
}
/**
 * Reinterpret a numeric address as an opaque pointer.
 *
 * @param address the raw address value
 * @return the same value viewed as an sd::Pointer
 */
sd::Pointer pointerForAddress(sd::LongType address) {
  return reinterpret_cast<sd::Pointer>(address);
}
// Tears X apart TAD-wise: each TAD is copied out to one of the `targets`
// pointers. extras[1] is assumed to hold the hipStream_t*.
void tear(sd::Pointer *extras, OpaqueDataBuffer *dbX, sd::LongType const *xShapeInfo, sd::LongType const *dXShapeInfo,
sd::Pointer *targets, sd::LongType const *zShapeInfo, sd::LongType const *tadShapeInfo,
sd::LongType const *tadOffsets) {
try {
InteropDataBuffer::prepareSpecialUse({}, {dbX});
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extras[1]);
dim3 launchDims(512, 512, 512);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
// dispatch the templated launcher on the runtime element type
BUILD_SINGLE_SELECTOR(
xType, tearKernelGeneric,
(launchDims, stream, dbX->special(), dXShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets),
SD_COMMON_TYPES);
sd::DebugHelper::checkErrorCode(stream, "tearFloat(...) failed");
InteropDataBuffer::registerSpecialUse({}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
// Recursive prefix-scan over int data: scans dX into dZ block-by-block,
// accumulates per-block sums into extras[2][level], recurses on those sums,
// then adds them back with uniformAdd (looks like a work-efficient
// scan-then-propagate scheme — see the GPU Gems 3 prescan; confirm).
// A non-power-of-2 tail is handled by a separately-launched last block.
void prescanArrayRecursive(sd::Pointer *extras, int *dZ, int *dX, int numElements, int level) {
auto stream = reinterpret_cast<hipStream_t *>(extras[1]);
auto g_scanBlockSums = reinterpret_cast<int **>(extras[2]);
int blockSize = 512; // max size of the thread blocks
int numBlocks = sd::math::sd_max<int>(1, static_cast<int>(ceil(static_cast<float>(numElements) / (2.f * blockSize))));
int numThreads;
if (numBlocks > 1)
numThreads = blockSize;
else if (sd::isPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = sd::floorPow2(numElements);
int numEltsPerBlock = numThreads * 2;
// if this is a non-power-of-2 array, the last block will be non-full
// compute the smallest power of 2 able to compute its scan.
int numEltsLastBlock = numElements - (numBlocks - 1) * numEltsPerBlock;
int numThreadsLastBlock = sd::math::sd_max<int>(1, numEltsLastBlock / 2);
int np2LastBlock = 0;
int sharedMemLastBlock = 0;
if (numEltsLastBlock != numEltsPerBlock) {
np2LastBlock = 1;
if (!isPowerOfTwo(numEltsLastBlock)) numThreadsLastBlock = floorPow2(numEltsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace);
}
// padding space is used to avoid shared memory bank conflicts
int extraSpace = numEltsPerBlock / NUM_BANKS;
int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace);
// setup execution parameters
// if NP2, we process the last block separately
dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
dim3 gridOnes(1, 1, 1);
dim3 threadsOnes(numThreadsLastBlock, 1, 1);
// floor on shared-memory size (bytes); rationale not visible here — confirm
if (sharedMemSize < 2048) sharedMemSize = 2048;
if (sharedMemLastBlock < 2048) sharedMemLastBlock = 2048;
// execute the scan
if (numBlocks > 1) {
sd::prescanLauncher<true, false>(grid, threads, sharedMemSize, stream, dZ, dX, g_scanBlockSums[level],
numThreads * 2, 0, 0);
if (np2LastBlock) {
sd::prescanLauncher<true, true>(gridOnes, threadsOnes, sharedMemLastBlock, stream, dZ, dX, g_scanBlockSums[level],
numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
// After scanning all the sub-blocks, we are mostly done. But now we
// need to take all of the last values of the sub-blocks and scan those.
// This will give us a new value that must be added to each block to
// get the final results.
// recursive (CPU) call
prescanArrayRecursive(extras, g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level + 1);
hipLaunchKernelGGL(( sd::uniformAdd), dim3(grid), dim3(threads), 1024, *stream, dZ, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0);
if (np2LastBlock) {
hipLaunchKernelGGL(( sd::uniformAdd), dim3(1), dim3(numThreadsLastBlock), 1024, *stream, dZ, g_scanBlockSums[level], numEltsLastBlock,
numBlocks - 1, numElements - numEltsLastBlock);
}
} else if (isPowerOfTwo(numElements)) {
sd::prescanLauncher<false, false>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numThreads * 2, 0, 0);
} else {
sd::prescanLauncher<false, true>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numElements, 0, 0);
}
sd::DebugHelper::checkErrorCode(stream, "prescanArray(...) failed");
}
////////////////////////////////////////////////////////////////////////
// Executes a reduce3 "all" op: pairwise reduction of every x-TAD against
// every y-TAD. Dimension indices travel on the device side
// (dbDimension->special()); only their count comes from the host shape.
void execReduce3All(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                    sd::LongType const *dXShapeInfo, void *extraParamsVals, OpaqueDataBuffer *dbY,
                    sd::LongType const *hYShapeInfo, sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ,
                    sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension,
                    sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape,
                    sd::LongType const *xTadShapeInfo, sd::LongType const *xOffsets, sd::LongType const *yTadShapeInfo,
                    sd::LongType const *yOffsets) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY, dbDimension});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
    // number of dimensions comes from the host-side shape of the dimension array
    // (the unused host-side cast of dbDimension->primary() was removed)
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execReduce3All(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
                                        extraParamsVals, dbY->primary(), hYShapeInfo, dbY->special(),
                                        ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
                                        dbZ->primary(), hZShapeInfo, dbZ->special(),
                                        ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
                                        reinterpret_cast<int *>(dbDimension->special()), dimensionLength, xTadShapeInfo,
                                        xOffsets, yTadShapeInfo, yOffsets);
    // register the same buffer set that was prepared above (dbDimension was
    // previously omitted here, unlike e.g. execSummaryStatsTad)
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY, dbDimension});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Sorts dX in-place on device. Power-of-2 lengths (up to ~10M) use the classic
// bitonic sort network; other lengths use a bitonic network generalized to
// arbitrary sizes (window/half loops below). Host buffer x is not touched.
void sort(sd::Pointer *extraPointers, void *x, sd::LongType const *xShapeInfo, void *dX,
sd::LongType const *dXShapeInfo, bool descending) {
try {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto xLength = shape::length(xShapeInfo);
auto xEWS = shape::elementWiseStride(xShapeInfo);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
// check if xLength is a power of 2, and use bitonic sort, if that's the case
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = sd::math::sd_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++;
dim3 launchDims(numBlocks, numThreads, 32768);
// standard bitonic network: k = subsequence size, j = compare distance
for (int k = 2; k <= xLength; k = 2 * k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
BUILD_SINGLE_SELECTOR(xType, bitonicSortStepGeneric,
(launchDims, stream, dX, dXShapeInfo, j, k, xLength, descending), SD_COMMON_TYPES);
}
}
} else {
int numThreads = sd::math::sd_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++;
numBlocks = sd::math::sd_min<int>(512, numBlocks);
dim3 launchDims(numBlocks, numThreads, 32768);
// round xLength up to the next power of two to size the merge windows
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window <<= 1) {
int n = window;
int rev = 0;
do {
int half = n >> 1;
BUILD_SINGLE_SELECTOR(xType, bitonicArbitraryStepGeneric,
(launchDims, stream, dX, dXShapeInfo, n, xLength, rev, descending), SD_COMMON_TYPES);
n >>= 1;
rev = 1;
} while (n > 1);
}
}
sd::DebugHelper::checkErrorCode(stream, "sort(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
 * Sorts the key buffer dX in place and applies the same permutation to the
 * value buffer dy (bitonic sort on device).
 *
 * extraPointers[1] must hold the hipStream_t to launch on. Keys and values
 * must be the same length; empty arrays are a no-op.
 *
 * Fixes: added a post-launch error check (consistent with sort()); removed
 * unused locals (xEWS, half, dg).
 */
void sortByKey(sd::Pointer *extraPointers, void *x, sd::LongType const *xShapeInfo, void *dX,
               sd::LongType const *dXShapeInfo, void *y, sd::LongType const *yShapeInfo, void *dy,
               sd::LongType const *dyShapeInfo, bool descending) {
  try {
    auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);

    auto xLength = shape::length(xShapeInfo);
    auto yLength = shape::length(yShapeInfo);
    auto xType = sd::ArrayOptions::dataType(xShapeInfo);
    auto yType = sd::ArrayOptions::dataType(yShapeInfo);

    if (shape::isEmpty(xShapeInfo) || shape::isEmpty(yShapeInfo)) return;

    if (xLength != yLength) throw std::runtime_error("sortByKey: keys and values must have the same size");

    // check if xLength is a power of 2, and use bitonic sort, if that's the case
    if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
      int numThreads = sd::math::sd_min<int>(512, xLength);
      int numBlocks = xLength / numThreads;
      if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++;

      dim3 launchDims(numBlocks, numThreads, 32768);

      for (int k = 2; k <= xLength; k = 2 * k) {
        for (int j = k >> 1; j > 0; j = j >> 1) {
          BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey,
                                (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, j, k, xLength, descending),
                                SD_COMMON_TYPES, SD_COMMON_TYPES);
        }
      }
    } else {
      // non-power-of-two length: arbitrary-step bitonic variant
      int numThreads = sd::math::sd_min<int>(512, xLength);
      int numBlocks = xLength / numThreads;
      if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++;
      numBlocks = sd::math::sd_min<int>(512, numBlocks);

      dim3 launchDims(numBlocks, numThreads, 32768);

      // round the virtual length up past the next power of two
      int max = 2;
      while (max < xLength) max <<= 1;
      max <<= 1;

      for (int window = 2; window < max; window <<= 1) {
        int n = window;
        int rev = 0;
        do {
          BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey,
                                (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, n, xLength, rev, descending),
                                SD_COMMON_TYPES, SD_COMMON_TYPES);
          n >>= 1;
          rev = 1;
        } while (n > 1);
      }
    }

    sd::DebugHelper::checkErrorCode(stream, "sortByKey(...) failed");
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
 * Sorts by the value buffer: dy acts as the sort key, and the key buffer dX
 * is permuted along with it. This is why the type selectors and the kernel
 * argument order are deliberately swapped relative to sortByKey.
 *
 * extraPointers[1] must hold the hipStream_t to launch on. Keys and values
 * must be the same length; empty arrays are a no-op.
 *
 * Fixes: added a post-launch error check (consistent with sort()); removed
 * unused locals (xEWS, half, dg).
 */
void sortByValue(sd::Pointer *extraPointers, void *x, sd::LongType const *xShapeInfo, void *dX,
                 sd::LongType const *dXShapeInfo, void *y, sd::LongType const *yShapeInfo, void *dy,
                 sd::LongType const *dyShapeInfo, bool descending) {
  try {
    auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);

    auto xLength = shape::length(xShapeInfo);
    auto yLength = shape::length(yShapeInfo);
    // intentionally swapped: the values array is treated as the sort key
    auto xType = sd::ArrayOptions::dataType(yShapeInfo);
    auto yType = sd::ArrayOptions::dataType(xShapeInfo);

    if (shape::isEmpty(xShapeInfo) || shape::isEmpty(yShapeInfo)) return;

    if (xLength != yLength) throw std::runtime_error("sortByValue: keys and values must have the same size");

    // check if xLength is a power of 2, and use bitonic sort, if that's the case
    if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
      int numThreads = sd::math::sd_min<int>(512, xLength);
      int numBlocks = xLength / numThreads;
      if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++;

      dim3 launchDims(numBlocks, numThreads, 32768);

      for (int k = 2; k <= xLength; k = 2 * k) {
        for (int j = k >> 1; j > 0; j = j >> 1) {
          BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey,
                                (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, j, k, xLength, descending),
                                SD_COMMON_TYPES, SD_COMMON_TYPES);
        }
      }
    } else {
      // non-power-of-two length: arbitrary-step bitonic variant
      int numThreads = sd::math::sd_min<int>(512, xLength);
      int numBlocks = xLength / numThreads;
      if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++;
      numBlocks = sd::math::sd_min<int>(512, numBlocks);

      dim3 launchDims(numBlocks, numThreads, 32768);

      // round the virtual length up past the next power of two
      int max = 2;
      while (max < xLength) max <<= 1;
      max <<= 1;

      for (int window = 2; window < max; window <<= 1) {
        int n = window;
        int rev = 0;
        do {
          BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey,
                                (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, n, xLength, rev, descending),
                                SD_COMMON_TYPES, SD_COMMON_TYPES);
          n >>= 1;
          rev = 1;
        } while (n > 1);
      }
    }

    sd::DebugHelper::checkErrorCode(stream, "sortByValue(...) failed");
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
 * Sorts every TAD (sub-array along the given dimensions) of the key buffer dX,
 * applying the same permutation to the value buffer dy. Uses an odd-even
 * transposition sort kernel with one block per TAD.
 *
 * Fix: removed the unused `context` local (computed but never read).
 */
void sortTadByKey(sd::Pointer *extraPointers, void *x, sd::LongType const *xShapeInfo, void *dX,
                  sd::LongType const *dXShapeInfo, void *y, sd::LongType const *yShapeInfo, void *dy,
                  sd::LongType const *dyShapeInfo, int *dimension, int dimensionLength, bool descending) {
  try {
    auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
    auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(xShapeInfo, dimension, dimensionLength);
    // one block per TAD
    dim3 launchDims((int)tadPack.numberOfTads(), 256, 2048);
    auto xType = sd::ArrayOptions::dataType(xShapeInfo);
    auto yType = sd::ArrayOptions::dataType(yShapeInfo);
    BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey,
                          (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, nullptr, dimensionLength,
                           tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending),
                          SD_COMMON_TYPES, SD_COMMON_TYPES);

    sd::DebugHelper::checkErrorCode(stream, "sortTadKey(...) failed");
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
 * Sorts every TAD by the value buffer: dy acts as the sort key and dX is
 * permuted along with it — hence the deliberately swapped type selectors and
 * kernel argument order relative to sortTadByKey.
 *
 * Fix: removed the unused `context` local (computed but never read).
 */
void sortTadByValue(sd::Pointer *extraPointers, void *x, sd::LongType const *xShapeInfo, void *dX,
                    sd::LongType const *dXShapeInfo, void *y, sd::LongType const *yShapeInfo, void *dy,
                    sd::LongType const *dyShapeInfo, int *dimension, int dimensionLength, bool descending) {
  try {
    auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
    auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(xShapeInfo, dimension, dimensionLength);
    // one block per TAD
    dim3 launchDims((int)tadPack.numberOfTads(), 256, 2048);
    // intentionally swapped: the values array is treated as the sort key
    auto xType = sd::ArrayOptions::dataType(yShapeInfo);
    auto yType = sd::ArrayOptions::dataType(xShapeInfo);
    BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey,
                          (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, nullptr, dimensionLength,
                           tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending),
                          SD_COMMON_TYPES, SD_COMMON_TYPES);

    sd::DebugHelper::checkErrorCode(stream, "sortTadValue(...) failed");
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
 * Sorts every TAD (sub-array along the given dimensions) of dX in place with
 * an odd-even transposition sort kernel. tadShapeInfo/tadOffsets describe the
 * TAD layout and are forwarded to the kernel unchanged; the TAD pack is only
 * used to size the grid (one block per TAD).
 *
 * Fix: removed the unused `context` local (computed but never read) and the
 * stale "to be implemented" comment.
 */
void sortTad(sd::Pointer *extraPointers, void *x, sd::LongType const *xShapeInfo, void *dX,
             sd::LongType const *dXShapeInfo, int *dimension, int dimensionLength, sd::LongType const *tadShapeInfo,
             sd::LongType const *tadOffsets, bool descending) {
  try {
    auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
    auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(xShapeInfo, dimension, dimensionLength);
    // NOTE(review): 33768 shared-memory bytes looks like a typo for 32768 — kept as-is to preserve behavior
    dim3 launchDims((int)tadPack.numberOfTads(), 512, 33768);
    auto xType = sd::ArrayOptions::dataType(xShapeInfo);
    BUILD_SINGLE_SELECTOR(
        xType, oesTadGeneric,
        (launchDims, stream, dX, dXShapeInfo, nullptr, dimensionLength, tadShapeInfo, tadOffsets, descending),
        SD_COMMON_TYPES);

    sd::DebugHelper::checkErrorCode(stream, "sortTad(...) failed");
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Intended (per its name) to sort COO-format sparse indices together with
// their values; unimplemented on this backend — always throws.
void sortCooIndices(sd::Pointer *extraPointers, sd::LongType *indices, void *values, sd::LongType length,
                    const sd::LongType *xShapeInfo) {
  throw std::runtime_error("sortCooIndices:: Not implemented yet");
}

// Intended to convert multi-dimensional indices into flat indices;
// unimplemented on this backend — always throws.
void ravelMultiIndex(sd::Pointer *extraPointers, sd::LongType *indices, sd::LongType *flatIndices, sd::LongType length,
                     sd::LongType *shapeInfo, int mode) {
  throw std::runtime_error("ravelMultiIndex:: Not implemented yet");
}

// Inverse of ravelMultiIndex: flat indices back to multi-dimensional ones;
// unimplemented on this backend — always throws.
void unravelIndex(sd::Pointer *extraPointers, sd::LongType *indices, sd::LongType *flatIndices, sd::LongType length,
                  sd::LongType *shapeInfo) {
  throw std::runtime_error("unravelIndex:: Not implemented yet");
}
// Memory-mapped file support is not available on this backend:
// mmapFile always returns nullptr and munmapFile is a no-op.
sd::LongType *mmapFile(sd::Pointer *extraPointers, const char *fileName, sd::LongType length) { return nullptr; }

void munmapFile(sd::Pointer *extraPointers, sd::LongType *ptrMap, sd::LongType length) {}
// Executes a FlatBuffers-serialized graph and returns a wrapper around its
// results. On failure the error is recorded on the default LaunchContext's
// error reference and nullptr is returned.
sd::graph::ResultWrapper *executeFlatGraph(sd::Pointer *extraPointers, sd::Pointer flatBufferPointer) {
  try {
    return sd::graph::GraphExecutioner::executeFlatBuffer(flatBufferPointer);
  } catch (std::exception &ex) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(ex.what());
    return nullptr;
  }
}
// Size in bytes of the serialized result held by the wrapper.
sd::LongType getResultWrapperSize(sd::graph::ResultWrapper *ptr) { return ptr->size(); }

// Raw pointer to the serialized result held by the wrapper.
sd::Pointer getResultWrapperPointer(sd::graph::ResultWrapper *ptr) { return ptr->pointer(); }

// Serialized listing of all registered custom operations.
const char *getAllCustomOps() { return sd::ops::OpRegistrator::getInstance().getAllCustomOperations(); }
/**
 * Shared implementation for calculateOutputShapes2: builds an op Context from
 * raw buffers/arguments and asks the op to infer its output shapes.
 *
 * Buffer layout convention: inputBuffers holds numInputShapes host pointers
 * followed by numInputShapes device pointers (see the e + numInputShapes
 * indexing below).
 */
sd::ShapeList *_calculateOutputShapes(sd::Pointer *extraPointers, sd::ops::DeclarableOp *op, sd::Pointer *inputBuffers,
                                      sd::Pointer *inputShapes, int numInputShapes, double *tArgs, int numTArgs,
                                      sd::LongType *iArgs, int numIArgs, bool *bArgs, int numBArgs, int *dArgs,
                                      int numDArgs) {
  sd::graph::VariableSpace varSpace;
  Context block(2, &varSpace);
  sd::ShapeList inShapes;

  // copy the scalar/flag arguments into the op context
  for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]);

  for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]);

  for (int e = 0; e < numBArgs; e++) block.getBArguments()->push_back(bArgs[e]);

  for (int e = 0; e < numDArgs; e++) block.getDArguments()->push_back((sd::DataType)dArgs[e]);

  for (int e = 0; e < numInputShapes; e++) {
    auto shape_ = reinterpret_cast<sd::LongType *>(inputShapes[e]);

    // we shouldn't copy buffer if that's empty array
    void *buffer_ = sd::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e];
    // device pointer lives in the second half of inputBuffers
    void *bufferD_ =
        sd::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e + numInputShapes];

    auto array = new sd::NDArray(buffer_, bufferD_, shape_);

    // block should contain references to proper variable
    // NOTE(review): arrays are handed to varSpace — presumably freed when
    // varSpace goes out of scope; confirm VariableSpace ownership semantics
    varSpace.putVariable(1, e, array);
    block.pickInput(1, e);

    inShapes.push_back(shape_);
  }

  auto shapeList = op->calculateOutputShape(&inShapes, block);

  // detach shapes from the workspace so they survive varSpace destruction
  if (varSpace.launchContext()->getWorkspace() != nullptr) shapeList->detach();

  return shapeList;
}
// Looks up a custom op by hash and delegates output-shape inference to the
// buffer-aware _calculateOutputShapes overload. Returns nullptr on failure,
// recording the error on the default LaunchContext.
sd::ShapeList *calculateOutputShapes2(sd::Pointer *extraPointers, sd::LongType hash, sd::Pointer *inputBuffers,
                                      sd::Pointer *inputShapes, int numInputShapes, double *tArgs, int numTArgs,
                                      sd::LongType *iArgs, int numIArgs, bool *bArgs, int numBArgs, int *dArgs,
                                      int numDArgs) {
  try {
    auto operation = sd::ops::OpRegistrator::getInstance().getOperation(hash);
    return _calculateOutputShapes(extraPointers, operation, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs,
                                  iArgs, numIArgs, bArgs, numBArgs, dArgs, numDArgs);
  } catch (std::exception &ex) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(ex.what());
    return nullptr;
  }
}
// Shape-only variant of output-shape inference: builds an op Context from the
// input shape infos plus t/i arguments (no input buffers involved) and asks
// the op for its output shapes.
sd::ShapeList *_calculateOutputShapes(sd::Pointer *extraPointers, sd::ops::DeclarableOp *op, sd::Pointer *inputShapes,
                                      int numInputShapes, double *tArgs, int numTArgs, sd::LongType *iArgs,
                                      int numIArgs) {
  Context block(1);
  sd::ShapeList inShapes;

  // copy scalar arguments into the op context
  for (int i = 0; i < numIArgs; i++) block.getIArguments()->push_back(iArgs[i]);
  for (int i = 0; i < numTArgs; i++) block.getTArguments()->push_back(tArgs[i]);

  for (int i = 0; i < numInputShapes; i++) inShapes.push_back(reinterpret_cast<sd::LongType *>(inputShapes[i]));

  return op->calculateOutputShape(&inShapes, block);
}
// Looks up a custom op by hash and delegates to the shape-only
// _calculateOutputShapes overload. Returns nullptr on failure, recording the
// error on the default LaunchContext.
sd::ShapeList *calculateOutputShapes(sd::Pointer *extraPointers, sd::LongType hash, sd::Pointer *inputShapes,
                                     int numInputShapes, double *tArgs, int numTArgs, sd::LongType *iArgs,
                                     int numIArgs) {
  try {
    auto operation = sd::ops::OpRegistrator::getInstance().getOperation(hash);
    return _calculateOutputShapes(extraPointers, operation, inputShapes, numInputShapes, tArgs, numTArgs, iArgs,
                                  numIArgs);
  } catch (std::exception &ex) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(ex.what());
    return nullptr;
  }
}
// Number of shape-info entries in the list.
sd::LongType getShapeListSize(sd::ShapeList *list) { return list->size(); }

// Shape info at position i.
sd::LongType const *getShape(sd::ShapeList *list, sd::LongType i) { return list->at(i); }
/**
 * Shared implementation behind execCustomOp: wraps raw host/device buffers
 * into NDArrays, executes the op, and streamlines outputs back into the order
 * the caller requested.
 *
 * Buffer layout convention: inputBuffers holds numInputs host pointers
 * followed by numInputs device pointers; outputBuffers follows the same
 * scheme (see the e + numInputs / e + numOutputs indexing below).
 */
static SD_INLINE sd::Status realExec(sd::ops::DeclarableOp *op, sd::Pointer *extraPointers, sd::LongType hash,
                                     sd::Pointer *inputBuffers, sd::Pointer *inputShapes, int numInputs,
                                     sd::Pointer *outputBuffers, sd::Pointer *outputShapes, int numOutputs,
                                     double *tArgs, int numTArgs, sd::LongType *iArgs, int numIArgs, bool *bArgs,
                                     int numBArgs, bool isInplace) {
  if (op == nullptr) sd_printf("Can't find requested operation: [%lld]\n", hash);

  // we're using the same fake nodeId everywhere here
  std::vector<sd::NDArray *> inputs(numInputs);
  std::vector<sd::NDArray *> outputs(numOutputs);
  std::vector<double> ttArgs(numTArgs);
  std::vector<bool> bbArgs(numBArgs);
  std::vector<sd::LongType> iiArgs(numIArgs);

  // filling block now with inputs; empty arrays carry no buffers
  for (int e = 0; e < numInputs; e++) {
    auto shape = reinterpret_cast<sd::LongType *>(inputShapes[e]);
    void *buffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e];
    void *bufferD = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e + numInputs];

    inputs[e] = new sd::NDArray(buffer, bufferD, shape);
  }

  // if not inplace - transferring output arrays
  if (!isInplace)
    for (int e = 0; e < numOutputs; e++) {
      // we want to keep original output shape intact
      // NOTE(review): copyShape allocates; ownership appears to pass to the
      // NDArray below — confirm it is released with the array
      auto shape = shape::copyShape(reinterpret_cast<sd::LongType *>(outputShapes[e]));
      void *buffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e];
      void *bufferD = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e + numOutputs];

      // FIXME: revisit this.
      // zero the output only if it does not alias any input buffer
      bool canNullify = true;
      for (int i = 0; i < numInputs; i++) {
        void *ibuffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[i];
        if (ibuffer == buffer) {
          canNullify = false;
          break;
        }
      }

      if (canNullify && buffer != nullptr)
        memset((uint8_t *)buffer, '\0',
               shape::length(shape) * DataTypeUtils::sizeOfElement(ArrayOptions::dataType(shape)));

      auto array = new sd::NDArray(buffer, bufferD, shape);
      outputs[e] = array;
    }

  for (int e = 0; e < numIArgs; e++) iiArgs[e] = iArgs[e];

  for (int e = 0; e < numTArgs; e++) ttArgs[e] = tArgs[e];

  for (int e = 0; e < numBArgs; e++) bbArgs[e] = bArgs[e];

  // hypothetically at this point we have everything filled
  auto dZ = op->execute(inputs, outputs, ttArgs, iiArgs, bbArgs, std::vector<sd::DataType>(), isInplace);
  // auto dZ = op->execute(inputs, ttArgs, iiArgs, isInplace);

  // restore the caller-requested element order on each output if it diverged
  if (!isInplace)
    for (int e = 0; e < numOutputs; e++) {
      if (outputs[e]->ordering() != shape::order(reinterpret_cast<sd::LongType *>(outputShapes[e])))
        outputs[e]->streamline(shape::order(reinterpret_cast<sd::LongType *>(outputShapes[e])));
    }

  for (auto v : inputs) delete v;

  for (auto v : outputs) delete v;

  return Status::OK;
}
// Public entry point for custom-op execution: resolves the op by hash and
// forwards everything to realExec. On failure the error is recorded on the
// default LaunchContext and BAD_INPUT is returned.
Status execCustomOp(sd::Pointer *extraPointers, sd::LongType hash, sd::Pointer *inputBuffers, sd::Pointer *inputShapes,
                    int numInputs, sd::Pointer *outputBuffers, sd::Pointer *outputShapes, int numOutputs, double *tArgs,
                    int numTArgs, sd::LongType *iArgs, int numIArgs, bool *bArgs, int numBArgs, bool isInplace) {
  try {
    auto operation = sd::ops::OpRegistrator::getInstance().getOperation(hash);
    return realExec(operation, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes,
                    numOutputs, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs, isInplace);
  } catch (std::exception &ex) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(ex.what());
    return Status::BAD_INPUT;
  }
}
/**
 * Executes a custom op against a pre-built op Context (fast path), then
 * synchronizes the context's stream before returning.
 */
Status execCustomOp2(sd::Pointer *extraPointers, sd::LongType hash, sd::Pointer opContext) {
  try {
    auto op = sd::ops::OpRegistrator::getInstance().getOperation(hash);
    auto context = reinterpret_cast<Context *>(opContext);

    auto result = op->execute(context);

    // wait for the op's kernels to complete; nonzero means a device-side failure
    auto res = hipStreamSynchronize(*context->launchContext()->getCudaStream());
    if (res != 0) throw sd::cuda_exception::build("customOp execution failed", res);

    // NOTE(review): syncToDevice on non-empty fastpath arrays after execution
    // appears intended to refresh device copies of host-modified buffers —
    // confirm against NDArray's host/device dirty-flag semantics
    for (auto v : context->fastpath_in()) {
      if (!v->isEmpty()) v->syncToDevice();
    }

    for (auto v : context->fastpath_out()) {
      if (!v->isEmpty()) v->syncToDevice();
    }

    return result;
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    return Status::BAD_INPUT;
  }
}
// Imports a FlatBuffers-serialized graph and registers it in the global
// GraphHolder under graphId. Returns BAD_INPUT on failure, recording the
// error on the default LaunchContext.
Status registerGraph(sd::Pointer *extraPointers, sd::LongType graphId, sd::Pointer flatBufferPointer) {
  try {
    auto imported = sd::graph::GraphExecutioner::importFromFlatPointer(flatBufferPointer);
    sd::graph::GraphHolder::getInstance().registerGraph(graphId, imported);
    return Status::OK;
  } catch (std::exception &ex) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(ex.what());
    return Status::BAD_INPUT;
  }
}
/**
 * Executes a previously registered graph against caller-supplied inputs.
 *
 * The registered graph's VariableSpace is cloned so the stored graph itself is
 * not mutated; input arrays are injected into the clone at the requested
 * variable indices, the graph runs, and (on success) the outputs are cloned
 * into the returned VariablesSet. The cloned space (and with it the injected
 * arrays) is destroyed before returning.
 */
static VariablesSet *executeStoredGraphT(sd::Pointer *extraPointers, sd::LongType graphId, sd::Pointer *inputBuffers,
                                         sd::Pointer *inputShapes, int *inputIndices, int numInputs) {
  auto graph = sd::graph::GraphHolder::getInstance().pullGraph(graphId);
  auto varSpace = graph->getVariableSpace()->clone();

  std::vector<sd::NDArray *> handles;

  for (int e = 0; e < numInputs; e++) {
    auto idx = inputIndices[e];

    // we'll delete this array later, together with cloned VariableSpace
    auto array = new sd::NDArray(inputBuffers[e], reinterpret_cast<sd::LongType *>(inputShapes[e]));
    handles.emplace_back(array);

    if (varSpace->hasVariable(idx)) {
      auto var = varSpace->getVariable(idx);
      // replace any pre-existing array at this index with the caller's input
      if (var->hasNDArray()) delete var->getNDArray();

      var->setNDArray(array);
    } else
      varSpace->putVariable(idx, array);
  }

  auto dZ = sd::graph::GraphExecutioner::execute(graph, varSpace);
  auto varSet = new sd::graph::VariablesSet(dZ);

  if (dZ == Status::OK) {
    // pull back results, and provide them
    auto outputs = graph->fetchOutputs();
    for (int e = 0; e < outputs->size(); e++) {
      // we're only getting variable ID/Index from original graph. values will be taken from cloned workspace
      std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index());

      auto var = varSpace->getVariable(varId);

      // clone so the result survives varSpace destruction below
      varSet->push_back(var->clone());
    }

    delete outputs;
  }

  delete varSpace;

  return varSet;
}
// Public wrapper around executeStoredGraphT: runs a registered graph with the
// given inputs. Returns nullptr on failure, recording the error on the
// default LaunchContext.
VariablesSet *executeStoredGraph(sd::Pointer *extraPointers, sd::LongType graphId, sd::Pointer *inputBuffers,
                                 sd::Pointer *inputShapes, int *inputIndices, int numInputs) {
  try {
    return executeStoredGraphT(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs);
  } catch (std::exception &ex) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(ex.what());
    return nullptr;
  }
}
// Number of variables in the set.
sd::LongType getVariablesSetSize(sd::graph::VariablesSet *set) { return set->size(); }

// Execution status the set was created with.
sd::Status getVariablesSetStatus(sd::graph::VariablesSet *set) { return set->status(); }

// Variable at position i.
sd::graph::Variable *getVariable(sd::graph::VariablesSet *set, sd::LongType i) { return set->at(i); }

// Graph-node id of the variable.
int getVariableId(sd::graph::Variable *variable) { return variable->id(); }

// Output index of the variable within its node.
int getVariableIndex(sd::graph::Variable *variable) { return variable->index(); }

// Human-readable variable name.
const char *getVariableName(sd::graph::Variable *variable) { return variable->getName()->c_str(); }

// Shape info of the variable's NDArray.
sd::LongType const *getVariableShape(sd::graph::Variable *variable) { return variable->getNDArray()->shapeInfo(); }

// Host buffer of the variable's NDArray.
void *getVariableBuffer(sd::graph::Variable *variable) { return variable->getNDArray()->buffer(); }
// Removes a registered graph from the global GraphHolder. Returns BAD_INPUT
// on failure, recording the error on the default LaunchContext.
sd::Status unregisterGraph(sd::Pointer *extraPointers, sd::LongType graphId) {
  try {
    sd::graph::GraphHolder::getInstance().dropGraphAny(graphId);
    return Status::OK;
  } catch (std::exception &ex) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(ex.what());
    return Status::BAD_INPUT;
  }
}
// Frees a pointer array previously handed across the JNI boundary.
void deletePointerArray(sd::Pointer pointer) {
  sd::Pointer *ptr = reinterpret_cast<sd::Pointer *>(pointer);
  delete[] ptr;
}

// Frees a char array previously handed across the JNI boundary.
void deleteCharArray(sd::Pointer pointer) {
  auto ptr = reinterpret_cast<char *>(pointer);
  delete[] ptr;
}

// Frees an int array previously handed across the JNI boundary.
void deleteIntArray(sd::Pointer pointer) {
  auto ptr = reinterpret_cast<int *>(pointer);
  delete[] ptr;
}

// Frees a LongType array previously handed across the JNI boundary.
void deleteLongArray(sd::Pointer pointer) {
  auto ptr = reinterpret_cast<sd::LongType *>(pointer);
  delete[] ptr;
}

// Frees a VariablesSet returned by executeStoredGraph.
void deleteVariablesSet(sd::graph::VariablesSet *pointer) { delete pointer; }

// Frees a ShapeList returned by the calculateOutputShapes* functions.
void deleteShapeList(sd::Pointer shapeList) {
  sd::ShapeList *list = reinterpret_cast<sd::ShapeList *>(shapeList);

  // list->destroy();
  delete list;
}
// Serialized listing of all tracked operations.
const char *getAllOperations() { return sd::OpTracker::getInstance().exportOperations(); }

// Allocates a new GraphState with the given id; caller must release it via deleteGraphState.
sd::Pointer getGraphState(sd::LongType id) { return (sd::Pointer) new sd::graph::GraphState(id); }

// Frees a GraphState previously created by getGraphState.
void deleteGraphState(sd::Pointer state) {
  auto stateP = reinterpret_cast<sd::graph::GraphState *>(state);
  delete stateP;
}
/**
 * Executes a logic op (e.g. while/if) against the VariableSpace carried by a
 * GraphState: inputs are injected as node-0 variables, referenced scopes are
 * attached as node inputs, the logic executor runs the node, and on success
 * the node-0 results are copied out into the caller's output buffers.
 */
sd::Status execCustomOpWithScope(sd::Pointer *extraPointers, sd::graph::GraphState *state, sd::LongType opHash,
                                 sd::LongType *scopes, int numScopes, sd::Pointer *inputBuffers,
                                 sd::Pointer *inputShapes, int numInputs, sd::Pointer *outputBuffers,
                                 sd::Pointer *outputShapes, int numOutputs) {
  /**
   * That's basically exec, with VariableSpace provided in GraphState:
   * depending on operation (i.e. while of if), different logic executors could be used
   */
  auto graph = state->graph();
  auto varSpace = state->variableSpace();

  // Node is dynamically created, and has nothing beyond it: only inputs and outputs
  // this node has id of 0, and inputs are
  Node node(OpType_LOGIC, opHash, 0);

  // mapping inputs
  for (int e = 0; e < numInputs; e++) {
    auto buffer = inputBuffers[e];
    auto shapeInfo = reinterpret_cast<sd::LongType *>(inputShapes[e]);

    auto array = new sd::NDArray(buffer, shapeInfo, varSpace->launchContext());

    // now we just put array to VarSpace
    varSpace->putVariable(0, e, array);
    node.pickInput(0, e);
  }

  // mapping scopes
  for (int e = 0; e < numScopes; e++) {
    // we should check scope existence in GraphState/Graph
    int scopeId = (int)scopes[e];
    if (!state->hasScope(scopeId)) {
      // sd_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId);
      return Logger::logKernelFailureMsg();
    }
    node.pickInput(scopeId, 0);
  }

  auto dZ = LogicExecutor::processNode(graph, &node);
  if (dZ != Status::OK) return dZ;

  // mapping outputs: copy each node-0 result into the caller-provided buffer
  for (int e = 0; e < numOutputs; e++) {
    auto buffer = outputBuffers[e];
    auto shapeInfo = reinterpret_cast<sd::LongType *>(outputShapes[e]);

    NDArray array(buffer, shapeInfo, varSpace->launchContext());

    // now we just put array to VarSpace to the same ID
    // varSpace->putVariable(0, e, array);

    auto t = varSpace->getVariable(0, e)->getNDArray();
    array.assign(t);
  }

  // removing input variables
  for (int e = 0; e < numInputs; e++) {
    varSpace->dropVariable(0, e);
  }

  // after some bla-bla-bla we should have Graph and Node for current op
  return Status::OK;
}
// Type-erased wrapper: casts the opaque state pointer to GraphState and
// forwards to the typed execCustomOpWithScope. Returns BAD_INPUT on failure,
// recording the error on the default LaunchContext.
sd::Status execCustomOpWithScope(sd::Pointer *extraPointers, sd::Pointer state, sd::LongType opHash,
                                 sd::LongType *scopes, int numScopes, sd::Pointer *inputBuffers,
                                 sd::Pointer *inputShapes, int numInputs, sd::Pointer *outputBuffers,
                                 sd::Pointer *outputShapes, int numOutputs) {
  try {
    auto graphState = reinterpret_cast<sd::graph::GraphState *>(state);
    return execCustomOpWithScope(extraPointers, graphState, opHash, scopes, numScopes, inputBuffers, inputShapes,
                                 numInputs, outputBuffers, outputShapes, numOutputs);
  } catch (std::exception &ex) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(ex.what());
    return sd::Status::BAD_INPUT;
  }
}
// Frees a ResultWrapper previously returned by executeFlatGraph().
void deleteResultWrapper(sd::Pointer ptr) {
  delete reinterpret_cast<sd::graph::ResultWrapper *>(ptr);
}
// Intended (per its name) to estimate how many elements of dX pass the given
// threshold, for threshold-encoding; unimplemented on this backend — always throws.
int estimateThreshold(sd::Pointer *extraPointers, sd::Pointer dX, sd::LongType const *dXShapeInfo, int N,
                      float threshold) {
  throw std::runtime_error("estimateThreshold: Not implemented yet");
}
/*
 * TypeDef:
 * void convertTypes(sd::Pointer *extras, int srcType, sd::Pointer dX, sd::LongType N, int dstType, sd::Pointer dZ);
 */
void convertTypes(sd::Pointer *extras, int srcType, sd::Pointer dX, sd::LongType N, int dstType, sd::Pointer dZ) {
try {
auto dx = reinterpret_cast<void *>(dX);
auto dz = reinterpret_cast<void *>(dZ);
if (srcType == ND4J_FLOAT8) {
if (dstType == ND4J_FLOAT8) {
// convertKernel<double, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
// sd::TypeCast::convertGenericCuda<sd::float8, sd::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
// sd::TypeCast::convertGenericCuda<sd::float8, sd::uint8>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
// sd::TypeCast::convertGenericCuda<sd::float8, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
// sd::TypeCast::convertGenericCuda<sd::float8, sd::int16>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
// sd::TypeCast::convertGenericCuda<sd::float8, sd::uint16>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_FLOAT32) {
// sd::TypeCast::convertGenericCuda<sd::float8, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
// sd::TypeCast::convertGenericCuda<sd::float8, double>(extras, dx, N, dz);
} else {
sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_INT8) {
if (dstType == ND4J_FLOAT8) {
// sd::TypeCast::convertGenericCuda<sd::int8, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
// convertKernel<sd::int8, sd::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<int8_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<int8_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<int8_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<int8_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: eventually we might want to add it
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<int8_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<int8_t, double>(extras, dx, N, dz);
} else {
sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_UINT8) {
if (dstType == ND4J_FLOAT8) {
// sd::TypeCast::convertGenericCuda<uint8_t, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<uint8_t, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<uint8_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<uint8_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<uint8_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<uint8_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: still might want to add
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<uint8_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<uint8_t, double>(extras, dx, N, dz);
} else {
sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_FLOAT16) {
if (dstType == ND4J_FLOAT8) {
// sd::TypeCast::convertGenericCuda<float16, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<float16, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<float16, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<float16, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<float16, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<float16, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: .... ^^^
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<float16, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<float16, double>(extras, dx, N, dz);
} else if (dstType == ND4J_THRESHOLD) {
// sd::convertToThreshold<float16>(nullptr, dx, N, dz);
} else {
sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_INT16) {
if (dstType == ND4J_FLOAT8) {
// sd::TypeCast::convertGenericCuda<int16_t, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<int16_t, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<int16_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<int16_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<int16_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<int16_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO...
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<int16_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<int16_t, double>(extras, dx, N, dz);
} else {
printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_FLOAT24) {
} else if (srcType == ND4J_FLOAT32) {
if (dstType == ND4J_FLOAT8) {
// sd::TypeCast::convertGenericCuda<float, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<float, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<float, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<float, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<float, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<float, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<float, double>(extras, dx, N, dz);
} else if (dstType == ND4J_THRESHOLD) {
// sd::convertToThreshold<float>(nullptr, dx, N, dz);
} else {
sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_DOUBLE) {
if (dstType == ND4J_FLOAT8) {
// sd::TypeCast::convertGenericCuda<double, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<double, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<double, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<double, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<double, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<double, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<double, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//
} else if (dstType == ND4J_THRESHOLD) {
// sd::convertToThreshold<double>(nullptr, dx, N, dz);
} else {
sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_THRESHOLD) {
if (dstType == ND4J_FLOAT16) {
// sd::convertFromThreshold<float16>(nullptr, dx, N, dz);
} else if (dstType == ND4J_FLOAT32) {
// sd::convertFromThreshold<float>(nullptr, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
// sd::convertFromThreshold<double>(nullptr, dx, N, dz);
} else {
sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else {
sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
// Wraps raw character data in a heap-allocated sd::utf8string and returns it as an
// opaque pointer. Ownership passes to the caller, who must release it via deleteUtf8String().
sd::Pointer createUtf8String(sd::Pointer *extraPointers, const char *string, int length) {
  return reinterpret_cast<sd::Pointer>(new sd::utf8string(string, length));
}
// Returns the byte length of an opaque utf8string created by createUtf8String().
sd::LongType getUtf8StringLength(sd::Pointer *extraPointers, sd::Pointer ptr) {
  return reinterpret_cast<sd::utf8string *>(ptr)->_length;
}
// Returns the raw character buffer of an opaque utf8string (still owned by the utf8string).
char *getUtf8StringBuffer(sd::Pointer *extraPointers, sd::Pointer ptr) {
  return reinterpret_cast<sd::utf8string *>(ptr)->_buffer;
}
// Releases a utf8string previously created by createUtf8String().
void deleteUtf8String(sd::Pointer *extraPointers, sd::Pointer ptr) { delete (reinterpret_cast<sd::utf8string *>(ptr)); }
///////////////////////////////////////////////////////////////////
// Applies an element-wise update from the e-th sub-array of vy onto the sub-array of vx
// selected by indexes[e]. Each target sub-array is "owned" by exactly one block
// (ownership derived from blockIdx), so different blocks never write the same x sub-array
// concurrently — NOTE(review): duplicate values in `indexes` would still race within the
// owning block across loop iterations; confirm callers guarantee unique indices.
// opCode selects the op: 0 add, 1 sub, 2 mul, 3 div, 4 reverse-sub, 5 reverse-div, 6 copy.
template <typename T, typename I>
SD_KERNEL static void scatterUpdateCuda(const int opCode, const int numOfSubArrs, void *vx,
                                        const sd::LongType *xShapeInfo, const sd::LongType *xOffsets, void *vy,
                                        const sd::LongType *yShapeInfo, const sd::LongType *yOffsets,
                                        const void *vindexes) {
  __shared__ T *x, *y;
  __shared__ sd::LongType arrLenX, arrLenY;
  auto indexes = reinterpret_cast<const I *>(vindexes);
  for (int e = 0; e < numOfSubArrs; e++) {
    const auto xIndex = indexes[e];
    // Ownership test depends only on blockIdx, so every thread in a block takes the
    // same branch (no intra-block divergence around the __syncthreads below).
    const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex : blockIdx.x == xIndex % gridDim.x;
    if (!isOwner) continue;
    if (threadIdx.x == 0) {
      // Thread 0 publishes the per-sub-array base pointers and lengths to the block.
      x = reinterpret_cast<T *>(vx) + xOffsets[xIndex];
      y = reinterpret_cast<T *>(vy) + yOffsets[e];
      arrLenX = shape::length(xShapeInfo);
      arrLenY = shape::length(yShapeInfo);
    }
    __syncthreads();
    // Uniform early-out: lengths are the same for all threads, so the whole block returns together.
    if (arrLenX != arrLenY) return;
    for (sd::LongType i = threadIdx.x; i < arrLenX; i += blockDim.x) {
      const auto xOffset = shape::getIndexOffset(i, xShapeInfo);
      const auto yOffset = shape::getIndexOffset(i, yShapeInfo);
      switch (opCode) {
        case 0:
          x[xOffset] += y[yOffset];
          break;
        case 1:
          x[xOffset] -= y[yOffset];
          break;
        case 2:
          x[xOffset] *= y[yOffset];
          break;
        case 3:
          x[xOffset] /= y[yOffset];
          break;
        case 4:
          x[xOffset] = y[yOffset] - x[xOffset];
          break;
        case 5:
          x[xOffset] = y[yOffset] / x[xOffset];
          break;
        case 6:
          x[xOffset] = y[yOffset];
          break;
        default:
          continue;  // unknown opCode: leave x untouched for this element
      }
    }
    // Keep the block together before shared x/y/arrLen* are overwritten in the next iteration.
    __syncthreads();
  }
}
// Host-side launcher for scatterUpdateCuda<T, I>: 512 blocks x 256 threads on the given stream.
// The third launch argument (SD_MAX_NUM_THREADS bytes of dynamic shared memory) is kept
// from the original launch even though the kernel only uses static __shared__ storage —
// NOTE(review): it looks unnecessary and costs occupancy; confirm before removing.
// Fix: the second parameter was declared `const sd::LongType const *` — a duplicate
// cv-qualifier that is ill-formed C++ (accepted only as a compiler extension); the
// collapsed type `const sd::LongType *` is identical for all callers.
template <typename T, typename I>
SD_HOST static void scatterUpdateCudaLauncher(const hipStream_t *stream, const int opCode, const int numOfSubArrs,
                                              void *vx, const sd::LongType *xShapeInfo,
                                              const sd::LongType *xOffsets, void *vy, const sd::LongType *yShapeInfo,
                                              const sd::LongType *yOffsets, const void *indexes) {
  hipLaunchKernelGGL(( scatterUpdateCuda<T, I>), dim3(512), dim3(256), SD_MAX_NUM_THREADS, *stream, opCode, numOfSubArrs, vx, xShapeInfo, xOffsets, vy,
                                                           yShapeInfo, yOffsets, indexes);
}
//////////////////////////////////////////////////////////////////////////
// Public entry point for scatter-update: dispatches scatterUpdateCudaLauncher over the
// (data type, index type) pair taken from the host shape infos. Only the device-side
// buffers/offsets are forwarded; the h* twins are part of the legacy interface.
void scatterUpdate(sd::Pointer *extraPointers, int opCode, int numOfSubArrs, void *hX, sd::LongType const *hXShapeInfo,
                   sd::LongType const *hXOffsets, void *dX, sd::LongType const *dXShapeInfo,
                   sd::LongType const *dXOffsets, void *hY, sd::LongType const *hYShapeInfo,
                   sd::LongType const *hYOffsets, void *dY, sd::LongType const *dYShapeInfo,
                   sd::LongType const *dYOffsets, void *hIindexes, sd::LongType const *hIndicesShapeInfo,
                   void *dIindexes, sd::LongType const *dIndicesShapeInfo) {
  try {
    // extraPointers[1] carries the execution stream, by convention of this interface.
    auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
    auto type = ArrayOptions::dataType(hXShapeInfo);
    auto iType = ArrayOptions::dataType(hIndicesShapeInfo);
    // Instantiates the launcher for every (common type, indexing type) combination.
    BUILD_DOUBLE_SELECTOR(
        type, iType, scatterUpdateCudaLauncher,
        (stream, opCode, numOfSubArrs, dX, dXShapeInfo, dXOffsets, dY, dYShapeInfo, dYOffsets, dIindexes),
        SD_COMMON_TYPES, SD_INDEXING_TYPES);
    sd::DebugHelper::checkErrorCode(stream, "scatterUpdate(...) failed");
  } catch (std::exception &e) {
    // Errors are reported through the default context's error reference, not rethrown.
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Debug hook: builds a temporary NDArray view over the given host/device buffers and fills
// the caller-provided DebugInfo structure with statistics about it.
void inspectArray(sd::Pointer *extraPointers, sd::Pointer buffer, sd::LongType *shapeInfo, sd::Pointer specialBuffer,
                  sd::LongType *specialShapeInfo, sd::Pointer debugInfo) {
  try {
    // extraPointers slots: [1] stream, [4] reduction ptr, [5] allocation ptr, [3] unknown here —
    // matches the LaunchContext constructor used throughout this file.
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    auto p = reinterpret_cast<sd::DebugInfo *>(debugInfo);
    NDArray array(buffer, specialBuffer, shapeInfo, &lc);
    sd::DebugHelper::retrieveDebugStatistics(p, &array);
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Probe kernel used by tryPointer(): touches every byte of `p` (summing into a per-block
// shared accumulator) so that an invalid device pointer faults here instead of corrupting
// a later launch. Block (0,0) prints its partial sum as a liveness marker.
// Fix: the shared accumulator was never initialized, so atomicAdd folded the bytes into
// garbage; thread 0 now zeroes it before use and a barrier makes the store visible.
void SD_KERNEL tryPointerKernel(void *p, int len) {
  auto buf = reinterpret_cast<int8_t *>(p);
  auto tid = threadIdx.x + blockIdx.x * blockDim.x;
  __shared__ int b;
  if (threadIdx.x == 0) b = 0;  // shared memory is uninitialized by default
  __syncthreads();
  if (tid < len) atomicAdd(&b, buf[tid]);
  __syncthreads();
  if (threadIdx.x == 0 && blockIdx.x == 0) printf("Pointer check complete: %i\n", b);
}
// Validates that `p` is a readable device pointer of at least `len` bytes by running
// tryPointerKernel over it on a private stream and synchronizing. A failure surfaces as a
// cuda_exception, which is converted into the default context's error reference.
// Fix: the stream was leaked when synchronization failed, because the throw happened
// before hipStreamDestroy(); the stream is now destroyed before the error is raised.
void tryPointer(sd::Pointer extra, sd::Pointer p, int len) {
  try {
    hipStream_t stream;
    hipStreamCreate(&stream);
    // len + 64 bytes of dynamic shared memory — NOTE(review): the kernel only uses static
    // __shared__ storage, so this allocation looks unnecessary; kept for parity.
    hipLaunchKernelGGL(( tryPointerKernel), dim3(256), dim3(512), len + 64, stream, p, len);
    auto e = hipStreamSynchronize(stream);
    hipStreamDestroy(stream);  // always release the stream, even on failure
    if (e != 0) throw sd::cuda_exception::build("tryPointer failed", e);
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
int dataTypeFromNpyHeader(void *header) { return (int)cnpy::dataTypeFromHeader(reinterpret_cast<char *>(header)); }
// Convenience wrapper over shapeBufferEx(): translates the boolean "empty" flag into the
// ARRAY_EMPTY extras bit expected by the extended variant.
OpaqueConstantShapeBuffer *shapeBuffer(int rank, sd::LongType *shape, sd::LongType *strides, sd::DataType dtype,
                                       char order, sd::LongType ews, bool empty) {
  const sd::LongType extras = empty ? ARRAY_EMPTY : 0;
  return shapeBufferEx(rank, shape, strides, dtype, order, ews, extras);
}
// Builds (or fetches from the shape cache) a constant shape buffer for the given descriptor
// parameters. The returned object is heap-allocated and must be released with
// deleteConstantShapeBuffer(); returns nullptr on failure (error goes to the error reference).
OpaqueConstantShapeBuffer *shapeBufferEx(int rank, sd::LongType *shape, sd::LongType *strides, sd::DataType dtype,
                                         char order, sd::LongType ews, sd::LongType extras) {
  try {
    auto buffer = new ConstantShapeBuffer();
    // ConstantShapeHelper caches shape infos, so the copied-in buffer shares cached storage.
    *buffer = sd::ConstantShapeHelper::getInstance().bufferForShapeInfo(
        ShapeDescriptor(dtype, order, shape, strides, rank, ews, extras));
    return buffer;
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    return nullptr;
  }
}
// Releases a shape buffer returned by shapeBuffer()/shapeBufferEx().
void deleteConstantShapeBuffer(OpaqueConstantShapeBuffer *ptr) { delete ptr; }
// Releases a data buffer returned by the constantBuffer*() family.
void deleteConstantDataBuffer(OpaqueConstantDataBuffer *ptr) { delete ptr; }
// Releases a TAD pack previously handed out to the caller.
void deleteTadPack(sd::TadPack *ptr) { delete ptr; }
// Checks the caller-supplied cuBLAS version triple against the one recorded in the
// Environment singleton. On mismatch, logs the expected/actual versions and records
// error code 152 in the default context's error reference.
bool isBlasVersionMatches(int major, int minor, int build) {
  const bool matches = major == Environment::getInstance()._blasMajorVersion &&
                       minor == Environment::getInstance()._blasMinorVersion &&
                       build == Environment::getInstance()._blasPatchVersion;
  if (!matches) {
    sd_printf("CUDA/cuBLAS version mismatch. Expected: %i.%i.%i but got %i.%i.%i instead\n",
              Environment::getInstance()._blasMajorVersion, Environment::getInstance()._blasMinorVersion,
              Environment::getInstance()._blasPatchVersion, major, minor, build);
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(152);
    errRef->setErrorMessage("CUDA/cuBLAS version mismatch");
  }
  return matches;
}
// Builds (or fetches from cache) a constant data buffer from raw LongType values.
sd::ConstantDataBuffer *constantBufferLong(sd::DataType dtype, sd::LongType const *data, int length) {
  return sd::ConstantHelper::getInstance().constantBuffer(ConstantDescriptor(data, length), dtype);
}
// Builds (or fetches from cache) a constant data buffer from raw double values.
sd::ConstantDataBuffer *constantBufferDouble(sd::DataType dtype, double *data, int length) {
  return sd::ConstantHelper::getInstance().constantBuffer(ConstantDescriptor(data, length), dtype);
}
// Builds (or fetches from cache) a constant data buffer from a ready-made descriptor.
sd::ConstantDataBuffer *constantBuffer(sd::DataType dtype, sd::ConstantDescriptor *descriptor) {
  return sd::ConstantHelper::getInstance().constantBuffer(*descriptor, dtype);
}
// Accessors below expose the host ("primary") and device ("special") pointers plus
// element count and element size of a constant data buffer.
sd::Pointer getConstantDataBufferPrimary(sd::ConstantDataBuffer *dbf) { return dbf->primary(); }
sd::Pointer getConstantDataBufferSpecial(sd::ConstantDataBuffer *dbf) { return dbf->special(); }
sd::LongType getConstantDataBufferLength(sd::ConstantDataBuffer *dbf) { return dbf->length(); }
sd::LongType getConstantDataBufferSizeOf(sd::ConstantDataBuffer *dbf) { return dbf->sizeOf(); }
// Same pair of accessors for constant shape buffers; const is cast away because the C-style
// interface traffics in non-const sd::Pointer.
sd::Pointer getConstantShapeBufferPrimary(OpaqueConstantShapeBuffer *dbf) {
  return const_cast<sd::LongType *>(dbf->primary());
}
sd::Pointer getConstantShapeBufferSpecial(OpaqueConstantShapeBuffer *dbf) {
  return const_cast<sd::LongType *>(dbf->special());
}
// Graph-context lifecycle and configuration shims: each function forwards one setter of
// sd::graph::Context through the flat C-style interface.
sd::graph::Context *createGraphContext(int nodeId) { return new sd::graph::Context(nodeId); }
// Returns a pointer into the context-owned RandomGenerator (context retains ownership).
sd::graph::RandomGenerator *getGraphContextRandomGenerator(sd::graph::Context *ptr) { return &ptr->randomGenerator(); }
void markGraphContextInplace(sd::graph::Context *ptr, bool reallyInplace) { ptr->markInplace(reallyInplace); }
// Attaches CUDA execution resources (stream + scratch pointers) to the context.
void setGraphContextCudaContext(sd::graph::Context *ptr, void *stream, void *reductionPointer,
                                void *allocationPointer) {
  ptr->setCudaContext(stream, reductionPointer, allocationPointer);
}
// Legacy raw-pointer variants: register input/output arrays by buffer + shape info pairs.
void setGraphContextInputArray(sd::graph::Context *ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer,
                               void *specialShapeInfo) {
  ptr->setInputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo);
}
void setGraphContextOutputArray(sd::graph::Context *ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer,
                                void *specialShapeInfo) {
  ptr->setOutputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo);
}
// Newer OpaqueDataBuffer variants of the same registration calls.
void setGraphContextInputBuffer(OpaqueContext *ptr, int index, OpaqueDataBuffer *buffer, void *shapeInfo,
                                void *specialShapeInfo) {
  ptr->setInputArray(index, buffer, shapeInfo, specialShapeInfo);
}
void setGraphContextOutputBuffer(OpaqueContext *ptr, int index, OpaqueDataBuffer *buffer, void *shapeInfo,
                                 void *specialShapeInfo) {
  ptr->setOutputArray(index, buffer, shapeInfo, specialShapeInfo);
}
// Scalar-argument setters: floating point (T), integer (I), and boolean (B) op arguments.
void setGraphContextTArguments(sd::graph::Context *ptr, double *arguments, int numberOfArguments) {
  ptr->setTArguments(arguments, numberOfArguments);
}
void setGraphContextIArguments(sd::graph::Context *ptr, sd::LongType *arguments, int numberOfArguments) {
  ptr->setIArguments(arguments, numberOfArguments);
}
void setGraphContextBArguments(sd::graph::Context *ptr, bool *arguments, int numberOfArguments) {
  ptr->setBArguments(arguments, numberOfArguments);
}
// Converts raw integer data-type codes into sd::DataType values and installs them as the
// context's D-arguments.
void setGraphContextDArguments(OpaqueContext *ptr, int *arguments, int numberOfArguments) {
  std::vector<sd::DataType> dtypes;
  dtypes.reserve(numberOfArguments);
  for (int e = 0; e < numberOfArguments; e++) dtypes.push_back(static_cast<sd::DataType>(arguments[e]));
  ptr->setDArguments(dtypes);
}
// Releases a context created by createGraphContext().
void deleteGraphContext(sd::graph::Context *ptr) { delete ptr; }
// Creates a standalone RandomGenerator seeded by (rootSeed, nodeSeed); caller owns it and
// must release it with deleteRandomGenerator(). Returns nullptr on failure.
sd::graph::RandomGenerator *createRandomGenerator(sd::LongType rootSeed, sd::LongType nodeSeed) {
  try {
    return new sd::graph::RandomGenerator(rootSeed, nodeSeed);
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    return nullptr;
  }
}
// State accessors/mutators for the generator's root and node seeds.
sd::LongType getRandomGeneratorRootState(sd::graph::RandomGenerator *ptr) { return ptr->rootState(); }
sd::LongType getRandomGeneratorNodeState(sd::graph::RandomGenerator *ptr) { return ptr->nodeState(); }
void setRandomGeneratorStates(sd::graph::RandomGenerator *ptr, sd::LongType rootSeed, sd::LongType nodeSeed) {
  ptr->setStates(rootSeed, nodeSeed);
}
// "Relative" draws: deterministic value for a given (generator state, index) pair —
// the generator state itself is not advanced by these calls.
float getRandomGeneratorRelativeFloat(sd::graph::RandomGenerator *ptr, sd::LongType index) {
  return ptr->relativeT<float>(index);
}
double getRandomGeneratorRelativeDouble(sd::graph::RandomGenerator *ptr, sd::LongType index) {
  return ptr->relativeT<double>(index);
}
int getRandomGeneratorRelativeInt(sd::graph::RandomGenerator *ptr, sd::LongType index) {
  return ptr->relativeInt(index);
}
sd::LongType getRandomGeneratorRelativeLong(sd::graph::RandomGenerator *ptr, sd::LongType index) {
  return ptr->relativeLong(index);
}
// "Next" draws: produce a value at relative index 1 and then advance the generator via
// rewindH(0xdeadbeef), so successive calls yield a fresh stream of values.
int getRandomGeneratorNextInt(sd::graph::RandomGenerator *ptr) {
  // to nullify _nodeState._long ^= (steps ^ 0xdeadbeef);
  // we will use step = 0xdeadbeef
  auto result = ptr->relativeInt(1);
  ptr->rewindH(0xdeadbeef);
  return result;
}
sd::LongType getRandomGeneratorNextLong(sd::graph::RandomGenerator *ptr) {
  auto result = ptr->relativeLong(1);
  ptr->rewindH(0xdeadbeef);
  return result;
}
float getRandomGeneratorNextFloat(sd::graph::RandomGenerator *ptr) {
  auto result = ptr->relativeT<float>(1);
  ptr->rewindH(0xdeadbeef);
  return result;
}
double getRandomGeneratorNextDouble(sd::graph::RandomGenerator *ptr) {
  auto result = ptr->relativeT<double>(1);
  ptr->rewindH(0xdeadbeef);
  return result;
}
// Releases a generator created by createRandomGenerator().
void deleteRandomGenerator(sd::graph::RandomGenerator *ptr) { delete ptr; }
// Translates a raw numpy (.npy) array blob into a cached nd4j shape-info buffer.
// Distinguishes three cases: a scalar ({0} shape vector), an empty array (any dimension 0),
// and the ordinary case; order ('c'/'f') is taken from the npy header's fortranOrder flag.
// Returns nullptr on failure (error recorded in the error reference).
sd::Pointer shapeBufferForNumpy(sd::Pointer npyArray) {
  try {
    cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray));
    unsigned int shapeSize = arr.shape.size();
    std::vector<sd::LongType> shape(shapeSize);
    bool _empty = false;
    for (unsigned int i = 0; i < shapeSize; i++) {
      shape[i] = arr.shape[i];
      if (arr.shape[i] == 0) _empty = true;  // any zero dimension marks the array empty
    }
    auto dtype = cnpy::dataTypeFromHeader(reinterpret_cast<char *>(npyArray));
    sd::LongType *shapeBuffer;
    if (shape.size() == 1 && shape[0] == 0) {
      // scalar case
      shapeBuffer = sd::ShapeBuilders::createScalarShapeInfo(dtype);
    } else if (_empty) {
      if (shapeSize > 0)
        shapeBuffer = sd::ShapeBuilders::emptyShapeInfo(dtype, arr.fortranOrder ? 'f' : 'c', shape);
      else
        shapeBuffer = sd::ShapeBuilders::emptyShapeInfo(dtype);
    } else {
      shapeBuffer = sd::ShapeBuilders::createShapeInfo(dtype, arr.fortranOrder ? 'f' : 'c', shape);
    }
    // createFromExisting(.., true) takes ownership of shapeBuffer and returns the cached copy.
    return (sd::Pointer)(sd::ConstantShapeHelper::getInstance().createFromExisting(
        shapeBuffer, true));  // TO DO: this can lead to unpleasant crash sometimes
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    return nullptr;
  }
}
// Amount of constant memory currently cached on the given device.
sd::LongType getCachedMemory(int deviceId) { return sd::ConstantHelper::getInstance().getCachedAmount(deviceId); }
// Accessors over the process-wide default LaunchContext and over an explicit context (lc*).
sd::LaunchContext *defaultLaunchContext() { return LaunchContext::defaultContext(); }
sd::Pointer lcScalarPointer(OpaqueLaunchContext *lc) { return lc->getScalarPointer(); }
sd::Pointer lcReductionPointer(OpaqueLaunchContext *lc) { return lc->getReductionPointer(); }
sd::Pointer lcAllocationPointer(OpaqueLaunchContext *lc) { return lc->getAllocationPointer(); }
sd::Pointer lcExecutionStream(OpaqueLaunchContext *lc) { return lc->getCudaStream(); }
sd::Pointer lcCopyStream(OpaqueLaunchContext *lc) { return lc->getCudaSpecialStream(); }
sd::Pointer lcBlasHandle(OpaqueLaunchContext *lc) { return lc->getCublasHandle(); }
sd::Pointer lcSolverHandle(OpaqueLaunchContext *lc) { return lc->getCusolverHandle(); }
// Last error recorded against the default context by the catch blocks in this file.
int lastErrorCode() { return sd::LaunchContext::defaultContext()->errorReference()->errorCode(); }
const char *lastErrorMessage() { return sd::LaunchContext::defaultContext()->errorReference()->errorMessage(); }
void ctxShapeFunctionOverride(OpaqueContext *ptr, bool reallyOverride) {
  ptr->setShapeFunctionOverride(reallyOverride);
}
// Clears the context's fast-path array registrations.
void ctxPurge(OpaqueContext *ptr) { ptr->clearFastPath(); }
// CPU-binary capability probes: fixed answers on the CUDA backend.
int binaryLevel() { return 0; }
int optimalLevel() { return 0; }
bool isMinimalRequirementsMet() { return true; }
bool isOptimalRequirementsMet() { return true; }
void ctxAllowHelpers(OpaqueContext *ptr, bool reallyAllow) { ptr->allowHelpers(reallyAllow); }
// Clamps out-of-range mode codes to 0 before forwarding; valid modes are 0..2.
void ctxSetExecutionMode(OpaqueContext *ptr, int execMode) {
  if (execMode < 0 || execMode > 2) execMode = 0;
  ptr->setExecutionMode((samediff::ExecutionMode)execMode);
}
// Wraps externally owned host ("primary") and/or device ("special") allocations in an
// InteropDataBuffer without allocating storage of its own (elements=0 in the allocate call).
// NOTE(review): setPrimary/setSpecial receive `elements` where dbSetPrimaryBuffer passes a
// byte count — confirm whether callers pass elements or bytes here.
OpaqueDataBuffer *dbCreateExternalDataBuffer(sd::LongType elements, int dataType, sd::Pointer primary,
                                             sd::Pointer special) {
  auto buffer = dbAllocateDataBuffer(0, dataType, false);
  if (primary != nullptr) buffer->setPrimary(primary, elements);
  if (special != nullptr) buffer->setSpecial(special, elements);
  return buffer;
}
// Public alias kept for interface compatibility; see allocateDataBuffer below.
OpaqueDataBuffer *dbAllocateDataBuffer(sd::LongType elements, int dataType, bool allocateBoth) {
  return allocateDataBuffer(elements, dataType, allocateBoth);
}
// Allocates an InteropDataBuffer of `elements` items of the given type; when allocateBoth
// is true both host and device buffers are created. Returns nullptr on failure.
OpaqueDataBuffer *allocateDataBuffer(sd::LongType elements, int dataType, bool allocateBoth) {
  try {
    auto dtype = DataTypeUtils::fromInt(dataType);
    // InteropDataBuffer takes a size in bytes, hence elements * sizeof(dtype).
    return new sd::InteropDataBuffer(elements * DataTypeUtils::sizeOf(dtype), dtype, allocateBoth);
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    return nullptr;
  }
}
// Thin shims over OpaqueDataBuffer (InteropDataBuffer): pointer access, external-buffer
// attachment, allocation, expansion, sync and tick bookkeeping for host/device copies.
sd::Pointer dbPrimaryBuffer(OpaqueDataBuffer *dataBuffer) { return dataBuffer->primary(); }
sd::Pointer dbSpecialBuffer(OpaqueDataBuffer *dataBuffer) { return dataBuffer->special(); }
void deleteDataBuffer(OpaqueDataBuffer *dataBuffer) { delete dataBuffer; }
// Attach externally owned memory; sizes here are byte counts.
void dbSetPrimaryBuffer(OpaqueDataBuffer *dataBuffer, sd::Pointer primaryBuffer, sd::LongType numBytes) {
  dataBuffer->setPrimary(primaryBuffer, numBytes);
}
void dbSetSpecialBuffer(OpaqueDataBuffer *dataBuffer, sd::Pointer specialBuffer, sd::LongType numBytes) {
  dataBuffer->setSpecial(specialBuffer, numBytes);
}
void dbAllocatePrimaryBuffer(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->allocatePrimary(); }
void dbAllocateSpecialBuffer(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->allocateSpecial(); }
// Grows the underlying buffer to hold `elements` items (converted to bytes here).
void dbExpandBuffer(OpaqueDataBuffer *dataBuffer, sd::LongType elements) {
  try {
    dataBuffer->dataBuffer()->expand(elements * DataTypeUtils::sizeOf(dataBuffer->dataBuffer()->getDataType()));
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Creates a view over [offset, offset+length) of an existing buffer; caller owns the view object.
OpaqueDataBuffer *dbCreateView(OpaqueDataBuffer *dataBuffer, sd::LongType length, sd::LongType offset) {
  return new InteropDataBuffer(*dataBuffer, length, offset);
}
// Reference count of the underlying buffer; 0 for a null handle.
int dbUseCount(OpaqueDataBuffer* dataBuffer){
  if(dataBuffer) return dataBuffer->useCount();
  return 0;
}
void dbSyncToSpecial(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->syncToSpecial(); }
void dbSyncToPrimary(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->syncToPrimary(nullptr); }
// Tick helpers mark host/device copies as read or written for staleness tracking.
void dbTickHostRead(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->readPrimary(); }
void dbTickHostWrite(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->writePrimary(); }
void dbTickDeviceRead(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->readSpecial(); }
void dbTickDeviceWrite(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->writeSpecial(); }
void dbExpand(OpaqueDataBuffer *dataBuffer, sd::LongType elements) { dataBuffer->expand(elements); }
void dbClose(OpaqueDataBuffer *dataBuffer) { dataBuffer->getDataBuffer()->close(); }
int dbDeviceId(OpaqueDataBuffer *dataBuffer) { return dataBuffer->deviceId(); }
void dbSetDeviceId(OpaqueDataBuffer *dataBuffer, int deviceId) { dataBuffer->setDeviceId(deviceId); }
// Locality code: 0 = both copies current, -1 = only host ("primary") current, 1 = only device.
int dbLocality(OpaqueDataBuffer *dataBuffer) {
  auto p = dataBuffer->dataBuffer()->isPrimaryActual();
  auto d = dataBuffer->dataBuffer()->isSpecialActual();
  if (p && d)
    return 0;
  else if (p)
    return -1;
  else
    return 1;
}
// Intentional no-op on this backend — presumably only the VEDA (NEC VE) backend uses a
// device library folder; kept so the shared interface stays link-complete.
void setVedaDeviceLibFolder(std::string path){
}
| d54ca247978400881b9828b88e803b09875f709d.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#include <cuda.h>
#include <exceptions/cuda_exception.h>
#include <exceptions/datatype_exception.h>
#include <execution/AffinityManager.h>
#include <graph/GraphExecutioner.h>
#include <graph/GraphHolder.h>
#include <helpers/BlasHelper.h>
#include <helpers/CudaLaunchHelper.h>
#include <helpers/DebugHelper.h>
#include <helpers/PointersManager.h>
#include <helpers/threshold.h>
#include <legacy/NativeOpExecutioner.h>
#include <legacy/NativeOps.h>
#include <loops/reduce_bool.h>
#include <loops/reduce_long.h>
#include <loops/scalar.h>
#include <loops/transform_any.h>
#include <ops/declarable/CustomOperations.h>
#include <ops/specials_cuda.h>
#include <system/buffer.h>
//#include <sys/time.h>
#include <curand.h>
#include <helpers/DebugHelper.h>
using namespace sd;
#include <loops/special_kernels.h>
// Per-process device metadata and launch-tuning globals shared by the helpers below.
cudaDeviceProp *deviceProperties;                                   // filled elsewhere, indexed by device id
cudaFuncAttributes *funcAttributes = new cudaFuncAttributes[64];    // cached kernel attributes
int blockLimit = 128;
int maxThreads = 512;
bool allowedP2P = false;    // whether peer-to-peer access is permitted
bool supportedP2P = false;  // whether peer-to-peer access is supported by the hardware
#ifdef SD_EXPERIMENTAL_ENABLED
bool experimentalSupport = true;
#else
bool experimentalSupport = false;
#endif
int minThreads = 32;
__constant__ char deviceConstantMemory[49152];  // 48 KB of device constant memory
// this method just does type conversion in fancy way
int getDeviceId(sd::Pointer ptrToDeviceId) { return (int)(sd::LongType)ptrToDeviceId; }
/*
 * Basic CUDA constants here: number of blocks per MP
 */
int getDeviceBlockThreshold(int deviceId) {
  // Resident blocks per multiprocessor, keyed off the compute-capability major version.
  const int ccMajor = deviceProperties[deviceId].major;
  if (ccMajor >= 5) return 32;
  if (ccMajor == 3) return 16;
  return 8;  // CC 4.x (nonexistent) and anything below 3.x fall back to 8
}
/*
 * This message returns shared memory threshold value. default overflow ratio is 0.3
 */
int getDeviceSharedThreshold(int deviceId) {
  const int ccMajor = deviceProperties[deviceId].major;
  const int ccMinor = deviceProperties[deviceId].minor;
  // please note threshold isn't multiple of 32, and that's NOT a mistake
  int shmemThreshold = 49152;  // default for unlisted architectures
  if (ccMajor == 6 && ccMinor == 0)
    shmemThreshold = 65536;
  else if (ccMajor == 6 && ccMinor == 1)
    shmemThreshold = 49152;
  else if (ccMajor == 5 && ccMinor == 2)
    shmemThreshold = 98304;
  else if (ccMajor == 5)
    shmemThreshold = 65536;
  else if (ccMajor == 3 && ccMinor == 7)
    shmemThreshold = 114688;
  // Apply the 0.3 overflow ratio; the double result is truncated to int, as before.
  return shmemThreshold / 0.3;
}
// Allocates a host+device buffer holding a scalar shape-info and uploads it to the GPU.
sd::buffer::Buffer<sd::LongType> *createScalarBuffer(cudaStream_t stream) {
  auto scalarShapeInfo = shape::createScalarShapeInfo();
  auto buff = sd::buffer::createBuffer(scalarShapeInfo, shape::shapeInfoLength(2), stream);
  sd::buffer::copyDataToGpu(&buff, stream);
  return buff;
}
// Owns the pair of host/device buffers describing a scalar: a shape-info buffer and a
// one-element "dimension" buffer holding SD_MAX_DIMENSION. Frees both on destruction.
class ScalarShapeInformation {
 private:
  sd::buffer::Buffer<sd::LongType> *scalarDimension;
  sd::buffer::Buffer<sd::LongType> *scalarShapeInfo;
  //    std::thread::id threadId;
 public:
  ScalarShapeInformation(cudaStream_t stream) {
    auto scalarDimensionBuff = reinterpret_cast<sd::LongType *>(malloc(sizeof(sd::LongType)));
    CHECK_ALLOC(scalarDimensionBuff, "Failed to allocate ShapeInfoBuffer", sizeof(sd::LongType));
    scalarDimensionBuff[0] = SD_MAX_DIMENSION;
    scalarDimension = sd::buffer::createBuffer(scalarDimensionBuff, 1, stream);
    scalarShapeInfo = createScalarBuffer(stream);
    //    threadId = std::this_thread::get_id();
  }
  ~ScalarShapeInformation() {
    sd::buffer::freeBuffer(&scalarShapeInfo);
    sd::buffer::freeBuffer(&scalarDimension);
  }
  // Host- and device-side views over the two buffers.
  sd::LongType *getShapeInfoHostPointer() { return scalarShapeInfo->data; }
  sd::LongType *getShapeInfoGpuPointer() { return scalarShapeInfo->gData; }
  sd::LongType *getDimensionHostPointer() { return scalarDimension->data; }
  sd::LongType *getDimensionGpuPointer() { return scalarDimension->gData; }
};
// Holds a single scalar value of type T mirrored between host and device, together with
// the scalar shape/dimension metadata, for legacy reduction-style kernels.
template <typename T>
class ScalarInfo {
  sd::buffer::Buffer<T> *scalarData;
  ScalarShapeInformation *shapeInfo;
  T finalResult;
  cudaStream_t streamRef;  // stream used for all copies involving this scalar
 public:
  ScalarInfo(cudaStream_t stream) {
    T *scalarResult = reinterpret_cast<T *>(malloc(sizeof(T)));
    CHECK_ALLOC(scalarResult, "Failed to allocate new scalar buffer", sizeof(T));
    shapeInfo = new ScalarShapeInformation(stream);
    scalarData = sd::buffer::createBuffer(scalarResult, 1, stream);
    streamRef = stream;
    sd::buffer::copyDataToGpu(&scalarData, stream);
  }
  // Copies the scalar back from the device and returns it.
  T getFinalResultFromDevice() {
    sd::buffer::copyDataFromGpu(&scalarData, streamRef);
    return scalarData->data[0];
  }
  /**
   * Get the device shape information
   * representing a scalar
   */
  sd::LongType *getDeviceShapeInfo() { return shapeInfo->getShapeInfoGpuPointer(); }
  /**
   * Get the dZ pointers
   */
  T *getDevicePointer() { return scalarData->gData; }
  /**
   * Get the infinite dimension device pointer
   */
  sd::LongType *getDimensionDevicePointer() { return shapeInfo->getDimensionGpuPointer(); }
  ~ScalarInfo() {
    sd::buffer::freeBuffer(&scalarData);
    delete shapeInfo;
  }
};
// Executes a pairwise transform op (z[i] = op(x[i], y[i])) on the device. Host shape infos
// are used for cache lookups of the corresponding device shape buffers; the d*ShapeInfo
// parameters are part of the legacy interface and not used directly.
void execPairwiseTransform(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX,
                           sd::LongType const *hXShapeInfo, sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbY,
                           sd::LongType const *hYShapeInfo, sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ,
                           sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, void *extraParams) {
  try {
    // Mark z as about-to-be-written and x/y as read so host/device copies stay coherent.
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execPairwiseTransform(
        &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbY->primary(), hYShapeInfo,
        dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(),
        hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
        extraParams);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Boolean-output variant of execPairwiseTransform: z[i] = boolOp(x[i], y[i]).
// Same buffer-coherency and shape-cache conventions as above.
void execPairwiseTransformBool(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX,
                               sd::LongType const *hXShapeInfo, sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbY,
                               sd::LongType const *hYShapeInfo, sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ,
                               sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, void *extraParams) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execPairwiseBoolTransform(
        &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbY->primary(), hYShapeInfo,
        dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(),
        hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
        extraParams);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Reduces x to a single summary-statistics scalar (e.g. variance/stddev family, selected
// by opNum) written into z; biasCorrected toggles the bias-corrected estimator.
void execSummaryStatsScalar(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX,
                            sd::LongType const *hXShapeInfo, sd::LongType const *dXShapeInfo, void *extraParams,
                            OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo,
                            bool biasCorrected) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execSummaryStatsScalar(
        &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(),
        hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
        biasCorrected);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Boolean broadcast op along the given dimensions. TAD (tensor-along-dimension) shape
// infos and offsets for both x and z are taken from fixed extraPointers slots [9..13].
void execBroadcastBool(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                       sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbY, sd::LongType const *hYShapeInfo,
                       sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                       sd::LongType const *dZShapeInfo, void *extraParams, OpaqueDataBuffer *dbDimension,
                       sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
    // Dimensions are consumed from the host side of the dimension buffer.
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
    // extraPointers: [9] host TAD shape, [10]/[11] device TAD shape/offsets for x,
    // [12]/[13] device TAD shape/offsets for z.
    auto hTADShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers[9]);
    auto tadOnlyShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers[10]);
    auto tadOffsets = reinterpret_cast<sd::LongType *>(extraPointers[11]);
    auto tadOnlyShapeInfoZ = reinterpret_cast<sd::LongType *>(extraPointers[12]);
    auto tadOffsetsZ = reinterpret_cast<sd::LongType *>(extraPointers[13]);
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execBroadcastBool(
        &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbY->primary(), hYShapeInfo,
        dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(),
        hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
        extraParams, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param dY
* @param dYShapeInfo
* @param dZ
* @param dZShapeInfo
* @param dimension
* @param dimensionLength
*/
/**
 * Executes a broadcast op along the given dimensions: Z = op(X, Y broadcast
 * over dims), with Z keeping X's data type family.
 *
 * Fix: removed unused locals (`stream`, `hTADShapeInfo`, `xType`, `yType`,
 * `zType`) — none of them were passed to the executor; the LaunchContext is
 * built directly from extraPointers.
 */
void execBroadcast(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                   sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbY, sd::LongType const *hYShapeInfo,
                   sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                   sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension, sd::LongType const *hDimensionShape,
                   sd::LongType const *dDimensionShape) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});

    // the dimension list lives in the host (primary) buffer of dbDimension
    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    // caller-supplied TAD (tensor-along-dimension) descriptors
    auto tadOnlyShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers[10]);
    auto tadOffsets = reinterpret_cast<sd::LongType *>(extraPointers[11]);
    auto tadOnlyShapeInfoZ = reinterpret_cast<sd::LongType *>(extraPointers[12]);
    auto tadOffsetsZ = reinterpret_cast<sd::LongType *>(extraPointers[13]);

    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execBroadcast(
        &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbY->primary(), hYShapeInfo,
        dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(),
        hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
        dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
*/
////////////////////////////////////////////////////////////////////////
/**
 * Reduces the whole of X into a single float-typed scalar stored in Z.
 * Thin wrapper that builds a LaunchContext from extraPointers and delegates
 * to NativeOpExecutioner::execReduceFloatScalar.
 */
void execReduceFloat(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                     sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                     sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});

    LaunchContext launchCtx(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    auto &shapeHelper = ConstantShapeHelper::getInstance();
    auto xShapeDev = shapeHelper.bufferForShapeInfo(hXShapeInfo).special();
    auto zShapeDev = shapeHelper.bufferForShapeInfo(hZShapeInfo).special();

    NativeOpExecutioner::execReduceFloatScalar(&launchCtx, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                               xShapeDev, extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(),
                                               zShapeDev);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
/**
 * Reduces the whole of X into a single scalar of the same data type, stored
 * in Z. Delegates to NativeOpExecutioner::execReduceSameScalar.
 */
void execReduceSame(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                    sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                    sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});

    LaunchContext launchCtx(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    auto &shapeHelper = ConstantShapeHelper::getInstance();
    auto xShapeDev = shapeHelper.bufferForShapeInfo(hXShapeInfo).special();
    auto zShapeDev = shapeHelper.bufferForShapeInfo(hZShapeInfo).special();

    NativeOpExecutioner::execReduceSameScalar(&launchCtx, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                              xShapeDev, extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(),
                                              zShapeDev);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
/**
 * Same-type reduction of X along the caller-supplied dimensions into Z.
 * The dimension list is read from dbDimension's host buffer; its length comes
 * from hDimensionShape.
 */
void execReduceSame2(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                     sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                     sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension,
                     sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
    // reduction dimensions live in host memory
    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
    const auto zLen = shape::length(hZShapeInfo);
    std::vector<int> dimensions(dimension, dimension + dimensionLength);
    // when Z's rank doesn't match the post-reduction rank of X (and Z is not a
    // scalar), rebuild Z's shape info with unit dimensions stripped so the
    // executor sees matching ranks
    const sd::LongType *zShapeInfoH = hZShapeInfo;
    if (shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) {
      auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions);
      zShapeInfoH = reinterpret_cast<sd::LongType const *>(zPack.primary());
    }
    // an empty dims vector signals a full reduction down to a scalar
    std::vector<int> dims =
        (zLen != 1) ? ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>();
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execReduceSame(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
                                        extraParams, dbZ->primary(), zShapeInfoH, dbZ->special(),
                                        ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(),
                                        dims.data(), dims.size());
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
/**
 * Long(INT64)-typed reduction of X along the caller-supplied dimensions into Z
 * (e.g. count-style ops). The dimension list is read from dbDimension's host
 * buffer; its length comes from hDimensionShape.
 */
void execReduceLong2(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                     sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                     sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension,
                     sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
    // reduction dimensions live in host memory
    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
    const auto zLen = shape::length(hZShapeInfo);
    std::vector<int> dimensions(dimension, dimension + dimensionLength);
    // when Z's rank doesn't match the post-reduction rank of X (and Z is not a
    // scalar), rebuild Z's shape info with unit dimensions stripped
    const sd::LongType *zShapeInfoH = hZShapeInfo;
    if (shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) {
      auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions);
      zShapeInfoH = reinterpret_cast<sd::LongType const *>(zPack.primary());
    }
    // an empty dims vector signals a full reduction down to a scalar
    std::vector<int> dims =
        (zLen != 1) ? ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>();
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execReduceLong(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
                                        extraParams, dbZ->primary(), zShapeInfoH, dbZ->special(),
                                        ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(),
                                        dims.data(), dims.size());
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
/**
 * Reduces the whole of X into a single INT64 scalar stored in Z, launching the
 * reduce kernel directly (no NativeOpExecutioner hop). Z must be INT64.
 *
 * Fixes:
 *  - the host Z shape-info argument passed to execReduceScalar was
 *    `hXShapeInfo`; it now passes `hZShapeInfo`, matching the device Z shape
 *    buffer next to it and the sibling execReduceBool below;
 *  - removed the unused local `hTADShapeInfo` (extraPointers[9]).
 */
void execReduceLong(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                    sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                    sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});

    auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
    auto dTADShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers[10]);
    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
    auto zType = sd::ArrayOptions::dataType(hZShapeInfo);

    if (zType != sd::DataType::INT64)
      throw datatype_exception::build("execReduceLong wrong Z data type", sd::DataType::INT64, zType);

    auto xLength = shape::length(hXShapeInfo);
    auto blockWidth = 256;
    auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
    dim3 launchDims(numBlocks, blockWidth, 32768);

    BUILD_DOUBLE_SELECTOR(
        xType, zType, functions::reduce::ReduceLongFunction,
        ::execReduceScalar(launchDims, stream, opNum, dbX->special(),
                           ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), hXShapeInfo,
                           extraParams, dbZ->special(),
                           ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), hZShapeInfo,
                           nullptr, 0, reductionPointer, dTADShapeInfo),
        SD_COMMON_TYPES, SD_LONG_TYPES);

    sd::DebugHelper::checkErrorCode(stream, "execReduceLong(...) failed");

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
/**
 * Boolean-output reduction of X along the caller-supplied dimensions into Z
 * (e.g. any/all-style ops). The dimension list is read from dbDimension's
 * host buffer; its length comes from hDimensionShape.
 */
void execReduceBool2(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                     sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                     sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension,
                     sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
    // reduction dimensions live in host memory
    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
    const auto zLen = shape::length(hZShapeInfo);
    std::vector<int> dimensions(dimension, dimension + dimensionLength);
    // when Z's rank doesn't match the post-reduction rank of X (and Z is not a
    // scalar), rebuild Z's shape info with unit dimensions stripped
    const sd::LongType *zShapeInfoH = hZShapeInfo;
    if (shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) {
      auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions);
      zShapeInfoH = reinterpret_cast<sd::LongType const *>(zPack.primary());
    }
    // an empty dims vector signals a full reduction down to a scalar
    std::vector<int> dims =
        (zLen != 1) ? ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>();
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execReduceBool(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
                                        extraParams, dbZ->primary(), zShapeInfoH, dbZ->special(),
                                        ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(),
                                        dims.data(), dims.size());
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
/**
 * Reduces the whole of X into a single BOOL scalar stored in Z, launching the
 * reduce kernel directly. Z must have BOOL data type.
 *
 * Fix: removed the unused local `hTADShapeInfo` (extraPointers[9] was read but
 * never used).
 */
void execReduceBool(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                    sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                    sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});

    auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
    auto dTADShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers[10]);
    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
    auto zType = sd::ArrayOptions::dataType(hZShapeInfo);

    if (zType != sd::DataType::BOOL) throw std::runtime_error("execReduceBool requires Z operand to have BOOL type");

    auto xLength = shape::length(hXShapeInfo);
    auto blockWidth = 256;
    auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
    dim3 launchDims(numBlocks, blockWidth, 32768);

    BUILD_DOUBLE_SELECTOR(
        xType, zType, functions::reduce::ReduceBoolFunction,
        ::execReduceScalar(launchDims, stream, opNum, dbX->special(),
                           ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), hXShapeInfo,
                           extraParams, dbZ->special(),
                           ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), hZShapeInfo,
                           nullptr, 0, reductionPointer, dTADShapeInfo),
        SD_COMMON_TYPES, SD_BOOL_TYPES);

    sd::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed");

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
* @param dimension
* @param dimensionLength
*/
////////////////////////////////////////////////////////////////////////
/**
 * Index-reduction (e.g. argmax/argmin) of X along the caller-supplied
 * dimensions into Z.
 */
void execIndexReduce(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                     sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                     sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension,
                     sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
    // host copy of the dimensions, used to build the TAD pack below
    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
    auto tadPack =
        sd::ConstantTadHelper::getInstance().tadForDimensions(hXShapeInfo, dimension, shape::length(hDimensionShape));
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    // NOTE(review): the TAD pack is built from the host dimension buffer, but
    // the executor receives the device copy (dbDimension->special()) — assumes
    // both buffers hold the same values; verify with callers.
    NativeOpExecutioner::execIndexReduce(
        &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(),
        hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
        (int *)dbDimension->special(), dimensionLength, tadPack.specialShapeInfo(), tadPack.specialOffsets());
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
*/
////////////////////////////////////////////////////////////////////////
/**
 * Float-typed reduction (mean, norm, etc.) of X along the caller-supplied
 * dimensions into Z. The dimension list is read from dbDimension's host
 * buffer; its length comes from hDimensionShape.
 */
void execReduceFloat2(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                      sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                      sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension,
                      sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
    // reduction dimensions live in host memory
    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
    const auto zLen = shape::length(hZShapeInfo);
    std::vector<int> dimensions(dimension, dimension + dimensionLength);
    // when Z's rank doesn't match the post-reduction rank of X (and Z is not a
    // scalar), rebuild Z's shape info with unit dimensions stripped
    const sd::LongType *zShapeInfoH = hZShapeInfo;
    if (shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) {
      auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions);
      zShapeInfoH = reinterpret_cast<sd::LongType const *>(zPack.primary());
    }
    // an empty dims vector signals a full reduction down to a scalar
    std::vector<int> dims =
        (zLen != 1) ? ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>();
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execReduceFloat(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                         ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
                                         extraParams, dbZ->primary(), zShapeInfoH, dbZ->special(),
                                         ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(),
                                         dims.data(), dims.size());
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
*/
////////////////////////////////////////////////////////////////////////
/**
 * Index-reduction of the whole of X (e.g. global argmax) into a scalar Z.
 * Delegates to NativeOpExecutioner::execIndexReduceScalar.
 */
void execIndexReduceScalar(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX,
                           sd::LongType const *hXShapeInfo, sd::LongType const *dXShapeInfo, void *extraParams,
                           OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});

    LaunchContext launchCtx(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    auto &shapeHelper = ConstantShapeHelper::getInstance();
    auto xShapeDev = shapeHelper.bufferForShapeInfo(hXShapeInfo).special();
    auto zShapeDev = shapeHelper.bufferForShapeInfo(hZShapeInfo).special();

    NativeOpExecutioner::execIndexReduceScalar(&launchCtx, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                               xShapeDev, extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(),
                                               zShapeDev);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
/**
 * Element-wise transform keeping X's data type: Z = op(X).
 * NOTE(review): TAD buffers are taken from extraPointers[0]/[1] here, while
 * the strict/float transforms below use [10]/[11] — confirm the index
 * convention with the Java-side caller. Also note extraPointers is
 * null-checked for the TAD reads but dereferenced unconditionally for the
 * LaunchContext below.
 */
void execTransformSame(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                       sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                       sd::LongType const *dZShapeInfo, void *extraParams) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    auto tadShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers != nullptr ? extraPointers[0] : nullptr);
    auto tadOffsets = reinterpret_cast<sd::LongType *>(extraPointers != nullptr ? extraPointers[1] : nullptr);
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execTransformSame(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                           ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
                                           dbZ->primary(), hZShapeInfo, dbZ->special(),
                                           ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
                                           extraParams, tadShapeInfo, tadOffsets);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
/**
 * Element-wise transform producing a boolean result: Z = op(X).
 * Optional TAD buffers are read from extraPointers[0]/[1] when the array is
 * provided.
 */
void execTransformBool(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                       sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                       sd::LongType const *dZShapeInfo, void *extraParams) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});

    sd::LongType *tadShapeInfo = nullptr;
    sd::LongType *tadOffsets = nullptr;
    if (extraPointers != nullptr) {
      tadShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers[0]);
      tadOffsets = reinterpret_cast<sd::LongType *>(extraPointers[1]);
    }

    LaunchContext launchCtx(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    auto &shapeHelper = ConstantShapeHelper::getInstance();
    NativeOpExecutioner::execTransformBool(&launchCtx, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                           shapeHelper.bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(),
                                           hZShapeInfo, dbZ->special(),
                                           shapeHelper.bufferForShapeInfo(hZShapeInfo).special(), extraParams,
                                           tadShapeInfo, tadOffsets);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
/**
 * Element-wise transform with unconstrained output type: Z = op(X).
 * NOTE(review): extraPointers[4] is reinterpreted as a cudaStream_t
 * *reference* (not dereferenced as a pointer) for the special stream, unlike
 * the other wrappers — confirm this matches the LaunchContext overload in use.
 */
void execTransformAny(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                      sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                      sd::LongType const *dZShapeInfo, void *extraParams) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
    auto streamSpecial = reinterpret_cast<cudaStream_t &>(extraPointers[4]);
    LaunchContext lc(stream, streamSpecial, extraPointers[5], extraPointers[3],
                     reinterpret_cast<int *>(extraPointers[6]));
    // no TAD buffers are forwarded for "any" transforms
    NativeOpExecutioner::execTransformAny(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                          ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
                                          dbZ->primary(), hZShapeInfo, dbZ->special(),
                                          ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
                                          extraParams, nullptr, nullptr);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
/**
 * Element-wise transform requiring matching floating-point types: Z = op(X).
 * Optional TAD buffers are read from extraPointers[10]/[11] when the array is
 * provided.
 */
void execTransformStrict(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                         sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                         sd::LongType const *dZShapeInfo, void *extraParams) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});

    sd::LongType *tadShapeInfo = nullptr;
    sd::LongType *tadOffsets = nullptr;
    if (extraPointers != nullptr) {
      tadShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers[10]);
      tadOffsets = reinterpret_cast<sd::LongType *>(extraPointers[11]);
    }

    LaunchContext launchCtx(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    auto &shapeHelper = ConstantShapeHelper::getInstance();
    NativeOpExecutioner::execTransformStrict(&launchCtx, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                             shapeHelper.bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(),
                                             hZShapeInfo, dbZ->special(),
                                             shapeHelper.bufferForShapeInfo(hZShapeInfo).special(), extraParams,
                                             tadShapeInfo, tadOffsets);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
/**
 * Element-wise transform producing a floating-point result: Z = op(X).
 * Optional TAD buffers are read from extraPointers[10]/[11] when the array is
 * provided.
 */
void execTransformFloat(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                        sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                        sd::LongType const *dZShapeInfo, void *extraParams) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});

    sd::LongType *tadShapeInfo = nullptr;
    sd::LongType *tadOffsets = nullptr;
    if (extraPointers != nullptr) {
      tadShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers[10]);
      tadOffsets = reinterpret_cast<sd::LongType *>(extraPointers[11]);
    }

    LaunchContext launchCtx(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    auto &shapeHelper = ConstantShapeHelper::getInstance();
    NativeOpExecutioner::execTransformFloat(&launchCtx, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                            shapeHelper.bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(),
                                            hZShapeInfo, dbZ->special(),
                                            shapeHelper.bufferForShapeInfo(hZShapeInfo).special(), extraParams,
                                            tadShapeInfo, tadOffsets);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(e.what());
  }
}
/**
 * Probes whether every pair of GPUs can access each other's memory
 * (peer-to-peer) and stores the result in the file-level supportedP2P flag.
 * Restores the originally active device before returning.
 *
 * Fix: the current-device sanity check used `&&` (`curDevice < 0 &&
 * curDevice > devCnt`), which can never be true; it now uses `||`.
 */
void checkP2P() {
  int curDevice = 0;
  cudaGetDevice(&curDevice);

  int devCnt = 0;
  cudaGetDeviceCount(&devCnt);

  // clamp an out-of-range device id to 0 (was `&&`, never true)
  if (curDevice < 0 || curDevice > devCnt) curDevice = 0;

  bool tempSupport = true;

  if (devCnt > 1) {
    // stop scanning as soon as one pair without peer access is found
    for (int dX = 0; dX < devCnt && tempSupport; dX++) {
      for (int dY = 0; dY < devCnt; dY++) {
        if (dX == dY) continue;

        int canAccess = 0;
        cudaSetDevice(dX);
        cudaDeviceCanAccessPeer(&canAccess, dX, dY);

        if (!canAccess) {
          tempSupport = false;
          break;
        }
      }
    }

    supportedP2P = tempSupport;
    cudaSetDevice(curDevice);
  } else {
    // if we have only 1 device - we say that we support P2P, since all data will be on 1 device
    supportedP2P = true;
  }
}
/**
 * Enables or disables peer access between every pair of GPUs. No-op when the
 * requested state is already active. Restores the originally active device
 * before returning.
 *
 * Fixes: the current-device sanity check used `&&` (never true) instead of
 * `||`; the redundant duplicate cudaSetDevice(curDevice) call was removed
 * (the single call after the loop restores the device in all paths).
 */
void enableP2P(bool enable) {
  if (enable == allowedP2P) return;

  int curDevice = 0;
  cudaGetDevice(&curDevice);

  int devCnt = 0;
  cudaGetDeviceCount(&devCnt);

  // clamp an out-of-range device id to 0 (was `&&`, never true)
  if (curDevice < 0 || curDevice > devCnt) curDevice = 0;

  if (devCnt > 1) {
    for (int dX = 0; dX < devCnt; dX++) {
      for (int dY = 0; dY < devCnt; dY++) {
        if (dX == dY) continue;

        int canAccess = 0;
        cudaSetDevice(dX);
        cudaDeviceCanAccessPeer(&canAccess, dX, dY);

        if (canAccess) {
          if (enable) {
            cudaDeviceEnablePeerAccess(dY, 0);
          } else {
            cudaDeviceDisablePeerAccess(dY);
          }
        } else {
          if (sd::Environment::getInstance().isVerbose()) printf("Peer access [%i] -> [%i] isn't possible\n", dX, dY);
        }
      }
    }
  }

  allowedP2P = enable;
  cudaSetDevice(curDevice);
}
// Reports the cached result computed by checkP2P(): true when all device
// pairs support peer access, or when only a single device is present.
bool isP2PAvailable() { return supportedP2P; }
/**
 * One-time backend setup: caches cudaDeviceProp for every device, raises each
 * device's stack-size limit to 4096 bytes, selects device 0, probes P2P
 * support and enables it when available.
 */
void initializeDevicesAndFunctions() {
  try {
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);

    deviceProperties = new cudaDeviceProp[deviceCount];
    for (int dev = 0; dev < deviceCount; ++dev) {
      cudaSetDevice(dev);
      cudaGetDeviceProperties(&deviceProperties[dev], dev);
      // larger per-thread stack for deeply nested device code
      cudaDeviceSetLimit(cudaLimitStackSize, 4096);
    }

    cudaSetDevice(0);

    checkP2P();
    // enabling p2p gpu access if it's supported
    if (supportedP2P && deviceCount > 1) enableP2P(allowedP2P);
  } catch (std::exception &e) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(e.what());
  }
}
// Forwards host-side BLAS function pointers (supplied by the Java layer) to
// the BlasHelper singleton. The commented-out block documents the historical
// slot layout of the `functions` array.
void initializeFunctions(sd::Pointer *functions) {
  sd::BlasHelper::getInstance().initializeDeviceFunctions(functions);
  /*
  cublasSgemv = (CublasSgemv)functions[0];
  cublasDgemv = (CublasDgemv)functions[1];
  cublasHgemm = (CublasHgemm)functions[2];
  cublasSgemm = (CublasSgemm)functions[3];
  cublasDgemm = (CublasDgemm)functions[4];
  cublasSgemmEx = (CublasSgemmEx)functions[5];
  cublasHgemmBatched = (CublasHgemmBatched)functions[6];
  cublasSgemmBatched = (CublasSgemmBatched)functions[7];
  cublasDgemmBatched = (CublasDgemmBatched)functions[8];
  */
}
/**
* This method acquires memory chunk of requested size on host side
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
* @param flags optional parameter
*/
/**
 * Allocates pinned (page-locked) host memory of `memorySize` + 8 bytes.
 *
 * @param memorySize requested size in bytes
 * @param flags      unused; reserved for future allocation flags
 * @return host pointer, or nullptr on failure (error recorded in the default
 *         context's error reference)
 *
 * Fix: `pointer` was uninitialized, so a failed cudaHostAlloc returned an
 * indeterminate pointer; it is now nullptr-initialized and reset on failure.
 */
sd::Pointer mallocHost(sd::LongType memorySize, int flags) {
  sd::Pointer pointer = nullptr;
  // cudaHostAllocMapped |cudaHostAllocPortable
  auto res = cudaHostAlloc(reinterpret_cast<void **>(&pointer), memorySize + 8, cudaHostAllocDefault);
  if (res != 0) {
    pointer = nullptr;  // never hand back an indeterminate pointer
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaHostAlloc failed");
  }

  return reinterpret_cast<int8_t *>(pointer);
}
/**
* This method acquires memory chunk of requested size on specified device
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
* @param ptrToDeviceId pointer to deviceId. For cuda that's just and int, for OpenCL that's pointer to device_id, etc
* @param flags optional parameter
*/
/**
 * Allocates device memory of `memorySize` + 8 bytes on the current device.
 *
 * @param memorySize requested size in bytes
 * @param deviceId   unused here; allocation goes to the current device
 * @param flags      unused; reserved
 * @return device pointer, or nullptr on failure (error recorded in the
 *         default context's error reference)
 *
 * Fix: `pointer` was uninitialized, so a failed cudaMalloc returned an
 * indeterminate pointer; it is now nullptr-initialized and reset on failure.
 */
sd::Pointer mallocDevice(sd::LongType memorySize, int deviceId, int flags) {
  sd::Pointer pointer = nullptr;
  auto res = cudaMalloc(reinterpret_cast<void **>(&pointer), memorySize + 8);
  if (res != 0) {
    pointer = nullptr;  // never hand back an indeterminate pointer
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMalloc failed");
  }

  return reinterpret_cast<int8_t *>(pointer);
}
/**
* This method releases previously allocated host memory space
*
* @param pointer pointer that'll be freed
*/
/**
 * Releases pinned host memory previously obtained via mallocHost.
 * Failures are recorded in the default context's error reference.
 * @return always 1
 */
int freeHost(sd::Pointer pointer) {
  auto status = cudaFreeHost(reinterpret_cast<void *>(pointer));
  if (status != 0) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(status);
    errRef->setErrorMessage("cudaFreeHost failed");
  }

  return 1L;
}
/**
* This method releases previously allocated memory space on device
*
* @param pointer pointer that'll be freed
* @param ptrToDeviceId pointer to deviceId.
*/
/**
 * Releases device memory previously obtained via mallocDevice.
 * Error code 1 is deliberately ignored (as in the original implementation).
 * @return 1 on success, 0 otherwise
 */
int freeDevice(sd::Pointer pointer, int deviceId) {
  auto status = cudaFree(reinterpret_cast<void *>(pointer));

  // we're intentionally skipping error code 1 here
  if (status != 0 && status != 1) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(status);
    errRef->setErrorMessage("cudaFree failed");
  }

  return (status == 0) ? 1L : 0L;
}
// Contexts are not used by this backend; always returns a null handle.
sd::Pointer createContext() { return nullptr; }
/**
 * Allocates a new cudaStream_t on the heap and creates the stream.
 * The pointer is returned even on failure; the error is recorded in the
 * default context's error reference.
 */
sd::Pointer createStream() {
  auto stream = new cudaStream_t();
  auto status = cudaStreamCreate(stream);
  if (status != 0) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(status);
    errRef->setErrorMessage("cudaStreamCreate failed");
  }

  return stream;
}
// Creates a timing-disabled CUDA event and returns its handle as an opaque
// pointer. NOTE(review): cudaEventCreateWithFlags writes the event handle
// into the local `nativeEvent` variable itself (via &nativeEvent), so the
// returned value is the handle, not the malloc'd buffer — registerEvent
// unpacks it the same way (&event). The malloc'd chunk appears unused after
// the CHECK_ALLOC; verify against the event-destruction path.
sd::Pointer createEvent() {
  sd::Pointer nativeEvent = (sd::Pointer)malloc(sizeof(cudaEvent_t));
  CHECK_ALLOC(nativeEvent, "Failed to allocate new CUDA event buffer", sizeof(cudaEvent_t));
  auto dZ = cudaEventCreateWithFlags(reinterpret_cast<cudaEvent_t *>(&nativeEvent), cudaEventDisableTiming);
  if (dZ != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventCreateWithFlags failed");
  }
  return nativeEvent;
}
/**
 * Records `event` on `stream`. The event handle is stored in the pointer
 * value itself (see createEvent), hence the address-of on the parameter.
 * @return always 1; failures are recorded in the error reference
 */
int registerEvent(sd::Pointer event, sd::Pointer stream) {
  auto eventHandle = reinterpret_cast<cudaEvent_t *>(&event);
  auto streamHandle = reinterpret_cast<cudaStream_t *>(stream);

  auto status = cudaEventRecord(*eventHandle, *streamHandle);
  if (status != 0) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(status);
    errRef->setErrorMessage("cudaEventRecord failed");
  }

  return 1;
}
// Switches the current device for this thread via the AffinityManager; always returns 1.
int setDevice(int deviceId) {
  AffinityManager::setCurrentDevice(deviceId);
  return 1;
}
// Free memory (bytes) on the currently selected device; total is queried but discarded.
sd::LongType getDeviceFreeMemoryDefault() {
  size_t freeBytes = 0;
  size_t totalBytes = 0;
  cudaMemGetInfo(&freeBytes, &totalBytes);
  return static_cast<sd::LongType>(freeBytes);
}
// Free memory (bytes) on `device`. Temporarily switches to that device if it is
// not the current one, then restores the previous selection.
sd::LongType getDeviceFreeMemory(int device) {
  int previous = -1;
  cudaGetDevice(&previous);
  const bool needSwitch = device >= 0 && device != previous;
  if (needSwitch) cudaSetDevice(device);
  size_t freeBytes = 0;
  size_t totalBytes = 0;
  cudaMemGetInfo(&freeBytes, &totalBytes);
  if (needSwitch) cudaSetDevice(previous);
  return static_cast<sd::LongType>(freeBytes);
}
// Total memory (bytes) on `device`. Temporarily switches to that device if it is
// not the current one, then restores the previous selection.
sd::LongType getDeviceTotalMemory(int device) {
  int previous = -1;
  cudaGetDevice(&previous);
  const bool needSwitch = device >= 0 && device != previous;
  if (needSwitch) cudaSetDevice(device);
  size_t freeBytes = 0;
  size_t totalBytes = 0;
  cudaMemGetInfo(&freeBytes, &totalBytes);
  if (needSwitch) cudaSetDevice(previous);
  return static_cast<sd::LongType>(totalBytes);
}
/**
 * Synchronous memcpy between host/device buffers.
 *
 * @param flags copy direction: 0 = H2H, 1 = H2D, 2 = D2H, 3 = D2D
 * @return 1 on success, 0 on failure (error recorded on the default LaunchContext)
 */
int memcpySync(sd::Pointer dst, sd::Pointer src, sd::LongType size, int flags, sd::Pointer reserved) {
  cudaMemcpyKind kind;
  switch (flags) {
    case 0: {
      kind = cudaMemcpyHostToHost;
    } break;
    case 1: {
      kind = cudaMemcpyHostToDevice;
    } break;
    case 2: {
      kind = cudaMemcpyDeviceToHost;
    } break;
    case 3: {
      kind = cudaMemcpyDeviceToDevice;
    } break;
    default: {
      sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
      sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFINED MEMCPY");
      return 0;
    }
  }
  auto dZ = cudaMemcpy(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)),
                       static_cast<size_t>(size), kind);
  if (dZ != 0) {
    // FIX: size is a 64-bit sd::LongType; printing it with "%i" was undefined behavior.
    printf("Failed on [%p] -> [%p], size: [%lld], direction: [%d], dZ: [%d]\n", reinterpret_cast<void *>(src),
           reinterpret_cast<void *>(dst), static_cast<long long>(size), flags, static_cast<int>(dZ));
    fflush(stdout);
    fflush(stderr);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemcpy failed");
    return 0;
  }
  return 1;
}
/**
 * Asynchronous memcpy between host/device buffers on the stream passed in `reserved`.
 *
 * @param flags copy direction: 0 = H2H, 1 = H2D, 2 = D2H, 3 = D2D
 * @param reserved cudaStream_t* to enqueue the copy on
 * @return 1 on success, 0 on failure (error recorded on the default LaunchContext)
 */
int memcpyAsync(sd::Pointer dst, sd::Pointer src, sd::LongType size, int flags, sd::Pointer reserved) {
  auto pStream = reinterpret_cast<cudaStream_t *>(reserved);
  cudaMemcpyKind kind;
  switch (flags) {
    case 0: {
      kind = cudaMemcpyHostToHost;
    } break;
    case 1: {
      kind = cudaMemcpyHostToDevice;
    } break;
    case 2: {
      kind = cudaMemcpyDeviceToHost;
    } break;
    case 3: {
      kind = cudaMemcpyDeviceToDevice;
    } break;
    default: {
      sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
      sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFINED MEMCPY");
      return 0;
    }
  }
  auto dZ = cudaMemcpyAsync(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)),
                            static_cast<size_t>(size), kind, *pStream);
  if (dZ != 0) {
    // FIX: size is a 64-bit sd::LongType; printing it with "%i" was undefined behavior.
    printf("Failed on [%p] -> [%p], size: [%lld], direction: [%d], dZ: [%d]\n", reinterpret_cast<void *>(src),
           reinterpret_cast<void *>(dst), static_cast<long long>(size), flags, static_cast<int>(dZ));
    fflush(stdout);
    fflush(stderr);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemcpyAsync failed");
    return 0;
  }
  return 1;
}
// Synchronous byte-wise memset of a device buffer; always returns 1, with failures
// recorded on the default LaunchContext. `flags` and `reserved` are unused here.
int memsetSync(sd::Pointer dst, int value, sd::LongType size, int flags, sd::Pointer reserved) {
  const auto status = cudaMemset(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size));
  if (status != 0) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(status);
    errRef->setErrorMessage("cudaMemset failed");
  }
  return 1;
}
// Asynchronous byte-wise memset of a device buffer on the stream passed in `reserved`;
// always returns 1, with failures recorded on the default LaunchContext.
int memsetAsync(sd::Pointer dst, int value, sd::LongType size, int flags, sd::Pointer reserved) {
  auto pStream = reinterpret_cast<cudaStream_t *>(reserved);
  const auto status = cudaMemsetAsync(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size), *pStream);
  if (status != 0) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(status);
    errRef->setErrorMessage("cudaMemsetAsync failed");
  }
  return 1;
}
// Destroys an event created by createEvent.
// NOTE: the event handle is packed into the pointer VALUE (see createEvent),
// hence the cast of &event rather than event itself.
int destroyEvent(sd::Pointer event) {
  auto pEvent = reinterpret_cast<cudaEvent_t *>(&event);
  auto dZ = cudaEventDestroy(*pEvent);
  if (dZ != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventDestroy failed");
  }
  return 1;
}
// Blocks the host until all work queued on the given stream has completed.
// Always returns 1; failures are recorded on the default LaunchContext.
int streamSynchronize(sd::Pointer stream) {
  auto pStream = reinterpret_cast<cudaStream_t *>(stream);
  const auto status = cudaStreamSynchronize(*pStream);
  if (status != 0) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(status);
    errRef->setErrorMessage("cudaStreamSynchronize failed");
  }
  return 1L;
}
// Blocks the host until the given event has completed. The event handle is packed
// into the pointer VALUE (see createEvent), hence the cast of &event.
// Always returns 1; failures are recorded on the default LaunchContext.
int eventSynchronize(sd::Pointer event) {
  auto pEvent = reinterpret_cast<cudaEvent_t *>(&event);
  const auto status = cudaEventSynchronize(*pEvent);
  if (status != 0) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(status);
    errRef->setErrorMessage("cudaEventSynchronize failed");
  }
  return 1L;
}
// Number of CUDA-capable devices visible to this process (0 when none, or on error).
int getAvailableDevices() {
  int deviceCount = 0;
  cudaGetDeviceCount(&deviceCount);
  return deviceCount;
}
// Toggles global debug mode on the sd::Environment singleton.
void enableDebugMode(bool reallyEnable) { sd::Environment::getInstance().setDebug(reallyEnable); }
// Sets the global grid-size limit, clamped into [1, 8192].
void setGridLimit(int gridSize) {
  blockLimit = gridSize > 8192 ? 8192 : (gridSize < 1 ? 1 : gridSize);
}
// Reports the configured thread ceiling (see setOmpNumThreads).
int ompGetMaxThreads() { return maxThreads; }
// Same value as ompGetMaxThreads on this backend.
int ompGetNumThreads() { return maxThreads; }
// Sets the global thread ceiling, clamped into [32, 1024].
void setOmpNumThreads(int threads) {
  maxThreads = threads > 1024 ? 1024 : (threads < 32 ? 32 : threads);
}
// Toggles global verbose logging on the sd::Environment singleton.
void enableVerboseMode(bool reallyEnable) { sd::Environment::getInstance().setVerbose(reallyEnable); }
// Compute-capability major version from the cached device properties (no bounds check on `device`).
int getDeviceMajor(int device) { return deviceProperties[device].major; }
// Compute-capability minor version from the cached device properties (no bounds check on `device`).
int getDeviceMinor(int device) { return deviceProperties[device].minor; }
// Device name string from the cached device properties (no bounds check on `device`).
const char *getDeviceName(int device) { return deviceProperties[device].name; }
// Concatenates `numArrays` input buffers into dZ along `dimension`, dispatched over
// the data type of dZShapeInfo. Note this calls concatCpuGeneric, i.e. it runs on
// the host, not the GPU. Exceptions are caught and recorded on the default context.
void specialConcat(sd::Pointer *extraPointers, int dimension, int numArrays, sd::Pointer *data,
                   sd::Pointer *inputShapeInfo, void *dZ, sd::LongType const *dZShapeInfo, sd::Pointer *tadPointers,
                   sd::Pointer *offsetPointers) {
  try {
    BUILD_SINGLE_SELECTOR(ArrayOptions::dataType(dZShapeInfo), sd::SpecialMethods,
                          ::concatCpuGeneric(dimension, numArrays, data, inputShapeInfo, dZ, dZShapeInfo),
                          SD_COMMON_TYPES);
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
 * Builds a TAD (tensor-along-dimension) pack for the given shape and dimensions.
 * The returned TadPack is heap-allocated; ownership passes to the caller.
 *
 * @return new TadPack, or nullptr on failure (error recorded on default context)
 */
sd::TadPack *tadOnlyShapeInfo(sd::LongType const *dXShapeInfo, int *dimension, int dimensionLength) {
  try {
    auto pack = new TadPack();
    *pack = sd::ConstantTadHelper::getInstance().tadForDimensions(dXShapeInfo, dimension, dimensionLength);
    return pack;
  } catch (std::exception &e) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(e.what());
    return nullptr;
  }
}
// Host-side shape info of the TAD pack.
sd::LongType const *getPrimaryShapeInfo(sd::TadPack *pack) { return pack->primaryShapeInfo(); }
// Host-side TAD offsets of the pack.
sd::LongType const *getPrimaryOffsets(sd::TadPack *pack) { return pack->primaryOffsets(); }
// Device-side shape info of the TAD pack.
sd::LongType const *getSpecialShapeInfo(sd::TadPack *pack) { return pack->specialShapeInfo(); }
// Device-side TAD offsets of the pack.
sd::LongType const *getSpecialOffsets(sd::TadPack *pack) { return pack->specialOffsets(); }
// Number of TADs contained in the pack.
sd::LongType getNumberOfTads(sd::TadPack *pack) { return pack->numberOfTads(); }
// Length (in elements) of the pack's shape-info buffer.
int getShapeInfoLength(sd::TadPack *pack) { return pack->shapeInfoLength(); }
/**
 * Async copy into the device __constant__ buffer (deviceConstantMemory) at byte
 * offset `dst`, on the stream passed in `reserved`.
 *
 * @param flags copy direction: 0 = H2H, 1 = H2D, 2 = D2H, 3 = D2D
 * @return 1 on success (and when the CUDA call fails — only the error reference
 *         is updated), 0 when `flags` is out of range
 */
int memcpyConstantAsync(sd::LongType dst, sd::Pointer src, sd::LongType size, int flags, sd::Pointer reserved) {
  cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(reserved);
  cudaMemcpyKind kind;
  DEBUG_KERNEL(pStream, -1);
  switch (flags) {
    case 0: {
      kind = cudaMemcpyHostToHost;
    } break;
    case 1: {
      kind = cudaMemcpyHostToDevice;
    } break;
    case 2: {
      kind = cudaMemcpyDeviceToHost;
    } break;  // BUG FIX: missing break made flags==2 fall through to DeviceToDevice
    case 3: {
      kind = cudaMemcpyDeviceToDevice;
    } break;
    default: {
      // BUG FIX: previously there was no default, leaving `kind` uninitialized
      // for unknown flags (undefined behavior).
      sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
      sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFINED MEMCPY");
      return 0;
    }
  }
  auto dZ = cudaMemcpyToSymbolAsync(deviceConstantMemory, const_cast<const void *>(src), size, dst, kind, *pStream);
  if (dZ != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemcpyToSymbolAsync failed");
  }
  return 1;
}
/**
 * Resolves the device address of the __constant__ buffer (deviceConstantMemory).
 *
 * @return device address, or nullptr on failure (error recorded on default context)
 */
sd::Pointer getConstantSpace() {
  // FIX: initialize so a failed symbol lookup returns nullptr instead of garbage.
  sd::Pointer dConstAddr = nullptr;
  cudaError_t dZ = cudaGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);
  if (dZ != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaGetSymbolAddress failed");
  }
  return dConstAddr;
}
// Copies `n` TADs ("rows") selected by `indexes` from dbX into dbZ on the GPU,
// using pre-computed TAD shape/offset info for both source and destination.
// extraPointers[1] carries the cudaStream_t*. Exceptions are caught and
// recorded on the default LaunchContext.
void pullRows(sd::Pointer *extraPointers, OpaqueDataBuffer *dbX, sd::LongType const *xShapeInfo,
              sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *zShapeInfo,
              sd::LongType const *dZShapeInfo, sd::LongType n, sd::LongType *indexes, sd::LongType const *tadShapeInfo,
              sd::LongType const *tadOffsets, sd::LongType const *zTadShapeInfo, sd::LongType const *zTadOffsets) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
    dim3 launchDims(64, 256, 1024);
    auto xType = sd::ArrayOptions::dataType(xShapeInfo);
    BUILD_SINGLE_SELECTOR(xType, pullRowsKernelGeneric,
                          (launchDims, stream, dbX->special(), dbZ->special(), n, indexes, tadShapeInfo, tadOffsets,
                           zTadShapeInfo, zTadOffsets),
                          SD_COMMON_TYPES);
    DEBUG_KERNEL(stream, -1);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Averages `n` arrays of `length` elements into z/dz. mode (from extras[3]) selects
// the execution path: 0 launches the GPU averaging kernel on dx/dz, anything else
// runs the host implementation on x/z. Exceptions are recorded on the default context.
void average(sd::Pointer *extras, sd::Pointer *x, sd::LongType const *xShapeInfo, sd::Pointer *dx,
             sd::LongType const *dXShapeInfo, void *z, sd::LongType const *zShapeInfo, void *dz,
             sd::LongType const *dzShapeInfo, int n, sd::LongType length, bool propagate) {
  try {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extras[1]);
    int mode = getDeviceId(extras[3]);
    auto dX = reinterpret_cast<void **>(dx);
    if (sd::Environment::getInstance().isDebugAndVerbose()) printf("averageFloat called\n");
    auto xType = sd::ArrayOptions::dataType(xShapeInfo);
    // launching on gpu
    if (mode == 0) {
      dim3 launchDims(256, 256, 4096);
      BUILD_SINGLE_SELECTOR(xType, averagingKernelGeneric, (launchDims, stream, dX, dz, n, length, propagate),
                            SD_COMMON_TYPES);
      sd::DebugHelper::checkErrorCode(stream, "AverageFloat(...) failed");
    } else {
      // launching on host memory
      BUILD_SINGLE_SELECTOR(xType, sd::SpecialMethods, ::averageGeneric(x, z, zShapeInfo, n, length, propagate),
                            SD_COMMON_TYPES);
    }
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Accumulates (sums) `n` arrays of `length` elements into z/dz. mode (from extras[3])
// selects GPU (0, kernel on dx/dz) vs host (accumulateGeneric on x/z) execution.
// Exceptions are recorded on the default LaunchContext.
void accumulate(sd::Pointer *extras, sd::Pointer *x, sd::LongType const *xShapeInfo, sd::Pointer *dx,
                sd::LongType const *dXShapeInfo, void *z, sd::LongType const *zShapeInfo, void *dz,
                sd::LongType const *dzShapeInfo, int n, sd::LongType length) {
  try {
    auto stream = reinterpret_cast<cudaStream_t *>(extras[1]);
    int mode = getDeviceId(extras[3]);
    auto dX = reinterpret_cast<void **>(dx);
    if (sd::Environment::getInstance().isDebugAndVerbose()) printf("accumulateFloat called\n");
    auto xType = sd::ArrayOptions::dataType(xShapeInfo);
    // launching on gpu
    if (mode == 0) {
      dim3 launchDims(n, 256, 16384);
      BUILD_SINGLE_SELECTOR(xType, accumulateKernelGeneric, (launchDims, stream, dX, dz, n, length), SD_COMMON_TYPES);
      sd::DebugHelper::checkErrorCode(stream, "AccumulateFloat(...) failed");
    } else {
      // launching on host memory
      BUILD_SINGLE_SELECTOR(xType, sd::SpecialMethods, ::accumulateGeneric(x, z, zShapeInfo, n, length),
                            SD_COMMON_TYPES);
    }
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Shuffles N device arrays along their TADs according to shuffleMap, via the GPU
// shuffle kernel. The data type is taken from the first array's host shape info.
// Exceptions are recorded on the default LaunchContext.
void shuffle(sd::Pointer *extras, sd::Pointer *x, sd::Pointer *xShapeInfo, sd::Pointer *dx, sd::Pointer *dXShapeInfo,
             sd::Pointer *z, sd::Pointer *zShapeInfo, sd::Pointer *dz, sd::Pointer *dZShapeInfo, int N, int *shuffleMap,
             sd::Pointer *tadShapeInfo, sd::Pointer *tadOffsets) {
  try {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extras[1]);
    auto dX = reinterpret_cast<void **>(dx);
    auto dZ = reinterpret_cast<void **>(dz);
    auto xShape = reinterpret_cast<sd::LongType **>(xShapeInfo);
    auto dxShape = reinterpret_cast<sd::LongType **>(dXShapeInfo);
    auto tadOnlyShapeInfo = reinterpret_cast<sd::LongType **>(tadShapeInfo);
    auto tadOffset = reinterpret_cast<sd::LongType **>(tadOffsets);
    auto xType = sd::ArrayOptions::dataType(xShape[0]);
    dim3 launchDims(256, 512, 8192);
    BUILD_SINGLE_SELECTOR(xType, shuffleKernelGeneric,
                          (launchDims, stream, dX, dxShape, dZ, N, shuffleMap, tadOnlyShapeInfo, tadOffset),
                          SD_COMMON_TYPES);
    sd::DebugHelper::checkErrorCode(stream, "shuffle(...) failed");
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Whether this binary was built with experimental (mixed-type) ops enabled.
bool isExperimentalEnabled() { return sd::Environment::getInstance().isExperimentalBuild(); }
// Sets the global minimum thread count, clamped into [32, maxThreads].
void setOmpMinThreads(int threads) {
  const int lowerBounded = sd::math::sd_max<int>(32, threads);
  minThreads = sd::math::sd_min<int>(maxThreads, lowerBounded);
}
// Current device id for this thread, as tracked by the AffinityManager.
int getDevice() { return sd::AffinityManager::currentDeviceId(); }
// No-op on the CUDA backend; kept only to satisfy the shared NativeOps interface.
void setElementThreshold(int num) {
  // this is no-op for CUDA
}
// No-op on the CUDA backend; kept only to satisfy the shared NativeOps interface.
void setTADThreshold(int num) {
  // this is no-op for CUDA
}
////////////////////////////////////////////////////////////////////////
// Executes summary-statistics op `opNum` over the whole X array, writing into Z.
// extraPointers slots 1/4/5/3 seed the LaunchContext (presumably stream and scratch
// buffers — TODO confirm slot meanings against the Java caller).
// Exceptions are recorded on the default LaunchContext.
void execSummaryStats(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                      sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                      sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, bool biasCorrected) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execSummaryStats(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                          ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
                                          extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(),
                                          ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
                                          biasCorrected);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Executes summary-statistics op `opNum` along the dimensions given in dbDimension
// (one result per TAD), writing into Z. Dimension indices are read from the host
// (primary) buffer to compute the count, while the device (special) buffer is passed
// to the executioner. Exceptions are recorded on the default LaunchContext.
void execSummaryStatsTad(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                         sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                         sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo,
                         OpaqueDataBuffer *dbDimension, sd::LongType const *hDimensionShape,
                         sd::LongType const *dDimensionShape, bool biasCorrected, sd::LongType const *tadShapeInfo,
                         sd::LongType const *tadOffsets) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbDimension});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execSummaryStats(
        &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(),
        hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
        reinterpret_cast<int *>(dbDimension->special()), dimensionLength, tadShapeInfo, tadOffsets, biasCorrected);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbDimension});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Executes reduce3 op `opNum` (pairwise reduction over X and Y) into Z over the
// whole arrays. Exceptions are recorded on the default LaunchContext.
void execReduce3(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                 sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbY,
                 sd::LongType const *hYShapeInfo, sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ,
                 sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execReduce3(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                     ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
                                     extraParams, dbY->primary(), hYShapeInfo, dbY->special(),
                                     ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
                                     dbZ->primary(), hZShapeInfo, dbZ->special(),
                                     ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special());
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
/**
 * Executes reduce3 op `opNum` along the dimensions in dbDimension (TAD variant).
 * When the TAD length equals the full X or Y length, the dimensional call is
 * equivalent to a full reduce3 and that path is taken instead.
 * Exceptions are recorded on the default LaunchContext.
 */
void execReduce3Tad(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                    sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbY,
                    sd::LongType const *hYShapeInfo, sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ,
                    sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension,
                    sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape,
                    sd::LongType const *tadOnlyShapeInfo, sd::LongType const *tadOffsets,
                    sd::LongType const *yTadOnlyShapeInfo, sd::LongType const *yTadOffsets) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
    auto tadPack =
        sd::ConstantTadHelper::getInstance().tadForDimensions(hXShapeInfo, dimension, shape::length(hDimensionShape));
    auto tadLength = shape::length(tadPack.primaryShapeInfo());
    auto yLength = shape::length(hYShapeInfo);
    auto xLength = shape::length(hXShapeInfo);
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    if (tadLength == yLength || tadLength == xLength) {
      NativeOpExecutioner::execReduce3(
          &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
          ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbY->primary(),
          hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
          dbZ->primary(), hZShapeInfo, dbZ->special(),
          ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dimension, dimensionLength,
          tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets);
    } else
      NativeOpExecutioner::execReduce3TAD(
          &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
          ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbY->primary(),
          hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
          dbZ->primary(), hZShapeInfo, dbZ->special(),
          ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dimension, dimensionLength,
          // BUG FIX: the X-TAD offsets argument previously passed yTadOffsets,
          // mismatching the argument order used in the branch above.
          tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Executes reduce3 op `opNum` over the whole X and Y arrays, producing a scalar in Z.
// Exceptions are recorded on the default LaunchContext.
void execReduce3Scalar(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                       sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbY,
                       sd::LongType const *hYShapeInfo, sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ,
                       sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execReduce3Scalar(
        &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbY->primary(),
        hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
        dbZ->primary(), hZShapeInfo, dbZ->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special());
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Executes boolean scalar op `opNum`: element-wise X (op) scalar -> Z.
// Exceptions are recorded on the default LaunchContext.
void execScalarBool(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                    sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                    sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbScalar, sd::LongType const *hScalarShapeInfo,
                    sd::LongType const *dScalarShapeInfo, void *extraParams) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalar});
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execScalarBool(
        &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo,
        dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
        dbScalar->primary(), hScalarShapeInfo, dbScalar->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hScalarShapeInfo).special(), extraParams);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalar});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Executes boolean scalar op `opNum` along TADs: each TAD of X is combined with the
// corresponding scalar from dbScalars, writing into Z's TADs.
// NOTE(review): extraParams is passed right after the X arguments here, while the
// non-TAD execScalarBool above passes it last — presumably a different overload;
// confirm against NativeOpExecutioner's declarations.
void execScalarBoolTad(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                       sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                       sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbScalars,
                       sd::LongType const *hScalarShapeInfo, sd::LongType const *dScalarShapeInfo, void *extraParams,
                       OpaqueDataBuffer *dbDimension, sd::LongType const *hDimensionShape,
                       sd::LongType const *dDimensionShape, sd::LongType const *tadShapeInfo,
                       sd::LongType const *tadOffsets, sd::LongType const *tadShapeInfoZ,
                       sd::LongType const *tadOffsetsZ) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalars});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execScalarBool(
        &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(),
        hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
        dbScalars->primary(), hScalarShapeInfo, dbScalars->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hScalarShapeInfo).special(), dimension, dimensionLength,
        tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalars});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Executes scalar op `opNum`: element-wise X (op) scalar -> Z.
// Exceptions are recorded on the default LaunchContext.
void execScalar(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbScalar, sd::LongType const *hScalarShapeInfo,
                sd::LongType const *dScalarShapeInfo, void *extraParams) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalar});
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execScalar(
        &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo,
        dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
        dbScalar->primary(), hScalarShapeInfo, dbScalar->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hScalarShapeInfo).special(), extraParams);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalar});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Executes scalar op `opNum` along TADs: each TAD of X is combined with the
// corresponding scalar from dbScalars, writing into Z's TADs. X and scalar data
// types must match unless the experimental build is enabled (BOOL is also rejected).
// Launches the scalar transform kernel directly rather than going through
// NativeOpExecutioner. Exceptions are recorded on the default LaunchContext.
void execScalarTad(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                   sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                   sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbScalars, sd::LongType const *hScalarShapeInfo,
                   sd::LongType const *dScalarShapeInfo, void *extraParams, OpaqueDataBuffer *dbDimension,
                   sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape,
                   sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets, sd::LongType const *tadShapeInfoZ,
                   sd::LongType const *tadOffsetsZ) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalars});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
    auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
    auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo);
    auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
    if (yType != xType && yType != sd::DataType::BOOL && !isExperimentalEnabled())
      throw sd::datatype_exception::build("execScalar both operands must have same data type", xType, yType);
    dim3 launchDims(256, 256, 16384);
#ifdef SD_EXPERIMENTAL_ENABLED
    BUILD_PAIRWISE_SELECTOR(
        xType, yType, zType, functions::scalar::ScalarTransform,
        ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams,
                                    dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ),
        SD_COMMON_TYPES, SD_COMMON_TYPES);
#else
    BUILD_SINGLE_SELECTOR_THRICE(
        xType, functions::scalar::ScalarTransform,
        ::executeCudaAlongDimension(
            launchDims, stream, opNum, dbX->special(),
            ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->special(),
            ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dbScalars->special(),
            extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ),
        SD_COMMON_TYPES);
#endif
    DEBUG_KERNEL(stream, opNum);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalars});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// No-op stub: aggregate ops are not implemented on this backend.
void execAggregate(sd::Pointer *extraPointers, int opNum, void **arguments, int numArguments, sd::LongType **shapes,
                   int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays,
                   void *realArguments, int numRealArguments, sd::DataType dtype) {}
// No-op stub: batched aggregate execution is not implemented on this backend.
void batchExecutor(sd::Pointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes,
                   int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments,
                   sd::DataType dtype) {}
// No-op stub: batched aggregate execution is not implemented on this backend.
void execAggregateBatch(sd::Pointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes,
                        int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments,
                        sd::DataType dtype) {}
////////////////////////////////////////////////////////////////////////
// Executes random op `opNum` with the given RNG state, filling Z only (no inputs).
// Exceptions are recorded on the default LaunchContext.
void execRandom(sd::Pointer *extraPointers, int opNum, sd::Pointer stateHost, OpaqueDataBuffer *dbZ,
                sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, void *extraArguments) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {});
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execRandom(&lc, opNum, stateHost, dbZ->primary(), hZShapeInfo, dbZ->special(),
                                    ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
                                    extraArguments);
    InteropDataBuffer::registerSpecialUse({dbZ}, {});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Executes random op `opNum` with one input array X and output Z.
// Exceptions are recorded on the default LaunchContext.
void execRandom2(sd::Pointer *extraPointers, int opNum, sd::Pointer stateHost, OpaqueDataBuffer *dbX,
                 sd::LongType const *hXShapeInfo, sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ,
                 sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, void *extraArguments) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execRandom(
        &lc, opNum, stateHost, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo,
        dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraArguments);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
void execRandom3(sd::Pointer *extraPointers, int opNum, sd::Pointer stateHost, OpaqueDataBuffer *dbX,
                 sd::LongType const *hXShapeInfo, sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbY,
                 sd::LongType const *hYShapeInfo, sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ,
                 sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, void *extraArguments) {
  // Two-input random op: reads X and Y, writes Z using the RNG state in stateHost.
  // On failure the error code/message is recorded on the default LaunchContext.
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execRandom(
        &lc, opNum, stateHost, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbY->primary(), hYShapeInfo,
        dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(),
        hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
        extraArguments);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
  } catch (std::exception &e) {
    auto err = sd::LaunchContext::defaultContext()->errorReference();
    err->setErrorCode(1);
    err->setErrorMessage(e.what());
  }
}
// Creates a RandomBuffer spanning a host buffer (extraPointers[0]) and a device
// mirror (ptrToBuffer), seeds it, generates the sequence on the host, and copies
// it to the device asynchronously on the stream in extraPointers[1].
// Returns the RandomBuffer* as an opaque pointer; release with destroyRandom().
sd::Pointer initRandom(sd::Pointer *extraPointers, long seed, long bufferSize, sd::Pointer ptrToBuffer) {
  unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
  cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
  // we don't synchronize at random initialization, it's safe to go unsync here
  // cudaStreamSynchronize(*stream);
  auto ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer);
  auto buffer = new sd::random::RandomBuffer(seed, bufferSize, reinterpret_cast<uint64_t *>(ptrHost),
                                             reinterpret_cast<uint64_t *>(ptrDev));
  buffer->propagateToDevice(buffer, *stream);
  sd::DebugHelper::checkErrorCode(stream, "initRandom(...) failed A");
  // we generate sequence in the host memory
  sd::random::Xoroshiro128 generator(buffer);
  generator.refreshBuffer();
  // and copy it to gpu; bufferSize * 8 == element count * sizeof(uint64_t)
  cudaMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, cudaMemcpyHostToDevice, *stream);
  sd::DebugHelper::checkErrorCode(stream, "initRandom(...) failed B");
  return buffer;
}
void destroyRandom(sd::Pointer ptrBuffer) {
  // Releases a RandomBuffer previously created by initRandom().
  auto rng = reinterpret_cast<sd::random::RandomBuffer *>(ptrBuffer);
  // FIXME: it's bad thing, but we can't know in advance, which stream(s) where using this generator in practice
  cudaDeviceSynchronize();
  delete rng;
}
// Re-seeds an existing RandomBuffer, regenerates the host-side sequence,
// and copies it back to the device mirror on the caller's stream.
void refreshBuffer(sd::Pointer *extraPointers, long seed, sd::Pointer ptrRandom) {
  sd::random::RandomBuffer *buffer = reinterpret_cast<sd::random::RandomBuffer *>(ptrRandom);
  unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
  cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
  // wait for any in-flight work that may still be consuming the old sequence
  cudaStreamSynchronize(*stream);
  uint64_t *ptrDev = buffer->getDeviceBuffer();
  // update rng state
  buffer->setSeed(seed);
  buffer->setOffset(0);
  buffer->propagateToDevice(buffer, *stream);
  // refresh buffer on host side
  sd::random::Xoroshiro128 generator(buffer);
  generator.refreshBuffer();
  // copy back to gpu; getSize() * 8 == element count * sizeof(uint64_t)
  cudaMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, cudaMemcpyHostToDevice, *stream);
}
void reSeedBuffer(sd::Pointer *extraPointers, long seed, sd::Pointer ptrRandom) {
  // Re-seeds the generator state only (no host-side sequence regeneration,
  // unlike refreshBuffer) and pushes the new state to the device.
  auto rng = reinterpret_cast<sd::random::RandomBuffer *>(ptrRandom);
  auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
  cudaStreamSynchronize(*stream);
  // reset rng state before propagating it
  rng->reSeed(seed);
  rng->setOffset(0);
  rng->propagateToDevice(rng, *stream);
}
/**
 * Computes the length (in sd::LongType elements) of a shape-info buffer
 * from the rank stored inside it.
 * @param buffer the shape-info buffer pointer to inspect
 * @return number of elements in the shape-info buffer
 */
int lengthForShapeBufferPointer(sd::Pointer buffer) {
  auto shapeInfo = reinterpret_cast<sd::LongType *>(buffer);
  auto rank = shape::rank(shapeInfo);
  return shape::shapeInfoLength(rank);
}
/**
 * Converts a numeric address (as passed across the JNI boundary) back
 * into a raw pointer.
 * @param address the numeric address
 * @return the pointer value for the given address
 */
sd::Pointer pointerForAddress(sd::LongType address) {
  return reinterpret_cast<sd::Pointer>(address);
}
// Tears X apart along its TADs into the caller-provided target buffers
// (presumably one device pointer per TAD in `targets` — see tearKernelGeneric;
// TODO confirm against the kernel implementation).
void tear(sd::Pointer *extras, OpaqueDataBuffer *dbX, sd::LongType const *xShapeInfo, sd::LongType const *dXShapeInfo,
          sd::Pointer *targets, sd::LongType const *zShapeInfo, sd::LongType const *tadShapeInfo,
          sd::LongType const *tadOffsets) {
  try {
    InteropDataBuffer::prepareSpecialUse({}, {dbX});
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extras[1]);
    dim3 launchDims(512, 512, 512);
    // dispatch the typed kernel based on X's data type
    auto xType = sd::ArrayOptions::dataType(xShapeInfo);
    BUILD_SINGLE_SELECTOR(
        xType, tearKernelGeneric,
        (launchDims, stream, dbX->special(), dXShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets),
        SD_COMMON_TYPES);
    sd::DebugHelper::checkErrorCode(stream, "tearFloat(...) failed");
    InteropDataBuffer::registerSpecialUse({}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Recursive work-efficient prefix scan over int data, derived from the classic
// GPU Gems 3 "prescan" implementation: scan each block on the device, recursively
// scan the per-block sums, then add the scanned sums back in (uniformAdd).
// extras[1] is the stream; extras[2] points to per-recursion-level temp buffers
// (g_scanBlockSums) that the caller must have sized for the deepest level used.
void prescanArrayRecursive(sd::Pointer *extras, int *dZ, int *dX, int numElements, int level) {
  auto stream = reinterpret_cast<cudaStream_t *>(extras[1]);
  auto g_scanBlockSums = reinterpret_cast<int **>(extras[2]);
  int blockSize = 512;  // max size of the thread blocks
  int numBlocks = sd::math::sd_max<int>(1, static_cast<int>(ceil(static_cast<float>(numElements) / (2.f * blockSize))));
  int numThreads;
  // each thread handles two elements, so a full block covers 2*blockSize items
  if (numBlocks > 1)
    numThreads = blockSize;
  else if (sd::isPowerOfTwo(numElements))
    numThreads = numElements / 2;
  else
    numThreads = sd::floorPow2(numElements);
  int numEltsPerBlock = numThreads * 2;
  // if this is a non-power-of-2 array, the last block will be non-full
  // compute the smallest power of 2 able to compute its scan.
  int numEltsLastBlock = numElements - (numBlocks - 1) * numEltsPerBlock;
  int numThreadsLastBlock = sd::math::sd_max<int>(1, numEltsLastBlock / 2);
  int np2LastBlock = 0;
  int sharedMemLastBlock = 0;
  if (numEltsLastBlock != numEltsPerBlock) {
    np2LastBlock = 1;
    if (!isPowerOfTwo(numEltsLastBlock)) numThreadsLastBlock = floorPow2(numEltsLastBlock);
    unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
    sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace);
  }
  // padding space is used to avoid shared memory bank conflicts
  int extraSpace = numEltsPerBlock / NUM_BANKS;
  int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace);
  // setup execution parameters
  // if NP2, we process the last block separately
  dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
  dim3 threads(numThreads, 1, 1);
  dim3 gridOnes(1, 1, 1);
  dim3 threadsOnes(numThreadsLastBlock, 1, 1);
  // clamp shared-memory requests to a 2KB floor
  if (sharedMemSize < 2048) sharedMemSize = 2048;
  if (sharedMemLastBlock < 2048) sharedMemLastBlock = 2048;
  // execute the scan
  if (numBlocks > 1) {
    sd::prescanLauncher<true, false>(grid, threads, sharedMemSize, stream, dZ, dX, g_scanBlockSums[level],
                                     numThreads * 2, 0, 0);
    if (np2LastBlock) {
      sd::prescanLauncher<true, true>(gridOnes, threadsOnes, sharedMemLastBlock, stream, dZ, dX, g_scanBlockSums[level],
                                      numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
    }
    // After scanning all the sub-blocks, we are mostly done. But now we
    // need to take all of the last values of the sub-blocks and scan those.
    // This will give us a new value that must be added to each block to
    // get the final results.
    // recursive (CPU) call
    prescanArrayRecursive(extras, g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level + 1);
    sd::uniformAdd<<<grid, threads, 1024, *stream>>>(dZ, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0);
    if (np2LastBlock) {
      sd::uniformAdd<<<1, numThreadsLastBlock, 1024, *stream>>>(dZ, g_scanBlockSums[level], numEltsLastBlock,
                                                                numBlocks - 1, numElements - numEltsLastBlock);
    }
  } else if (isPowerOfTwo(numElements)) {
    // single full block, power-of-two length: no block-sums pass needed
    sd::prescanLauncher<false, false>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numThreads * 2, 0, 0);
  } else {
    sd::prescanLauncher<false, true>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numElements, 0, 0);
  }
  sd::DebugHelper::checkErrorCode(stream, "prescanArray(...) failed");
}
////////////////////////////////////////////////////////////////////////
// Executes an "all"-variant reduce3 op across the TADs of X and Y along the
// given dimensions, writing results into Z.
void execReduce3All(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                    sd::LongType const *dXShapeInfo, void *extraParamsVals, OpaqueDataBuffer *dbY,
                    sd::LongType const *hYShapeInfo, sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ,
                    sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension,
                    sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape,
                    sd::LongType const *xTadShapeInfo, sd::LongType const *xOffsets, sd::LongType const *yTadShapeInfo,
                    sd::LongType const *yOffsets) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY, dbDimension});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
    // NOTE(review): `dimension` (the host-side pointer) is never used below —
    // the call passes dbDimension->special() instead; confirm this local is vestigial.
    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execReduce3All(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
                                        extraParamsVals, dbY->primary(), hYShapeInfo, dbY->special(),
                                        ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
                                        dbZ->primary(), hZShapeInfo, dbZ->special(),
                                        ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
                                        reinterpret_cast<int *>(dbDimension->special()), dimensionLength, xTadShapeInfo,
                                        xOffsets, yTadShapeInfo, yOffsets);
    // NOTE(review): dbDimension was included in prepareSpecialUse/preparePrimaryUse
    // above but is absent here, and there is no matching registerPrimaryUse —
    // confirm whether this asymmetry is intentional.
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Sorts a device-side array in place with a bitonic sorting network.
// Power-of-two lengths (up to 10M elements) take the fixed-step bitonic path;
// all other lengths fall back to the arbitrary-step bitonic variant.
// Fix vs. original: removed unused locals (xEWS, dg, half) that were computed
// but never read; launch sequence is unchanged.
void sort(sd::Pointer *extraPointers, void *x, sd::LongType const *xShapeInfo, void *dX,
          sd::LongType const *dXShapeInfo, bool descending) {
  try {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
    auto xLength = shape::length(xShapeInfo);
    auto xType = sd::ArrayOptions::dataType(xShapeInfo);
    // check if xLength is a power of 2, and use bitonic sort, if that's the case
    if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
      int numThreads = sd::math::sd_min<int>(512, xLength);
      int numBlocks = xLength / numThreads;
      if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++;
      dim3 launchDims(numBlocks, numThreads, 32768);
      // classic bitonic network: k = size of sorted subsequences, j = compare distance
      for (int k = 2; k <= xLength; k = 2 * k) {
        for (int j = k >> 1; j > 0; j = j >> 1) {
          BUILD_SINGLE_SELECTOR(xType, bitonicSortStepGeneric,
                                (launchDims, stream, dX, dXShapeInfo, j, k, xLength, descending), SD_COMMON_TYPES);
        }
      }
    } else {
      int numThreads = sd::math::sd_min<int>(512, xLength);
      int numBlocks = xLength / numThreads;
      if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++;
      numBlocks = sd::math::sd_min<int>(512, numBlocks);
      dim3 launchDims(numBlocks, numThreads, 32768);
      // round the outer window bound up past the next power of two above xLength
      int max = 2;
      while (max < xLength) {
        max <<= 1;
      }
      max <<= 1;
      for (int window = 2; window < max; window <<= 1) {
        int n = window;
        int rev = 0;
        do {
          BUILD_SINGLE_SELECTOR(xType, bitonicArbitraryStepGeneric,
                                (launchDims, stream, dX, dXShapeInfo, n, xLength, rev, descending), SD_COMMON_TYPES);
          n >>= 1;
          rev = 1;
        } while (n > 1);
      }
    }
    sd::DebugHelper::checkErrorCode(stream, "sort(...) failed");
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Sorts key array X in place on the device, permuting value array Y alongside it
// (bitonic sort; fixed-step path for power-of-two lengths, arbitrary-step otherwise).
// Keys and values must have the same length; empty arrays are a no-op.
void sortByKey(sd::Pointer *extraPointers, void *x, sd::LongType const *xShapeInfo, void *dX,
               sd::LongType const *dXShapeInfo, void *y, sd::LongType const *yShapeInfo, void *dy,
               sd::LongType const *dyShapeInfo, bool descending) {
  try {
    auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
    auto xLength = shape::length(xShapeInfo);
    auto yLength = shape::length(yShapeInfo);
    auto xEWS = shape::elementWiseStride(xShapeInfo);
    auto xType = sd::ArrayOptions::dataType(xShapeInfo);
    auto yType = sd::ArrayOptions::dataType(yShapeInfo);
    if (shape::isEmpty(xShapeInfo) || shape::isEmpty(yShapeInfo)) return;
    if (xLength != yLength) throw std::runtime_error("sortByKey: keys and values must have the same size");
    // check if xLength is a power of 2, and use bitonic sort, if that's the case
    if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
      int numThreads = sd::math::sd_min<int>(512, xLength);
      int numBlocks = xLength / numThreads;
      if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++;
      dim3 launchDims(numBlocks, numThreads, 32768);
      // classic bitonic network: k = size of sorted subsequences, j = compare distance
      for (int k = 2; k <= xLength; k = 2 * k) {
        for (int j = k >> 1; j > 0; j = j >> 1) {
          BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey,
                                (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, j, k, xLength, descending),
                                SD_COMMON_TYPES, SD_COMMON_TYPES);
        }
      }
    } else {
      int numThreads = sd::math::sd_min<int>(512, xLength);
      int numBlocks = xLength / numThreads;
      if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++;
      numBlocks = sd::math::sd_min<int>(512, numBlocks);
      dim3 launchDims(numBlocks, numThreads, 32768);
      // round the outer window bound up past the next power of two above xLength
      int max = 2, dg = 0;
      while (max < xLength) {
        max <<= 1;
        dg++;
      }
      max <<= 1;
      for (int window = 2; window < max; window <<= 1) {
        int n = window;
        int rev = 0;
        do {
          int half = n >> 1;
          BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey,
                                (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, n, xLength, rev, descending),
                                SD_COMMON_TYPES, SD_COMMON_TYPES);
          n >>= 1;
          rev = 1;
        } while (n > 1);
      }
    }
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Sorts by *values*: mirror of sortByKey with the roles of X and Y swapped —
// note xType is deliberately taken from yShapeInfo (and vice versa), and the
// kernels receive (dy, dyShapeInfo) in the key position, so Y acts as the sort
// key and X is permuted alongside it.
void sortByValue(sd::Pointer *extraPointers, void *x, sd::LongType const *xShapeInfo, void *dX,
                 sd::LongType const *dXShapeInfo, void *y, sd::LongType const *yShapeInfo, void *dy,
                 sd::LongType const *dyShapeInfo, bool descending) {
  try {
    auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
    auto xLength = shape::length(xShapeInfo);
    auto yLength = shape::length(yShapeInfo);
    auto xEWS = shape::elementWiseStride(xShapeInfo);
    // intentional swap: values become the keys for the type dispatch below
    auto xType = sd::ArrayOptions::dataType(yShapeInfo);
    auto yType = sd::ArrayOptions::dataType(xShapeInfo);
    if (shape::isEmpty(xShapeInfo) || shape::isEmpty(yShapeInfo)) return;
    if (xLength != yLength) throw std::runtime_error("sortByValue: keys and values must have the same size");
    // check if xLength is a power of 2, and use bitonic sort, if that's the case
    if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
      int numThreads = sd::math::sd_min<int>(512, xLength);
      int numBlocks = xLength / numThreads;
      if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++;
      dim3 launchDims(numBlocks, numThreads, 32768);
      // classic bitonic network: k = size of sorted subsequences, j = compare distance
      for (int k = 2; k <= xLength; k = 2 * k) {
        for (int j = k >> 1; j > 0; j = j >> 1) {
          BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey,
                                (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, j, k, xLength, descending),
                                SD_COMMON_TYPES, SD_COMMON_TYPES);
        }
      }
    } else {
      int numThreads = sd::math::sd_min<int>(512, xLength);
      int numBlocks = xLength / numThreads;
      if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++;
      numBlocks = sd::math::sd_min<int>(512, numBlocks);
      dim3 launchDims(numBlocks, numThreads, 32768);
      // round the outer window bound up past the next power of two above xLength
      int max = 2, dg = 0;
      while (max < xLength) {
        max <<= 1;
        dg++;
      }
      max <<= 1;
      for (int window = 2; window < max; window <<= 1) {
        int n = window;
        int rev = 0;
        do {
          int half = n >> 1;
          BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey,
                                (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, n, xLength, rev, descending),
                                SD_COMMON_TYPES, SD_COMMON_TYPES);
          n >>= 1;
          rev = 1;
        } while (n > 1);
      }
    }
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Sorts each TAD of key array X along the given dimensions, permuting the
// matching TADs of value array Y alongside. One block per TAD.
void sortTadByKey(sd::Pointer *extraPointers, void *x, sd::LongType const *xShapeInfo, void *dX,
                  sd::LongType const *dXShapeInfo, void *y, sd::LongType const *yShapeInfo, void *dy,
                  sd::LongType const *dyShapeInfo, int *dimension, int dimensionLength, bool descending) {
  try {
    auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
    // NOTE(review): `context` is unused below — confirm it is vestigial
    auto context =
        extraPointers[0] == 0 ? LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]);
    auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(xShapeInfo, dimension, dimensionLength);
    // grid = one block per TAD, 256 threads, 2048 bytes shared memory
    dim3 launchDims((int)tadPack.numberOfTads(), 256, 2048);
    auto xType = sd::ArrayOptions::dataType(xShapeInfo);
    auto yType = sd::ArrayOptions::dataType(yShapeInfo);
    BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey,
                          (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, nullptr, dimensionLength,
                           tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending),
                          SD_COMMON_TYPES, SD_COMMON_TYPES);
    sd::DebugHelper::checkErrorCode(stream, "sortTadKey(...) failed");
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// TAD-wise sort by *values*: mirror of sortTadByKey with X and Y swapped —
// xType/yType are deliberately cross-assigned and the kernel receives
// (dy, dyShapeInfo) in the key position, so Y's TADs drive the ordering.
void sortTadByValue(sd::Pointer *extraPointers, void *x, sd::LongType const *xShapeInfo, void *dX,
                    sd::LongType const *dXShapeInfo, void *y, sd::LongType const *yShapeInfo, void *dy,
                    sd::LongType const *dyShapeInfo, int *dimension, int dimensionLength, bool descending) {
  try {
    auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
    // NOTE(review): `context` is unused below — confirm it is vestigial
    auto context =
        extraPointers[0] == 0 ? LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]);
    auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(xShapeInfo, dimension, dimensionLength);
    // grid = one block per TAD, 256 threads, 2048 bytes shared memory
    dim3 launchDims((int)tadPack.numberOfTads(), 256, 2048);
    auto xType = sd::ArrayOptions::dataType(yShapeInfo);
    auto yType = sd::ArrayOptions::dataType(xShapeInfo);
    BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey,
                          (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, nullptr, dimensionLength,
                           tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending),
                          SD_COMMON_TYPES, SD_COMMON_TYPES);
    sd::DebugHelper::checkErrorCode(stream, "sortTadValue(...) failed");
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Sorts each TAD (sub-array along the given dimensions) of X in place.
// Fix vs. original: the shared-memory size was 33768, which looks like a typo —
// every other sort launch in this file uses 32768; normalized to 32768.
void sortTad(sd::Pointer *extraPointers, void *x, sd::LongType const *xShapeInfo, void *dX,
             sd::LongType const *dXShapeInfo, int *dimension, int dimensionLength, sd::LongType const *tadShapeInfo,
             sd::LongType const *tadOffsets, bool descending) {
  try {
    auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
    auto context =
        extraPointers[0] == 0 ? LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]);
    auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(xShapeInfo, dimension, dimensionLength);
    // grid = one block per TAD, 512 threads, 32768 bytes shared memory
    dim3 launchDims((int)tadPack.numberOfTads(), 512, 32768);
    auto xType = sd::ArrayOptions::dataType(xShapeInfo);
    BUILD_SINGLE_SELECTOR(
        xType, oesTadGeneric,
        (launchDims, stream, dX, dXShapeInfo, nullptr, dimensionLength, tadShapeInfo, tadOffsets, descending),
        SD_COMMON_TYPES);
    sd::DebugHelper::checkErrorCode(stream, "sortTad(...) failed");
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Sparse-index and memory-mapping helpers: none of these are implemented on
// the CUDA backend. The first three throw; the mmap pair are inert stubs.
void sortCooIndices(sd::Pointer *extraPointers, sd::LongType *indices, void *values, sd::LongType length,
                    const sd::LongType *xShapeInfo) {
  throw std::runtime_error("sortCooIndices:: Not implemented yet");
}
void ravelMultiIndex(sd::Pointer *extraPointers, sd::LongType *indices, sd::LongType *flatIndices, sd::LongType length,
                     sd::LongType *shapeInfo, int mode) {
  throw std::runtime_error("ravelMultiIndex:: Not implemented yet");
}
void unravelIndex(sd::Pointer *extraPointers, sd::LongType *indices, sd::LongType *flatIndices, sd::LongType length,
                  sd::LongType *shapeInfo) {
  throw std::runtime_error("unravelIndex:: Not implemented yet");
}
// no-op stubs: memory-mapped files are not supported on this backend
sd::LongType *mmapFile(sd::Pointer *extraPointers, const char *fileName, sd::LongType length) { return nullptr; }
void munmapFile(sd::Pointer *extraPointers, sd::LongType *ptrMap, sd::LongType length) {}
sd::graph::ResultWrapper *executeFlatGraph(sd::Pointer *extraPointers, sd::Pointer flatBufferPointer) {
  // Executes a serialized (flatbuffer) graph and returns its wrapped results,
  // or nullptr after recording the error on the default LaunchContext.
  try {
    return sd::graph::GraphExecutioner::executeFlatBuffer(flatBufferPointer);
  } catch (std::exception &e) {
    auto err = sd::LaunchContext::defaultContext()->errorReference();
    err->setErrorCode(1);
    err->setErrorMessage(e.what());
    return nullptr;
  }
}
// Accessors for a ResultWrapper produced by executeFlatGraph.
sd::LongType getResultWrapperSize(sd::graph::ResultWrapper *ptr) { return ptr->size(); }
sd::Pointer getResultWrapperPointer(sd::graph::ResultWrapper *ptr) { return ptr->pointer(); }
// Returns the registrator's serialized list of all custom operations.
const char *getAllCustomOps() { return sd::ops::OpRegistrator::getInstance().getAllCustomOperations(); }
// Builds a Context from the raw arg arrays, wraps the inputs as NDArrays, and
// asks the op to calculate its output shapes. inputBuffers packs host pointers
// in [0, numInputShapes) and device pointers in [numInputShapes, 2*numInputShapes).
// NDArrays are registered in varSpace — presumably varSpace owns and frees them
// on destruction; TODO confirm against VariableSpace semantics.
sd::ShapeList *_calculateOutputShapes(sd::Pointer *extraPointers, sd::ops::DeclarableOp *op, sd::Pointer *inputBuffers,
                                      sd::Pointer *inputShapes, int numInputShapes, double *tArgs, int numTArgs,
                                      sd::LongType *iArgs, int numIArgs, bool *bArgs, int numBArgs, int *dArgs,
                                      int numDArgs) {
  sd::graph::VariableSpace varSpace;
  Context block(2, &varSpace);
  sd::ShapeList inShapes;
  for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]);
  for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]);
  for (int e = 0; e < numBArgs; e++) block.getBArguments()->push_back(bArgs[e]);
  for (int e = 0; e < numDArgs; e++) block.getDArguments()->push_back((sd::DataType)dArgs[e]);
  for (int e = 0; e < numInputShapes; e++) {
    auto shape_ = reinterpret_cast<sd::LongType *>(inputShapes[e]);
    // we shouldn't copy buffer if that's empty array
    void *buffer_ = sd::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e];
    void *bufferD_ =
        sd::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e + numInputShapes];
    auto array = new sd::NDArray(buffer_, bufferD_, shape_);
    // block should contain references to proper variable
    varSpace.putVariable(1, e, array);
    block.pickInput(1, e);
    inShapes.push_back(shape_);
  }
  auto shapeList = op->calculateOutputShape(&inShapes, block);
  // detach the result from the workspace so it survives varSpace destruction
  if (varSpace.launchContext()->getWorkspace() != nullptr) shapeList->detach();
  return shapeList;
}
sd::ShapeList *calculateOutputShapes2(sd::Pointer *extraPointers, sd::LongType hash, sd::Pointer *inputBuffers,
                                      sd::Pointer *inputShapes, int numInputShapes, double *tArgs, int numTArgs,
                                      sd::LongType *iArgs, int numIArgs, bool *bArgs, int numBArgs, int *dArgs,
                                      int numDArgs) {
  // Resolves the op by hash and delegates to the buffer-aware shape calculator;
  // returns nullptr after recording the error on the default LaunchContext.
  try {
    auto op = sd::ops::OpRegistrator::getInstance().getOperation(hash);
    return _calculateOutputShapes(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs,
                                  numIArgs, bArgs, numBArgs, dArgs, numDArgs);
  } catch (std::exception &e) {
    auto err = sd::LaunchContext::defaultContext()->errorReference();
    err->setErrorCode(1);
    err->setErrorMessage(e.what());
    return nullptr;
  }
}
sd::ShapeList *_calculateOutputShapes(sd::Pointer *extraPointers, sd::ops::DeclarableOp *op, sd::Pointer *inputShapes,
                                      int numInputShapes, double *tArgs, int numTArgs, sd::LongType *iArgs,
                                      int numIArgs) {
  // Shape-only variant: no buffers and no VariableSpace are needed here.
  Context block(1);
  sd::ShapeList inShapes;
  for (int i = 0; i < numIArgs; i++) block.getIArguments()->push_back(iArgs[i]);
  for (int i = 0; i < numTArgs; i++) block.getTArguments()->push_back(tArgs[i]);
  for (int i = 0; i < numInputShapes; i++) inShapes.push_back(reinterpret_cast<sd::LongType *>(inputShapes[i]));
  return op->calculateOutputShape(&inShapes, block);
}
sd::ShapeList *calculateOutputShapes(sd::Pointer *extraPointers, sd::LongType hash, sd::Pointer *inputShapes,
                                     int numInputShapes, double *tArgs, int numTArgs, sd::LongType *iArgs,
                                     int numIArgs) {
  // Resolves the op by hash and delegates to the shape-only calculator;
  // returns nullptr after recording the error on the default LaunchContext.
  try {
    auto op = sd::ops::OpRegistrator::getInstance().getOperation(hash);
    return _calculateOutputShapes(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
  } catch (std::exception &e) {
    auto err = sd::LaunchContext::defaultContext()->errorReference();
    err->setErrorCode(1);
    err->setErrorMessage(e.what());
    return nullptr;
  }
}
// Accessors for a ShapeList produced by the calculateOutputShapes* functions.
sd::LongType getShapeListSize(sd::ShapeList *list) { return list->size(); }
sd::LongType const *getShape(sd::ShapeList *list, sd::LongType i) { return list->at(i); }
// Shared implementation behind execCustomOp: wraps the raw buffer/shape arrays
// into NDArrays, executes the op, and (for non-inplace calls) restores the
// caller-requested output ordering. Input/output buffer arrays pack host
// pointers in [0, n) and device pointers in [n, 2n).
static SD_INLINE sd::Status realExec(sd::ops::DeclarableOp *op, sd::Pointer *extraPointers, sd::LongType hash,
                                     sd::Pointer *inputBuffers, sd::Pointer *inputShapes, int numInputs,
                                     sd::Pointer *outputBuffers, sd::Pointer *outputShapes, int numOutputs,
                                     double *tArgs, int numTArgs, sd::LongType *iArgs, int numIArgs, bool *bArgs,
                                     int numBArgs, bool isInplace) {
  if (op == nullptr) sd_printf("Can't find requested operation: [%lld]\n", hash);
  // we're using the same fake nodeId everywhere here
  std::vector<sd::NDArray *> inputs(numInputs);
  std::vector<sd::NDArray *> outputs(numOutputs);
  std::vector<double> ttArgs(numTArgs);
  std::vector<bool> bbArgs(numBArgs);
  std::vector<sd::LongType> iiArgs(numIArgs);
  // filling block now with inputs
  for (int e = 0; e < numInputs; e++) {
    auto shape = reinterpret_cast<sd::LongType *>(inputShapes[e]);
    // empty arrays get null buffers
    void *buffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e];
    void *bufferD = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e + numInputs];
    inputs[e] = new sd::NDArray(buffer, bufferD, shape);
  }
  // if not inplace - transferring output arrays
  if (!isInplace)
    for (int e = 0; e < numOutputs; e++) {
      // we want to keep original output shape intact
      auto shape = shape::copyShape(reinterpret_cast<sd::LongType *>(outputShapes[e]));
      void *buffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e];
      void *bufferD = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e + numOutputs];
      // FIXME: revisit this.
      // zero the output buffer only when it does not alias any input buffer
      bool canNullify = true;
      for (int i = 0; i < numInputs; i++) {
        void *ibuffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[i];
        if (ibuffer == buffer) {
          canNullify = false;
          break;
        }
      }
      if (canNullify && buffer != nullptr)
        memset((uint8_t *)buffer, '\0',
               shape::length(shape) * DataTypeUtils::sizeOfElement(ArrayOptions::dataType(shape)));
      auto array = new sd::NDArray(buffer, bufferD, shape);
      outputs[e] = array;
    }
  for (int e = 0; e < numIArgs; e++) iiArgs[e] = iArgs[e];
  for (int e = 0; e < numTArgs; e++) ttArgs[e] = tArgs[e];
  for (int e = 0; e < numBArgs; e++) bbArgs[e] = bArgs[e];
  // hypothetically at this point we have everything filled
  auto dZ = op->execute(inputs, outputs, ttArgs, iiArgs, bbArgs, std::vector<sd::DataType>(), isInplace);
  // auto dZ = op->execute(inputs, ttArgs, iiArgs, isInplace);
  if (!isInplace)
    for (int e = 0; e < numOutputs; e++) {
      // restore the ordering the caller asked for, if the op produced another one
      if (outputs[e]->ordering() != shape::order(reinterpret_cast<sd::LongType *>(outputShapes[e])))
        outputs[e]->streamline(shape::order(reinterpret_cast<sd::LongType *>(outputShapes[e])));
    }
  // NOTE(review): dZ (the op's own status) is discarded and Status::OK is
  // returned unconditionally — confirm this is intentional.
  for (auto v : inputs) delete v;
  for (auto v : outputs) delete v;
  return Status::OK;
}
Status execCustomOp(sd::Pointer *extraPointers, sd::LongType hash, sd::Pointer *inputBuffers, sd::Pointer *inputShapes,
                    int numInputs, sd::Pointer *outputBuffers, sd::Pointer *outputShapes, int numOutputs, double *tArgs,
                    int numTArgs, sd::LongType *iArgs, int numIArgs, bool *bArgs, int numBArgs, bool isInplace) {
  // Resolves the op by hash and delegates to realExec; BAD_INPUT on failure,
  // with the error recorded on the default LaunchContext.
  try {
    auto op = sd::ops::OpRegistrator::getInstance().getOperation(hash);
    return realExec(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes,
                    numOutputs, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs, isInplace);
  } catch (std::exception &e) {
    auto err = sd::LaunchContext::defaultContext()->errorReference();
    err->setErrorCode(1);
    err->setErrorMessage(e.what());
    return Status::BAD_INPUT;
  }
}
// Executes a custom op through a pre-built Context (the "fastpath" entry point).
Status execCustomOp2(sd::Pointer *extraPointers, sd::LongType hash, sd::Pointer opContext) {
  try {
    auto op = sd::ops::OpRegistrator::getInstance().getOperation(hash);
    auto context = reinterpret_cast<Context *>(opContext);
    auto result = op->execute(context);
    // block on the context's stream so asynchronous failures surface here
    auto res = cudaStreamSynchronize(*context->launchContext()->getCudaStream());
    if (res != 0) throw sd::cuda_exception::build("customOp execution failed", res);
    // NOTE(review): syncing both inputs and outputs *to device* after execution
    // looks inverted (results are usually needed on the host via syncToHost) —
    // confirm the intended direction.
    for (auto v : context->fastpath_in()) {
      if (!v->isEmpty()) v->syncToDevice();
    }
    for (auto v : context->fastpath_out()) {
      if (!v->isEmpty()) v->syncToDevice();
    }
    return result;
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    return Status::BAD_INPUT;
  }
}
Status registerGraph(sd::Pointer *extraPointers, sd::LongType graphId, sd::Pointer flatBufferPointer) {
  // Imports a flatbuffer-serialized graph and registers it under graphId;
  // BAD_INPUT on failure, with the error recorded on the default LaunchContext.
  try {
    auto graph = sd::graph::GraphExecutioner::importFromFlatPointer(flatBufferPointer);
    sd::graph::GraphHolder::getInstance().registerGraph(graphId, graph);
    return Status::OK;
  } catch (std::exception &e) {
    auto err = sd::LaunchContext::defaultContext()->errorReference();
    err->setErrorCode(1);
    err->setErrorMessage(e.what());
    return Status::BAD_INPUT;
  }
}
// Executes a previously registered graph against caller-provided inputs.
// The graph's VariableSpace is cloned so the stored graph stays pristine;
// input NDArrays are handed to the cloned space (and freed with it).
static VariablesSet *executeStoredGraphT(sd::Pointer *extraPointers, sd::LongType graphId, sd::Pointer *inputBuffers,
                                         sd::Pointer *inputShapes, int *inputIndices, int numInputs) {
  auto graph = sd::graph::GraphHolder::getInstance().pullGraph(graphId);
  auto varSpace = graph->getVariableSpace()->clone();
  std::vector<sd::NDArray *> handles;
  for (int e = 0; e < numInputs; e++) {
    auto idx = inputIndices[e];
    // we'll delete this array later, together with cloned VariableSpace
    auto array = new sd::NDArray(inputBuffers[e], reinterpret_cast<sd::LongType *>(inputShapes[e]));
    handles.emplace_back(array);
    // replace the existing variable's payload, or create a fresh variable
    if (varSpace->hasVariable(idx)) {
      auto var = varSpace->getVariable(idx);
      if (var->hasNDArray()) delete var->getNDArray();
      var->setNDArray(array);
    } else
      varSpace->putVariable(idx, array);
  }
  auto dZ = sd::graph::GraphExecutioner::execute(graph, varSpace);
  auto varSet = new sd::graph::VariablesSet(dZ);
  if (dZ == Status::OK) {
    // pull back results, and provide them
    auto outputs = graph->fetchOutputs();
    for (int e = 0; e < outputs->size(); e++) {
      // we're only getting variable ID/Index from original graph. values will be taken from cloned workspace
      std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index());
      auto var = varSpace->getVariable(varId);
      varSet->push_back(var->clone());
    }
    delete outputs;
  }
  delete varSpace;
  return varSet;
}
VariablesSet *executeStoredGraph(sd::Pointer *extraPointers, sd::LongType graphId, sd::Pointer *inputBuffers,
                                 sd::Pointer *inputShapes, int *inputIndices, int numInputs) {
  // Exception-safe wrapper around executeStoredGraphT; nullptr on failure,
  // with the error recorded on the default LaunchContext.
  try {
    return executeStoredGraphT(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs);
  } catch (std::exception &e) {
    auto err = sd::LaunchContext::defaultContext()->errorReference();
    err->setErrorCode(1);
    err->setErrorMessage(e.what());
    return nullptr;
  }
}
// Accessors for VariablesSet / Variable results produced by executeStoredGraph.
sd::LongType getVariablesSetSize(sd::graph::VariablesSet *set) { return set->size(); }
sd::Status getVariablesSetStatus(sd::graph::VariablesSet *set) { return set->status(); }
sd::graph::Variable *getVariable(sd::graph::VariablesSet *set, sd::LongType i) { return set->at(i); }
int getVariableId(sd::graph::Variable *variable) { return variable->id(); }
int getVariableIndex(sd::graph::Variable *variable) { return variable->index(); }
const char *getVariableName(sd::graph::Variable *variable) { return variable->getName()->c_str(); }
sd::LongType const *getVariableShape(sd::graph::Variable *variable) { return variable->getNDArray()->shapeInfo(); }
void *getVariableBuffer(sd::graph::Variable *variable) { return variable->getNDArray()->buffer(); }
/**
 * Removes the graph with the given id from the global GraphHolder registry.
 * Returns Status::OK on success; on exception, records error code 1 and the
 * message on the default LaunchContext and returns Status::BAD_INPUT.
 */
sd::Status unregisterGraph(sd::Pointer *extraPointers, sd::LongType graphId) {
  try {
    sd::graph::GraphHolder::getInstance().dropGraphAny(graphId);
  } catch (std::exception &e) {
    auto errorRef = sd::LaunchContext::defaultContext()->errorReference();
    errorRef->setErrorCode(1);
    errorRef->setErrorMessage(e.what());
    return Status::BAD_INPUT;
  }
  return Status::OK;
}
// --- Deallocation helpers for arrays handed across the C API boundary. ---
// Each casts the opaque pointer back to the type it was allocated as (with
// new[]) and releases it with the matching delete[].

void deletePointerArray(sd::Pointer pointer) {
  sd::Pointer *ptr = reinterpret_cast<sd::Pointer *>(pointer);
  delete[] ptr;
}
void deleteCharArray(sd::Pointer pointer) {
  auto ptr = reinterpret_cast<char *>(pointer);
  delete[] ptr;
}
void deleteIntArray(sd::Pointer pointer) {
  auto ptr = reinterpret_cast<int *>(pointer);
  delete[] ptr;
}
void deleteLongArray(sd::Pointer pointer) {
  auto ptr = reinterpret_cast<sd::LongType *>(pointer);
  delete[] ptr;
}
// Releases a VariablesSet previously returned by executeStoredGraph.
void deleteVariablesSet(sd::graph::VariablesSet *pointer) { delete pointer; }
// Releases a ShapeList previously handed out through the C API.
void deleteShapeList(sd::Pointer shapeList) {
  sd::ShapeList *list = reinterpret_cast<sd::ShapeList *>(shapeList);
  // list->destroy();
  delete list;
}
// Serialized description of all registered operations (owned by OpTracker).
const char *getAllOperations() { return sd::OpTracker::getInstance().exportOperations(); }
// Allocates a new GraphState for the given id; caller must free it with deleteGraphState.
sd::Pointer getGraphState(sd::LongType id) { return (sd::Pointer) new sd::graph::GraphState(id); }
void deleteGraphState(sd::Pointer state) {
  auto stateP = reinterpret_cast<sd::graph::GraphState *>(state);
  delete stateP;
}
/**
 * Executes a logic op (e.g. WHILE / IF) against the VariableSpace carried by
 * the given GraphState.
 *
 * Flow: wrap the caller's raw input buffers as NDArrays under node id 0,
 * register the referenced scopes as node inputs, run the logic executor, then
 * copy the produced values back into the caller's output buffers and drop the
 * temporary variables again.
 *
 * Returns Status::OK on success, the executor's status on failure, or a kernel
 * failure status when a referenced scope does not exist.
 */
sd::Status execCustomOpWithScope(sd::Pointer *extraPointers, sd::graph::GraphState *state, sd::LongType opHash,
                                 sd::LongType *scopes, int numScopes, sd::Pointer *inputBuffers,
                                 sd::Pointer *inputShapes, int numInputs, sd::Pointer *outputBuffers,
                                 sd::Pointer *outputShapes, int numOutputs) {
  /**
   * That's basically exec, with VariableSpace provided in GraphState:
   * depending on operation (i.e. while of if), different logic executors could be used
   */
  auto graph = state->graph();
  auto varSpace = state->variableSpace();
  // Node is dynamically created, and has nothing beyond it: only inputs and outputs
  // this node has id of 0, and inputs are
  Node node(OpType_LOGIC, opHash, 0);
  // mapping inputs: each raw buffer becomes an NDArray stored as variable (0, e)
  // NOTE(review): arrays are heap-allocated here and only dropVariable()d below —
  // presumably the VariableSpace owns/frees them on drop; confirm to rule out a leak.
  for (int e = 0; e < numInputs; e++) {
    auto buffer = inputBuffers[e];
    auto shapeInfo = reinterpret_cast<sd::LongType *>(inputShapes[e]);
    auto array = new sd::NDArray(buffer, shapeInfo, varSpace->launchContext());
    // now we just put array to VarSpace
    varSpace->putVariable(0, e, array);
    node.pickInput(0, e);
  }
  // mapping scopes
  for (int e = 0; e < numScopes; e++) {
    // we should check scope existence in GraphState/Graph
    int scopeId = (int)scopes[e];
    if (!state->hasScope(scopeId)) {
      // sd_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId);
      return Logger::logKernelFailureMsg();
    }
    node.pickInput(scopeId, 0);
  }
  auto dZ = LogicExecutor::processNode(graph, &node);
  if (dZ != Status::OK) return dZ;
  // mapping outputs: copy each produced variable's contents into the caller's buffer
  for (int e = 0; e < numOutputs; e++) {
    auto buffer = outputBuffers[e];
    auto shapeInfo = reinterpret_cast<sd::LongType *>(outputShapes[e]);
    NDArray array(buffer, shapeInfo, varSpace->launchContext());
    // now we just put array to VarSpace to the same ID
    // varSpace->putVariable(0, e, array);
    auto t = varSpace->getVariable(0, e)->getNDArray();
    array.assign(t);
  }
  // removing input variables
  for (int e = 0; e < numInputs; e++) {
    varSpace->dropVariable(0, e);
  }
  // after some bla-bla-bla we should have Graph and Node for current op
  return Status::OK;
}
/**
 * Untyped C-API overload: casts the opaque state pointer to GraphState and
 * forwards to the typed implementation. Exceptions are converted into the
 * default LaunchContext's error state (code 1) plus a BAD_INPUT status.
 */
sd::Status execCustomOpWithScope(sd::Pointer *extraPointers, sd::Pointer state, sd::LongType opHash,
                                 sd::LongType *scopes, int numScopes, sd::Pointer *inputBuffers,
                                 sd::Pointer *inputShapes, int numInputs, sd::Pointer *outputBuffers,
                                 sd::Pointer *outputShapes, int numOutputs) {
  try {
    auto graphState = reinterpret_cast<sd::graph::GraphState *>(state);
    return execCustomOpWithScope(extraPointers, graphState, opHash, scopes, numScopes, inputBuffers, inputShapes,
                                 numInputs, outputBuffers, outputShapes, numOutputs);
  } catch (std::exception &e) {
    auto errorRef = sd::LaunchContext::defaultContext()->errorReference();
    errorRef->setErrorCode(1);
    errorRef->setErrorMessage(e.what());
    return sd::Status::BAD_INPUT;
  }
}
// Releases a ResultWrapper previously returned through the C API.
void deleteResultWrapper(sd::Pointer ptr) {
  // cast back to the concrete type so the proper destructor runs
  auto p = reinterpret_cast<sd::graph::ResultWrapper *>(ptr);
  delete p;
}
// Threshold-encoding size estimation — not implemented on this backend;
// always throws so misuse is caught immediately.
int estimateThreshold(sd::Pointer *extraPointers, sd::Pointer dX, sd::LongType const *dXShapeInfo, int N,
                      float threshold) {
  throw std::runtime_error("estimateThreshold: Not implemented yet");
}
/*
* TypeDef:
* void convertTypes(sd::Pointer *extras, int srcType, sd::Pointer dX, long N, int dstType, sd::Pointer dZ);
*/
/**
 * Converts a flat buffer of N elements from srcType to dstType, where both are
 * ND4J_* type-code constants. Dispatches to sd::TypeCast::convertGenericCuda
 * for the supported (src, dst) pairs; commented-out branches are conversions
 * that are currently disabled (float8/float24/threshold variants).
 *
 * Unsupported combinations are reported via sd_printf rather than failing.
 * Exceptions are recorded on the default LaunchContext (code 1).
 *
 * Fix vs. previous revision: the ND4J_INT16 "unsupported" branch used bare
 * printf while every other branch uses sd_printf — now consistent.
 */
void convertTypes(sd::Pointer *extras, int srcType, sd::Pointer dX, sd::LongType N, int dstType, sd::Pointer dZ) {
  try {
    auto dx = reinterpret_cast<void *>(dX);
    auto dz = reinterpret_cast<void *>(dZ);
    if (srcType == ND4J_FLOAT8) {
      if (dstType == ND4J_FLOAT8) {
        // convertKernel<double, sd::float8>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT8) {
        // sd::TypeCast::convertGenericCuda<sd::float8, sd::int8>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT8) {
        // sd::TypeCast::convertGenericCuda<sd::float8, sd::uint8>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT16) {
        // sd::TypeCast::convertGenericCuda<sd::float8, float16>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT16) {
        // sd::TypeCast::convertGenericCuda<sd::float8, sd::int16>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT16) {
        // sd::TypeCast::convertGenericCuda<sd::float8, sd::uint16>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT24) {
      } else if (dstType == ND4J_FLOAT32) {
        // sd::TypeCast::convertGenericCuda<sd::float8, float>(extras, dx, N, dz);
      } else if (dstType == ND4J_DOUBLE) {
        // sd::TypeCast::convertGenericCuda<sd::float8, double>(extras, dx, N, dz);
      } else {
        sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
      }
    } else if (srcType == ND4J_INT8) {
      if (dstType == ND4J_FLOAT8) {
        // sd::TypeCast::convertGenericCuda<sd::int8, sd::float8>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT8) {
        // convertKernel<sd::int8, sd::int8>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT8) {
        sd::TypeCast::convertGenericCuda<int8_t, uint8_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT16) {
        sd::TypeCast::convertGenericCuda<int8_t, float16>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT16) {
        sd::TypeCast::convertGenericCuda<int8_t, int16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT16) {
        sd::TypeCast::convertGenericCuda<int8_t, uint16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT24) {
        // TODO: eventually we might want to add it
      } else if (dstType == ND4J_FLOAT32) {
        sd::TypeCast::convertGenericCuda<int8_t, float>(extras, dx, N, dz);
      } else if (dstType == ND4J_DOUBLE) {
        sd::TypeCast::convertGenericCuda<int8_t, double>(extras, dx, N, dz);
      } else {
        sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
      }
    } else if (srcType == ND4J_UINT8) {
      if (dstType == ND4J_FLOAT8) {
        // sd::TypeCast::convertGenericCuda<uint8_t, sd::float8>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT8) {
        sd::TypeCast::convertGenericCuda<uint8_t, int8_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT8) {
        sd::TypeCast::convertGenericCuda<uint8_t, uint8_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT16) {
        sd::TypeCast::convertGenericCuda<uint8_t, float16>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT16) {
        sd::TypeCast::convertGenericCuda<uint8_t, int16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT16) {
        sd::TypeCast::convertGenericCuda<uint8_t, uint16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT24) {
        // TODO: still might want to add
      } else if (dstType == ND4J_FLOAT32) {
        sd::TypeCast::convertGenericCuda<uint8_t, float>(extras, dx, N, dz);
      } else if (dstType == ND4J_DOUBLE) {
        sd::TypeCast::convertGenericCuda<uint8_t, double>(extras, dx, N, dz);
      } else {
        sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
      }
    } else if (srcType == ND4J_FLOAT16) {
      if (dstType == ND4J_FLOAT8) {
        // sd::TypeCast::convertGenericCuda<float16, sd::float8>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT8) {
        sd::TypeCast::convertGenericCuda<float16, int8_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT8) {
        sd::TypeCast::convertGenericCuda<float16, uint8_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT16) {
        sd::TypeCast::convertGenericCuda<float16, float16>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT16) {
        sd::TypeCast::convertGenericCuda<float16, int16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT16) {
        sd::TypeCast::convertGenericCuda<float16, uint16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT24) {
        // TODO: .... ^^^
      } else if (dstType == ND4J_FLOAT32) {
        sd::TypeCast::convertGenericCuda<float16, float>(extras, dx, N, dz);
      } else if (dstType == ND4J_DOUBLE) {
        sd::TypeCast::convertGenericCuda<float16, double>(extras, dx, N, dz);
      } else if (dstType == ND4J_THRESHOLD) {
        // sd::convertToThreshold<float16>(nullptr, dx, N, dz);
      } else {
        sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
      }
    } else if (srcType == ND4J_INT16) {
      if (dstType == ND4J_FLOAT8) {
        // sd::TypeCast::convertGenericCuda<int16_t, sd::float8>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT8) {
        sd::TypeCast::convertGenericCuda<int16_t, int8_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT8) {
        sd::TypeCast::convertGenericCuda<int16_t, uint8_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT16) {
        sd::TypeCast::convertGenericCuda<int16_t, float16>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT16) {
        sd::TypeCast::convertGenericCuda<int16_t, int16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT16) {
        sd::TypeCast::convertGenericCuda<int16_t, uint16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT24) {
        // TODO...
      } else if (dstType == ND4J_FLOAT32) {
        sd::TypeCast::convertGenericCuda<int16_t, float>(extras, dx, N, dz);
      } else if (dstType == ND4J_DOUBLE) {
        sd::TypeCast::convertGenericCuda<int16_t, double>(extras, dx, N, dz);
      } else {
        // was bare printf; use sd_printf like every other branch
        sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
      }
    } else if (srcType == ND4J_FLOAT24) {
    } else if (srcType == ND4J_FLOAT32) {
      if (dstType == ND4J_FLOAT8) {
        // sd::TypeCast::convertGenericCuda<float, sd::float8>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT8) {
        sd::TypeCast::convertGenericCuda<float, int8_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT8) {
        sd::TypeCast::convertGenericCuda<float, uint8_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT16) {
        sd::TypeCast::convertGenericCuda<float, float16>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT16) {
        sd::TypeCast::convertGenericCuda<float, int16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT16) {
        sd::TypeCast::convertGenericCuda<float, uint16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT24) {
      } else if (dstType == ND4J_DOUBLE) {
        sd::TypeCast::convertGenericCuda<float, double>(extras, dx, N, dz);
      } else if (dstType == ND4J_THRESHOLD) {
        // sd::convertToThreshold<float>(nullptr, dx, N, dz);
      } else {
        sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
      }
    } else if (srcType == ND4J_DOUBLE) {
      if (dstType == ND4J_FLOAT8) {
        // sd::TypeCast::convertGenericCuda<double, sd::float8>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT8) {
        sd::TypeCast::convertGenericCuda<double, int8_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT8) {
        sd::TypeCast::convertGenericCuda<double, uint8_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT16) {
        sd::TypeCast::convertGenericCuda<double, float16>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT16) {
        sd::TypeCast::convertGenericCuda<double, int16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT16) {
        sd::TypeCast::convertGenericCuda<double, uint16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT24) {
      } else if (dstType == ND4J_FLOAT32) {
        sd::TypeCast::convertGenericCuda<double, float>(extras, dx, N, dz);
      } else if (dstType == ND4J_DOUBLE) {
        // same-type copy: nothing to do
      } else if (dstType == ND4J_THRESHOLD) {
        // sd::convertToThreshold<double>(nullptr, dx, N, dz);
      } else {
        sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
      }
    } else if (srcType == ND4J_THRESHOLD) {
      if (dstType == ND4J_FLOAT16) {
        // sd::convertFromThreshold<float16>(nullptr, dx, N, dz);
      } else if (dstType == ND4J_FLOAT32) {
        // sd::convertFromThreshold<float>(nullptr, dx, N, dz);
      } else if (dstType == ND4J_DOUBLE) {
        // sd::convertFromThreshold<double>(nullptr, dx, N, dz);
      } else {
        sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
      }
    } else {
      sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
    }
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// --- utf8string helpers exposed through the C API ---

// Allocates a utf8string copy of the given bytes; free with deleteUtf8String.
sd::Pointer createUtf8String(sd::Pointer *extraPointers, const char *string, int length) {
  auto u = new sd::utf8string(string, length);
  return reinterpret_cast<sd::Pointer>(u);
}
// Length in bytes of the stored string.
sd::LongType getUtf8StringLength(sd::Pointer *extraPointers, sd::Pointer ptr) {
  return reinterpret_cast<sd::utf8string *>(ptr)->_length;
}
// Raw character buffer; owned by the utf8string, valid until deleteUtf8String.
char *getUtf8StringBuffer(sd::Pointer *extraPointers, sd::Pointer ptr) {
  return reinterpret_cast<sd::utf8string *>(ptr)->_buffer;
}
void deleteUtf8String(sd::Pointer *extraPointers, sd::Pointer ptr) { delete (reinterpret_cast<sd::utf8string *>(ptr)); }
///////////////////////////////////////////////////////////////////
/**
 * Kernel: applies an element-wise update op to selected sub-arrays of x.
 *
 * For each of the numOfSubArrs updates, indexes[e] selects the target
 * sub-array of x (via xOffsets) and e selects the source sub-array of y
 * (via yOffsets). opCode chooses the operation: 0 add, 1 subtract,
 * 2 multiply, 3 divide, 4 reverse-subtract, 5 reverse-divide, 6 copy.
 *
 * Each update is "owned" by exactly one block (xIndex mod gridDim.x), so
 * different updates can proceed in parallel; threads within the owning block
 * stride over the sub-array elements. Updates whose sub-array lengths differ
 * are skipped (early return by the whole block — the condition reads shared
 * values, so it is uniform across the block).
 */
template <typename T, typename I>
SD_KERNEL static void scatterUpdateCuda(const int opCode, const int numOfSubArrs, void *vx,
                                        const sd::LongType *xShapeInfo, const sd::LongType *xOffsets, void *vy,
                                        const sd::LongType *yShapeInfo, const sd::LongType *yOffsets,
                                        const void *vindexes) {
  // base pointers and lengths for the current update, published by thread 0
  __shared__ T *x, *y;
  __shared__ sd::LongType arrLenX, arrLenY;
  auto indexes = reinterpret_cast<const I *>(vindexes);
  for (int e = 0; e < numOfSubArrs; e++) {
    const auto xIndex = indexes[e];
    // block-uniform ownership test: only one block processes each update
    const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex : blockIdx.x == xIndex % gridDim.x;
    if (!isOwner) continue;
    if (threadIdx.x == 0) {
      x = reinterpret_cast<T *>(vx) + xOffsets[xIndex];
      y = reinterpret_cast<T *>(vy) + yOffsets[e];
      arrLenX = shape::length(xShapeInfo);
      arrLenY = shape::length(yShapeInfo);
    }
    // make thread 0's shared writes visible before any thread reads them
    __syncthreads();
    if (arrLenX != arrLenY) return;
    for (sd::LongType i = threadIdx.x; i < arrLenX; i += blockDim.x) {
      const auto xOffset = shape::getIndexOffset(i, xShapeInfo);
      const auto yOffset = shape::getIndexOffset(i, yShapeInfo);
      switch (opCode) {
        case 0:
          x[xOffset] += y[yOffset];
          break;
        case 1:
          x[xOffset] -= y[yOffset];
          break;
        case 2:
          x[xOffset] *= y[yOffset];
          break;
        case 3:
          x[xOffset] /= y[yOffset];
          break;
        case 4:
          x[xOffset] = y[yOffset] - x[xOffset];
          break;
        case 5:
          x[xOffset] = y[yOffset] / x[xOffset];
          break;
        case 6:
          x[xOffset] = y[yOffset];
          break;
        default:
          continue;
      }
    }
    // barrier before the next iteration overwrites the shared pointers
    __syncthreads();
  }
}
/**
 * Host-side launcher for scatterUpdateCuda<T, I> on the given stream with a
 * fixed 512x256 launch configuration.
 *
 * Fix vs. previous revision: the xShapeInfo parameter was declared as
 * `const sd::LongType const *` — a duplicate cv-qualifier that is ill-formed
 * in standard C++ and inconsistent with the kernel's own signature; it is now
 * plain `const sd::LongType *`.
 *
 * NOTE(review): SD_MAX_NUM_THREADS is passed as the dynamic shared-memory
 * size, but the kernel declares no extern __shared__ storage — presumably a
 * leftover; confirm before changing, as it only affects occupancy.
 */
template <typename T, typename I>
SD_HOST static void scatterUpdateCudaLauncher(const cudaStream_t *stream, const int opCode, const int numOfSubArrs,
                                              void *vx, const sd::LongType *xShapeInfo,
                                              const sd::LongType *xOffsets, void *vy, const sd::LongType *yShapeInfo,
                                              const sd::LongType *yOffsets, const void *indexes) {
  scatterUpdateCuda<T, I><<<512, 256, SD_MAX_NUM_THREADS, *stream>>>(opCode, numOfSubArrs, vx, xShapeInfo, xOffsets, vy,
                                                                     yShapeInfo, yOffsets, indexes);
}
//////////////////////////////////////////////////////////////////////////
/**
 * C-API entry point for scatter updates: dispatches scatterUpdateCudaLauncher
 * over the runtime (data type, index type) pair via BUILD_DOUBLE_SELECTOR.
 * Host-side h* arguments carry shape metadata only; device-side d* buffers are
 * what the kernel reads and writes. extraPointers[1] is the CUDA stream.
 * Errors (including kernel launch failures) are recorded on the default
 * LaunchContext.
 */
void scatterUpdate(sd::Pointer *extraPointers, int opCode, int numOfSubArrs, void *hX, sd::LongType const *hXShapeInfo,
                   sd::LongType const *hXOffsets, void *dX, sd::LongType const *dXShapeInfo,
                   sd::LongType const *dXOffsets, void *hY, sd::LongType const *hYShapeInfo,
                   sd::LongType const *hYOffsets, void *dY, sd::LongType const *dYShapeInfo,
                   sd::LongType const *dYOffsets, void *hIindexes, sd::LongType const *hIndicesShapeInfo,
                   void *dIindexes, sd::LongType const *dIndicesShapeInfo) {
  try {
    auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
    auto type = ArrayOptions::dataType(hXShapeInfo);
    auto iType = ArrayOptions::dataType(hIndicesShapeInfo);
    BUILD_DOUBLE_SELECTOR(
        type, iType, scatterUpdateCudaLauncher,
        (stream, opCode, numOfSubArrs, dX, dXShapeInfo, dXOffsets, dY, dYShapeInfo, dYOffsets, dIindexes),
        SD_COMMON_TYPES, SD_INDEXING_TYPES);
    sd::DebugHelper::checkErrorCode(stream, "scatterUpdate(...) failed");
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
 * Debug utility: wraps the given host/device buffer pair in a temporary
 * NDArray and fills the caller-provided DebugInfo structure with statistics.
 * extraPointers[1/3/4/5] supply the stream/handles for the temporary
 * LaunchContext (same layout as the other C-API entry points in this file).
 */
void inspectArray(sd::Pointer *extraPointers, sd::Pointer buffer, sd::LongType *shapeInfo, sd::Pointer specialBuffer,
                  sd::LongType *specialShapeInfo, sd::Pointer debugInfo) {
  try {
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    auto p = reinterpret_cast<sd::DebugInfo *>(debugInfo);
    NDArray array(buffer, specialBuffer, shapeInfo, &lc);
    sd::DebugHelper::retrieveDebugStatistics(p, &array);
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
 * Debug kernel: reads every byte of the buffer and folds it into a per-block
 * shared accumulator, so that an invalid device pointer faults here (surfaced
 * by the stream sync in tryPointer) instead of corrupting a later kernel.
 *
 * Fix vs. previous revision: the __shared__ accumulator was read-modify-
 * written by atomicAdd without ever being initialized (shared memory starts
 * undefined). Thread 0 now zeroes it, with a barrier before the adds.
 */
void SD_KERNEL tryPointerKernel(void *p, int len) {
  auto buf = reinterpret_cast<int8_t *>(p);
  auto tid = threadIdx.x + blockIdx.x * blockDim.x;
  __shared__ int b;
  // shared memory is uninitialized: zero the accumulator before any atomicAdd
  if (threadIdx.x == 0) b = 0;
  __syncthreads();
  if (tid < len) atomicAdd(&b, buf[tid]);
  __syncthreads();
  if (threadIdx.x == 0 && blockIdx.x == 0) printf("Pointer check complete: %i\n", b);
}
/**
 * Validates that `p` is a readable device pointer of at least `len` bytes by
 * launching tryPointerKernel on a throwaway stream and synchronizing.
 * On failure, records the error on the default LaunchContext (code 1).
 *
 * Fix vs. previous revision: when the sync failed, the throw skipped
 * cudaStreamDestroy and leaked the stream. The stream is now destroyed before
 * the error is raised; the status is also compared against cudaSuccess
 * instead of a bare 0.
 */
void tryPointer(sd::Pointer extra, sd::Pointer p, int len) {
  try {
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    tryPointerKernel<<<256, 512, len + 64, stream>>>(p, len);
    auto e = cudaStreamSynchronize(stream);
    // destroy before throwing so the stream is not leaked on failure
    cudaStreamDestroy(stream);
    if (e != cudaSuccess) throw sd::cuda_exception::build("tryPointer failed", e);
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Decodes the sd::DataType encoded in a numpy .npy header, as an int.
int dataTypeFromNpyHeader(void *header) { return (int)cnpy::dataTypeFromHeader(reinterpret_cast<char *>(header)); }
// Legacy overload: forwards to shapeBufferEx, mapping the boolean empty flag
// onto the ARRAY_EMPTY extras bit.
OpaqueConstantShapeBuffer *shapeBuffer(int rank, sd::LongType *shape, sd::LongType *strides, sd::DataType dtype,
                                       char order, sd::LongType ews, bool empty) {
  return shapeBufferEx(rank, shape, strides, dtype, order, ews, empty ? ARRAY_EMPTY : 0);
}
/**
 * Builds (or fetches from the global cache) a constant shape-info buffer for
 * the described array and returns it wrapped in a heap-allocated
 * ConstantShapeBuffer handle. Caller releases the handle with
 * deleteConstantShapeBuffer; the cached shape data itself is shared.
 * Returns nullptr on error, with details on the default LaunchContext.
 */
OpaqueConstantShapeBuffer *shapeBufferEx(int rank, sd::LongType *shape, sd::LongType *strides, sd::DataType dtype,
                                         char order, sd::LongType ews, sd::LongType extras) {
  try {
    auto buffer = new ConstantShapeBuffer();
    *buffer = sd::ConstantShapeHelper::getInstance().bufferForShapeInfo(
        ShapeDescriptor(dtype, order, shape, strides, rank, ews, extras));
    return buffer;
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    return nullptr;
  }
}
// Handle deleters for opaque objects returned by this API; the underlying
// cached data (where applicable) is owned elsewhere.
void deleteConstantShapeBuffer(OpaqueConstantShapeBuffer *ptr) { delete ptr; }
void deleteConstantDataBuffer(OpaqueConstantDataBuffer *ptr) { delete ptr; }
void deleteTadPack(sd::TadPack *ptr) { delete ptr; }
/**
 * Checks the caller-supplied cuBLAS version triple against the version this
 * binary was linked with. On mismatch, logs the expected/actual versions and
 * records error code 152 ("CUDA/cuBLAS version mismatch") on the default
 * LaunchContext, then returns false.
 */
bool isBlasVersionMatches(int major, int minor, int build) {
  const bool matches = (Environment::getInstance()._blasMajorVersion == major) &&
                       (Environment::getInstance()._blasMinorVersion == minor) &&
                       (Environment::getInstance()._blasPatchVersion == build);
  if (!matches) {
    sd_printf("CUDA/cuBLAS version mismatch. Expected: %i.%i.%i but got %i.%i.%i instead\n",
              Environment::getInstance()._blasMajorVersion, Environment::getInstance()._blasMinorVersion,
              Environment::getInstance()._blasPatchVersion, major, minor, build);
    auto errorRef = sd::LaunchContext::defaultContext()->errorReference();
    errorRef->setErrorCode(152);
    errorRef->setErrorMessage("CUDA/cuBLAS version mismatch");
  }
  return matches;
}
// --- Constant data buffer creation and accessors (globally cached buffers) ---

// Builds/fetches a cached constant buffer from long values, cast to dtype.
sd::ConstantDataBuffer *constantBufferLong(sd::DataType dtype, sd::LongType const *data, int length) {
  return sd::ConstantHelper::getInstance().constantBuffer(ConstantDescriptor(data, length), dtype);
}
// Builds/fetches a cached constant buffer from double values, cast to dtype.
sd::ConstantDataBuffer *constantBufferDouble(sd::DataType dtype, double *data, int length) {
  return sd::ConstantHelper::getInstance().constantBuffer(ConstantDescriptor(data, length), dtype);
}
// Builds/fetches a cached constant buffer from a prepared descriptor.
sd::ConstantDataBuffer *constantBuffer(sd::DataType dtype, sd::ConstantDescriptor *descriptor) {
  return sd::ConstantHelper::getInstance().constantBuffer(*descriptor, dtype);
}
// Host-side pointer of the constant buffer.
sd::Pointer getConstantDataBufferPrimary(sd::ConstantDataBuffer *dbf) { return dbf->primary(); }
// Device-side pointer of the constant buffer.
sd::Pointer getConstantDataBufferSpecial(sd::ConstantDataBuffer *dbf) { return dbf->special(); }
// Number of elements in the constant buffer.
sd::LongType getConstantDataBufferLength(sd::ConstantDataBuffer *dbf) { return dbf->length(); }
// Size of one element, in bytes.
sd::LongType getConstantDataBufferSizeOf(sd::ConstantDataBuffer *dbf) { return dbf->sizeOf(); }
// Host-side shape-info pointer (const stripped for the C API).
sd::Pointer getConstantShapeBufferPrimary(OpaqueConstantShapeBuffer *dbf) {
  return const_cast<sd::LongType *>(dbf->primary());
}
// Device-side shape-info pointer (const stripped for the C API).
sd::Pointer getConstantShapeBufferSpecial(OpaqueConstantShapeBuffer *dbf) {
  return const_cast<sd::LongType *>(dbf->special());
}
// --- Graph execution Context construction and configuration (C API) ---

// Allocates a new op-execution Context; free with deleteGraphContext.
sd::graph::Context *createGraphContext(int nodeId) { return new sd::graph::Context(nodeId); }
// Pointer into the context's own RandomGenerator (owned by the context).
sd::graph::RandomGenerator *getGraphContextRandomGenerator(sd::graph::Context *ptr) { return &ptr->randomGenerator(); }
// Marks the op as operating in-place on its inputs.
void markGraphContextInplace(sd::graph::Context *ptr, bool reallyInplace) { ptr->markInplace(reallyInplace); }
// Attaches CUDA stream + scratch pointers for device execution.
void setGraphContextCudaContext(sd::graph::Context *ptr, void *stream, void *reductionPointer,
                                void *allocationPointer) {
  ptr->setCudaContext(stream, reductionPointer, allocationPointer);
}
// Registers input #index from raw host/device buffer + shape pairs.
void setGraphContextInputArray(sd::graph::Context *ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer,
                               void *specialShapeInfo) {
  ptr->setInputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo);
}
// Registers output #index from raw host/device buffer + shape pairs.
void setGraphContextOutputArray(sd::graph::Context *ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer,
                                void *specialShapeInfo) {
  ptr->setOutputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo);
}
// Registers input #index from an InteropDataBuffer handle.
void setGraphContextInputBuffer(OpaqueContext *ptr, int index, OpaqueDataBuffer *buffer, void *shapeInfo,
                                void *specialShapeInfo) {
  ptr->setInputArray(index, buffer, shapeInfo, specialShapeInfo);
}
// Registers output #index from an InteropDataBuffer handle.
void setGraphContextOutputBuffer(OpaqueContext *ptr, int index, OpaqueDataBuffer *buffer, void *shapeInfo,
                                 void *specialShapeInfo) {
  ptr->setOutputArray(index, buffer, shapeInfo, specialShapeInfo);
}
// Floating-point op arguments.
void setGraphContextTArguments(sd::graph::Context *ptr, double *arguments, int numberOfArguments) {
  ptr->setTArguments(arguments, numberOfArguments);
}
// Integer op arguments.
void setGraphContextIArguments(sd::graph::Context *ptr, sd::LongType *arguments, int numberOfArguments) {
  ptr->setIArguments(arguments, numberOfArguments);
}
// Boolean op arguments.
void setGraphContextBArguments(sd::graph::Context *ptr, bool *arguments, int numberOfArguments) {
  ptr->setBArguments(arguments, numberOfArguments);
}
// Data-type op arguments: ints arrive over the C boundary and are widened
// back into sd::DataType values.
void setGraphContextDArguments(OpaqueContext *ptr, int *arguments, int numberOfArguments) {
  std::vector<sd::DataType> dtypes(numberOfArguments);
  for (int e = 0; e < numberOfArguments; e++) dtypes[e] = (sd::DataType)arguments[e];
  ptr->setDArguments(dtypes);
}
// Releases a Context allocated by createGraphContext.
void deleteGraphContext(sd::graph::Context *ptr) { delete ptr; }
// Allocates a RandomGenerator seeded with the given root/node seeds; free with
// deleteRandomGenerator. Returns nullptr on error (details on default context).
sd::graph::RandomGenerator *createRandomGenerator(sd::LongType rootSeed, sd::LongType nodeSeed) {
  try {
    return new sd::graph::RandomGenerator(rootSeed, nodeSeed);
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    return nullptr;
  }
}
// --- RandomGenerator state accessors: stateless reads keyed by an index, so
// --- repeated calls with the same index return the same value. ---

sd::LongType getRandomGeneratorRootState(sd::graph::RandomGenerator *ptr) { return ptr->rootState(); }
sd::LongType getRandomGeneratorNodeState(sd::graph::RandomGenerator *ptr) { return ptr->nodeState(); }
// Re-seeds both root and node states.
void setRandomGeneratorStates(sd::graph::RandomGenerator *ptr, sd::LongType rootSeed, sd::LongType nodeSeed) {
  ptr->setStates(rootSeed, nodeSeed);
}
// Deterministic float in the generator's distribution for the given index.
float getRandomGeneratorRelativeFloat(sd::graph::RandomGenerator *ptr, sd::LongType index) {
  return ptr->relativeT<float>(index);
}
// Deterministic double for the given index.
double getRandomGeneratorRelativeDouble(sd::graph::RandomGenerator *ptr, sd::LongType index) {
  return ptr->relativeT<double>(index);
}
// Deterministic int for the given index.
int getRandomGeneratorRelativeInt(sd::graph::RandomGenerator *ptr, sd::LongType index) {
  return ptr->relativeInt(index);
}
// Deterministic long for the given index.
sd::LongType getRandomGeneratorRelativeLong(sd::graph::RandomGenerator *ptr, sd::LongType index) {
  return ptr->relativeLong(index);
}
// --- "next value" helpers: draw at relative index 1, then advance the
// --- generator via rewindH so the following call yields a fresh value. ---

int getRandomGeneratorNextInt(sd::graph::RandomGenerator *ptr) {
  // to nullify _nodeState._long ^= (steps ^ 0xdeadbeef);
  // we will use step = 0xdeadbeef
  auto result = ptr->relativeInt(1);
  ptr->rewindH(0xdeadbeef);
  return result;
}
sd::LongType getRandomGeneratorNextLong(sd::graph::RandomGenerator *ptr) {
  auto result = ptr->relativeLong(1);
  ptr->rewindH(0xdeadbeef);
  return result;
}
float getRandomGeneratorNextFloat(sd::graph::RandomGenerator *ptr) {
  auto result = ptr->relativeT<float>(1);
  ptr->rewindH(0xdeadbeef);
  return result;
}
double getRandomGeneratorNextDouble(sd::graph::RandomGenerator *ptr) {
  auto result = ptr->relativeT<double>(1);
  ptr->rewindH(0xdeadbeef);
  return result;
}
// Releases a generator allocated by createRandomGenerator.
void deleteRandomGenerator(sd::graph::RandomGenerator *ptr) { delete ptr; }
/**
 * Parses a raw numpy .npy blob and returns a cached constant shape-info
 * buffer describing its shape, order and data type.
 *
 * Special cases: a 1-element shape of [0] is treated as a scalar; any zero
 * dimension marks the array as empty. Returns nullptr on error, with details
 * on the default LaunchContext.
 *
 * NOTE(review): createFromExisting(shapeBuffer, true) — the `true` flag
 * presumably transfers ownership of the freshly built shapeBuffer to the
 * helper; confirm, otherwise this leaks one shape buffer per call.
 */
sd::Pointer shapeBufferForNumpy(sd::Pointer npyArray) {
  try {
    cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray));
    unsigned int shapeSize = arr.shape.size();
    std::vector<sd::LongType> shape(shapeSize);
    bool _empty = false;
    for (unsigned int i = 0; i < shapeSize; i++) {
      shape[i] = arr.shape[i];
      if (arr.shape[i] == 0) _empty = true;
    }
    auto dtype = cnpy::dataTypeFromHeader(reinterpret_cast<char *>(npyArray));
    sd::LongType *shapeBuffer;
    if (shape.size() == 1 && shape[0] == 0) {
      // scalar case
      shapeBuffer = sd::ShapeBuilders::createScalarShapeInfo(dtype);
    } else if (_empty) {
      if (shapeSize > 0)
        shapeBuffer = sd::ShapeBuilders::emptyShapeInfo(dtype, arr.fortranOrder ? 'f' : 'c', shape);
      else
        shapeBuffer = sd::ShapeBuilders::emptyShapeInfo(dtype);
    } else {
      shapeBuffer = sd::ShapeBuilders::createShapeInfo(dtype, arr.fortranOrder ? 'f' : 'c', shape);
    }
    return (sd::Pointer)(sd::ConstantShapeHelper::getInstance().createFromExisting(
        shapeBuffer, true));  // TO DO: this can lead to unpleasant crash sometimes
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    return nullptr;
  }
}
// Bytes currently cached by the ConstantHelper on the given device.
sd::LongType getCachedMemory(int deviceId) { return sd::ConstantHelper::getInstance().getCachedAmount(deviceId); }
// --- Accessors over the process-wide default LaunchContext and its CUDA resources ---
sd::LaunchContext *defaultLaunchContext() { return LaunchContext::defaultContext(); }
sd::Pointer lcScalarPointer(OpaqueLaunchContext *lc) { return lc->getScalarPointer(); }
sd::Pointer lcReductionPointer(OpaqueLaunchContext *lc) { return lc->getReductionPointer(); }
sd::Pointer lcAllocationPointer(OpaqueLaunchContext *lc) { return lc->getAllocationPointer(); }
sd::Pointer lcExecutionStream(OpaqueLaunchContext *lc) { return lc->getCudaStream(); }
sd::Pointer lcCopyStream(OpaqueLaunchContext *lc) { return lc->getCudaSpecialStream(); }
sd::Pointer lcBlasHandle(OpaqueLaunchContext *lc) { return lc->getCublasHandle(); }
sd::Pointer lcSolverHandle(OpaqueLaunchContext *lc) { return lc->getCusolverHandle(); }
// Last error code/message recorded by any API function in this file.
int lastErrorCode() { return sd::LaunchContext::defaultContext()->errorReference()->errorCode(); }
const char *lastErrorMessage() { return sd::LaunchContext::defaultContext()->errorReference()->errorMessage(); }
// Tells the context that output shapes were provided externally and the op's
// shape function should be skipped.
void ctxShapeFunctionOverride(OpaqueContext *ptr, bool reallyOverride) {
  ptr->setShapeFunctionOverride(reallyOverride);
}
// Clears the fast-path input/output handles registered on the context.
void ctxPurge(OpaqueContext *ptr) { ptr->clearFastPath(); }
// CPU-binary capability probes; trivially satisfied on the CUDA backend.
int binaryLevel() { return 0; }
int optimalLevel() { return 0; }
bool isMinimalRequirementsMet() { return true; }
bool isOptimalRequirementsMet() { return true; }
// Enables/disables platform helper (e.g. cuDNN) usage for this context.
void ctxAllowHelpers(OpaqueContext *ptr, bool reallyAllow) { ptr->allowHelpers(reallyAllow); }
// Sets the execution mode; out-of-range values fall back to mode 0.
void ctxSetExecutionMode(OpaqueContext *ptr, int execMode) {
  if (execMode < 0 || execMode > 2) execMode = 0;
  ptr->setExecutionMode((samediff::ExecutionMode)execMode);
}
// Wraps caller-owned host/device pointers in a zero-capacity InteropDataBuffer
// (no allocation of its own); either pointer may be null.
// NOTE(review): `elements` is forwarded to setPrimary/setSpecial, while
// dbSetPrimaryBuffer below passes a byte count to the same calls — confirm
// whether InteropDataBuffer::setPrimary expects elements or bytes here.
// NOTE(review): dbAllocateDataBuffer returns nullptr on failure, which would
// make the setPrimary call below crash — presumably acceptable for this API.
OpaqueDataBuffer *dbCreateExternalDataBuffer(sd::LongType elements, int dataType, sd::Pointer primary,
                                             sd::Pointer special) {
  auto buffer = dbAllocateDataBuffer(0, dataType, false);
  if (primary != nullptr) buffer->setPrimary(primary, elements);
  if (special != nullptr) buffer->setSpecial(special, elements);
  return buffer;
}
// C-API alias for allocateDataBuffer.
OpaqueDataBuffer *dbAllocateDataBuffer(sd::LongType elements, int dataType, bool allocateBoth) {
  return allocateDataBuffer(elements, dataType, allocateBoth);
}
/**
 * Allocates an InteropDataBuffer sized for `elements` items of the given
 * type (host buffer always; device buffer too when allocateBoth is set).
 * Returns nullptr on failure, with details on the default LaunchContext.
 */
OpaqueDataBuffer *allocateDataBuffer(sd::LongType elements, int dataType, bool allocateBoth) {
  try {
    auto dtype = DataTypeUtils::fromInt(dataType);
    return new sd::InteropDataBuffer(elements * DataTypeUtils::sizeOf(dtype), dtype, allocateBoth);
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    return nullptr;
  }
}
// --- InteropDataBuffer accessors and pointer management ---

// Host-side buffer pointer.
sd::Pointer dbPrimaryBuffer(OpaqueDataBuffer *dataBuffer) { return dataBuffer->primary(); }
// Device-side buffer pointer.
sd::Pointer dbSpecialBuffer(OpaqueDataBuffer *dataBuffer) { return dataBuffer->special(); }
// Releases the handle (and whatever storage it owns).
void deleteDataBuffer(OpaqueDataBuffer *dataBuffer) { delete dataBuffer; }
// Points the buffer at an external host allocation of numBytes bytes.
void dbSetPrimaryBuffer(OpaqueDataBuffer *dataBuffer, sd::Pointer primaryBuffer, sd::LongType numBytes) {
  dataBuffer->setPrimary(primaryBuffer, numBytes);
}
// Points the buffer at an external device allocation of numBytes bytes.
void dbSetSpecialBuffer(OpaqueDataBuffer *dataBuffer, sd::Pointer specialBuffer, sd::LongType numBytes) {
  dataBuffer->setSpecial(specialBuffer, numBytes);
}
// Lazily allocates the host-side storage.
void dbAllocatePrimaryBuffer(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->allocatePrimary(); }
// Lazily allocates the device-side storage.
void dbAllocateSpecialBuffer(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->allocateSpecial(); }
/**
 * Grows the underlying DataBuffer so it can hold at least `elements` items of
 * its current data type. Errors are recorded on the default LaunchContext
 * (code 1) instead of propagating.
 */
void dbExpandBuffer(OpaqueDataBuffer *dataBuffer, sd::LongType elements) {
  try {
    auto db = dataBuffer->dataBuffer();
    const auto requestedBytes = elements * DataTypeUtils::sizeOf(db->getDataType());
    db->expand(requestedBytes);
  } catch (std::exception &e) {
    auto errorRef = sd::LaunchContext::defaultContext()->errorReference();
    errorRef->setErrorCode(1);
    errorRef->setErrorMessage(e.what());
  }
}
// Creates a view of `length` elements starting at `offset` into the given
// buffer; the view shares the parent's storage.
OpaqueDataBuffer *dbCreateView(OpaqueDataBuffer *dataBuffer, sd::LongType length, sd::LongType offset) {
  return new InteropDataBuffer(*dataBuffer, length, offset);
}
// Reference count of the buffer's underlying storage; 0 for a null handle.
int dbUseCount(OpaqueDataBuffer *dataBuffer) {
  return dataBuffer != nullptr ? dataBuffer->useCount() : 0;
}
// --- Host/device synchronization and dirtiness tracking for InteropDataBuffer ---

// Copies host -> device.
void dbSyncToSpecial(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->syncToSpecial(); }
// Copies device -> host (on the null/default stream).
void dbSyncToPrimary(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->syncToPrimary(nullptr); }
// Marks a host-side read (no dirtying).
void dbTickHostRead(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->readPrimary(); }
// Marks a host-side write (device copy becomes stale).
void dbTickHostWrite(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->writePrimary(); }
// Marks a device-side read.
void dbTickDeviceRead(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->readSpecial(); }
// Marks a device-side write (host copy becomes stale).
void dbTickDeviceWrite(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->writeSpecial(); }
// Grows the buffer to hold `elements` items (element-count overload).
void dbExpand(OpaqueDataBuffer *dataBuffer, sd::LongType elements) { dataBuffer->expand(elements); }
// Releases the underlying storage while keeping the handle alive.
void dbClose(OpaqueDataBuffer *dataBuffer) { dataBuffer->getDataBuffer()->close(); }
// Device the buffer's special storage lives on.
int dbDeviceId(OpaqueDataBuffer *dataBuffer) { return dataBuffer->deviceId(); }
void dbSetDeviceId(OpaqueDataBuffer *dataBuffer, int deviceId) { dataBuffer->setDeviceId(deviceId); }
/**
 * Reports where the freshest copy of the buffer's data lives:
 *   0  -> host and device copies are both up to date
 *  -1  -> only the host (primary) copy is up to date
 *   1  -> only the device (special) copy is up to date
 */
int dbLocality(OpaqueDataBuffer *dataBuffer) {
  const bool primaryActual = dataBuffer->dataBuffer()->isPrimaryActual();
  const bool specialActual = dataBuffer->dataBuffer()->isSpecialActual();
  if (primaryActual && specialActual) return 0;
  return primaryActual ? -1 : 1;
}
// Intentionally a no-op on this backend — presumably the VEDA device library
// folder only applies to the VE (vector engine) backend; confirm against the
// CPU/VE implementations before removing.
void setVedaDeviceLibFolder(std::string path){
}
|
0a00c43a9e6e55fab9eda290e04f260b550dd5af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "SolutionGrid.h"
#include "SudokuGrid.h"
// Fills the candidate table `poss`: row `i` of poss corresponds to the i-th
// empty cell (soln_row[i], soln_col[i]); entry j is 0 iff value j+1 is still
// legal there (its row, column and box verification entries are all 0).
// Empty cells are partitioned over blocks, candidate values over threads
// (see calcBlockWidth/calcThreadWidth).
// NOTE(review): soln_val is unused here; kept for signature compatibility.
__global__ void genPoss(int* grid, short* poss, short* veri_row, short* veri_col, short* veri_box, int* soln_row, int* soln_col, int* soln_val, int* grid_size, int* box_size, int* soln_size) {
    if (blockIdx.x < *soln_size && threadIdx.x < *grid_size) {
        int b_width = calcBlockWidth(*soln_size);
        int b_s_idx = calcStartIdxBlock(*soln_size);
        int b_e_idx = b_s_idx + b_width;
        int t_width = calcThreadWidth(*grid_size);
        int s_idx = calcStartIdx(*grid_size);
        int e_idx = s_idx + t_width;
        for (int blk_cnt = b_s_idx; blk_cnt < b_e_idx; blk_cnt++) {    // empty-cell index (row of poss)
            for (int thd_cnt = s_idx; thd_cnt < e_idx; thd_cnt++) {    // candidate value index (column of poss)
                int row = soln_row[blk_cnt];
                int col = soln_col[blk_cnt];
                int box = getBox(row, col, *box_size);
                int veri_row_idx = row * (*grid_size) + thd_cnt;
                int veri_col_idx = col * (*grid_size) + thd_cnt;
                int veri_box_idx = box * (*grid_size) + thd_cnt;
                poss[blk_cnt * (*grid_size) + thd_cnt] = veri_row[veri_row_idx] + veri_col[veri_col_idx] + veri_box[veri_box_idx];
            }
        }
    }
    // Fix: the barrier used to sit inside the divergent
    // `if (threadIdx.x < *grid_size)` branch, which is undefined behavior when
    // some threads of the block skip it. Hoisted to uniform control flow.
    __syncthreads();
}
// Builds the row/column/box "value already used" tables from the initial
// grid: veri_row[r*N + v-1] == 1 iff value v appears in row r (and likewise
// for columns and boxes). Rows/columns/boxes are partitioned over threads.
__global__ void checkValid(int* grid, short* veri_row, short* veri_col, short* veri_box, int* grid_size, int* box_size) {
    if (threadIdx.x < *grid_size) {
        int t_width = calcThreadWidth(*grid_size);
        int s_idx = calcStartIdx(*grid_size);
        int e_idx = s_idx + t_width;
        checkRow(grid, veri_row, *grid_size, s_idx, e_idx);
        checkCol(grid, veri_col, *grid_size, s_idx, e_idx);
        checkBox(grid, veri_box, *grid_size, *box_size, s_idx, e_idx);
    }
    // Fix: barrier hoisted out of the divergent branch — __syncthreads()
    // skipped by part of a block is undefined behavior.
    __syncthreads();
}
// Backtracking Sudoku solver kernel. The empty cell with the most candidates
// is swapped to position 0 ("pivot"); each block then explores one branch —
// block b commits to the b-th remaining candidate of the pivot cell
// (`branching` / `blk_cnt != poss_cnt` below) and thread 0 of that block runs
// a sequential depth-first search over the remaining cells. The first block
// to fill every cell writes the answer back to `grid` and `soln_val`.
//
// Shared-memory layout (per block): l_poss is the pristine candidate table,
// l_poss_curr is the working copy mutated/reset during the search,
// max_poss[i] counts candidates of cell i, l_soln_val holds chosen values.
//
// NOTE(review): `max`/`max_idx` are per-block __shared__; a block with
// blockIdx.x >= *grid_size skips the setup branch and then reads `max`
// uninitialized at `if (blockIdx.x < max)` — confirm the launch never uses
// more than *grid_size blocks.
// NOTE(review): thread 0 writes `max`/`max_idx` with no barrier before other
// threads read `max_idx` in the swap loop below — looks like a race; verify.
__global__ void solveCuda(int* grid, int* soln_row, int* soln_col, int* soln_val, short* poss, int* grid_size, int* soln_size, int* box_size) {
    __shared__ short l_poss[NUM_EMPTY * 9];
    __shared__ short l_poss_curr[NUM_EMPTY * 9];
    __shared__ short max_poss[NUM_EMPTY];
    __shared__ int l_soln_val[NUM_EMPTY];
    __shared__ short max;
    __shared__ int max_idx;
    if (blockIdx.x < *grid_size) {
        //printf("blk %d bs %d be %d bw %d\n", blockIdx.x, b_s_idx, b_e_idx, b_width);
        //printf("blk %d, thd %d\n", blockIdx.x, threadIdx.x);
        // Stage: copy the global candidate table into shared memory
        // (both the pristine copy and the working copy).
        if (threadIdx.x < (*grid_size)) {
            int t_width = calcThreadWidth(*grid_size);
            int s_idx = calcStartIdx(*grid_size);
            int e_idx = s_idx + t_width;
            //printf("blk %d, thd %d\n", blockIdx.x, threadIdx.x);
            for (int sol_cnt = 0; sol_cnt < (*soln_size); sol_cnt++) {
                max_poss[sol_cnt] = 0;
                l_soln_val[sol_cnt] = 0;
                for (int thd_cnt = s_idx; thd_cnt < e_idx; thd_cnt++) {
                    int poss_idx = sol_cnt * (*grid_size) + thd_cnt;
                    l_poss[poss_idx] = poss[poss_idx];
                    l_poss_curr[poss_idx] = l_poss[poss_idx];
                    // if (blockIdx.x == 0) {
                    //     printf("[%d, %d] %d l_poss %d poss %d\n", sol_cnt, thd_cnt, poss_idx, l_poss[poss_idx], poss[poss_idx]);
                    // }
                }
            }
            __syncthreads();
        }
        // Stage: count, per empty cell, how many candidate values remain
        // (entries equal to 0 in l_poss mean "still possible").
        if (threadIdx.x < (*soln_size)) {
            int t_width_sol = calcThreadWidth(*soln_size);
            int s_idx_sol = calcStartIdx(*soln_size);
            int e_idx_sol = s_idx_sol + t_width_sol;
            // if (blockIdx.x == 0)
            //     printf("s %d e %d w %d\n", s_idx_sol, e_idx_sol, t_width_sol);
            for (int sol_cnt = s_idx_sol; sol_cnt < e_idx_sol; sol_cnt++) {
                for (int thd_cnt = 0; thd_cnt < (*grid_size); thd_cnt++) {
                    int poss_idx = sol_cnt * (*grid_size) + thd_cnt;
                    // if (blockIdx.x == 0)
                    //     printf("[%d, %d] %d max_poss[%d] %d\n", sol_cnt, thd_cnt, poss_idx, sol_cnt, max_poss[sol_cnt]);
                    if (l_poss[poss_idx] == 0) {
                        //printf("max_poss %d\n", max_poss[sol_cnt]);
                        max_poss[sol_cnt] = max_poss[sol_cnt] + 1;
                    }
                }
            }
            __syncthreads();
        }
        // if (blockIdx.x == 0 && threadIdx.x == 0) {
        //     for (int i = 0; i < (*soln_size); i++) {
        //         printf("[%d] \t[%d, %d] ", i, soln_row[i], soln_col[i]);
        //         for (int j = 0; j < (*grid_size); j++) {
        //             printf("[%d] %d ", j, l_poss[i * (*grid_size) + j]);
        //         }
        //         printf("\n");
        //     }
        //     printf("\n");
        //     for (int idx = 0; idx < (*soln_size); idx++)
        //         printf("[%d] %d ,", idx, max_poss[idx]);
        //     printf("\n");
        // }
        // Stage: pick the pivot — the cell with the most remaining candidates.
        if (threadIdx.x == 0) {
            max = 0;
            for (int idx = 0; idx < (*soln_size); idx++) {
                if (max_poss[idx] > max) {
                    max = max_poss[idx];
                    max_idx = idx;
                }
            }
            // printf("max poss [%d] %d\n", max_idx, max);
        }
        // Move the pivot cell to slot 0 (soln_row/soln_col are global, so
        // only one block performs the swap).
        if (blockIdx.x == 0 && threadIdx.x == 0) {
            swap(soln_row, 0, max_idx);
            swap(soln_col, 0, max_idx);
        }
        //if (blockIdx.x == 0 && threadIdx.x == 0) {
        //     for (int i = 0; i < (*soln_size); i++) {
        //         printf("[%d] \t[%d, %d] ", i, soln_row[i], soln_col[i]);
        //         for (int j = 0; j < (*grid_size); j++) {
        //             printf("[%d] %d ", j, l_poss[i * (*grid_size) + j]);
        //         }
        //         printf("\n");
        //     }
        //     printf("\n");
        //     for (int idx = 0; idx < (*soln_size); idx++)
        //         printf("[%d] %d ,", idx, max_poss[idx]);
        //     printf("\n");
        //}
        // Swap the pivot's candidate rows to row 0 in both shared tables so
        // the search below can index cells in pivot-first order.
        if (threadIdx.x < (*grid_size)) {
            int t_width = calcThreadWidth(*grid_size);
            int s_idx = calcStartIdx(*grid_size);
            int e_idx = s_idx + t_width;
            for (int thd_cnt = s_idx; thd_cnt < e_idx; thd_cnt++) {
                int poss_idx_1 = thd_cnt;
                int poss_idx_2 = max_idx * (*grid_size) + thd_cnt;
                swap(l_poss, poss_idx_1, poss_idx_2);
                swap(l_poss_curr, poss_idx_1, poss_idx_2);
            }
            __syncthreads();
        }
        //if (blockIdx.x == 0 && threadIdx.x == 0) {
        //     for (int j = 0; j < (*soln_size); j++) {
        //         printf("curr %d [%d, %d] [%d] ", j, soln_row[j], soln_col[j], getBox(soln_row[j], soln_col[j], *box_size));
        //         for (int i = 0; i < (*grid_size); i++) {
        //             printf("[%d]%d", i, l_poss_curr[j * (*grid_size) + i]);
        //         }
        //         printf("\n");
        //     }
        //}
        //if (blockIdx.x == 0 && threadIdx.x == 0) {
        //     for (int i = 0; i < (*soln_size); i++) {
        //         printf("[%d] [%d, %d] %d\n", i, soln_row[i], soln_col[i], max_poss[i]);
        //     }
        //}
        __syncthreads();
    }
    // Branch exploration: block b tries the b-th candidate of the pivot cell,
    // then searches the remaining cells depth-first (thread 0 only).
    if (blockIdx.x < max) {
        int b_width = calcBlockWidth(max);
        int b_s_idx = calcStartIdxBlock(max);
        int b_e_idx = b_s_idx + b_width;
        if (threadIdx.x == 0) {
            for (int blk_cnt = b_s_idx; blk_cnt < b_e_idx; blk_cnt++) {
                bool branching = true;
                for (int curr_idx = 0; curr_idx < (*soln_size); curr_idx++) {
                    bool soln_found = false;
                    int poss_cnt = -1;
                    //if (blockIdx.x == 0) {
                    //     printf("\ncurr-1 %d [%d, %d]: ", curr_idx - 1, soln_row[curr_idx - 1], soln_col[curr_idx - 1]);
                    //     for (int i = 0; i < (*grid_size); i++) {
                    //         printf("[%d]%d", i, l_poss_curr[(curr_idx - 1) * (*grid_size) + i]);
                    //     }
                    //     printf("\n");
                    //     printf("curr %d [%d, %d]: ", curr_idx, soln_row[curr_idx], soln_col[curr_idx]);
                    //     for (int i = 0; i < (*grid_size); i++) {
                    //         printf("[%d]%d", i, l_poss_curr[curr_idx * (*grid_size) + i]);
                    //     }
                    //     printf("\n");
                    //     for (int i = 0; i < (*soln_size); i++) {
                    //         printf("[%d]%d ", i, l_soln_val[i]);
                    //     }
                    //     printf("\n");
                    //}
                    // Scan candidate values for the current cell; on the very
                    // first cell, `branching` forces block b to skip to its
                    // b-th open candidate.
                    for (int soln_cnt = 0; soln_cnt < (*grid_size); soln_cnt++) {
                        int poss_idx = curr_idx * (*grid_size) + soln_cnt;
                        if (l_poss_curr[poss_idx] == 0) {
                            poss_cnt++;
                            if (branching) {
                                branching = blk_cnt != poss_cnt;
                            }
                            if (!branching) {
                                // check existing solutions: reject the value if an
                                // already-chosen cell in the same row/col/box uses it
                                bool exist = false;
                                for (int chk_soln = 0; l_soln_val[chk_soln] != 0 && chk_soln < (*soln_size); chk_soln++) {
                                    if (soln_row[chk_soln] == soln_row[curr_idx]) {
                                        if (l_soln_val[chk_soln] == soln_cnt + 1) {
                                            exist = true;
                                            break;
                                        }
                                    }
                                    if (soln_col[chk_soln] == soln_col[curr_idx]) {
                                        if (l_soln_val[chk_soln] == soln_cnt + 1) {
                                            exist = true;
                                            break;
                                        }
                                    }
                                    int soln_box = getBox(soln_row[curr_idx], soln_col[curr_idx], *box_size);
                                    int chk_box = getBox(soln_row[chk_soln], soln_col[chk_soln], *box_size);
                                    if (soln_box == chk_box) {
                                        if (l_soln_val[chk_soln] == soln_cnt + 1) {
                                            exist = true;
                                            break;
                                        }
                                    }
                                }
                                if (!exist) {
                                    soln_found = true;
                                    l_poss_curr[poss_idx] = 1;   // consume this candidate
                                    l_soln_val[curr_idx] = soln_cnt + 1;
                                    break;
                                }
                            }
                        }
                    }
                    // Backtrack: restore this cell's candidates from the
                    // pristine table, clear the previous cell's choice, and
                    // step back one cell (-2 then loop ++).
                    if (!soln_found) {
                        if (curr_idx < 1) {
                            break;
                        }
                        for (int reset_cnt = 0; reset_cnt < (*grid_size); reset_cnt++) {
                            int reset_idx = curr_idx * (*grid_size) + reset_cnt;
                            l_poss_curr[reset_idx] = l_poss[reset_idx];
                        }
                        l_soln_val[curr_idx - 1] = 0;
                        curr_idx -= 2;
                    }
                    if (curr_idx < 1 && !branching) {
                        break;
                    }
                    branching = false;
                }
                //for (int i = 0; i < *soln_size; i++) {
                //     printf("blk %d [%d](%d %d) %d\n", blk_cnt, i, soln_row[i], soln_col[i], l_soln_val[i]);
                //}
            }
        }
        // if (threadIdx.x == 0) {
        //     branching = true;
        //     for (curr_idx = 0; curr_idx < (*soln_size); curr_idx++) { // iterate over all solutions
        //         if (blockIdx.x == 0) {
        //             printf("\ncurr-1 %d [%d, %d]: ", curr_idx - 1, soln_row[curr_idx - 1], soln_col[curr_idx - 1]);
        //             for (int i = 0; i < (*grid_size); i++) {
        //                 printf("[%d]%d", i, l_poss_curr[(curr_idx - 1) * (*grid_size) + i]);
        //             }
        //             printf("\n");
        //
        //             printf("curr %d [%d, %d]: ", curr_idx, soln_row[curr_idx], soln_col[curr_idx]);
        //             for (int i = 0; i < (*grid_size); i++) {
        //                 printf("[%d]%d", i, l_poss_curr[curr_idx * (*grid_size) + i]);
        //             }
        //             printf("\n");
        //         }
        //         soln_found = false;
        //         int poss_cnt = -1;
        //         for (int soln_cnt = 0; soln_cnt < (*grid_size); soln_cnt++) { // the jth number of a cell
        //             int poss_idx = curr_idx * (*grid_size) + soln_cnt;
        //             if (l_poss_curr[poss_idx] == 0) {
        //                 poss_cnt++;
        //                 if (!branching || blk_cnt == poss_cnt) {
        //                     soln_found = true;
        //                     setRelativeVal(l_poss_curr, 1, curr_idx, soln_cnt, soln_row, soln_col, *grid_size, *box_size, *soln_size);
        //                     l_poss_curr[poss_idx] = 1;
        //                     l_soln_val[curr_idx] = soln_cnt + 1;
        //                     break;
        //                 }
        //             }
        //         }
        //
        //         // backward case
        //         if (!soln_found) {
        //             stop_at = curr_idx;
        //
        //             if (curr_idx < 1) // have a branch checked exhaustively
        //                 break;
        //             for (int reset_cnt = 0; reset_cnt < (*grid_size); reset_cnt++) {
        //                 int reset_idx = curr_idx * (*grid_size) + reset_cnt;
        //                 l_poss_curr[reset_idx] = poss[reset_idx];
        //                 //printf("resetting %d\n", reset_cnt);
        //                 resetRelativeVal(l_poss_curr, poss, curr_idx, reset_cnt, soln_row, soln_col, *grid_size, *box_size, *soln_size);
        //             }
        //             if (blockIdx.x == 0 && threadIdx.x == 0) {
        //                 printf("reset:");
        //                 printf("curr %d [%d, %d]: ", curr_idx, soln_row[curr_idx], soln_col[curr_idx]);
        //                 for (int i = 0; i < (*grid_size); i++) {
        //                     printf("[%d]%d", i, l_poss_curr[curr_idx * (*grid_size) + i]);
        //                 }
        //                 printf("\n");
        //             }
        //             curr_idx --; // go back one cell and clear possiblity
        //
        //             //for (int reset_cnt = 0; reset_cnt < (*grid_size); reset_cnt++) {
        //             //    int reset_idx = curr_idx * (*grid_size) + reset_cnt;
        //             //    l_poss[reset_idx] = l_poss_curr[reset_idx];
        //             //    resetRelativeVal(l_poss_curr, l_poss, curr_idx, reset_cnt, soln_row, soln_col, *grid_size, *box_size, *soln_size);
        //             //}
        //             //l_soln_val[curr_idx] = 0;
        //             curr_idx --; // nullify increment of for loop
        //         }
        //
        //         if (curr_idx < 1 && !branching) {
        //             break;
        //         }
        //         branching = false;
        //     }
        // }
        // }
        // __syncthreads();
        // Publish: if every cell got a value, copy the solution to global
        // memory and print the completed grid (debug output).
        if (l_soln_val[(*soln_size) - 1] != 0) {
            if (threadIdx.x < *soln_size) {
                int t_width = calcThreadWidth(*soln_size);
                int s_idx = calcStartIdx(*soln_size);
                int e_idx = s_idx + t_width;
                for (int thd_cnt = s_idx; thd_cnt < e_idx; thd_cnt++) {
                    soln_val[thd_cnt] = l_soln_val[thd_cnt];
                }
                for (int thd_cnt = s_idx; thd_cnt < e_idx; thd_cnt++) {
                    int g_idx = soln_row[thd_cnt] * (*grid_size) + soln_col[thd_cnt];
                    grid[g_idx] = l_soln_val[thd_cnt];
                    //printf("%d %d %d %d\n", soln_row[thd_cnt], soln_col[thd_cnt], g_idx, grid[g_idx]);
                }
                __syncthreads();
                if (threadIdx.x == 0) {
                    for (int i = 0; i < *grid_size; i++) {
                        for (int j = 0; j < *grid_size; j++) {
                            int g_idx = i * (*grid_size) + j;
                            printf("%d ", grid[g_idx]);
                        }
                        printf("\n");
                    }
                }
            }
        }
        __syncthreads();
        //if (blockIdx.x == 0 && threadIdx.x == 0) {
        //     for (int i = 0; i < *soln_size; i++) {
        //         printf("[%d] %d\n", i, soln_val[i]);
        //     }
        //     for (int i = 0; i < *grid_size; i++) {
        //         for (int j = 0; j < *grid_size; j++) {
        //             int g_idx = i * (*grid_size) + j;
        //             printf("%d ", grid[g_idx]);
        //         }
        //         printf("\n");
        //     }
        //}
    }
}
// Restores, in every later cell that shares a row, column or box with the
// cell at curr_idx, the candidate entry for value soln_cnt+1 from the
// pristine table l_poss back into the working table l_poss_curr.
__device__ void resetRelativeVal(short* l_poss_curr, short * l_poss, int curr_idx, int soln_cnt, int* soln_row, int* soln_col, int grid_size, int box_size, int soln_size) {
    const int pivot_row = soln_row[curr_idx];
    const int pivot_col = soln_col[curr_idx];
    const int pivot_box = getBox(pivot_row, pivot_col, box_size);
    for (int idx = curr_idx; idx < soln_size; idx++) {
        if (idx == curr_idx) continue;  // never touch the pivot cell itself
        const bool same_row = soln_row[idx] == pivot_row;
        const bool same_col = soln_col[idx] == pivot_col;
        const bool same_box = getBox(soln_row[idx], soln_col[idx], box_size) == pivot_box;
        if (same_row || same_col || same_box) {
            const int chg_idx = idx * grid_size + soln_cnt;
            l_poss_curr[chg_idx] = l_poss[chg_idx];
        }
    }
}
// Writes `val` into the candidate entry for value soln_cnt+1 of every cell
// from curr_idx onward that shares a row, column or box with the cell at
// curr_idx (the pivot matches itself, so its own entry is set too).
__device__ void setRelativeVal(short* l_poss, int val, int curr_idx, int soln_cnt, int* soln_row, int* soln_col, int grid_size, int box_size, int soln_size) {
    const int pivot_row = soln_row[curr_idx];
    const int pivot_col = soln_col[curr_idx];
    const int pivot_box = getBox(pivot_row, pivot_col, box_size);
    for (int idx = curr_idx; idx < soln_size; idx++) {
        const bool same_row = soln_row[idx] == pivot_row;
        const bool same_col = soln_col[idx] == pivot_col;
        const bool same_box = getBox(soln_row[idx], soln_col[idx], box_size) == pivot_box;
        if (same_row || same_col || same_box) {
            l_poss[idx * grid_size + soln_cnt] = val;
        }
    }
}
// Maps a (row, col) cell to its sub-box index; boxes are numbered row-major,
// box_size boxes per horizontal band.
__device__ int getBox(int row, int col, int box_size) {
    return (row / box_size) * box_size + (col / box_size);
}
// Exchanges the int entries at idx1 and idx2.
__device__ void swap(int* soln, int idx1, int idx2) {
    const int held = soln[idx1];
    soln[idx1] = soln[idx2];
    soln[idx2] = held;
}
// Exchanges the short entries at idx1 and idx2.
__device__ void swap(short* soln, int idx1, int idx2) {
    const short held = soln[idx1];
    soln[idx1] = soln[idx2];
    soln[idx2] = held;
}
// Marks which values are already used per row: for each row in
// [row_s, row_e), sets veri_row[row*grid_size + v-1] = 1 for every non-zero
// value v found in that row.
__device__ void checkRow(int* grid, short* veri_row, int grid_size, int row_s, int row_e) {
    for (int row = row_s; row < row_e; row++) {
        for (int col = 0; col < grid_size; col++) {
            const int val = grid[row * grid_size + col];
            if (val != 0)
                veri_row[row * grid_size + val - 1] = 1;
        }
    }
}
// Marks which values are already used per column: for each column in
// [col_s, col_e), sets veri_col[col*grid_size + v-1] = 1 for every non-zero
// value v found in that column.
__device__ void checkCol(int* grid, short* veri_col, int grid_size, int col_s, int col_e) {
    for (int row = 0; row < grid_size; row++) {
        for (int col = col_s; col < col_e; col++) {
            const int val = grid[row * grid_size + col];
            if (val != 0)
                veri_col[col * grid_size + val - 1] = 1;
        }
    }
}
// Marks which values are already used per box: for each box index in
// [thd_cnt_s, thd_cnt_e), walks that box's box_size x box_size cells and sets
// veri_box[box*grid_size + v-1] = 1 for every non-zero value v found.
__device__ void checkBox(int* grid, short* veri_box, int grid_size, int box_size, int thd_cnt_s, int thd_cnt_e) {
    for (int box = thd_cnt_s; box < thd_cnt_e; box++) {
        // Top-left corner of this box; `anchor / grid_size` counts full bands.
        const int anchor = box * box_size;
        const int row_begin = (anchor / grid_size) * box_size;
        const int row_end = row_begin + box_size;
        const int col_begin = anchor % grid_size;   // == anchor - (anchor/grid_size)*grid_size
        const int col_end = col_begin + box_size;
        for (int row = row_begin; row < row_end; row++) {
            for (int col = col_begin; col < col_end; col++) {
                const int val = grid[row * grid_size + col];
                if (val != 0)
                    veri_box[box * grid_size + val - 1] = 1;
            }
        }
    }
}
// Number of work items this block handles when `grid_size` items are split
// across gridDim.x blocks; the last block absorbs the remainder. One item
// per block when there are at least as many blocks as items.
__device__ int calcBlockWidth(int grid_size) {
    if (gridDim.x >= grid_size)
        return 1;
    int width = (grid_size) / gridDim.x;
    if (blockIdx.x > gridDim.x - 2)   // last block takes what is left over
        width = grid_size - blockIdx.x * width;
    return width;
}
// First work item assigned to this block under the same partitioning as
// calcBlockWidth.
__device__ int calcStartIdxBlock(int grid_size) {
    if (gridDim.x < grid_size)
        return blockIdx.x * (grid_size / gridDim.x);
    return blockIdx.x;
}
// Number of work items this thread handles when `grid_size` items are split
// across blockDim.x threads; the last thread absorbs the remainder. One item
// per thread when there are at least as many threads as items.
__device__ int calcThreadWidth(int grid_size) {
    if (blockDim.x >= grid_size)
        return 1;
    int width = (grid_size) / blockDim.x;
    if (threadIdx.x > blockDim.x - 2)   // last thread takes what is left over
        width = grid_size - threadIdx.x * width;
    return width;
}
// First work item assigned to this thread under the same partitioning as
// calcThreadWidth.
__device__ int calcStartIdx(int grid_size) {
    if (blockDim.x < grid_size)
        return threadIdx.x * (grid_size / blockDim.x);
    return threadIdx.x;
}
// Entry point: builds a solved Sudoku, blanks NUM_EMPTY cells, then re-solves
// it on the GPU (checkValid -> genPoss -> solveCuda) and reports timings.
// Usage: <prog> <grid_size>   (grid_size should be a perfect square, e.g. 9)
int main(int argc, char ** argv) {
  // Fix: the original dereferenced argv[1] unconditionally.
  if (argc < 2) {
    cerr << "usage: " << argv[0] << " <grid_size>" << endl;
    return EXIT_FAILURE;
  }
  int grid_size = stoi(argv[1]);
  //int zeros = grid_size * grid_size / 3;
  int zeros = NUM_EMPTY;            // number of cells to blank out
  int* soln_val = new int[zeros];   // solved value per blanked cell
  int* soln_row = new int[zeros];   // row of each blanked cell
  int* soln_col = new int[zeros];   // column of each blanked cell
  // Build a fully solved grid, then clear `zeros` random slots.
  SudokuGrid config = SudokuGrid(grid_size);
  config.randFirstRow(&config.grid, grid_size);
  config.solve(&config.grid, grid_size);
  //config.printGrid();
  config.randClearSlots(&config.grid, grid_size, zeros, soln_row, soln_col, soln_val);
  config.printGrid();
  cout << "Number of empty slots = " << zeros << endl;
  // implement in serial by CPU (solver call currently disabled)
  time_t start_time = time(NULL);
  //config.solve(&config.grid, grid_size);
  time_t end_time = time(NULL);
  printf("Time spent: %ld\n", end_time - start_time);
  // CUDA part starts
  hipEvent_t start, stop;
  float gpu_time = 0.0f;
  int grid_size_cuda = grid_size * grid_size;
  int box_size = sqrt(grid_size);   // assumes grid_size is a perfect square
  int* grid = sudokuInit(NULL, config.grid, grid_size);
  int* _grid = 0;
  int* _soln_val = 0;
  int* _soln_row = 0;
  int* _soln_col = 0;
  short* _veri_row = 0;
  short* _veri_col = 0;
  short* _veri_box = 0;
  short* _poss = 0;
  int* _grid_size = 0;
  int* _box_size = 0;
  int* _soln_size = 0;
  // Uniform status reporting for every HIP runtime call (replaces the
  // repeated if-blocks of the original).
  auto check = [](hipError_t status, const char* what) {
    if (status != hipSuccess) {
      cerr << what << " failed with error code " << status << endl;
    }
  };
  check(hipMalloc((void**)&_grid, grid_size_cuda * sizeof(int)), "hipMalloc grid");
  check(hipMalloc((void**)&_soln_val, zeros * sizeof(int)), "hipMalloc soln_val");
  check(hipMalloc((void**)&_soln_row, zeros * sizeof(int)), "hipMalloc soln_row");
  check(hipMalloc((void**)&_soln_col, zeros * sizeof(int)), "hipMalloc soln_col");
  check(hipMalloc((void**)&_veri_row, grid_size_cuda * sizeof(short)), "hipMalloc veri_row");
  check(hipMalloc((void**)&_veri_col, grid_size_cuda * sizeof(short)), "hipMalloc veri_col");
  check(hipMalloc((void**)&_veri_box, grid_size_cuda * sizeof(short)), "hipMalloc veri_box");
  check(hipMalloc((void**)&_poss, zeros * grid_size * sizeof(short)), "hipMalloc poss");
  check(hipMalloc((void**)&_grid_size, sizeof(int)), "hipMalloc grid_size");
  check(hipMalloc((void**)&_box_size, sizeof(int)), "hipMalloc box_size");
  check(hipMalloc((void**)&_soln_size, sizeof(int)), "hipMalloc soln_size");
  // Verification tables and candidate table start zeroed.
  check(hipMemset(_veri_row, 0, grid_size_cuda * sizeof(short)), "hipMemset veri_row");
  check(hipMemset(_veri_col, 0, grid_size_cuda * sizeof(short)), "hipMemset veri_col");
  check(hipMemset(_veri_box, 0, grid_size_cuda * sizeof(short)), "hipMemset veri_box");
  check(hipMemset(_poss, 0, zeros * grid_size * sizeof(short)), "hipMemset poss");
  // Upload the puzzle and the blanked-cell coordinates.
  check(hipMemcpy(_grid, grid, grid_size_cuda * sizeof(int), hipMemcpyHostToDevice), "hipMemcpy grid");
  check(hipMemcpy(_soln_val, soln_val, zeros * sizeof(int), hipMemcpyHostToDevice), "hipMemcpy soln_val");
  check(hipMemcpy(_soln_row, soln_row, zeros * sizeof(int), hipMemcpyHostToDevice), "hipMemcpy soln_row");
  check(hipMemcpy(_soln_col, soln_col, zeros * sizeof(int), hipMemcpyHostToDevice), "hipMemcpy soln_col");
  check(hipMemcpy(_grid_size, &grid_size, sizeof(int), hipMemcpyHostToDevice), "hipMemcpy grid_size");
  check(hipMemcpy(_box_size, &box_size, sizeof(int), hipMemcpyHostToDevice), "hipMemcpy box_size");
  check(hipMemcpy(_soln_size, &zeros, sizeof(int), hipMemcpyHostToDevice), "hipMemcpy soln_size");
  hipEventCreate(&start);
  hipEventCreate(&stop);
  hipEventRecord(start, 0);
  //solveCuda <<<1, NUM_THREAD >>> (_grid, _soln_row, _soln_col, _soln_val, _veri_row, _veri_col, _veri_box, _grid_size, _box_size);
  // Pass 1: build row/col/box "value used" tables from the initial grid.
  hipLaunchKernelGGL(checkValid, dim3(1), dim3(NUM_THREAD), 0, 0, _grid, _veri_row, _veri_col, _veri_box, _grid_size, _box_size);
  check(hipGetLastError(), "checkValid launch");
  check(hipDeviceSynchronize(), "hipDeviceSynchronize after checkValid");
  // Pass 2: derive the candidate table for every blanked cell.
  hipLaunchKernelGGL(genPoss, dim3(NUM_BLOCK), dim3(NUM_THREAD), 0, 0, _grid, _poss, _veri_row, _veri_col, _veri_box, _soln_row,
                     _soln_col, _soln_val, _grid_size, _box_size, _soln_size);
  check(hipGetLastError(), "genPoss launch");
  check(hipDeviceSynchronize(), "hipDeviceSynchronize after genPoss");
  // solveCudaHost(grid, soln_row, soln_col, soln_val, poss, poss_curr, grid_size, zeros, box_size);
  // for (int i = 0; i < zeros; i++) {
  //   printf("[%d](%d, %d) %d\n", i, soln_row[i], soln_col[i], soln_val[i]);
  // }
  // Pass 3: backtracking search over the candidate table.
  hipLaunchKernelGGL(solveCuda, dim3(1), dim3(NUM_THREAD), 0, 0, _grid, _soln_row, _soln_col, _soln_val, _poss, _grid_size, _soln_size, _box_size);
  check(hipGetLastError(), "solveCuda launch");
  check(hipDeviceSynchronize(), "hipDeviceSynchronize after solveCuda");
  hipEventRecord(stop, 0);
  hipEventSynchronize(stop);
  hipEventElapsedTime(&gpu_time, start, stop);
  printf("Time spent: %.5f\n", gpu_time);
  hipEventDestroy(start);
  hipEventDestroy(stop);
  // Release ALL device allocations (the original leaked _poss, _grid_size,
  // _box_size and _soln_size).
  hipFree(_grid);
  hipFree(_soln_val);
  hipFree(_soln_row);
  hipFree(_soln_col);
  hipFree(_veri_row);
  hipFree(_veri_col);
  hipFree(_veri_box);
  hipFree(_poss);
  hipFree(_grid_size);
  hipFree(_box_size);
  hipFree(_soln_size);
  delete[] grid;
  delete[] soln_val;
  delete[] soln_col;
  delete[] soln_row;
  return EXIT_SUCCESS;
}
| 0a00c43a9e6e55fab9eda290e04f260b550dd5af.cu | #include "SolutionGrid.h"
#include "SudokuGrid.h"
// Fills the candidate table `poss`: row `i` of poss corresponds to the i-th
// empty cell (soln_row[i], soln_col[i]); entry j is 0 iff value j+1 is still
// legal there (its row, column and box verification entries are all 0).
// Empty cells are partitioned over blocks, candidate values over threads
// (see calcBlockWidth/calcThreadWidth).
// NOTE(review): soln_val is unused here; kept for signature compatibility.
__global__ void genPoss(int* grid, short* poss, short* veri_row, short* veri_col, short* veri_box, int* soln_row, int* soln_col, int* soln_val, int* grid_size, int* box_size, int* soln_size) {
    if (blockIdx.x < *soln_size && threadIdx.x < *grid_size) {
        int b_width = calcBlockWidth(*soln_size);
        int b_s_idx = calcStartIdxBlock(*soln_size);
        int b_e_idx = b_s_idx + b_width;
        int t_width = calcThreadWidth(*grid_size);
        int s_idx = calcStartIdx(*grid_size);
        int e_idx = s_idx + t_width;
        for (int blk_cnt = b_s_idx; blk_cnt < b_e_idx; blk_cnt++) {    // empty-cell index (row of poss)
            for (int thd_cnt = s_idx; thd_cnt < e_idx; thd_cnt++) {    // candidate value index (column of poss)
                int row = soln_row[blk_cnt];
                int col = soln_col[blk_cnt];
                int box = getBox(row, col, *box_size);
                int veri_row_idx = row * (*grid_size) + thd_cnt;
                int veri_col_idx = col * (*grid_size) + thd_cnt;
                int veri_box_idx = box * (*grid_size) + thd_cnt;
                poss[blk_cnt * (*grid_size) + thd_cnt] = veri_row[veri_row_idx] + veri_col[veri_col_idx] + veri_box[veri_box_idx];
            }
        }
    }
    // Fix: the barrier used to sit inside the divergent
    // `if (threadIdx.x < *grid_size)` branch, which is undefined behavior when
    // some threads of the block skip it. Hoisted to uniform control flow.
    __syncthreads();
}
// Builds the row/column/box "value already used" tables from the initial
// grid: veri_row[r*N + v-1] == 1 iff value v appears in row r (and likewise
// for columns and boxes). Rows/columns/boxes are partitioned over threads.
__global__ void checkValid(int* grid, short* veri_row, short* veri_col, short* veri_box, int* grid_size, int* box_size) {
    if (threadIdx.x < *grid_size) {
        int t_width = calcThreadWidth(*grid_size);
        int s_idx = calcStartIdx(*grid_size);
        int e_idx = s_idx + t_width;
        checkRow(grid, veri_row, *grid_size, s_idx, e_idx);
        checkCol(grid, veri_col, *grid_size, s_idx, e_idx);
        checkBox(grid, veri_box, *grid_size, *box_size, s_idx, e_idx);
    }
    // Fix: barrier hoisted out of the divergent branch — __syncthreads()
    // skipped by part of a block is undefined behavior.
    __syncthreads();
}
// Backtracking Sudoku solver kernel. The empty cell with the most candidates
// is swapped to position 0 ("pivot"); each block then explores one branch —
// block b commits to the b-th remaining candidate of the pivot cell
// (`branching` / `blk_cnt != poss_cnt` below) and thread 0 of that block runs
// a sequential depth-first search over the remaining cells. The first block
// to fill every cell writes the answer back to `grid` and `soln_val`.
//
// Shared-memory layout (per block): l_poss is the pristine candidate table,
// l_poss_curr is the working copy mutated/reset during the search,
// max_poss[i] counts candidates of cell i, l_soln_val holds chosen values.
//
// NOTE(review): `max`/`max_idx` are per-block __shared__; a block with
// blockIdx.x >= *grid_size skips the setup branch and then reads `max`
// uninitialized at `if (blockIdx.x < max)` — confirm the launch never uses
// more than *grid_size blocks.
// NOTE(review): thread 0 writes `max`/`max_idx` with no barrier before other
// threads read `max_idx` in the swap loop below — looks like a race; verify.
__global__ void solveCuda(int* grid, int* soln_row, int* soln_col, int* soln_val, short* poss, int* grid_size, int* soln_size, int* box_size) {
    __shared__ short l_poss[NUM_EMPTY * 9];
    __shared__ short l_poss_curr[NUM_EMPTY * 9];
    __shared__ short max_poss[NUM_EMPTY];
    __shared__ int l_soln_val[NUM_EMPTY];
    __shared__ short max;
    __shared__ int max_idx;
    if (blockIdx.x < *grid_size) {
        //printf("blk %d bs %d be %d bw %d\n", blockIdx.x, b_s_idx, b_e_idx, b_width);
        //printf("blk %d, thd %d\n", blockIdx.x, threadIdx.x);
        // Stage: copy the global candidate table into shared memory
        // (both the pristine copy and the working copy).
        if (threadIdx.x < (*grid_size)) {
            int t_width = calcThreadWidth(*grid_size);
            int s_idx = calcStartIdx(*grid_size);
            int e_idx = s_idx + t_width;
            //printf("blk %d, thd %d\n", blockIdx.x, threadIdx.x);
            for (int sol_cnt = 0; sol_cnt < (*soln_size); sol_cnt++) {
                max_poss[sol_cnt] = 0;
                l_soln_val[sol_cnt] = 0;
                for (int thd_cnt = s_idx; thd_cnt < e_idx; thd_cnt++) {
                    int poss_idx = sol_cnt * (*grid_size) + thd_cnt;
                    l_poss[poss_idx] = poss[poss_idx];
                    l_poss_curr[poss_idx] = l_poss[poss_idx];
                    // if (blockIdx.x == 0) {
                    //     printf("[%d, %d] %d l_poss %d poss %d\n", sol_cnt, thd_cnt, poss_idx, l_poss[poss_idx], poss[poss_idx]);
                    // }
                }
            }
            __syncthreads();
        }
        // Stage: count, per empty cell, how many candidate values remain
        // (entries equal to 0 in l_poss mean "still possible").
        if (threadIdx.x < (*soln_size)) {
            int t_width_sol = calcThreadWidth(*soln_size);
            int s_idx_sol = calcStartIdx(*soln_size);
            int e_idx_sol = s_idx_sol + t_width_sol;
            // if (blockIdx.x == 0)
            //     printf("s %d e %d w %d\n", s_idx_sol, e_idx_sol, t_width_sol);
            for (int sol_cnt = s_idx_sol; sol_cnt < e_idx_sol; sol_cnt++) {
                for (int thd_cnt = 0; thd_cnt < (*grid_size); thd_cnt++) {
                    int poss_idx = sol_cnt * (*grid_size) + thd_cnt;
                    // if (blockIdx.x == 0)
                    //     printf("[%d, %d] %d max_poss[%d] %d\n", sol_cnt, thd_cnt, poss_idx, sol_cnt, max_poss[sol_cnt]);
                    if (l_poss[poss_idx] == 0) {
                        //printf("max_poss %d\n", max_poss[sol_cnt]);
                        max_poss[sol_cnt] = max_poss[sol_cnt] + 1;
                    }
                }
            }
            __syncthreads();
        }
        // if (blockIdx.x == 0 && threadIdx.x == 0) {
        //     for (int i = 0; i < (*soln_size); i++) {
        //         printf("[%d] \t[%d, %d] ", i, soln_row[i], soln_col[i]);
        //         for (int j = 0; j < (*grid_size); j++) {
        //             printf("[%d] %d ", j, l_poss[i * (*grid_size) + j]);
        //         }
        //         printf("\n");
        //     }
        //     printf("\n");
        //     for (int idx = 0; idx < (*soln_size); idx++)
        //         printf("[%d] %d ,", idx, max_poss[idx]);
        //     printf("\n");
        // }
        // Stage: pick the pivot — the cell with the most remaining candidates.
        if (threadIdx.x == 0) {
            max = 0;
            for (int idx = 0; idx < (*soln_size); idx++) {
                if (max_poss[idx] > max) {
                    max = max_poss[idx];
                    max_idx = idx;
                }
            }
            // printf("max poss [%d] %d\n", max_idx, max);
        }
        // Move the pivot cell to slot 0 (soln_row/soln_col are global, so
        // only one block performs the swap).
        if (blockIdx.x == 0 && threadIdx.x == 0) {
            swap(soln_row, 0, max_idx);
            swap(soln_col, 0, max_idx);
        }
        //if (blockIdx.x == 0 && threadIdx.x == 0) {
        //     for (int i = 0; i < (*soln_size); i++) {
        //         printf("[%d] \t[%d, %d] ", i, soln_row[i], soln_col[i]);
        //         for (int j = 0; j < (*grid_size); j++) {
        //             printf("[%d] %d ", j, l_poss[i * (*grid_size) + j]);
        //         }
        //         printf("\n");
        //     }
        //     printf("\n");
        //     for (int idx = 0; idx < (*soln_size); idx++)
        //         printf("[%d] %d ,", idx, max_poss[idx]);
        //     printf("\n");
        //}
        // Swap the pivot's candidate rows to row 0 in both shared tables so
        // the search below can index cells in pivot-first order.
        if (threadIdx.x < (*grid_size)) {
            int t_width = calcThreadWidth(*grid_size);
            int s_idx = calcStartIdx(*grid_size);
            int e_idx = s_idx + t_width;
            for (int thd_cnt = s_idx; thd_cnt < e_idx; thd_cnt++) {
                int poss_idx_1 = thd_cnt;
                int poss_idx_2 = max_idx * (*grid_size) + thd_cnt;
                swap(l_poss, poss_idx_1, poss_idx_2);
                swap(l_poss_curr, poss_idx_1, poss_idx_2);
            }
            __syncthreads();
        }
        //if (blockIdx.x == 0 && threadIdx.x == 0) {
        //     for (int j = 0; j < (*soln_size); j++) {
        //         printf("curr %d [%d, %d] [%d] ", j, soln_row[j], soln_col[j], getBox(soln_row[j], soln_col[j], *box_size));
        //         for (int i = 0; i < (*grid_size); i++) {
        //             printf("[%d]%d", i, l_poss_curr[j * (*grid_size) + i]);
        //         }
        //         printf("\n");
        //     }
        //}
        //if (blockIdx.x == 0 && threadIdx.x == 0) {
        //     for (int i = 0; i < (*soln_size); i++) {
        //         printf("[%d] [%d, %d] %d\n", i, soln_row[i], soln_col[i], max_poss[i]);
        //     }
        //}
        __syncthreads();
    }
    // Branch exploration: block b tries the b-th candidate of the pivot cell,
    // then searches the remaining cells depth-first (thread 0 only).
    if (blockIdx.x < max) {
        int b_width = calcBlockWidth(max);
        int b_s_idx = calcStartIdxBlock(max);
        int b_e_idx = b_s_idx + b_width;
        if (threadIdx.x == 0) {
            for (int blk_cnt = b_s_idx; blk_cnt < b_e_idx; blk_cnt++) {
                bool branching = true;
                for (int curr_idx = 0; curr_idx < (*soln_size); curr_idx++) {
                    bool soln_found = false;
                    int poss_cnt = -1;
                    //if (blockIdx.x == 0) {
                    //     printf("\ncurr-1 %d [%d, %d]: ", curr_idx - 1, soln_row[curr_idx - 1], soln_col[curr_idx - 1]);
                    //     for (int i = 0; i < (*grid_size); i++) {
                    //         printf("[%d]%d", i, l_poss_curr[(curr_idx - 1) * (*grid_size) + i]);
                    //     }
                    //     printf("\n");
                    //     printf("curr %d [%d, %d]: ", curr_idx, soln_row[curr_idx], soln_col[curr_idx]);
                    //     for (int i = 0; i < (*grid_size); i++) {
                    //         printf("[%d]%d", i, l_poss_curr[curr_idx * (*grid_size) + i]);
                    //     }
                    //     printf("\n");
                    //     for (int i = 0; i < (*soln_size); i++) {
                    //         printf("[%d]%d ", i, l_soln_val[i]);
                    //     }
                    //     printf("\n");
                    //}
                    // Scan candidate values for the current cell; on the very
                    // first cell, `branching` forces block b to skip to its
                    // b-th open candidate.
                    for (int soln_cnt = 0; soln_cnt < (*grid_size); soln_cnt++) {
                        int poss_idx = curr_idx * (*grid_size) + soln_cnt;
                        if (l_poss_curr[poss_idx] == 0) {
                            poss_cnt++;
                            if (branching) {
                                branching = blk_cnt != poss_cnt;
                            }
                            if (!branching) {
                                // check existing solutions: reject the value if an
                                // already-chosen cell in the same row/col/box uses it
                                bool exist = false;
                                for (int chk_soln = 0; l_soln_val[chk_soln] != 0 && chk_soln < (*soln_size); chk_soln++) {
                                    if (soln_row[chk_soln] == soln_row[curr_idx]) {
                                        if (l_soln_val[chk_soln] == soln_cnt + 1) {
                                            exist = true;
                                            break;
                                        }
                                    }
                                    if (soln_col[chk_soln] == soln_col[curr_idx]) {
                                        if (l_soln_val[chk_soln] == soln_cnt + 1) {
                                            exist = true;
                                            break;
                                        }
                                    }
                                    int soln_box = getBox(soln_row[curr_idx], soln_col[curr_idx], *box_size);
                                    int chk_box = getBox(soln_row[chk_soln], soln_col[chk_soln], *box_size);
                                    if (soln_box == chk_box) {
                                        if (l_soln_val[chk_soln] == soln_cnt + 1) {
                                            exist = true;
                                            break;
                                        }
                                    }
                                }
                                if (!exist) {
                                    soln_found = true;
                                    l_poss_curr[poss_idx] = 1;   // consume this candidate
                                    l_soln_val[curr_idx] = soln_cnt + 1;
                                    break;
                                }
                            }
                        }
                    }
                    // Backtrack: restore this cell's candidates from the
                    // pristine table, clear the previous cell's choice, and
                    // step back one cell (-2 then loop ++).
                    if (!soln_found) {
                        if (curr_idx < 1) {
                            break;
                        }
                        for (int reset_cnt = 0; reset_cnt < (*grid_size); reset_cnt++) {
                            int reset_idx = curr_idx * (*grid_size) + reset_cnt;
                            l_poss_curr[reset_idx] = l_poss[reset_idx];
                        }
                        l_soln_val[curr_idx - 1] = 0;
                        curr_idx -= 2;
                    }
                    if (curr_idx < 1 && !branching) {
                        break;
                    }
                    branching = false;
                }
                //for (int i = 0; i < *soln_size; i++) {
                //     printf("blk %d [%d](%d %d) %d\n", blk_cnt, i, soln_row[i], soln_col[i], l_soln_val[i]);
                //}
            }
        }
        // if (threadIdx.x == 0) {
        //     branching = true;
        //     for (curr_idx = 0; curr_idx < (*soln_size); curr_idx++) { // iterate over all solutions
        //         if (blockIdx.x == 0) {
        //             printf("\ncurr-1 %d [%d, %d]: ", curr_idx - 1, soln_row[curr_idx - 1], soln_col[curr_idx - 1]);
        //             for (int i = 0; i < (*grid_size); i++) {
        //                 printf("[%d]%d", i, l_poss_curr[(curr_idx - 1) * (*grid_size) + i]);
        //             }
        //             printf("\n");
        //
        //             printf("curr %d [%d, %d]: ", curr_idx, soln_row[curr_idx], soln_col[curr_idx]);
        //             for (int i = 0; i < (*grid_size); i++) {
        //                 printf("[%d]%d", i, l_poss_curr[curr_idx * (*grid_size) + i]);
        //             }
        //             printf("\n");
        //         }
        //         soln_found = false;
        //         int poss_cnt = -1;
        //         for (int soln_cnt = 0; soln_cnt < (*grid_size); soln_cnt++) { // the jth number of a cell
        //             int poss_idx = curr_idx * (*grid_size) + soln_cnt;
        //             if (l_poss_curr[poss_idx] == 0) {
        //                 poss_cnt++;
        //                 if (!branching || blk_cnt == poss_cnt) {
        //                     soln_found = true;
        //                     setRelativeVal(l_poss_curr, 1, curr_idx, soln_cnt, soln_row, soln_col, *grid_size, *box_size, *soln_size);
        //                     l_poss_curr[poss_idx] = 1;
        //                     l_soln_val[curr_idx] = soln_cnt + 1;
        //                     break;
        //                 }
        //             }
        //         }
        //
        //         // backward case
        //         if (!soln_found) {
        //             stop_at = curr_idx;
        //
        //             if (curr_idx < 1) // have a branch checked exhaustively
        //                 break;
        //             for (int reset_cnt = 0; reset_cnt < (*grid_size); reset_cnt++) {
        //                 int reset_idx = curr_idx * (*grid_size) + reset_cnt;
        //                 l_poss_curr[reset_idx] = poss[reset_idx];
        //                 //printf("resetting %d\n", reset_cnt);
        //                 resetRelativeVal(l_poss_curr, poss, curr_idx, reset_cnt, soln_row, soln_col, *grid_size, *box_size, *soln_size);
        //             }
        //             if (blockIdx.x == 0 && threadIdx.x == 0) {
        //                 printf("reset:");
        //                 printf("curr %d [%d, %d]: ", curr_idx, soln_row[curr_idx], soln_col[curr_idx]);
        //                 for (int i = 0; i < (*grid_size); i++) {
        //                     printf("[%d]%d", i, l_poss_curr[curr_idx * (*grid_size) + i]);
        //                 }
        //                 printf("\n");
        //             }
        //             curr_idx --; // go back one cell and clear possiblity
        //
        //             //for (int reset_cnt = 0; reset_cnt < (*grid_size); reset_cnt++) {
        //             //    int reset_idx = curr_idx * (*grid_size) + reset_cnt;
        //             //    l_poss[reset_idx] = l_poss_curr[reset_idx];
        //             //    resetRelativeVal(l_poss_curr, l_poss, curr_idx, reset_cnt, soln_row, soln_col, *grid_size, *box_size, *soln_size);
        //             //}
        //             //l_soln_val[curr_idx] = 0;
        //             curr_idx --; // nullify increment of for loop
        //         }
        //
        //         if (curr_idx < 1 && !branching) {
        //             break;
        //         }
        //         branching = false;
        //     }
        // }
        // }
        // __syncthreads();
        // Publish: if every cell got a value, copy the solution to global
        // memory and print the completed grid (debug output).
        if (l_soln_val[(*soln_size) - 1] != 0) {
            if (threadIdx.x < *soln_size) {
                int t_width = calcThreadWidth(*soln_size);
                int s_idx = calcStartIdx(*soln_size);
                int e_idx = s_idx + t_width;
                for (int thd_cnt = s_idx; thd_cnt < e_idx; thd_cnt++) {
                    soln_val[thd_cnt] = l_soln_val[thd_cnt];
                }
                for (int thd_cnt = s_idx; thd_cnt < e_idx; thd_cnt++) {
                    int g_idx = soln_row[thd_cnt] * (*grid_size) + soln_col[thd_cnt];
                    grid[g_idx] = l_soln_val[thd_cnt];
                    //printf("%d %d %d %d\n", soln_row[thd_cnt], soln_col[thd_cnt], g_idx, grid[g_idx]);
                }
                __syncthreads();
                if (threadIdx.x == 0) {
                    for (int i = 0; i < *grid_size; i++) {
                        for (int j = 0; j < *grid_size; j++) {
                            int g_idx = i * (*grid_size) + j;
                            printf("%d ", grid[g_idx]);
                        }
                        printf("\n");
                    }
                }
            }
        }
        __syncthreads();
        //if (blockIdx.x == 0 && threadIdx.x == 0) {
        //     for (int i = 0; i < *soln_size; i++) {
        //         printf("[%d] %d\n", i, soln_val[i]);
        //     }
        //     for (int i = 0; i < *grid_size; i++) {
        //         for (int j = 0; j < *grid_size; j++) {
        //             int g_idx = i * (*grid_size) + j;
        //             printf("%d ", grid[g_idx]);
        //         }
        //         printf("\n");
        //     }
        //}
    }
}
__device__ void resetRelativeVal(short* l_poss_curr, short * l_poss, int curr_idx, int soln_cnt, int* soln_row, int* soln_col, int grid_size, int box_size, int soln_size) {
	// Restores the candidate flag for value index `soln_cnt` in every later
	// solution cell that shares a row, column, or box with the cell at
	// `curr_idx`, copying the flag back from the pristine table `l_poss`.
	const int row = soln_row[curr_idx];
	const int col = soln_col[curr_idx];
	const int box = getBox(row, col, box_size);
	for (int cell = curr_idx; cell < soln_size; cell++) {
		if (cell == curr_idx)
			continue;  // the pivot cell itself is not reset here
		// "Related" = same row, same column, or same box as the pivot.
		const bool related = (soln_row[cell] == row)
		                  || (soln_col[cell] == col)
		                  || (getBox(soln_row[cell], soln_col[cell], box_size) == box);
		if (related) {
			const int flag = cell * grid_size + soln_cnt;
			l_poss_curr[flag] = l_poss[flag];
		}
	}
}
__device__ void setRelativeVal(short* l_poss, int val, int curr_idx, int soln_cnt, int* soln_row, int* soln_col, int grid_size, int box_size, int soln_size) {
	// Writes `val` into the candidate flag for value index `soln_cnt` of every
	// solution cell from `curr_idx` onward that shares a row, column, or box
	// with the cell at `curr_idx` (the pivot cell itself matches all three).
	const int row = soln_row[curr_idx];
	const int col = soln_col[curr_idx];
	const int box = getBox(row, col, box_size);
	for (int cell = curr_idx; cell < soln_size; cell++) {
		const int cell_box = getBox(soln_row[cell], soln_col[cell], box_size);
		if (soln_row[cell] == row || soln_col[cell] == col || cell_box == box) {
			l_poss[cell * grid_size + soln_cnt] = val;
		}
	}
}
__device__ int getBox(int row, int col, int box_size) {
	// Boxes are numbered row-major: `box_size` boxes per horizontal band.
	return (row / box_size) * box_size + (col / box_size);
}
__device__ void swap(int* soln, int idx1, int idx2) {
	// Exchanges the elements at idx1 and idx2 in place.
	const int held = soln[idx2];
	soln[idx2] = soln[idx1];
	soln[idx1] = held;
}
__device__ void swap(short* soln, int idx1, int idx2) {
	// short-array overload: exchanges the elements at idx1 and idx2 in place.
	const short held = soln[idx2];
	soln[idx2] = soln[idx1];
	soln[idx1] = held;
}
__device__ void checkRow(int* grid, short* veri_row, int grid_size, int row_s, int row_e) {
	// For each row in [row_s, row_e): marks every non-zero value v present in
	// that row by setting veri_row[row * grid_size + v - 1] = 1, so missing
	// values show up as zeros in the verification table.
	for (int r = row_s; r < row_e; r++) {
		for (int c = 0; c < grid_size; c++) {
			const int v = grid[r * grid_size + c];
			if (v != 0) {
				veri_row[r * grid_size + v - 1] = 1;
			}
		}
	}
}
__device__ void checkCol(int* grid, short* veri_col, int grid_size, int col_s, int col_e) {
	// For each column in [col_s, col_e): marks every non-zero value v present
	// in that column by setting veri_col[col * grid_size + v - 1] = 1.
	for (int r = 0; r < grid_size; r++) {
		for (int c = col_s; c < col_e; c++) {
			const int v = grid[r * grid_size + c];
			if (v != 0) {
				veri_col[c * grid_size + v - 1] = 1;
			}
		}
	}
}
__device__ void checkBox(int* grid, short* veri_box, int grid_size, int box_size, int thd_cnt_s, int thd_cnt_e) {
	// For each box index in [thd_cnt_s, thd_cnt_e): marks every non-zero value
	// v present in the box by setting veri_box[box * grid_size + v - 1] = 1.
	for (int box = thd_cnt_s; box < thd_cnt_e; box++) {
		// Top-left corner of this box (boxes are laid out row-major).
		const int row0 = box * box_size / grid_size * box_size;
		const int col0 = (box * box_size) % grid_size;  // == box*box_size - box*box_size/grid_size*grid_size
		for (int r = row0; r < row0 + box_size; r++) {
			for (int c = col0; c < col0 + box_size; c++) {
				const int v = grid[r * grid_size + c];
				if (v != 0) {
					veri_box[box * grid_size + v - 1] = 1;
				}
			}
		}
	}
}
__device__ int calcBlockWidth(int grid_size) {
	// Number of items this block processes: grid_size / gridDim.x per block,
	// with the last block absorbing the remainder; one item per block when
	// there are at least grid_size blocks.
	if (gridDim.x >= grid_size) {
		return 1;
	}
	int width = grid_size / gridDim.x;
	if (blockIdx.x == gridDim.x - 1) {
		// Last block takes whatever is left after the even split.
		width = grid_size - blockIdx.x * width;
	}
	return width;
}
__device__ int calcStartIdxBlock(int grid_size) {
	// First item index owned by this block; counterpart of calcBlockWidth.
	if (gridDim.x < grid_size) {
		return blockIdx.x * (grid_size / gridDim.x);
	}
	return blockIdx.x;
}
__device__ int calcThreadWidth(int grid_size) {
	// Number of items this thread processes: grid_size / blockDim.x each,
	// with the last thread absorbing the remainder; one item per thread when
	// there are at least grid_size threads.
	if (blockDim.x >= grid_size) {
		return 1;
	}
	int width = grid_size / blockDim.x;
	if (threadIdx.x == blockDim.x - 1) {
		// Last thread takes whatever is left after the even split.
		width = grid_size - threadIdx.x * width;
	}
	return width;
}
__device__ int calcStartIdx(int grid_size) {
	// First item index owned by this thread; counterpart of calcThreadWidth.
	if (blockDim.x < grid_size) {
		return threadIdx.x * (grid_size / blockDim.x);
	}
	return threadIdx.x;
}
// Program entry point: builds a random Sudoku puzzle on the host, then
// validates and solves it on the GPU.
// Usage: <prog> <grid_size>   (grid_size must be a perfect square, e.g. 9).
int main(int argc, char ** argv) {
	if (argc < 2) {
		cerr << "usage: " << (argc > 0 ? argv[0] : "sudoku") << " <grid_size>" << endl;
		return EXIT_FAILURE;
	}
	// Reports (but does not abort on) CUDA runtime failures, preserving the
	// original best-effort error-handling style and message format.
	auto check = [](cudaError_t status, const char* what) {
		if (status != cudaSuccess) {
			cerr << what << " failed with error code " << status << endl;
		}
	};
	int grid_size = stoi(argv[1]);
	int zeros = NUM_EMPTY;  // number of cells cleared for the solver to fill
	int* soln_val = new int[zeros];
	int* soln_row = new int[zeros];
	int* soln_col = new int[zeros];
	// Build a solved grid, then blank `zeros` random cells; the coordinates
	// (and reference answers) land in soln_row / soln_col / soln_val.
	SudokuGrid config = SudokuGrid(grid_size);
	config.randFirstRow(&config.grid, grid_size);
	config.solve(&config.grid, grid_size);
	config.randClearSlots(&config.grid, grid_size, zeros, soln_row, soln_col, soln_val);
	config.printGrid();
	cout << "Number of empty slots = " << zeros << endl;
	// CPU timing scaffold (the CPU solve is currently disabled).
	time_t start_time = time(NULL);
	//config.solve(&config.grid, grid_size);
	time_t end_time = time(NULL);
	printf("Time spent: %ld\n", end_time - start_time);
	// ---------------- GPU section ----------------
	cudaEvent_t start, stop;
	float gpu_time = 0.0f;
	int grid_size_cuda = grid_size * grid_size;  // total cell count
	int box_size = sqrt(grid_size);
	int* grid = sudokuInit(NULL, config.grid, grid_size);
	// Device-side buffers (underscore prefix = device pointer).
	int* _grid = 0;
	int* _soln_val = 0;
	int* _soln_row = 0;
	int* _soln_col = 0;
	short* _veri_row = 0;
	short* _veri_col = 0;
	short* _veri_box = 0;
	short* _poss = 0;
	int* _grid_size = 0;
	int* _box_size = 0;
	int* _soln_size = 0;
	check(cudaMalloc((void**)&_grid, grid_size_cuda * sizeof(int)), "cudaMalloc grid");
	check(cudaMalloc((void**)&_soln_val, zeros * sizeof(int)), "cudaMalloc soln_val");
	check(cudaMalloc((void**)&_soln_row, zeros * sizeof(int)), "cudaMalloc soln_row");
	check(cudaMalloc((void**)&_soln_col, zeros * sizeof(int)), "cudaMalloc soln_col");
	check(cudaMalloc((void**)&_veri_row, grid_size_cuda * sizeof(short)), "cudaMalloc veri_row");
	check(cudaMalloc((void**)&_veri_col, grid_size_cuda * sizeof(short)), "cudaMalloc veri_col");
	check(cudaMalloc((void**)&_veri_box, grid_size_cuda * sizeof(short)), "cudaMalloc veri_box");
	check(cudaMalloc((void**)&_poss, zeros * grid_size * sizeof(short)), "cudaMalloc poss");
	check(cudaMalloc((void**)&_grid_size, sizeof(int)), "cudaMalloc grid_size");
	check(cudaMalloc((void**)&_box_size, sizeof(int)), "cudaMalloc box_size");
	check(cudaMalloc((void**)&_soln_size, sizeof(int)), "cudaMalloc soln_size");
	// The verification scratch buffers and the candidate table start zeroed.
	check(cudaMemset(_veri_row, 0, grid_size_cuda * sizeof(short)), "cudaMemset veri_row");
	check(cudaMemset(_veri_col, 0, grid_size_cuda * sizeof(short)), "cudaMemset veri_col");
	check(cudaMemset(_veri_box, 0, grid_size_cuda * sizeof(short)), "cudaMemset veri_box");
	check(cudaMemset(_poss, 0, zeros * grid_size * sizeof(short)), "cudaMemset poss");
	// Upload the puzzle and its metadata.
	check(cudaMemcpy(_grid, grid, grid_size_cuda * sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy grid");
	check(cudaMemcpy(_soln_val, soln_val, zeros * sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy soln_val");
	check(cudaMemcpy(_soln_row, soln_row, zeros * sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy soln_row");
	check(cudaMemcpy(_soln_col, soln_col, zeros * sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy soln_col");
	check(cudaMemcpy(_grid_size, &grid_size, sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy grid_size");
	check(cudaMemcpy(_box_size, &box_size, sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy box_size");
	check(cudaMemcpy(_soln_size, &zeros, sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy soln_size");
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	// Stage 1: mark which values already appear in each row / column / box.
	checkValid <<<1, NUM_THREAD>>> (_grid, _veri_row, _veri_col, _veri_box, _grid_size, _box_size);
	check(cudaGetLastError(), "checkValid launch");
	check(cudaDeviceSynchronize(), "checkValid sync");
	// Stage 2: build the candidate-value table for every empty cell.
	genPoss <<<NUM_BLOCK, NUM_THREAD >>> (_grid, _poss, _veri_row, _veri_col, _veri_box, _soln_row,
		_soln_col, _soln_val, _grid_size, _box_size, _soln_size);
	check(cudaGetLastError(), "genPoss launch");
	check(cudaDeviceSynchronize(), "genPoss sync");
	// Stage 3: backtracking solve over the candidate table.
	solveCuda <<<1, NUM_THREAD >>> (_grid, _soln_row, _soln_col, _soln_val, _poss, _grid_size, _soln_size, _box_size);
	check(cudaGetLastError(), "solveCuda launch");
	check(cudaDeviceSynchronize(), "solveCuda sync");
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&gpu_time, start, stop);
	printf("Time spent: %.5f\n", gpu_time);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	// Release every device buffer.  BUGFIX: the original leaked _poss,
	// _grid_size, _box_size and _soln_size.
	cudaFree(_grid);
	cudaFree(_soln_val);
	cudaFree(_soln_row);
	cudaFree(_soln_col);
	cudaFree(_veri_row);
	cudaFree(_veri_col);
	cudaFree(_veri_box);
	cudaFree(_poss);
	cudaFree(_grid_size);
	cudaFree(_box_size);
	cudaFree(_soln_size);
	delete[] grid;
	delete[] soln_val;
	delete[] soln_col;
	delete[] soln_row;
	return EXIT_SUCCESS;
}
|
1a2207afb26d0ad4b9b814ac03666888fa127904.hip | // !!! This is a file automatically generated by hipify!!!
#include "utils.h"
void __cudaSafeCall(hipError_t err, char *file, int line)
{
	// No-op on success; otherwise report the failing call site and terminate.
	// Pass __FILE__ / __LINE__ for `file` / `line`.
	if (err == hipSuccess)
		return;
	// NOTE(review): `file` could be `const char*` -- confirm against the
	// declaration in utils.h before changing the signature.
	fprintf(stderr, "CUDA error in file %s at line %i: %s.\n", file, line, hipGetErrorString(err));
	exit(EXIT_FAILURE);
}
// Selects the first visible device whose compute capability is at least
// `major`.`minor`, and makes it current; exits if no HIP-capable device (or
// none new enough) exists.
void cudaDeviceInit(int major, int minor)
{
	int devCount, device;
	hipGetDeviceCount(&devCount);
	if (devCount == 0) {
		printf("No CUDA capable devices detected. \n");
		exit(EXIT_FAILURE);
	}
	for (device=0; device < devCount; device++){
		hipDeviceProp_t props;
		hipGetDeviceProperties(&props, device);
		// BUGFIX: the first clause previously read `props.major > 1`, which
		// hardcoded the requested major version; compare against `major`.
		if (props.major > major || (props.major == major && props.minor >= minor)) break;
	}
	if (device == devCount) {
		// Message now reflects the requested capability instead of a
		// hardcoded "1.2".
		printf("No device above %d.%d compute capability detected. \n", major, minor);
		exit(EXIT_FAILURE);
	}
	else hipSetDevice(device);
}
void cudaDeviceCount()
{
int deviceCount;
hipGetDeviceCount(&deviceCount);
int device;
for(device=0; device<deviceCount; device++)
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
printf("Device %d has compute capability %d.%d \n", device, deviceProp.major, deviceProp.minor);
}
cudaDeviceInit();
} | 1a2207afb26d0ad4b9b814ac03666888fa127904.cu | #include "utils.h"
void __cudaSafeCall(cudaError_t err, char *file, int line)
{
	// No-op on success; otherwise report the failing call site and terminate.
	// Pass __FILE__ / __LINE__ for `file` / `line`.
	if (err == cudaSuccess)
		return;
	// NOTE(review): `file` could be `const char*` -- confirm against the
	// declaration in utils.h before changing the signature.
	fprintf(stderr, "CUDA error in file %s at line %i: %s.\n", file, line, cudaGetErrorString(err));
	exit(EXIT_FAILURE);
}
// Selects the first visible device whose compute capability is at least
// `major`.`minor`, and makes it current; exits if no CUDA-capable device (or
// none new enough) exists.
void cudaDeviceInit(int major, int minor)
{
	int devCount, device;
	cudaGetDeviceCount(&devCount);
	if (devCount == 0) {
		printf("No CUDA capable devices detected. \n");
		exit(EXIT_FAILURE);
	}
	for (device=0; device < devCount; device++){
		cudaDeviceProp props;
		cudaGetDeviceProperties(&props, device);
		// BUGFIX: the first clause previously read `props.major > 1`, which
		// hardcoded the requested major version; compare against `major`.
		if (props.major > major || (props.major == major && props.minor >= minor)) break;
	}
	if (device == devCount) {
		// Message now reflects the requested capability instead of a
		// hardcoded "1.2".
		printf("No device above %d.%d compute capability detected. \n", major, minor);
		exit(EXIT_FAILURE);
	}
	else cudaSetDevice(device);
}
void cudaDeviceCount()
{
	// Prints the compute capability of every visible device, then selects
	// one via cudaDeviceInit() (using its default capability requirement).
	int deviceCount;
	cudaGetDeviceCount(&deviceCount);
	for (int dev = 0; dev < deviceCount; dev++)
	{
		cudaDeviceProp deviceProp;
		cudaGetDeviceProperties(&deviceProp, dev);
		printf("Device %d has compute capability %d.%d \n", dev, deviceProp.major, deviceProp.minor);
	}
	cudaDeviceInit();
}
6959ac44cf42722d49e0f2a4bbd8f202905d54c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2009, Andrew Corrigan, acorriga@gmu.edu
// This code is from the AIAA-2009-4001 paper
//#include <cutil.h>
#include <helper_cuda.h>
#include <helper_timer.h>
#include <iostream>
#include <fstream>
#define CUDA_UVM
//#define CUDA_HST
//#define CUDA_HYB // work when defined with one of previous two macros
/*
* Options
*
*/
#define GAMMA 1.4f
//#define iterations 2000
#define iterations 20
// #ifndef block_length
// #define block_length 192
// #endif
#define NDIM 3
#define NNB 4
#define RK 3 // 3rd order RK
#define ff_mach 1.2f
#define deg_angle_of_attack 0.0f
/*
* not options
*/
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE_0 RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE_0 RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_0 RD_WG_SIZE
#else
#define BLOCK_SIZE_0 192
#endif
#ifdef RD_WG_SIZE_1_0
#define BLOCK_SIZE_1 RD_WG_SIZE_1_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_1 RD_WG_SIZE_1
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_1 RD_WG_SIZE
#else
#define BLOCK_SIZE_1 192
#endif
#ifdef RD_WG_SIZE_2_0
#define BLOCK_SIZE_2 RD_WG_SIZE_2_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_2 RD_WG_SIZE_2
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_2 RD_WG_SIZE
#else
#define BLOCK_SIZE_2 192
#endif
#ifdef RD_WG_SIZE_3_0
#define BLOCK_SIZE_3 RD_WG_SIZE_3_0
#elif defined(RD_WG_SIZE_3)
#define BLOCK_SIZE_3 RD_WG_SIZE_3
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_3 RD_WG_SIZE
#else
#define BLOCK_SIZE_3 192
#endif
#ifdef RD_WG_SIZE_4_0
#define BLOCK_SIZE_4 RD_WG_SIZE_4_0
#elif defined(RD_WG_SIZE_4)
#define BLOCK_SIZE_4 RD_WG_SIZE_4
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_4 RD_WG_SIZE
#else
#define BLOCK_SIZE_4 192
#endif
// #if block_length > 128
// #warning "the kernels may fail too launch on some systems if the block length is too large"
// #endif
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
/*
* Generic functions
*/
// Allocates N elements of T; the memory space is selected at compile time:
// unified/managed memory (CUDA_UVM), pinned host memory (CUDA_HST), or plain
// device memory (default).  Aborts via checkCudaErrors on failure.
template <typename T>
T* alloc(unsigned long long N)
{
	T* t;
#if defined (CUDA_UVM)
	checkCudaErrors(hipMallocManaged((void**)&t, sizeof(T)*N));
#elif defined (CUDA_HST)
	checkCudaErrors(hipHostMalloc((void**)&t, sizeof(T)*N));
#else
	checkCudaErrors(hipMalloc((void**)&t, sizeof(T)*N));
#endif
	return t;
}
// Allocates N elements of T in plain device memory, regardless of the
// CUDA_UVM / CUDA_HST build flags.  N widened from int to unsigned long long
// for consistency with alloc<T> and to avoid overflow for large buffers
// (existing int-argument callers are unaffected).
template <typename T>
T* alloc_dev(unsigned long long N)
{
	T* t;
	checkCudaErrors(hipMalloc((void**)&t, sizeof(T)*N));
	return t;
}
// Frees memory previously obtained from alloc() / alloc_dev().
template <typename T>
void dealloc(T* array)
{
	checkCudaErrors(hipFree(static_cast<void*>(array)));
}
// Device-to-device copy of N elements of T.
template <typename T>
void copy(T* dst, T* src, int N)
{
	const size_t bytes = sizeof(T) * (size_t)N;
	checkCudaErrors(hipMemcpy((void*)dst, (void*)src, bytes, hipMemcpyDeviceToDevice));
}
// Host-to-device copy of N elements of T.
template <typename T>
void upload(T* dst, T* src, int N)
{
	const size_t bytes = sizeof(T) * (size_t)N;
	checkCudaErrors(hipMemcpy((void*)dst, (void*)src, bytes, hipMemcpyHostToDevice));
}
// Device-to-host copy of N elements of T.
template <typename T>
void download(T* dst, T* src, int N)
{
	const size_t bytes = sizeof(T) * (size_t)N;
	checkCudaErrors(hipMemcpy((void*)dst, (void*)src, bytes, hipMemcpyDeviceToHost));
}
// Writes the solution fields to three text files ("density", "momentum",
// "density_energy").  nel = real element count, nelr = padded count used as
// the SoA stride.  With UVM/pinned builds the variables are host-readable in
// place; otherwise a staging copy is downloaded first.
void dump(float* variables, int nel, int nelr)
{
#if !defined (CUDA_UVM) && !defined (CUDA_HST)
	// Plain device memory: stage a host copy before writing.
	float* h_variables = new float[nelr*NVAR];
	download(h_variables, variables, nelr*NVAR);
#else
	// Managed / pinned memory is directly readable from the host.
	float* h_variables = variables;
#endif
	{
		std::ofstream file("density");
		file << nel << " " << nelr << std::endl;
		for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl;
	}
	{
		std::ofstream file("momentum");
		file << nel << " " << nelr << std::endl;
		for(int i = 0; i < nel; i++)
		{
			for(int j = 0; j != NDIM; j++)
				file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " ";
			file << std::endl;
		}
	}
	{
		std::ofstream file("density_energy");
		file << nel << " " << nelr << std::endl;
		for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl;
	}
#if !defined (CUDA_UVM) && !defined (CUDA_HST)
	delete[] h_variables;
#endif
}
/*
* Element-based Cell-centered FVM solver functions
*/
__constant__ float ff_variable[NVAR];
__constant__ float3 ff_flux_contribution_momentum_x[1];
__constant__ float3 ff_flux_contribution_momentum_y[1];
__constant__ float3 ff_flux_contribution_momentum_z[1];
__constant__ float3 ff_flux_contribution_density_energy[1];
// One thread per element: copies the far-field state (constant memory) into
// every variable slot of element i.  No bounds guard -- relies on the launch
// configuration covering exactly nelr threads (see initialize_variables).
__global__ void cuda_initialize_variables(int nelr, float* variables)
{
	const int i = (blockDim.x*blockIdx.x + threadIdx.x);
	// SoA layout: component j of element i lives at i + j*nelr.
	for(int j = 0; j < NVAR; j++)
		variables[i + j*nelr] = ff_variable[j];
}
// Host wrapper: launches one thread per element.
// NOTE(review): the grid size truncates when nelr % BLOCK_SIZE_1 != 0,
// leaving tail elements untouched -- presumably nelr is padded upstream to a
// multiple of the block size; confirm.
void initialize_variables(int nelr, float* variables)
{
	dim3 Dg(nelr / BLOCK_SIZE_1), Db(BLOCK_SIZE_1);
	hipLaunchKernelGGL(( cuda_initialize_variables), dim3(Dg), dim3(Db), 0, 0, nelr, variables);
	getLastCudaError("initialize_variables failed");
}
// Inviscid (Euler) flux contribution of one cell state: the three momentum
// flux rows plus the energy flux vector.  The momentum flux tensor is
// symmetric, so off-diagonal entries are filled by mirroring components that
// were already computed.  All outputs are written; no input is modified.
__device__ __host__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy)
{
	// Energy flux: v * (E + p).
	const float de_plus_p = density_energy + pressure;
	fc_density_energy.x = velocity.x * de_plus_p;
	fc_density_energy.y = velocity.y * de_plus_p;
	fc_density_energy.z = velocity.z * de_plus_p;
	// Row x: v_x * m, pressure added on the diagonal.
	fc_momentum_x.x = velocity.x * momentum.x + pressure;
	fc_momentum_x.y = velocity.x * momentum.y;
	fc_momentum_x.z = velocity.x * momentum.z;
	// Row y: xy mirrored from row x.
	fc_momentum_y.x = fc_momentum_x.y;
	fc_momentum_y.y = velocity.y * momentum.y + pressure;
	fc_momentum_y.z = velocity.y * momentum.z;
	// Row z: xz and yz mirrored from rows x and y.
	fc_momentum_z.x = fc_momentum_x.z;
	fc_momentum_z.y = fc_momentum_y.z;
	fc_momentum_z.z = velocity.z * momentum.z + pressure;
}
// Primitive velocity from conserved variables: v = momentum / density,
// component-wise.
__device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity)
{
	velocity.x = momentum.x / density;
	velocity.y = momentum.y / density;
	velocity.z = momentum.z / density;
}
// Squared magnitude of the velocity vector.
__device__ inline float compute_speed_sqd(float3& velocity)
{
	return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z;
}
// Ideal-gas pressure: p = (gamma - 1) * (E - 0.5 * rho * |v|^2).
__device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd)
{
	return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd);
}
// Speed of sound: c = sqrt(gamma * p / rho).
__device__ inline float compute_speed_of_sound(float& density, float& pressure)
{
	return sqrtf(float(GAMMA)*pressure/density);
}
// Per-element local time-step factor: 0.5 / (sqrt(area) * (|v| + c)).
// One thread per element; no bounds guard -- relies on the launch
// configuration covering exactly nelr threads (see compute_step_factor).
__global__ void cuda_compute_step_factor(int nelr, float* variables, float* areas, float* step_factors)
{
	const int i = (blockDim.x*blockIdx.x + threadIdx.x);
	// Load the conserved state of element i (SoA layout: i + component*nelr).
	float density = variables[i + VAR_DENSITY*nelr];
	float3 momentum;
	momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr];
	momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr];
	momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr];
	float density_energy = variables[i + VAR_DENSITY_ENERGY*nelr];
	float3 velocity; compute_velocity(density, momentum, velocity);
	float speed_sqd = compute_speed_sqd(velocity);
	float pressure = compute_pressure(density, density_energy, speed_sqd);
	float speed_of_sound = compute_speed_of_sound(density, pressure);
	// dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c).... but when we do time stepping, this later would need to be divided by the area, so we just do it all at once
	step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound));
}
// Host wrapper for cuda_compute_step_factor (one thread per element).
// NOTE(review): grid size truncates when nelr % BLOCK_SIZE_2 != 0 --
// presumably nelr is padded upstream; confirm.
void compute_step_factor(int nelr, float* variables, float* areas, float* step_factors)
{
	dim3 Dg(nelr / BLOCK_SIZE_2), Db(BLOCK_SIZE_2);
	hipLaunchKernelGGL(( cuda_compute_step_factor), dim3(Dg), dim3(Db), 0, 0, nelr, variables, areas, step_factors);
	getLastCudaError("compute_step_factor failed");
}
/*
*
*
*/
// Computes, for each element i, the net inviscid flux through its NNB faces
// (density, momentum, energy), including an artificial-viscosity smoothing
// term and special handling for boundary "neighbours": nb == -1 is a wing
// surface, nb == -2 is the far field (uses the constant-memory ff_* state).
// One thread per element; no bounds guard (launch covers exactly nelr
// threads).  In the CUDA_HYB build the normals array is split across two
// buffers: indices >= ele_dev_size live in normals2.
#if defined (CUDA_HYB)
__global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes, float* normals2, unsigned long long ele_dev_size)
#else
__global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes)
#endif
{
	const float smoothing_coefficient = float(0.2f);
	const int i = (blockDim.x*blockIdx.x + threadIdx.x);
	int j, nb;
	float3 normal; float normal_len;
	float factor;
	// Load this element's conserved state (SoA layout: i + component*nelr)
	// and derive its primitive quantities and flux contribution once.
	float density_i = variables[i + VAR_DENSITY*nelr];
	float3 momentum_i;
	momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
	momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
	momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
	float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
	float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
	float speed_sqd_i = compute_speed_sqd(velocity_i);
	float speed_i = sqrtf(speed_sqd_i);
	float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
	float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
	float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
	float3 flux_contribution_i_density_energy;
	compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
	// Accumulators for the net flux of element i.
	float flux_i_density = float(0.0f);
	float3 flux_i_momentum;
	flux_i_momentum.x = float(0.0f);
	flux_i_momentum.y = float(0.0f);
	flux_i_momentum.z = float(0.0f);
	float flux_i_density_energy = float(0.0f);
	float3 velocity_nb;
	float density_nb, density_energy_nb;
	float3 momentum_nb;
	float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
	float3 flux_contribution_nb_density_energy;
	float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
	for(j = 0; j < NNB; j++)
	{
		nb = elements_surrounding_elements[i + j*nelr];
#if defined (CUDA_HYB)
		// Hybrid layout: pick each normal component from whichever of the two
		// normals buffers contains the flattened index.
		unsigned long long idx = i + (j + 0*NNB)*nelr;
		if (idx < ele_dev_size)
			normal.x = normals[idx];
		else
			normal.x = normals2[idx - ele_dev_size];
		idx += NNB*nelr;
		if (idx < ele_dev_size)
			normal.y = normals[idx];
		else
			normal.y = normals2[idx - ele_dev_size];
		idx += NNB*nelr;
		if (idx < ele_dev_size)
			normal.z = normals[idx];
		else
			normal.z = normals2[idx - ele_dev_size];
#else
		normal.x = normals[i + (j + 0*NNB)*nelr];
		normal.y = normals[i + (j + 1*NNB)*nelr];
		normal.z = normals[i + (j + 2*NNB)*nelr];
#endif
		normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
		if(nb >= 0) // a legitimate neighbor
		{
			// Load the neighbour state and derive its flux contribution.
			density_nb = variables[nb + VAR_DENSITY*nelr];
			momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
			momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
			momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
			density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
			compute_velocity(density_nb, momentum_nb, velocity_nb);
			speed_sqd_nb = compute_speed_sqd(velocity_nb);
			pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
			speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
			compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
			// artificial viscosity
			factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
			flux_i_density += factor*(density_i-density_nb);
			flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
			flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
			flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
			flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
			// accumulate cell-centered fluxes
			factor = float(0.5f)*normal.x;
			flux_i_density += factor*(momentum_nb.x+momentum_i.x);
			flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
			flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
			flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
			flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
			factor = float(0.5f)*normal.y;
			flux_i_density += factor*(momentum_nb.y+momentum_i.y);
			flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
			flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
			flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
			flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
			factor = float(0.5f)*normal.z;
			flux_i_density += factor*(momentum_nb.z+momentum_i.z);
			flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
			flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
			flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
			flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
		}
		else if(nb == -1)	// a wing boundary
		{
			// Solid wall: only the pressure acts on the face.
			flux_i_momentum.x += normal.x*pressure_i;
			flux_i_momentum.y += normal.y*pressure_i;
			flux_i_momentum.z += normal.z*pressure_i;
		}
		else if(nb == -2) // a far field boundary
		{
			// Use the precomputed far-field state from constant memory.
			factor = float(0.5f)*normal.x;
			flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x);
			flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].x+flux_contribution_i_density_energy.x);
			flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x);
			flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x);
			flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x);
			factor = float(0.5f)*normal.y;
			flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y);
			flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].y+flux_contribution_i_density_energy.y);
			flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y);
			flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y);
			flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y);
			factor = float(0.5f)*normal.z;
			flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z);
			flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].z+flux_contribution_i_density_energy.z);
			flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z);
			flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z);
			flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z);
		}
	}
	// Store the accumulated net flux for element i (SoA layout).
	fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
	fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
	fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
	fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
	fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
}
// Host wrapper for cuda_compute_flux (one thread per element); in the hybrid
// build the split normals buffers are forwarded to the kernel.
// NOTE(review): grid size truncates when nelr % BLOCK_SIZE_3 != 0 --
// presumably nelr is padded upstream; confirm.
#if defined (CUDA_HYB)
void compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes, float* normals2, unsigned long long ele_dev_size)
#else
void compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes)
#endif
{
	dim3 Dg(nelr / BLOCK_SIZE_3), Db(BLOCK_SIZE_3);
#if defined (CUDA_HYB)
	hipLaunchKernelGGL(( cuda_compute_flux), dim3(Dg),dim3(Db), 0, 0, nelr, elements_surrounding_elements, normals, variables, fluxes, normals2, ele_dev_size);
#else
	hipLaunchKernelGGL(( cuda_compute_flux), dim3(Dg),dim3(Db), 0, 0, nelr, elements_surrounding_elements, normals, variables, fluxes);
#endif
	getLastCudaError("compute_flux failed");
}
// RK substep j: variables = old_variables + step_factor/(RK+1-j) * fluxes,
// applied to all NVAR conserved quantities of element i (one thread per
// element, no bounds guard -- launch covers exactly nelr threads).
__global__ void cuda_time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes)
{
	const int i = (blockDim.x*blockIdx.x + threadIdx.x);
	const float factor = step_factors[i]/float(RK+1-j);
	// SoA layout: component v of element i lives at i + v*nelr; NVAR covers
	// density (0), momentum (1..3) and density-energy (4).
	for (int v = 0; v < NVAR; v++) {
		const int idx = i + v*nelr;
		variables[idx] = old_variables[idx] + factor*fluxes[idx];
	}
}
// Host wrapper: run one RK stage (stage index j) over all nelr elements.
void time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes)
{
  // One thread per element; nelr is padded to a block-size multiple by main().
  const dim3 block(BLOCK_SIZE_4);
  const dim3 grid(nelr / BLOCK_SIZE_4);
  hipLaunchKernelGGL(( cuda_time_step), dim3(grid), dim3(block), 0, 0, j, nelr, old_variables, variables, step_factors, fluxes);
  getLastCudaError("update failed");
}
/*
* Main function
*/
// Driver: read the mesh, replicate it input_time times, upload to the GPU,
// then run `iterations` outer steps of an RK-`RK` explicit Euler solver and
// report average wall time per iteration.
// Usage: euler3d <data_file> <input_time_multiplier>
int main(int argc, char** argv)
{
  printf("WG size of kernel:initialize = %d, WG size of kernel:compute_step_factor = %d, WG size of kernel:compute_flux = %d, WG size of kernel:time_step = %d\n", BLOCK_SIZE_1, BLOCK_SIZE_2, BLOCK_SIZE_3, BLOCK_SIZE_4);
  if (argc < 3)
  {
    std::cout << "specify data file name" << std::endl;
    return 0;
  }
  const char* data_file_name = argv[1];
  // Replication factor: the input mesh is duplicated this many times to scale
  // the problem size.
  int input_time = atoi(argv[2]);
  hipDeviceProp_t prop;
  int dev;
  checkCudaErrors(hipSetDevice(0));
  checkCudaErrors(hipGetDevice(&dev));
  checkCudaErrors(hipGetDeviceProperties(&prop, dev));
  printf("Name: %s\n", prop.name);
  // set far field conditions and load them into constant memory on the gpu
  {
    float h_ff_variable[NVAR];
    const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack);
    h_ff_variable[VAR_DENSITY] = float(1.4);
    float ff_pressure = float(1.0f);
    // Ideal-gas speed of sound: sqrt(gamma * p / rho).
    float ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]);
    float ff_speed = float(ff_mach)*ff_speed_of_sound;
    float3 ff_velocity;
    ff_velocity.x = ff_speed*float(cos((float)angle_of_attack));
    ff_velocity.y = ff_speed*float(sin((float)angle_of_attack));
    ff_velocity.z = 0.0f;
    h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x;
    h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y;
    h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z;
    // Total energy = kinetic + internal (p / (gamma - 1)).
    h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(float(0.5f)*(ff_speed*ff_speed)) + (ff_pressure / float(GAMMA-1.0f));
    float3 h_ff_momentum;
    h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0);
    h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1);
    h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2);
    float3 h_ff_flux_contribution_momentum_x;
    float3 h_ff_flux_contribution_momentum_y;
    float3 h_ff_flux_contribution_momentum_z;
    float3 h_ff_flux_contribution_density_energy;
    // Precompute the far-field flux tensors once on the host.
    compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_flux_contribution_momentum_x, h_ff_flux_contribution_momentum_y, h_ff_flux_contribution_momentum_z, h_ff_flux_contribution_density_energy);
    // copy far field conditions to the gpu (constant memory)
    checkCudaErrors( hipMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(float)) );
    checkCudaErrors( hipMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(float3)) );
    checkCudaErrors( hipMemcpyToSymbol(ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(float3)) );
    checkCudaErrors( hipMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(float3)) );
    checkCudaErrors( hipMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(float3)) );
  }
  int nel;   // number of elements in the (replicated) mesh
  int nelr;  // nel rounded up to a multiple of BLOCK_SIZE_0 (padded size)
  // read in domain geometry
  float* areas;
  int* elements_surrounding_elements;
  float* normals;
#if defined (CUDA_HYB)
  // HYB mode: normals split into a device-resident part (ele_dev_size floats)
  // and a unified/host part (normals2) for what does not fit.
  unsigned long long ele_dev_size;
  float* normals2 = NULL;
#endif
  {
    std::ifstream file(data_file_name);
    file >> nel;
    // Round nel*input_time up to the next multiple of BLOCK_SIZE_0.
    nelr = BLOCK_SIZE_0*((nel * input_time / BLOCK_SIZE_0 )+ ::min(1, (nel * input_time) % BLOCK_SIZE_0));
#if defined (CUDA_HYB)
    areas = alloc_dev<float>(nelr);
    elements_surrounding_elements = alloc_dev<int>(nelr*NNB);
    // Budget: 14 GB minus the other device arrays.
    // NOTE(review): `+ sizeof(float)*nelr*2` looks like it should be `-`
    // (step_factors etc. consume memory); also nelr*NVAR etc. are int
    // products that can overflow before widening — confirm.
    long long avail_size = 14 * 1024 * 1024 * 1024L - sizeof(float)*nelr*NVAR*3 + sizeof(float)*nelr*2 - sizeof(int)*nelr*NNB;
    if (avail_size <= 0) {
      std::cout << "Input is too large for HYB." << std::endl;
      return 0;
    }
    ele_dev_size = avail_size / sizeof(float);
    unsigned long long ele_um_size = 0;
    if (ele_dev_size >= nelr*NDIM*NNB) {
      // Everything fits on the device, so the hybrid split is unnecessary.
      std::cout << "Input is too small for HYB." << std::endl;
      ele_dev_size = nelr*NDIM*NNB;
    } else
      ele_um_size = nelr*NDIM*NNB - ele_dev_size;
    normals = alloc_dev<float>(ele_dev_size);
    if (ele_um_size)
      normals2 = alloc<float>(ele_um_size);
#else
    areas = alloc<float>(nelr);
    elements_surrounding_elements = alloc<int>(nelr*NNB);
    unsigned long long size = (unsigned long long)nelr*NDIM*NNB;
    normals = alloc<float>(size);
#endif
    // Choose host-side staging buffers: separate heap copies for plain
    // device memory, or the mapped/managed pointers directly for UVM/HST.
#if defined (CUDA_HYB)
    float* h_areas = new float[nelr];
    int* h_elements_surrounding_elements = new int[nelr*NNB];
    float* h_normals = new float[ele_dev_size];
    float* h_normals2 = normals2;
#elif defined (CUDA_UVM) || defined (CUDA_HST)
    float* h_areas = areas;
    int* h_elements_surrounding_elements = elements_surrounding_elements;
    float* h_normals = normals;
#else
    float* h_areas = new float[nelr];
    int* h_elements_surrounding_elements = new int[nelr*NNB];
    float* h_normals = new float[nelr*NDIM*NNB];
#endif
    // read in data (SoA layout: neighbour j of element i at [i + j*nelr])
    for(int i = 0; i < nel; i++)
    {
      file >> h_areas[i];
      for(int j = 0; j < NNB; j++)
      {
        file >> h_elements_surrounding_elements[i + j*nelr];
        if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1;
        h_elements_surrounding_elements[i + j*nelr]--; //it's coming in with Fortran numbering
        for(int k = 0; k < NDIM; k++)
        {
#if defined (CUDA_HYB)
          // Route each normal component to the device part or the overflow part.
          unsigned long long idx = i + (j + k*NNB)*nelr;
          if (idx < ele_dev_size) {
            file >> h_normals[idx];
            h_normals[idx] = -h_normals[idx];
          } else {
            file >> h_normals2[idx - ele_dev_size];
            h_normals2[idx - ele_dev_size] = -h_normals2[idx - ele_dev_size];
          }
#else
          file >> h_normals[i + (j + k*NNB)*nelr];
          h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr];
#endif
        }
      }
    }
    file.close();
    // Replicate the mesh input_time-1 more times.
    // NOTE(review): replicas are placed at offset nn*nel, but the per-neighbour
    // stride is nelr — replicated entries do not land on the same SoA layout as
    // the originals unless nel == nelr; confirm this is intentional.
    for (int nn = 1; nn < input_time; nn++)
    {
      for(int i = 0; i < nel; i++)
      {
        h_areas[i + nn*nel] = h_areas[i];
        for(int j = 0; j < NNB; j++)
        {
          h_elements_surrounding_elements[i + j*nelr + nn*nel] = h_elements_surrounding_elements[i + j*nelr];
          for(int k = 0; k < NDIM; k++)
          {
#if defined (CUDA_HYB)
            unsigned long long idx = i + (j + k*NNB)*nelr + nn*nel;
            unsigned long long idx2 = i + (j + k*NNB)*nelr;
            if (idx < ele_dev_size) {
              if (idx2 < ele_dev_size)
                h_normals[idx] = h_normals[idx2];
              else
                h_normals[idx] = h_normals2[idx2 - ele_dev_size];
            } else {
              if (idx2 < ele_dev_size)
                h_normals2[idx - ele_dev_size] = h_normals[idx2];
              else
                h_normals2[idx - ele_dev_size] = h_normals2[idx2 - ele_dev_size];
            }
#else
            h_normals[i + (j + k*NNB)*nelr + nn*nel] = h_normals[i + (j + k*NNB)*nelr];
#endif
          }
        }
      }
    }
    nel *= input_time;
    // fill in remaining (padding) elements by duplicating the last real one
    int last = nel-1;
    for(int i = nel; i < nelr; i++)
    {
      h_areas[i] = h_areas[last];
      for(int j = 0; j < NNB; j++)
      {
        // duplicate the last element
        h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr];
#if defined (CUDA_HYB)
        for(int k = 0; k < NDIM; k++) {
          unsigned long long idx = i + (j + k*NNB)*nelr;
          if (idx < ele_dev_size) {
            if (last + (j + k*NNB)*nelr < ele_dev_size)
              h_normals[idx] = h_normals[last + (j + k*NNB)*nelr];
            else
              h_normals[idx] = h_normals2[last + (j + k*NNB)*nelr - ele_dev_size];
          }
          else {
            if (last + (j + k*NNB)*nelr < ele_dev_size)
              h_normals2[idx - ele_dev_size] = h_normals[last + (j + k*NNB)*nelr];
            else
              h_normals2[idx - ele_dev_size] = h_normals2[last + (j + k*NNB)*nelr - ele_dev_size];
          }
        }
#else
        for(int k = 0; k < NDIM; k++) h_normals[i + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr];
#endif
      }
    }
    // Push staged data to the device; UVM/HST modes wrote in place, so only
    // the explicit-copy modes upload and free staging buffers here.
#if defined (CUDA_HYB)
    upload<float>(areas, h_areas, nelr);
    upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB);
    upload<float>(normals, h_normals, ele_dev_size);
    delete[] h_areas;
    delete[] h_elements_surrounding_elements;
    delete[] h_normals;
#elif !defined (CUDA_UVM) && !defined (CUDA_HST)
    upload<float>(areas, h_areas, nelr);
    upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB);
    upload<float>(normals, h_normals, nelr*NDIM*NNB);
    delete[] h_areas;
    delete[] h_elements_surrounding_elements;
    delete[] h_normals;
#endif
  }
  // Create arrays and set initial conditions (everything starts at far-field)
  float* variables = alloc<float>(nelr*NVAR);
  initialize_variables(nelr, variables);
  float* old_variables = alloc<float>(nelr*NVAR);
  float* fluxes = alloc<float>(nelr*NVAR);
  float* step_factors = alloc<float>(nelr);
  // make sure all memory is fully allocated before we start timing
  initialize_variables(nelr, old_variables);
  initialize_variables(nelr, fluxes);
  hipMemset( (void*) step_factors, 0, sizeof(float)*nelr );
  // make sure CUDA isn't still doing something before we start timing
  hipDeviceSynchronize();
  unsigned long long total_size = sizeof(float)*nelr // area
    + sizeof(int)*nelr*NNB + sizeof(float)*nelr*NDIM*NNB;
  std::cout << "Input size: " << total_size << "\n";
  total_size += sizeof(float)*nelr*NVAR*3 + sizeof(float)*nelr;
  std::cout << "Total size: " << total_size << "\n";
  // these need to be computed the first time in order to compute time step
  std::cout << "Starting..." << std::endl;
  StopWatchInterface *timer = 0;
  // unsigned int timer = 0;
  // CUT_SAFE_CALL( cutCreateTimer( &timer));
  // CUT_SAFE_CALL( cutStartTimer( timer));
  sdkCreateTimer(&timer);
  sdkStartTimer(&timer);
  // Begin iterations: each outer step snapshots the state, computes the local
  // time step once, then applies RK flux/update sub-stages.
  for(int i = 0; i < iterations; i++)
  {
    copy<float>(old_variables, variables, nelr*NVAR);
    // for the first iteration we compute the time step
    compute_step_factor(nelr, variables, areas, step_factors);
    getLastCudaError("compute_step_factor failed");
    for(int j = 0; j < RK; j++)
    {
#if defined (CUDA_HYB)
      compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes, normals2, ele_dev_size);
#else
      compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes);
#endif
      getLastCudaError("compute_flux failed");
      time_step(j, nelr, old_variables, variables, step_factors, fluxes);
      getLastCudaError("time_step failed");
    }
  }
  // Block until all queued GPU work finishes so the timer is meaningful.
  hipDeviceSynchronize();
  // CUT_SAFE_CALL( cutStopTimer(timer) );
  sdkStopTimer(&timer);
  std::cout << (sdkGetAverageTimerValue(&timer)/1000.0) / iterations << " seconds per iteration" << std::endl;
  //std::cout << "Saving solution..." << std::endl;
  //dump(variables, nel, nelr);
  //std::cout << "Saved solution..." << std::endl;
  std::cout << "Cleaning up..." << std::endl;
  // NOTE(review): deallocation is intentionally skipped (benchmark exits
  // immediately); re-enable the dealloc calls if this is ever embedded.
  //dealloc<float>(areas);
  //dealloc<int>(elements_surrounding_elements);
  //dealloc<float>(normals);
  //
  //dealloc<float>(variables);
  //dealloc<float>(old_variables);
  //dealloc<float>(fluxes);
  //dealloc<float>(step_factors);
  std::cout << "Done..." << std::endl;
  return 0;
}
| 6959ac44cf42722d49e0f2a4bbd8f202905d54c1.cu | // Copyright 2009, Andrew Corrigan, acorriga@gmu.edu
// This code is from the AIAA-2009-4001 paper
//#include <cutil.h>
#include <helper_cuda.h>
#include <helper_timer.h>
#include <iostream>
#include <fstream>
#define CUDA_UVM
//#define CUDA_HST
//#define CUDA_HYB // work when defined with one of previous two macros
/*
* Options
*
*/
#define GAMMA 1.4f
//#define iterations 2000
#define iterations 20
// #ifndef block_length
// #define block_length 192
// #endif
#define NDIM 3
#define NNB 4
#define RK 3 // 3rd order RK
#define ff_mach 1.2f
#define deg_angle_of_attack 0.0f
/*
* not options
*/
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE_0 RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE_0 RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_0 RD_WG_SIZE
#else
#define BLOCK_SIZE_0 192
#endif
#ifdef RD_WG_SIZE_1_0
#define BLOCK_SIZE_1 RD_WG_SIZE_1_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_1 RD_WG_SIZE_1
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_1 RD_WG_SIZE
#else
#define BLOCK_SIZE_1 192
#endif
// Work-group size for compute_step_factor. Fallback chain mirrors the other
// BLOCK_SIZE_* blocks: most-specific override first, then the generic one.
#ifdef RD_WG_SIZE_2_0
#define BLOCK_SIZE_2 RD_WG_SIZE_2_0
#elif defined(RD_WG_SIZE_2)  // was RD_WG_SIZE_1 (copy-paste typo): defining only
                             // RD_WG_SIZE_1 expanded BLOCK_SIZE_2 to the
                             // undefined RD_WG_SIZE_2 and broke the build
#define BLOCK_SIZE_2 RD_WG_SIZE_2
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_2 RD_WG_SIZE
#else
#define BLOCK_SIZE_2 192
#endif
#ifdef RD_WG_SIZE_3_0
#define BLOCK_SIZE_3 RD_WG_SIZE_3_0
#elif defined(RD_WG_SIZE_3)
#define BLOCK_SIZE_3 RD_WG_SIZE_3
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_3 RD_WG_SIZE
#else
#define BLOCK_SIZE_3 192
#endif
#ifdef RD_WG_SIZE_4_0
#define BLOCK_SIZE_4 RD_WG_SIZE_4_0
#elif defined(RD_WG_SIZE_4)
#define BLOCK_SIZE_4 RD_WG_SIZE_4
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_4 RD_WG_SIZE
#else
#define BLOCK_SIZE_4 192
#endif
// #if block_length > 128
// #warning "the kernels may fail too launch on some systems if the block length is too large"
// #endif
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
/*
* Generic functions
*/
// Allocate N elements of T in the build's primary memory space:
// CUDA_UVM -> managed (unified) memory, CUDA_HST -> pinned host memory,
// otherwise plain device memory. Aborts via checkCudaErrors on failure.
// Caller frees with dealloc().
template <typename T>
T* alloc(unsigned long long N)
{
  T* t;
#if defined (CUDA_UVM)
  checkCudaErrors(cudaMallocManaged((void**)&t, sizeof(T)*N));
#elif defined (CUDA_HST)
  checkCudaErrors(cudaMallocHost((void**)&t, sizeof(T)*N));
#else
  checkCudaErrors(cudaMalloc((void**)&t, sizeof(T)*N));
#endif
  return t;
}
// Allocate N elements of T in device-only memory (always cudaMalloc,
// regardless of CUDA_UVM/CUDA_HST). N is unsigned long long to match alloc()
// and because the HYB path calls alloc_dev<float>(ele_dev_size) with an
// unsigned long long that can exceed INT_MAX (a 14 GB budget is ~3.7e9
// floats); the previous `int N` silently truncated such requests.
// Existing int-argument callers promote implicitly, so the change is
// backward compatible.
template <typename T>
T* alloc_dev(unsigned long long N)
{
  T* t;
  checkCudaErrors(cudaMalloc((void**)&t, sizeof(T)*N));
  return t;
}
// Free memory obtained from alloc()/alloc_dev(). Under CUDA_HST alloc() uses
// cudaMallocHost, whose pointers must be released with cudaFreeHost — the
// unconditional cudaFree used previously is invalid for pinned host memory.
// cudaFree remains correct for both cudaMalloc and cudaMallocManaged pointers.
template <typename T>
void dealloc(T* array)
{
#if defined (CUDA_HST)
  checkCudaErrors(cudaFreeHost((void*)array));
#else
  checkCudaErrors(cudaFree((void*)array));
#endif
}
// Copy N elements between two device-resident buffers (blocking).
template <typename T>
void copy(T* dst, T* src, int N)
{
  const size_t bytes = sizeof(T) * static_cast<size_t>(N);
  checkCudaErrors(cudaMemcpy((void*)dst, (void*)src, bytes, cudaMemcpyDeviceToDevice));
}
// Copy N elements from a host buffer into a device buffer (blocking).
template <typename T>
void upload(T* dst, T* src, int N)
{
  const size_t bytes = sizeof(T) * static_cast<size_t>(N);
  checkCudaErrors(cudaMemcpy((void*)dst, (void*)src, bytes, cudaMemcpyHostToDevice));
}
// Copy N elements from a device buffer back to a host buffer (blocking).
template <typename T>
void download(T* dst, T* src, int N)
{
  const size_t bytes = sizeof(T) * static_cast<size_t>(N);
  checkCudaErrors(cudaMemcpy((void*)dst, (void*)src, bytes, cudaMemcpyDeviceToHost));
}
// Write the solution to three text files ("density", "momentum",
// "density_energy"), one value (or NDIM values) per element for the first
// nel elements. In UVM/HST builds `variables` is host-accessible and read
// directly; otherwise it is first downloaded into a temporary host buffer.
void dump(float* variables, int nel, int nelr)
{
#if !defined (CUDA_UVM) && !defined (CUDA_HST)
  float* h_variables = new float[nelr*NVAR];
  download(h_variables, variables, nelr*NVAR);
#else
  float* h_variables = variables;
#endif
  {
    std::ofstream file("density");
    file << nel << " " << nelr << std::endl;
    for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl;
  }
  {
    std::ofstream file("momentum");
    file << nel << " " << nelr << std::endl;
    for(int i = 0; i < nel; i++)
    {
      for(int j = 0; j != NDIM; j++)
        file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " ";
      file << std::endl;
    }
  }
  {
    std::ofstream file("density_energy");
    file << nel << " " << nelr << std::endl;
    for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl;
  }
#if !defined (CUDA_UVM) && !defined (CUDA_HST)
  delete[] h_variables;
#endif
}
/*
 * Element-based Cell-centered FVM solver functions
 */
// Far-field (freestream) state and its precomputed flux tensors, broadcast
// from constant memory to every thread. Filled once by main() via
// cudaMemcpyToSymbol before any kernel runs. The single-element arrays hold
// one float3 each (rows of the far-field flux contribution).
__constant__ float ff_variable[NVAR];
__constant__ float3 ff_flux_contribution_momentum_x[1];
__constant__ float3 ff_flux_contribution_momentum_y[1];
__constant__ float3 ff_flux_contribution_momentum_z[1];
__constant__ float3 ff_flux_contribution_density_energy[1];
// Seed every conservative variable of element i with the far-field state
// stored in constant memory. One thread per element; SoA layout with
// stride nelr keeps the stores coalesced.
__global__ void cuda_initialize_variables(int nelr, float* variables)
{
  const int i = blockDim.x * blockIdx.x + threadIdx.x;
  for (int v = 0; v < NVAR; ++v)
  {
    variables[i + v * nelr] = ff_variable[v];
  }
}
// Host wrapper: fill all nelr elements with the far-field initial state.
void initialize_variables(int nelr, float* variables)
{
  const dim3 block(BLOCK_SIZE_1);
  const dim3 grid(nelr / BLOCK_SIZE_1);
  cuda_initialize_variables<<<grid, block>>>(nelr, variables);
  getLastCudaError("initialize_variables failed");
}
// Build the inviscid (Euler) flux tensor for one state. Outputs, per spatial
// direction (x/y/z component of each float3):
//   fc_momentum_*  — rows of the momentum flux tensor v ⊗ m + p·I
//   fc_density_energy — (E + p) · v
// Off-diagonal entries are symmetric, so they are copied rather than
// recomputed. Usable from both host (far-field setup in main) and device.
__device__ __host__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy)
{
  fc_momentum_x.x = velocity.x*momentum.x + pressure;
  fc_momentum_x.y = velocity.x*momentum.y;
  fc_momentum_x.z = velocity.x*momentum.z;
  // Symmetric tensor: reuse already-computed off-diagonal terms.
  fc_momentum_y.x = fc_momentum_x.y;
  fc_momentum_y.y = velocity.y*momentum.y + pressure;
  fc_momentum_y.z = velocity.y*momentum.z;
  fc_momentum_z.x = fc_momentum_x.z;
  fc_momentum_z.y = fc_momentum_y.z;
  fc_momentum_z.z = velocity.z*momentum.z + pressure;
  float de_p = density_energy+pressure;
  fc_density_energy.x = velocity.x*de_p;
  fc_density_energy.y = velocity.y*de_p;
  fc_density_energy.z = velocity.z*de_p;
}
// velocity = momentum / density, component-wise. Assumes density != 0
// (physical states only).
__device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity)
{
  velocity.x = momentum.x / density;
  velocity.y = momentum.y / density;
  velocity.z = momentum.z / density;
}
// Squared magnitude of the velocity vector, |v|^2.
__device__ inline float compute_speed_sqd(float3& velocity)
{
  return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z;
}
// Ideal-gas pressure from conservative variables:
// p = (gamma - 1) * (E - 0.5 * rho * |v|^2).
__device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd)
{
  return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd);
}
// Speed of sound for an ideal gas: c = sqrt(gamma * p / rho).
__device__ inline float compute_speed_of_sound(float& density, float& pressure)
{
  return sqrtf(float(GAMMA)*pressure/density);
}
// Per-element local time-step factor (CFL-style): one thread per element,
// SoA reads with stride nelr. No bounds guard — the launcher sizes the grid
// to cover the padded element count exactly.
__global__ void cuda_compute_step_factor(int nelr, float* variables, float* areas, float* step_factors)
{
  const int i = (blockDim.x*blockIdx.x + threadIdx.x);
  float density = variables[i + VAR_DENSITY*nelr];
  float3 momentum;
  momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr];
  momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr];
  momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr];
  float density_energy = variables[i + VAR_DENSITY_ENERGY*nelr];
  float3 velocity; compute_velocity(density, momentum, velocity);
  float speed_sqd = compute_speed_sqd(velocity);
  float pressure = compute_pressure(density, density_energy, speed_sqd);
  float speed_of_sound = compute_speed_of_sound(density, pressure);
  // dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c).... but when we do time stepping, this later would need to be divided by the area, so we just do it all at once
  step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound));
}
// Host wrapper: compute the local time-step factor for every element.
void compute_step_factor(int nelr, float* variables, float* areas, float* step_factors)
{
  const dim3 block(BLOCK_SIZE_2);
  const dim3 grid(nelr / BLOCK_SIZE_2);
  cuda_compute_step_factor<<<grid, block>>>(nelr, variables, areas, step_factors);
  getLastCudaError("compute_step_factor failed");
}
/*
*
*
*/
// Accumulate the flux for element i from its NNB face neighbours:
// central average of the Euler fluxes plus a scalar smoothing (artificial
// viscosity) term. Neighbour codes: >= 0 interior element, -1 wall (wing)
// boundary (pressure force only), -2 far-field boundary (uses the constant-
// memory freestream flux). One thread per element; SoA layout, stride nelr.
// HYB builds read each normal component from the device array `normals` when
// its flat index is below ele_dev_size, otherwise from the overflow array
// `normals2`.
#if defined (CUDA_HYB)
__global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes, float* normals2, unsigned long long ele_dev_size)
#else
__global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes)
#endif
{
  const float smoothing_coefficient = float(0.2f);
  const int i = (blockDim.x*blockIdx.x + threadIdx.x);
  int j, nb;
  float3 normal; float normal_len;
  float factor;
  // Load this element's state once and precompute its flux tensor.
  float density_i = variables[i + VAR_DENSITY*nelr];
  float3 momentum_i;
  momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
  momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
  momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
  float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
  float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
  float speed_sqd_i = compute_speed_sqd(velocity_i);
  float speed_i = sqrtf(speed_sqd_i);
  float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
  float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
  float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
  float3 flux_contribution_i_density_energy;
  compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
  // Running totals for this element's flux.
  float flux_i_density = float(0.0f);
  float3 flux_i_momentum;
  flux_i_momentum.x = float(0.0f);
  flux_i_momentum.y = float(0.0f);
  flux_i_momentum.z = float(0.0f);
  float flux_i_density_energy = float(0.0f);
  float3 velocity_nb;
  float density_nb, density_energy_nb;
  float3 momentum_nb;
  float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
  float3 flux_contribution_nb_density_energy;
  float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
  for(j = 0; j < NNB; j++)
  {
    nb = elements_surrounding_elements[i + j*nelr];
#if defined (CUDA_HYB)
    // Face normal split across the device / overflow arrays.
    unsigned long long idx = i + (j + 0*NNB)*nelr;
    if (idx < ele_dev_size)
      normal.x = normals[idx];
    else
      normal.x = normals2[idx - ele_dev_size];
    idx += NNB*nelr;
    if (idx < ele_dev_size)
      normal.y = normals[idx];
    else
      normal.y = normals2[idx - ele_dev_size];
    idx += NNB*nelr;
    if (idx < ele_dev_size)
      normal.z = normals[idx];
    else
      normal.z = normals2[idx - ele_dev_size];
#else
    normal.x = normals[i + (j + 0*NNB)*nelr];
    normal.y = normals[i + (j + 1*NNB)*nelr];
    normal.z = normals[i + (j + 2*NNB)*nelr];
#endif
    // Normals are area-weighted; length = face area.
    normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
    if(nb >= 0) // a legitimate neighbor
    {
      density_nb = variables[nb + VAR_DENSITY*nelr];
      momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
      momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
      momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
      density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
      compute_velocity(density_nb, momentum_nb, velocity_nb);
      speed_sqd_nb = compute_speed_sqd(velocity_nb);
      pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
      speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
      compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
      // artificial viscosity: proportional to face area and the sum of
      // speeds + sound speeds of both sides, applied to the state jump
      factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
      flux_i_density += factor*(density_i-density_nb);
      flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
      flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
      flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
      flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
      // accumulate cell-centered fluxes: central average of the two sides'
      // flux tensors dotted with the face normal, one direction at a time
      factor = float(0.5f)*normal.x;
      flux_i_density += factor*(momentum_nb.x+momentum_i.x);
      flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
      flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
      flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
      flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
      factor = float(0.5f)*normal.y;
      flux_i_density += factor*(momentum_nb.y+momentum_i.y);
      flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
      flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
      flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
      flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
      factor = float(0.5f)*normal.z;
      flux_i_density += factor*(momentum_nb.z+momentum_i.z);
      flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
      flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
      flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
      flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
    }
    else if(nb == -1) // a wing boundary: only the pressure force acts
    {
      flux_i_momentum.x += normal.x*pressure_i;
      flux_i_momentum.y += normal.y*pressure_i;
      flux_i_momentum.z += normal.z*pressure_i;
    }
    else if(nb == -2) // a far field boundary: average with freestream flux
    {
      factor = float(0.5f)*normal.x;
      flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x);
      flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].x+flux_contribution_i_density_energy.x);
      flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x);
      flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x);
      flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x);
      factor = float(0.5f)*normal.y;
      flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y);
      flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].y+flux_contribution_i_density_energy.y);
      flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y);
      flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y);
      flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y);
      factor = float(0.5f)*normal.z;
      flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z);
      flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].z+flux_contribution_i_density_energy.z);
      flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z);
      flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z);
      flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z);
    }
  }
  // Store accumulated fluxes (SoA, stride nelr).
  fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
  fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
  fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
  fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
  fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
}
// Host wrapper: launch one cuda_compute_flux thread per element.
// HYB builds forward the split normals arrays and the size of the
// device-resident portion.
#if defined (CUDA_HYB)
void compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes, float* normals2, unsigned long long ele_dev_size)
#else
void compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes)
#endif
{
  // nelr is padded to a BLOCK_SIZE_0 multiple in main(); the truncating
  // division assumes BLOCK_SIZE_3 divides nelr.
  dim3 Dg(nelr / BLOCK_SIZE_3), Db(BLOCK_SIZE_3);
#if defined (CUDA_HYB)
  cuda_compute_flux<<<Dg,Db>>>(nelr, elements_surrounding_elements, normals, variables, fluxes, normals2, ele_dev_size);
#else
  cuda_compute_flux<<<Dg,Db>>>(nelr, elements_surrounding_elements, normals, variables, fluxes);
#endif
  getLastCudaError("compute_flux failed");
}
// One RK stage: variables = old_variables + (step_factor / (RK+1-j)) * fluxes
// for every conservative variable of element i. SoA layout, stride nelr,
// one thread per element; the launcher guarantees i maps to a valid padded
// element, so no bounds guard is needed.
__global__ void cuda_time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes)
{
  const int i = (blockDim.x*blockIdx.x + threadIdx.x);
  // Stage weighting: j = 0..RK-1 yields divisors RK+1 down to 2.
  float factor = step_factors[i]/float(RK+1-j);
  variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr];
  variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr];
  variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr];
  variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr];
  variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr];
}
// Host wrapper: run RK stage j of the time integrator over all elements.
void time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes)
{
  const dim3 block(BLOCK_SIZE_4);
  const dim3 grid(nelr / BLOCK_SIZE_4);
  cuda_time_step<<<grid, block>>>(j, nelr, old_variables, variables, step_factors, fluxes);
  getLastCudaError("update failed");
}
/*
* Main function
*/
int main(int argc, char** argv)
{
printf("WG size of kernel:initialize = %d, WG size of kernel:compute_step_factor = %d, WG size of kernel:compute_flux = %d, WG size of kernel:time_step = %d\n", BLOCK_SIZE_1, BLOCK_SIZE_2, BLOCK_SIZE_3, BLOCK_SIZE_4);
if (argc < 3)
{
std::cout << "specify data file name" << std::endl;
return 0;
}
const char* data_file_name = argv[1];
int input_time = atoi(argv[2]);
cudaDeviceProp prop;
int dev;
checkCudaErrors(cudaSetDevice(0));
checkCudaErrors(cudaGetDevice(&dev));
checkCudaErrors(cudaGetDeviceProperties(&prop, dev));
printf("Name: %s\n", prop.name);
// set far field conditions and load them into constant memory on the gpu
{
float h_ff_variable[NVAR];
const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack);
h_ff_variable[VAR_DENSITY] = float(1.4);
float ff_pressure = float(1.0f);
float ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]);
float ff_speed = float(ff_mach)*ff_speed_of_sound;
float3 ff_velocity;
ff_velocity.x = ff_speed*float(cos((float)angle_of_attack));
ff_velocity.y = ff_speed*float(sin((float)angle_of_attack));
ff_velocity.z = 0.0f;
h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x;
h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y;
h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z;
h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(float(0.5f)*(ff_speed*ff_speed)) + (ff_pressure / float(GAMMA-1.0f));
float3 h_ff_momentum;
h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0);
h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1);
h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2);
float3 h_ff_flux_contribution_momentum_x;
float3 h_ff_flux_contribution_momentum_y;
float3 h_ff_flux_contribution_momentum_z;
float3 h_ff_flux_contribution_density_energy;
compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_flux_contribution_momentum_x, h_ff_flux_contribution_momentum_y, h_ff_flux_contribution_momentum_z, h_ff_flux_contribution_density_energy);
// copy far field conditions to the gpu
checkCudaErrors( cudaMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(float)) );
checkCudaErrors( cudaMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(float3)) );
checkCudaErrors( cudaMemcpyToSymbol(ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(float3)) );
checkCudaErrors( cudaMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(float3)) );
checkCudaErrors( cudaMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(float3)) );
}
int nel;
int nelr;
// read in domain geometry
float* areas;
int* elements_surrounding_elements;
float* normals;
#if defined (CUDA_HYB)
unsigned long long ele_dev_size;
float* normals2 = NULL;
#endif
{
std::ifstream file(data_file_name);
file >> nel;
nelr = BLOCK_SIZE_0*((nel * input_time / BLOCK_SIZE_0 )+ std::min(1, (nel * input_time) % BLOCK_SIZE_0));
#if defined (CUDA_HYB)
areas = alloc_dev<float>(nelr);
elements_surrounding_elements = alloc_dev<int>(nelr*NNB);
long long avail_size = 14 * 1024 * 1024 * 1024L - sizeof(float)*nelr*NVAR*3 + sizeof(float)*nelr*2 - sizeof(int)*nelr*NNB;
if (avail_size <= 0) {
std::cout << "Input is too large for HYB." << std::endl;
return 0;
}
ele_dev_size = avail_size / sizeof(float);
unsigned long long ele_um_size = 0;
if (ele_dev_size >= nelr*NDIM*NNB) {
std::cout << "Input is too small for HYB." << std::endl;
ele_dev_size = nelr*NDIM*NNB;
} else
ele_um_size = nelr*NDIM*NNB - ele_dev_size;
normals = alloc_dev<float>(ele_dev_size);
if (ele_um_size)
normals2 = alloc<float>(ele_um_size);
#else
areas = alloc<float>(nelr);
elements_surrounding_elements = alloc<int>(nelr*NNB);
unsigned long long size = (unsigned long long)nelr*NDIM*NNB;
normals = alloc<float>(size);
#endif
#if defined (CUDA_HYB)
float* h_areas = new float[nelr];
int* h_elements_surrounding_elements = new int[nelr*NNB];
float* h_normals = new float[ele_dev_size];
float* h_normals2 = normals2;
#elif defined (CUDA_UVM) || defined (CUDA_HST)
float* h_areas = areas;
int* h_elements_surrounding_elements = elements_surrounding_elements;
float* h_normals = normals;
#else
float* h_areas = new float[nelr];
int* h_elements_surrounding_elements = new int[nelr*NNB];
float* h_normals = new float[nelr*NDIM*NNB];
#endif
// read in data
for(int i = 0; i < nel; i++)
{
file >> h_areas[i];
for(int j = 0; j < NNB; j++)
{
file >> h_elements_surrounding_elements[i + j*nelr];
if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1;
h_elements_surrounding_elements[i + j*nelr]--; //it's coming in with Fortran numbering
for(int k = 0; k < NDIM; k++)
{
#if defined (CUDA_HYB)
unsigned long long idx = i + (j + k*NNB)*nelr;
if (idx < ele_dev_size) {
file >> h_normals[idx];
h_normals[idx] = -h_normals[idx];
} else {
file >> h_normals2[idx - ele_dev_size];
h_normals2[idx - ele_dev_size] = -h_normals2[idx - ele_dev_size];
}
#else
file >> h_normals[i + (j + k*NNB)*nelr];
h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr];
#endif
}
}
}
file.close();
for (int nn = 1; nn < input_time; nn++)
{
for(int i = 0; i < nel; i++)
{
h_areas[i + nn*nel] = h_areas[i];
for(int j = 0; j < NNB; j++)
{
h_elements_surrounding_elements[i + j*nelr + nn*nel] = h_elements_surrounding_elements[i + j*nelr];
for(int k = 0; k < NDIM; k++)
{
#if defined (CUDA_HYB)
unsigned long long idx = i + (j + k*NNB)*nelr + nn*nel;
unsigned long long idx2 = i + (j + k*NNB)*nelr;
if (idx < ele_dev_size) {
if (idx2 < ele_dev_size)
h_normals[idx] = h_normals[idx2];
else
h_normals[idx] = h_normals2[idx2 - ele_dev_size];
} else {
if (idx2 < ele_dev_size)
h_normals2[idx - ele_dev_size] = h_normals[idx2];
else
h_normals2[idx - ele_dev_size] = h_normals2[idx2 - ele_dev_size];
}
#else
h_normals[i + (j + k*NNB)*nelr + nn*nel] = h_normals[i + (j + k*NNB)*nelr];
#endif
}
}
}
}
nel *= input_time;
// fill in remaining data
int last = nel-1;
for(int i = nel; i < nelr; i++)
{
h_areas[i] = h_areas[last];
for(int j = 0; j < NNB; j++)
{
// duplicate the last element
h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr];
#if defined (CUDA_HYB)
for(int k = 0; k < NDIM; k++) {
unsigned long long idx = i + (j + k*NNB)*nelr;
if (idx < ele_dev_size) {
if (last + (j + k*NNB)*nelr < ele_dev_size)
h_normals[idx] = h_normals[last + (j + k*NNB)*nelr];
else
h_normals[idx] = h_normals2[last + (j + k*NNB)*nelr - ele_dev_size];
}
else {
if (last + (j + k*NNB)*nelr < ele_dev_size)
h_normals2[idx - ele_dev_size] = h_normals[last + (j + k*NNB)*nelr];
else
h_normals2[idx - ele_dev_size] = h_normals2[last + (j + k*NNB)*nelr - ele_dev_size];
}
}
#else
for(int k = 0; k < NDIM; k++) h_normals[i + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr];
#endif
}
}
#if defined (CUDA_HYB)
upload<float>(areas, h_areas, nelr);
upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB);
upload<float>(normals, h_normals, ele_dev_size);
delete[] h_areas;
delete[] h_elements_surrounding_elements;
delete[] h_normals;
#elif !defined (CUDA_UVM) && !defined (CUDA_HST)
upload<float>(areas, h_areas, nelr);
upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB);
upload<float>(normals, h_normals, nelr*NDIM*NNB);
delete[] h_areas;
delete[] h_elements_surrounding_elements;
delete[] h_normals;
#endif
}
// Create arrays and set initial conditions
float* variables = alloc<float>(nelr*NVAR);
initialize_variables(nelr, variables);
float* old_variables = alloc<float>(nelr*NVAR);
float* fluxes = alloc<float>(nelr*NVAR);
float* step_factors = alloc<float>(nelr);
// make sure all memory is floatly allocated before we start timing
initialize_variables(nelr, old_variables);
initialize_variables(nelr, fluxes);
cudaMemset( (void*) step_factors, 0, sizeof(float)*nelr );
// make sure CUDA isn't still doing something before we start timing
cudaThreadSynchronize();
unsigned long long total_size = sizeof(float)*nelr // area
+ sizeof(int)*nelr*NNB + sizeof(float)*nelr*NDIM*NNB;
std::cout << "Input size: " << total_size << "\n";
total_size += sizeof(float)*nelr*NVAR*3 + sizeof(float)*nelr;
std::cout << "Total size: " << total_size << "\n";
// these need to be computed the first time in order to compute time step
std::cout << "Starting..." << std::endl;
StopWatchInterface *timer = 0;
// unsigned int timer = 0;
// CUT_SAFE_CALL( cutCreateTimer( &timer));
// CUT_SAFE_CALL( cutStartTimer( timer));
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
// Begin iterations
for(int i = 0; i < iterations; i++)
{
copy<float>(old_variables, variables, nelr*NVAR);
// for the first iteration we compute the time step
compute_step_factor(nelr, variables, areas, step_factors);
getLastCudaError("compute_step_factor failed");
for(int j = 0; j < RK; j++)
{
#if defined (CUDA_HYB)
compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes, normals2, ele_dev_size);
#else
compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes);
#endif
getLastCudaError("compute_flux failed");
time_step(j, nelr, old_variables, variables, step_factors, fluxes);
getLastCudaError("time_step failed");
}
}
cudaThreadSynchronize();
// CUT_SAFE_CALL( cutStopTimer(timer) );
sdkStopTimer(&timer);
std::cout << (sdkGetAverageTimerValue(&timer)/1000.0) / iterations << " seconds per iteration" << std::endl;
//std::cout << "Saving solution..." << std::endl;
//dump(variables, nel, nelr);
//std::cout << "Saved solution..." << std::endl;
std::cout << "Cleaning up..." << std::endl;
//dealloc<float>(areas);
//dealloc<int>(elements_surrounding_elements);
//dealloc<float>(normals);
//
//dealloc<float>(variables);
//dealloc<float>(old_variables);
//dealloc<float>(fluxes);
//dealloc<float>(step_factors);
std::cout << "Done..." << std::endl;
return 0;
}
|
9b2bedc49264148ee1c2e1f8dd5089efaf290005.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <stdio.h>
#include <stdlib.h>
#include <cusparse_v2.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
/*
* M = # of rows
* N = # of columns
*/
int M = 1024; /* number of matrix rows */
int N = 1024; /* number of matrix columns */
/*
 * Fill *outA with a column-major M x N matrix in which roughly one third of
 * the entries are non-zero (values in [0, 100], derived from rand()) and the
 * rest are exactly zero.
 *
 * Returns the number of non-zero entries generated, or 0 with *outA == NULL
 * if the host allocation fails.  Uses the rand() stream; seed with srand()
 * before calling for reproducible matrices.
 */
int generate_random_dense_matrix(int M, int N, double **outA)
{
    double rMax = (double)RAND_MAX;
    double *A = (double *)malloc(sizeof(double) * M * N);
    int totalNnz = 0;

    if (A == NULL)
    {
        /* Report failure instead of dereferencing a NULL buffer below. */
        *outA = NULL;
        return 0;
    }

    for (int j = 0; j < N; j++)
    {
        for (int i = 0; i < M; i++)
        {
            int r = rand();
            double *curr = A + (j * M + i); /* column-major: column j, row i */

            /* ~2/3 of entries are forced to zero so the matrix is sparse. */
            if (r % 3 > 0)
            {
                *curr = 0.0;
            }
            else
            {
                *curr = ((double)r / rMax) * 100.0;
            }

            if (*curr != 0.0)
            {
                totalNnz++;
            }
        }
    }

    *outA = A;
    return totalNnz;
}
/*
 * Print the top-left max_row x max_col corner of a matrix, with "..."
 * markers standing in for the elided remainder.  Debug/preview output only.
 *
 * Uses ISO C printf instead of the Annex-K printf_s, which glibc does not
 * provide (this file targets a Linux/HIP toolchain).  The parameter is
 * renamed from M to mat so it no longer shadows the file-scope global M.
 *
 * NOTE(review): generate_random_dense_matrix fills its buffer column-major,
 * while this indexes mat[row * ncols + col] (row-major), so the preview is a
 * transposed/strided view of the data -- confirm whether exact element
 * positions matter for this output.
 */
void print_partial_matrix(double *mat, int nrows, int ncols, int max_row, int max_col)
{
    for (int row = 0; row < max_row; row++)
    {
        for (int col = 0; col < max_col; col++)
        {
            printf("%2.2f ", mat[row * ncols + col]);
        }
        printf("...\n");
    }
    printf("...\n");
}
/*
 * Demo driver: builds two random dense M x N matrices, converts A to CSR on
 * the device with hipSPARSE, computes C = alpha * A * B + beta * C via
 * hipsparseDcsrmm, and prints the top-left corners of A, B and C.
 *
 * Returns EXIT_SUCCESS, or EXIT_FAILURE if the device non-zero count of A
 * disagrees with the host-side count.
 */
int main(int argc, char **argv)
{
    double *A, *d_A;
    double *B, *d_B;
    double *C, *d_C;
    double *d_csrValA;
    int *d_csrRowPtrA;
    int *d_csrColIndA;
    int *d_AnnzPerRow;
    int nnzATotal;
    /* Scalars for C = alpha * A * B + beta * C. */
    const double alpha = 3.0, beta = 5.0;
    hipsparseHandle_t handle = 0;
    hipsparseMatDescr_t descrA = 0;

    /* Generate the two input matrices (column-major, ~1/3 non-zero). */
    srand(2468);
    int actualNnzA = generate_random_dense_matrix(M, N, &A);
    int actualNnzB = generate_random_dense_matrix(M, N, &B);
    (void)actualNnzB; /* B is used dense; its non-zero count is not needed. */

    printf("A:\n");
    print_partial_matrix(A, M, N, 10, 10);
    printf("B:\n");
    print_partial_matrix(B, M, N, 10, 10);

    /* Create the hipSPARSE handle; A is general with 0-based indexing. */
    CHECK_CUSPARSE(hipsparseCreate(&handle));
    CHECK_CUSPARSE(hipsparseCreateMatDescr(&descrA));
    CHECK_CUSPARSE(hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL));
    CHECK_CUSPARSE(hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ZERO));

    /* Device buffers for the dense matrices. */
    CHECK(hipMalloc((void **)&d_A, M * N * sizeof(double)));
    CHECK(hipMalloc((void **)&d_B, M * N * sizeof(double)));
    CHECK(hipMalloc((void **)&d_C, M * N * sizeof(double)));

    /* Transfer the inputs to the device. */
    CHECK(hipMemcpy(d_A, A, M * N * sizeof(double), hipMemcpyHostToDevice));
    CHECK(hipMemcpy(d_B, B, M * N * sizeof(double), hipMemcpyHostToDevice));

    /* Per-row non-zero counts of A, computed on the device. */
    CHECK(hipMalloc(&d_AnnzPerRow, M * sizeof(int)));
    CHECK_CUSPARSE(hipsparseDnnz(handle, HIPSPARSE_DIRECTION_ROW, M, N, descrA, d_A, M, d_AnnzPerRow, &nnzATotal));
    if (actualNnzA != nnzATotal)
    {
        printf("Number of non-zero elements in A: %d don't match number returned by cuSPARSE NNZ: %d\n", actualNnzA, nnzATotal);
        /* Was a no-op statement `EXIT_SUCCESS;`: actually bail out instead
           of running csrmm on inconsistent sparsity metadata. */
        return EXIT_FAILURE;
    }

    /* CSR arrays for A. */
    CHECK(hipMalloc((void **)&d_csrValA, nnzATotal * sizeof(double)));
    CHECK(hipMalloc((void **)&d_csrRowPtrA, (M + 1) * sizeof(int)));
    CHECK(hipMalloc((void **)&d_csrColIndA, nnzATotal * sizeof(int)));

    /* Convert dense A into CSR format for use in csrmm. */
    CHECK_CUSPARSE(hipsparseDdense2csr(handle, M, N, descrA, d_A, M, d_AnnzPerRow, d_csrValA, d_csrRowPtrA, d_csrColIndA));

    /* C = alpha * A * B + beta * C.  NOTE(review): csrmm expects
       ldb >= k (= N here); ldb is passed as M, which only works because
       M == N in this demo -- confirm before changing the dimensions. */
    CHECK_CUSPARSE(hipsparseDcsrmm(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, N, N, nnzATotal, &alpha, descrA, d_csrValA, d_csrRowPtrA, d_csrColIndA,
                                   d_B, M, &beta, d_C, M));

    /* Copy the result back and show its corner. */
    C = (double *)malloc(M * N * sizeof(double));
    CHECK(hipMemcpy(C, d_C, M * N * sizeof(double), hipMemcpyDeviceToHost));
    printf("C:\n");
    print_partial_matrix(C, M, N, 10, 10);

    /* Release host and device resources (d_AnnzPerRow was previously leaked). */
    free(A);
    free(B);
    free(C);
    CHECK(hipFree(d_A));
    CHECK(hipFree(d_B));
    CHECK(hipFree(d_C));
    CHECK(hipFree(d_AnnzPerRow));
    CHECK(hipFree(d_csrValA));
    CHECK(hipFree(d_csrRowPtrA));
    CHECK(hipFree(d_csrColIndA));
    CHECK_CUSPARSE(hipsparseDestroyMatDescr(descrA));
    CHECK_CUSPARSE(hipsparseDestroy(handle));

    return EXIT_SUCCESS;
} | 9b2bedc49264148ee1c2e1f8dd5089efaf290005.cu | #include "common.h"
#include <stdio.h>
#include <stdlib.h>
#include <cusparse_v2.h>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
/*
* M = # of rows
* N = # of columns
*/
int M = 1024; /* number of matrix rows */
int N = 1024; /* number of matrix columns */
/*
 * Fill *outA with a column-major M x N matrix in which roughly one third of
 * the entries are non-zero (values in [0, 100], derived from rand()) and the
 * rest are exactly zero.
 *
 * Returns the number of non-zero entries generated, or 0 with *outA == NULL
 * if the host allocation fails.  Uses the rand() stream; seed with srand()
 * before calling for reproducible matrices.
 */
int generate_random_dense_matrix(int M, int N, double **outA)
{
    double rMax = (double)RAND_MAX;
    double *A = (double *)malloc(sizeof(double) * M * N);
    int totalNnz = 0;

    if (A == NULL)
    {
        /* Report failure instead of dereferencing a NULL buffer below. */
        *outA = NULL;
        return 0;
    }

    for (int j = 0; j < N; j++)
    {
        for (int i = 0; i < M; i++)
        {
            int r = rand();
            double *curr = A + (j * M + i); /* column-major: column j, row i */

            /* ~2/3 of entries are forced to zero so the matrix is sparse. */
            if (r % 3 > 0)
            {
                *curr = 0.0;
            }
            else
            {
                *curr = ((double)r / rMax) * 100.0;
            }

            if (*curr != 0.0)
            {
                totalNnz++;
            }
        }
    }

    *outA = A;
    return totalNnz;
}
/*
 * Print the top-left max_row x max_col corner of a matrix, with "..."
 * markers standing in for the elided remainder.  Debug/preview output only.
 *
 * Uses ISO C printf instead of the Annex-K printf_s, which is unavailable
 * outside MSVC (glibc does not provide it).  The parameter is renamed from
 * M to mat so it no longer shadows the file-scope global M.
 *
 * NOTE(review): generate_random_dense_matrix fills its buffer column-major,
 * while this indexes mat[row * ncols + col] (row-major), so the preview is a
 * transposed/strided view of the data -- confirm whether exact element
 * positions matter for this output.
 */
void print_partial_matrix(double *mat, int nrows, int ncols, int max_row, int max_col)
{
    for (int row = 0; row < max_row; row++)
    {
        for (int col = 0; col < max_col; col++)
        {
            printf("%2.2f ", mat[row * ncols + col]);
        }
        printf("...\n");
    }
    printf("...\n");
}
/*
 * Demo driver: builds two random dense M x N matrices, converts A to CSR on
 * the device with cuSPARSE, computes C = alpha * A * B + beta * C via
 * cusparseDcsrmm, and prints the top-left corners of A, B and C.
 *
 * Returns EXIT_SUCCESS, or EXIT_FAILURE if the device non-zero count of A
 * disagrees with the host-side count.
 */
int main(int argc, char **argv)
{
    double *A, *d_A;
    double *B, *d_B;
    double *C, *d_C;
    double *d_csrValA;
    int *d_csrRowPtrA;
    int *d_csrColIndA;
    int *d_AnnzPerRow;
    int nnzATotal;
    /* Scalars for C = alpha * A * B + beta * C. */
    const double alpha = 3.0, beta = 5.0;
    cusparseHandle_t handle = 0;
    cusparseMatDescr_t descrA = 0;

    /* Generate the two input matrices (column-major, ~1/3 non-zero). */
    srand(2468);
    int actualNnzA = generate_random_dense_matrix(M, N, &A);
    int actualNnzB = generate_random_dense_matrix(M, N, &B);
    (void)actualNnzB; /* B is used dense; its non-zero count is not needed. */

    printf("A:\n");
    print_partial_matrix(A, M, N, 10, 10);
    printf("B:\n");
    print_partial_matrix(B, M, N, 10, 10);

    /* Create the cuSPARSE handle; A is general with 0-based indexing. */
    CHECK_CUSPARSE(cusparseCreate(&handle));
    CHECK_CUSPARSE(cusparseCreateMatDescr(&descrA));
    CHECK_CUSPARSE(cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL));
    CHECK_CUSPARSE(cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ZERO));

    /* Device buffers for the dense matrices. */
    CHECK(cudaMalloc((void **)&d_A, M * N * sizeof(double)));
    CHECK(cudaMalloc((void **)&d_B, M * N * sizeof(double)));
    CHECK(cudaMalloc((void **)&d_C, M * N * sizeof(double)));

    /* Transfer the inputs to the device. */
    CHECK(cudaMemcpy(d_A, A, M * N * sizeof(double), cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_B, B, M * N * sizeof(double), cudaMemcpyHostToDevice));

    /* Per-row non-zero counts of A, computed on the device. */
    CHECK(cudaMalloc(&d_AnnzPerRow, M * sizeof(int)));
    CHECK_CUSPARSE(cusparseDnnz(handle, CUSPARSE_DIRECTION_ROW, M, N, descrA, d_A, M, d_AnnzPerRow, &nnzATotal));
    if (actualNnzA != nnzATotal)
    {
        printf("Number of non-zero elements in A: %d don't match number returned by cuSPARSE NNZ: %d\n", actualNnzA, nnzATotal);
        /* Was a no-op statement `EXIT_SUCCESS;`: actually bail out instead
           of running csrmm on inconsistent sparsity metadata. */
        return EXIT_FAILURE;
    }

    /* CSR arrays for A. */
    CHECK(cudaMalloc((void **)&d_csrValA, nnzATotal * sizeof(double)));
    CHECK(cudaMalloc((void **)&d_csrRowPtrA, (M + 1) * sizeof(int)));
    CHECK(cudaMalloc((void **)&d_csrColIndA, nnzATotal * sizeof(int)));

    /* Convert dense A into CSR format for use in csrmm. */
    CHECK_CUSPARSE(cusparseDdense2csr(handle, M, N, descrA, d_A, M, d_AnnzPerRow, d_csrValA, d_csrRowPtrA, d_csrColIndA));

    /* C = alpha * A * B + beta * C.  NOTE(review): csrmm expects
       ldb >= k (= N here); ldb is passed as M, which only works because
       M == N in this demo -- confirm before changing the dimensions. */
    CHECK_CUSPARSE(cusparseDcsrmm(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, N, N, nnzATotal, &alpha, descrA, d_csrValA, d_csrRowPtrA, d_csrColIndA,
                                  d_B, M, &beta, d_C, M));

    /* Copy the result back and show its corner. */
    C = (double *)malloc(M * N * sizeof(double));
    CHECK(cudaMemcpy(C, d_C, M * N * sizeof(double), cudaMemcpyDeviceToHost));
    printf("C:\n");
    print_partial_matrix(C, M, N, 10, 10);

    /* Release host and device resources (d_AnnzPerRow was previously leaked). */
    free(A);
    free(B);
    free(C);
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_B));
    CHECK(cudaFree(d_C));
    CHECK(cudaFree(d_AnnzPerRow));
    CHECK(cudaFree(d_csrValA));
    CHECK(cudaFree(d_csrRowPtrA));
    CHECK(cudaFree(d_csrColIndA));
    CHECK_CUSPARSE(cusparseDestroyMatDescr(descrA));
    CHECK_CUSPARSE(cusparseDestroy(handle));

    return EXIT_SUCCESS;
} |
e75adc7a9779bab6f89e4e4fb1b7bc10c6336bec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "star3d4r-64x16-1-128_kernel.hu"
__device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; }
/*
 * AN5D-generated single-step pass of a 3D star stencil with a 4-point halo
 * in every spatial dimension (tile 128 x 8 x 56).  A holds two time slabs
 * ping-ponged by parity: the kernel reads slab (c0 % 2) and writes updated
 * interior points to slab ((c0 + 1) % 2).  Each thread streams along the c1
 * axis, keeping 9 consecutive c1-planes of its (c2, c3) column in registers;
 * the central plane is also staged in a double-buffered shared-memory tile
 * so neighbors in c2/c3 can be read via __SBREF.  Machine-generated code:
 * statement order and register rotation are load-bearing -- do not restyle.
 */
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
/* Problem extents and the 4-point halo in each spatial dimension. */
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 4 - 4);
const AN5D_TYPE __c3Pad = (4);
#define __c3 c3
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
const AN5D_TYPE __halo3 = 4;
/* Tile shape: 128 planes streamed along c1, an 8 x 56 thread face in
   (c2, c3), each side grown by the halo (the *Ol variants). */
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 8;
const AN5D_TYPE __side3Len = 56;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Num + __side3Len - 1) / __side3Len;
/* Map the flat thread id onto the (c2, c3) face of the (halo-grown) tile. */
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
/* Register queue: 9 consecutive c1-planes of this thread's column. */
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_0_3;
float __reg_0_4;
float __reg_0_5;
float __reg_0_6;
float __reg_0_7;
float __reg_0_8;
/* Double-buffered shared tile of the central plane; __DB_SWITCH flips
   between the two halves. */
__shared__ float __e_sb_double[__blockSize * 2];
float *__e_sb = __e_sb_double;
/* Guards: loads may touch the halo region; stores hit only interior tile
   points owned by this thread. */
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
/* __LOAD reads plane h of the current time slab into a register; __STORE
   stages the central plane in shared memory (__CALCSETUP, with a barrier),
   evaluates the 25-point stencil (__CALCEXPR), and writes plane h of the
   next time slab. */
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e, __f, __g, __h, __i) do { __rn0 = (((((((((((((((((((((((((0.25000f * (__REGREF(__e, 0, 0))) + (0.03228f * (__SBREF(__e_sb, 0, -4)))) + (0.03138f * (__SBREF(__e_sb, 0, -3)))) + (0.03118f * (__SBREF(__e_sb, 0, -2)))) + (0.03027f * (__SBREF(__e_sb, 0, -1)))) + (0.03022f * (__SBREF(__e_sb, 0, 1)))) + (0.03112f * (__SBREF(__e_sb, 0, 2)))) + (0.03132f * (__SBREF(__e_sb, 0, 3)))) + (0.03222f * (__SBREF(__e_sb, 0, 4)))) + (0.03026f * (__REGREF(__d, 0, 0)))) + (0.03024f * (__REGREF(__f, 0, 0)))) + (0.03027f * (__SBREF(__e_sb, -1, 0)))) + (0.03023f * (__SBREF(__e_sb, 1, 0)))) + (0.03116f * (__REGREF(__c, 0, 0)))) + (0.03114f * (__REGREF(__g, 0, 0)))) + (0.03117f * (__SBREF(__e_sb, -2, 0)))) + (0.03113f * (__SBREF(__e_sb, 2, 0)))) + (0.03136f * (__REGREF(__b, 0, 0)))) + (0.03134f * (__REGREF(__h, 0, 0)))) + (0.03137f * (__SBREF(__e_sb, -3, 0)))) + (0.03133f * (__SBREF(__e_sb, 3, 0)))) + (0.03226f * (__REGREF(__a, 0, 0)))) + (0.03224f * (__REGREF(__i, 0, 0)))) + (0.03227f * (__SBREF(__e_sb, -4, 0)))) + (0.03223f * (__SBREF(__e_sb, 4, 0)))); } while (0)
#define __DB_SWITCH() do { __e_sb = &__e_sb_double[(__e_sb == __e_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e, f, g, h, i) do { __DB_SWITCH(); __e_sb[__tid] = e; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8); } } while (0)
/* Prologue: fill the 9-plane register queue and emit the first output
   plane (both branches are identical here because __side0Len == 1). */
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__LOAD(__reg_0_5, 5);
__LOAD(__reg_0_6, 6);
__LOAD(__reg_0_7, 7);
__LOAD(__reg_0_8, 8);
__STORE(4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__LOAD(__reg_0_5, 5);
__LOAD(__reg_0_6, 6);
__LOAD(__reg_0_7, 7);
__LOAD(__reg_0_8, 8);
__STORE(4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
}
__e_sb = __e_sb_double + __blockSize * 1;
/* Last c1 tile: steady-state streaming loop (unrolled 9x so the register
   queue rotates in place), then a fully unrolled tail for the ragged edge. */
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 9;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 4, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 4, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__LOAD(__reg_0_5, __h);
__STORE(__h - 4, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__h++;
__LOAD(__reg_0_6, __h);
__STORE(__h - 4, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__h++;
__LOAD(__reg_0_7, __h);
__STORE(__h - 4, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
__h++;
__LOAD(__reg_0_8, __h);
__STORE(__h - 4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__h++;
__DB_SWITCH(); __syncthreads();
}
/* Tail: 0..8 leftover planes, one branch per remainder, fully unrolled. */
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h - 1, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h - 1, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__LOAD(__reg_0_4, __h + 4);
__STORE(__h + 0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h - 1, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__LOAD(__reg_0_4, __h + 4);
__STORE(__h + 0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_5, __h + 5);
__STORE(__h + 1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
}
else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h - 1, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__LOAD(__reg_0_4, __h + 4);
__STORE(__h + 0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_5, __h + 5);
__STORE(__h + 1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__LOAD(__reg_0_6, __h + 6);
__STORE(__h + 2, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
}
else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h - 1, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__LOAD(__reg_0_4, __h + 4);
__STORE(__h + 0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_5, __h + 5);
__STORE(__h + 1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__LOAD(__reg_0_6, __h + 6);
__STORE(__h + 2, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__LOAD(__reg_0_7, __h + 7);
__STORE(__h + 3, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
}
}
/* Interior c1 tiles: full overlapped range, same 9x-unrolled streaming loop
   plus an epilogue that exits as soon as the overlapped extent is done. */
else
{
for (__h = 9; __h <= __side1LenOl - 9;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 4, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 4, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__LOAD(__reg_0_5, __h);
__STORE(__h - 4, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__h++;
__LOAD(__reg_0_6, __h);
__STORE(__h - 4, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__h++;
__LOAD(__reg_0_7, __h);
__STORE(__h - 4, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
__h++;
__LOAD(__reg_0_8, __h);
__STORE(__h - 4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 4, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__STORE(__h - 4, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__STORE(__h - 4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_5, __h);
__STORE(__h - 4, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_6, __h);
__STORE(__h - 4, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_7, __h);
__STORE(__h - 4, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_8, __h);
__STORE(__h - 4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__h++;
}
}
| e75adc7a9779bab6f89e4e4fb1b7bc10c6336bec.cu | #include "star3d4r-64x16-1-128_kernel.hu"
__device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; }
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 4 - 4);
const AN5D_TYPE __c3Pad = (4);
#define __c3 c3
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
const AN5D_TYPE __halo3 = 4;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 8;
const AN5D_TYPE __side3Len = 56;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_0_3;
float __reg_0_4;
float __reg_0_5;
float __reg_0_6;
float __reg_0_7;
float __reg_0_8;
__shared__ float __e_sb_double[__blockSize * 2];
float *__e_sb = __e_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e, __f, __g, __h, __i) do { __rn0 = (((((((((((((((((((((((((0.25000f * (__REGREF(__e, 0, 0))) + (0.03228f * (__SBREF(__e_sb, 0, -4)))) + (0.03138f * (__SBREF(__e_sb, 0, -3)))) + (0.03118f * (__SBREF(__e_sb, 0, -2)))) + (0.03027f * (__SBREF(__e_sb, 0, -1)))) + (0.03022f * (__SBREF(__e_sb, 0, 1)))) + (0.03112f * (__SBREF(__e_sb, 0, 2)))) + (0.03132f * (__SBREF(__e_sb, 0, 3)))) + (0.03222f * (__SBREF(__e_sb, 0, 4)))) + (0.03026f * (__REGREF(__d, 0, 0)))) + (0.03024f * (__REGREF(__f, 0, 0)))) + (0.03027f * (__SBREF(__e_sb, -1, 0)))) + (0.03023f * (__SBREF(__e_sb, 1, 0)))) + (0.03116f * (__REGREF(__c, 0, 0)))) + (0.03114f * (__REGREF(__g, 0, 0)))) + (0.03117f * (__SBREF(__e_sb, -2, 0)))) + (0.03113f * (__SBREF(__e_sb, 2, 0)))) + (0.03136f * (__REGREF(__b, 0, 0)))) + (0.03134f * (__REGREF(__h, 0, 0)))) + (0.03137f * (__SBREF(__e_sb, -3, 0)))) + (0.03133f * (__SBREF(__e_sb, 3, 0)))) + (0.03226f * (__REGREF(__a, 0, 0)))) + (0.03224f * (__REGREF(__i, 0, 0)))) + (0.03227f * (__SBREF(__e_sb, -4, 0)))) + (0.03223f * (__SBREF(__e_sb, 4, 0)))); } while (0)
#define __DB_SWITCH() do { __e_sb = &__e_sb_double[(__e_sb == __e_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e, f, g, h, i) do { __DB_SWITCH(); __e_sb[__tid] = e; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__LOAD(__reg_0_5, 5);
__LOAD(__reg_0_6, 6);
__LOAD(__reg_0_7, 7);
__LOAD(__reg_0_8, 8);
__STORE(4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__LOAD(__reg_0_5, 5);
__LOAD(__reg_0_6, 6);
__LOAD(__reg_0_7, 7);
__LOAD(__reg_0_8, 8);
__STORE(4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
}
__e_sb = __e_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 9;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 4, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 4, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__LOAD(__reg_0_5, __h);
__STORE(__h - 4, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__h++;
__LOAD(__reg_0_6, __h);
__STORE(__h - 4, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__h++;
__LOAD(__reg_0_7, __h);
__STORE(__h - 4, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
__h++;
__LOAD(__reg_0_8, __h);
__STORE(__h - 4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h - 1, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h - 1, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__LOAD(__reg_0_4, __h + 4);
__STORE(__h + 0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h - 1, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__LOAD(__reg_0_4, __h + 4);
__STORE(__h + 0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_5, __h + 5);
__STORE(__h + 1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
}
else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h - 1, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__LOAD(__reg_0_4, __h + 4);
__STORE(__h + 0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_5, __h + 5);
__STORE(__h + 1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__LOAD(__reg_0_6, __h + 6);
__STORE(__h + 2, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
}
else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h - 1, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__LOAD(__reg_0_4, __h + 4);
__STORE(__h + 0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_5, __h + 5);
__STORE(__h + 1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__LOAD(__reg_0_6, __h + 6);
__STORE(__h + 2, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__LOAD(__reg_0_7, __h + 7);
__STORE(__h + 3, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 9;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 4, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 4, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__LOAD(__reg_0_5, __h);
__STORE(__h - 4, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__h++;
__LOAD(__reg_0_6, __h);
__STORE(__h - 4, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__h++;
__LOAD(__reg_0_7, __h);
__STORE(__h - 4, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
__h++;
__LOAD(__reg_0_8, __h);
__STORE(__h - 4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 4, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__STORE(__h - 4, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__STORE(__h - 4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_5, __h);
__STORE(__h - 4, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_6, __h);
__STORE(__h - 4, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_7, __h);
__STORE(__h - 4, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_8, __h);
__STORE(__h - 4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__h++;
}
}
|
d1f6eec4b57772d333764bb8d85b250ad965f82f.hip | // !!! This is a file automatically generated by hipify!!!
#include "gpu_incremental_map_track.h"
#include "gpu_map_kernels.h"
#include "gpu_defines.h"
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <math.h>
namespace SySal
{
namespace GPU
{
// Locates the top and bottom emulsion surfaces from the per-image cluster
// counts: leading/trailing images whose count is below <threshold> lie
// outside the emulsion. Each surface Z is linearly interpolated across the
// threshold crossing, <refimg> is set to the middle image of the in-emulsion
// range, and the number of valid edges found (0..2) is returned.
// On total failure (no image reaches the threshold) 0 is returned and
// <refimg> is left untouched — callers are expected to pre-initialize it.
int PrismMapTracker::ClusterChainer::FindEdges(EmulsionEdge &t, EmulsionEdge &b, IntClusterFile &cf, int threshold, int &refimg)
{
	t.Valid = false;
	b.Valid = false;
	const int images = cf.Images();
	int lastBelowFromTop = -1;
	int lastBelowFromBottom = images;
	int edges = 0;
	// Scan downward from the first image: remember the last image of the
	// leading below-threshold run.
	{
		int img = 0;
		while (img < images && cf.ImageClusterCounts(img) < threshold)
			lastBelowFromTop = img++;
	}
	// Scan upward from the last image: remember the first image of the
	// trailing below-threshold run.
	{
		int img = images - 1;
		while (img >= 0 && cf.ImageClusterCounts(img) < threshold)
			lastBelowFromBottom = img--;
	}
	// The two runs meet or overlap: there is no in-emulsion region.
	if (lastBelowFromTop >= lastBelowFromBottom) return 0;
	if (lastBelowFromTop > 0 && lastBelowFromTop < images - 1)
	{
		// Interpolate where the count crosses the threshold between the last
		// below-threshold image and its successor; when both share the same
		// stage Z, fall back to their midpoint.
		if (cf.StageZ(lastBelowFromTop) != cf.StageZ(lastBelowFromTop + 1))
			t.Z = cf.StageZ(lastBelowFromTop) + (cf.StageZ(lastBelowFromTop + 1) - cf.StageZ(lastBelowFromTop)) / (cf.ImageClusterCounts(lastBelowFromTop + 1) - cf.ImageClusterCounts(lastBelowFromTop)) * (threshold - cf.ImageClusterCounts(lastBelowFromTop));
		else
			t.Z = 0.5f * (cf.StageZ(lastBelowFromTop) + cf.StageZ(lastBelowFromTop + 1));
		t.Valid = true;
		edges++;
	}
	if (lastBelowFromBottom > 0 && lastBelowFromBottom < images - 1)
	{
		// Same interpolation for the bottom surface, walking upward.
		if (cf.StageZ(lastBelowFromBottom) != cf.StageZ(lastBelowFromBottom - 1))
			b.Z = cf.StageZ(lastBelowFromBottom) + (cf.StageZ(lastBelowFromBottom - 1) - cf.StageZ(lastBelowFromBottom)) / (cf.ImageClusterCounts(lastBelowFromBottom - 1) - cf.ImageClusterCounts(lastBelowFromBottom)) * (threshold - cf.ImageClusterCounts(lastBelowFromBottom));
		else
			b.Z = 0.5f * (cf.StageZ(lastBelowFromBottom) + cf.StageZ(lastBelowFromBottom - 1));
		b.Valid = true;
		edges++;
	}
	refimg = (lastBelowFromTop + lastBelowFromBottom) / 2;
	return edges;
}
// Constructs a ClusterChainer: nulls every buffer pointer via the project
// CTOR_INIT macro and pre-allocates a small host-side thickness-sample store.
// NOTE(review): the configuration defaults assigned in the first block below
// are immediately overwritten with 0 (and the image-correction terms with -1)
// in the second block — presumably sentinel "unconfigured" markers, since
// Reset() installs the real configuration. TODO confirm the earlier
// assignments are intentionally dead before removing them.
PrismMapTracker::ClusterChainer::ClusterChainer() :
CTOR_INIT(pClusterPos),
CTOR_INIT(pClusterData),
CTOR_INIT(pClusterChains),
CTOR_INIT(pCellContents),
CTOR_INIT(pCells),
CTOR_INIT(pChains),
CTOR_INIT(pCompactChains),
CTOR_INIT(pChainCounts),
CTOR_INIT(pDeltas),
CTOR_INIT(pMapCounts),
CTOR_INIT(pCurv),
CTOR_INIT(pStagePos),
CTOR_INIT(pHostStagePos),
CTOR_INIT(pChainMapHeader),
CTOR_INIT(pFixedZs),
CTOR_INIT(pChMapWnd),
CTOR_INIT(pLastView),
CTOR_INIT(pThisView)
{
ThicknessSamples = 0;
// Room for 32 thickness samples; malloc result is not checked here —
// the destructor guards the free with a null test.
pThicknessSamples = (double *)malloc(sizeof(double) * 32);
// Nominal defaults for the chaining configuration.
C.MaxCellContent = 8;
C.CellSize = 160;
C.ClusterMapCoarseTolerance = 16;
C.ClusterMapFineTolerance = 1;
C.ClusterMapFineAcceptance = 8;
C.ClusterMapMaxXOffset = 160;
C.ClusterMapMaxYOffset = 24;
C.MinClustersPerChain = 2;
C.MinVolumePerChain = 8;
C.ClusterMapMinSize = 6;
C.ChainMapXYCoarseTolerance = 8;
C.ChainMapXYFineTolerance = 1;
C.ChainMapXYFineAcceptance = 8;
// Z tolerances are expressed in the fixed-point Z scale (Z_SCALE_SHIFT).
C.ChainMapZCoarseTolerance = 2 << Z_SCALE_SHIFT;
C.ChainMapZFineTolerance = (1 << Z_SCALE_SHIFT) / 8;
C.ChainMapZFineAcceptance = (1 << Z_SCALE_SHIFT);
C.ChainMapMaxXOffset = 160;
C.ChainMapMaxYOffset = 24;
C.ChainMapMaxZOffset = 10 << Z_SCALE_SHIFT;
C.ChainMapMinVolume = 16;
C.ChainMapSampleDivider = 10;
C.MaxChains = 2000000;
C.ClusterMapSampleDivider = 20;
C.ClusterThreshold = 3000;
// From here on, most of the defaults above are zeroed out again
// (see NOTE(review) in the header comment); the fields NOT re-assigned
// here (e.g. ClusterThreshold, the *Acceptance/*Divider fields) keep
// their nominal values.
C.MaxCellContent = 0;
C.CellSize = 0;
C.ClusterMapCoarseTolerance = 0;
C.ClusterMapFineTolerance = 0;
C.ClusterMapMaxXOffset = 0;
C.ClusterMapMaxYOffset = 0;
C.MinClustersPerChain = 0;
C.MinVolumePerChain = 0;
C.ChainMapXYCoarseTolerance = 0;
C.ChainMapXYFineTolerance = 0;
C.ChainMapZCoarseTolerance = 0;
C.ChainMapZFineTolerance = 0;
C.ChainMapMaxXOffset = 0;
C.ChainMapMaxYOffset = 0;
C.ChainMapMaxZOffset = 0;
C.MaxChains = 0;
// Image-correction terms: set to 0, then overwritten with -1 sentinels.
IC.DMagDX = IC.DMagDY = IC.DMagDZ = 0.0f;
IC.XYCurvature = IC.ZCurvature = 0.0f;
IC.DMagDX = IC.DMagDY = IC.DMagDZ = -1.0f;
IC.XYCurvature = IC.ZCurvature = -1.0f;
}
// Releases every buffer owned by this ClusterChainer.
// Fixes two defects in the original cleanup list:
//  - DEALLOC(pChainCounts) appeared twice (a potential double-free if the
//    macro does not null the pointer after freeing);
//  - pLastView/pThisView, allocated in Reset() with WISE_ALLOC, were never
//    released, leaking two device buffers of up to
//    sizeof(ChainView) + C.MaxChains * sizeof(IntChain) bytes each.
// NOTE(review): DEALLOC/HOST_DEALLOC are assumed to be safe no-ops on
// pointers that were never allocated (all members are nulled by CTOR_INIT) —
// TODO confirm against the macro definitions.
// NOTE(review): THROW_ON_CUDA_ERR can throw from a destructor, which is
// undefined behavior during stack unwinding; kept as-is to preserve the
// existing error-handling contract.
PrismMapTracker::ClusterChainer::~ClusterChainer()
{
if (pThicknessSamples) free(pThicknessSamples);
hipError_t err;
THROW_ON_CUDA_ERR(hipSetDevice(pThis->m_DeviceId))
DEALLOC(pThisView)
DEALLOC(pLastView)
DEALLOC(pChMapWnd)
HOST_DEALLOC(pFixedZs)
DEALLOC(pChainMapHeader)
DEALLOC(pCurv)
HOST_DEALLOC(pHostStagePos)
DEALLOC(pStagePos)
DEALLOC(pMapCounts)
DEALLOC(pDeltas)
DEALLOC(pChainCounts)
DEALLOC(pCompactChains)
DEALLOC(pChains)
DEALLOC(pCells)
DEALLOC(pCellContents)
DEALLOC(pClusterChains)
DEALLOC(pClusterData)
DEALLOC(pClusterPos)
}
// Re-arms the chainer for a new acquisition sequence.
// c:     chaining configuration (copied into C; replaces the sentinel values
//        set by the constructor).
// ic:    image-correction terms (copied into IC).
// istop: true when this chainer processes the top emulsion side (affects the
//        reference-surface choice in AddClusters).
// Selects this tracker's device, then (re)allocates the two double-buffered
// chain views — each sized for a ChainView header plus up to C.MaxChains
// chains — and the chain-map window used during view-to-view mapping.
void PrismMapTracker::ClusterChainer::Reset(SySal::ClusterChainer::Configuration &c, SySal::ImageCorrection &ic, bool istop)
{
C = c;
IC = ic;
IsTop = istop;
// -1 so the first AddClusters() call increments it to view index 0.
CurrentView = -1;
// Discard thickness statistics accumulated during a previous run.
ThicknessSamples = 0;
hipError_t err; // used by the THROW_ON_CUDA_ERR macro below
THROW_ON_CUDA_ERR(hipSetDevice(pThis->m_DeviceId))
WISE_ALLOC(pLastView, sizeof(ChainView) + C.MaxChains * sizeof(IntChain))
WISE_ALLOC(pThisView, sizeof(ChainView) + C.MaxChains * sizeof(IntChain))
EXACT_ALLOC(pChMapWnd, sizeof(ChainMapWindow))
}
int PrismMapTracker::ClusterChainer::AddClusters(SySal::IntClusterFile &cf)
{
hipError_t err;
THROW_ON_CUDA_ERR(hipSetDevice(pThis->m_DeviceId))
int refimg = 0;
float dz = 0.0f;
{
EmulsionEdge t, b;
FindEdges(t, b, cf, C.ClusterThreshold, refimg);
if (IsTop)
{
if (b.Valid) dz = -b.Z;
else if (t.Valid)
{
dz = -(t.Z - pThis->GetThickness());
}
else throw "Cannot work out emulsion reference surface.";
}
else
{
if (t.Valid) dz = -t.Z;
else if (b.Valid)
{
dz = -(b.Z + pThis->GetThickness());
}
else throw "Cannot work out emulsion reference surface.";
}
}
CurrentView++;
float dxdz = (cf.StageX(cf.Images() - 1) - cf.StageX(0)) / (cf.StageZ(cf.Images() - 1) - cf.StageZ(0));
float dydz = (cf.StageY(cf.Images() - 1) - cf.StageY(0)) / (cf.StageZ(cf.Images() - 1) - cf.StageZ(0));
//printf("\nZ adjustment: %f\nDXDZ: %f\nDYDZ: %f", dz, dxdz, dydz);
int img;
HOST_WISE_ALLOC(pFixedZs, sizeof(double) * cf.Images());
if (cf.Images() < 5)
{
for (img = 0; img < cf.Images(); img++)
pFixedZs[img] = cf.StageZ(img);
}
else
{
for (img = 1; img < cf.Images(); img++)
pFixedZs[img - 1] = cf.StageZ(img) - cf.StageZ(img - 1);
double maxd = pFixedZs[0];
double mind = pFixedZs[0];
for (img = 1; img < cf.Images() - 1; img++)
if (pFixedZs[img] < mind) mind = pFixedZs[img];
else if (pFixedZs[img] > maxd) maxd = pFixedZs[img];
double sum = 0.0;
for (img = 0; img < cf.Images() - 1; img++)
if (pFixedZs[img] > mind && pFixedZs[img] < maxd)
sum += pFixedZs[img];
sum /= (cf.Images() - 3);
pFixedZs[0] = cf.StageZ(0);
for (img = 1; img < cf.Images(); img++)
pFixedZs[img] = cf.StageZ(0) + img * sum;
}
int width = cf.Width() * cf.Scale();
int height = cf.Height() * cf.Scale();
int cellsize = C.CellSize;
int ncellsx = (width / cellsize) + 1;
int ncellsy = (height / cellsize) + 1;
WISE_ALLOC(pCurv, 2 * sizeof(int) * (width + height));
WISE_ALLOC(pCells, sizeof(Cell) * ncellsx * ncellsy);
WISE_ALLOC(pCellContents, sizeof(IntCluster *) * ncellsx * ncellsy * C.MaxCellContent);
THROW_ON_CUDA_ERR(hipMemset(pCells, 0, ncellsx * ncellsy * sizeof(Cell)));
THROW_ON_CUDA_ERR(hipMemset(pCellContents, 0, sizeof(IntCluster *) * ncellsx * ncellsy * C.MaxCellContent));
int totalsize = cf.TotalSize;
WISE_ALLOC(pClusterData, totalsize);
THROW_ON_CUDA_ERR(hipMemcpy(pClusterData, cf.pData, totalsize, hipMemcpyHostToDevice));
_CUDA_THROW_ERR_;
int totalclusters = 0;
for (img = 0; img < cf.Images(); img++)
totalclusters += cf.pImageClusterCounts[img];
IntCluster *pImagesBase = (IntCluster *)(void *)(((char *)cf.pClusters - (char *)cf.pData) + (char *)pClusterData);
IntCluster *pImageNext;
WISE_ALLOC(pChains, totalclusters * sizeof(IntChain));
WISE_ALLOC(pClusterPos, totalclusters * 3 * sizeof(short));
short *pClusterXs = pClusterPos;
short *pClusterYs = pClusterXs + totalclusters;
short *pClusterZs = pClusterYs + totalclusters;
WISE_ALLOC(pClusterChains, sizeof(IntCluster *) * totalclusters);
THROW_ON_CUDA_ERR(hipMemset(pClusterChains, 0, sizeof(IntCluster *) * totalclusters));
WISE_ALLOC(pChainCounts, sizeof(int) * pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount * 2);
int *pCompactChainCounts = pChainCounts + pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount;
int *pCurvX, *pCurvY, *pZCurvX, *pZCurvY;
int deltasZ = 0;
int deltasXYZ = 0;
int refinedeltasXY = 0;
int refinedeltasZ = 0;
int refinedeltasXYZ = 0;
int best_matches = 0;
ChainView *pView = pLastView;
pLastView = pThisView;
pThisView = pView;
IntChain *pCompactChains = (IntChain *)(void *)((char *)(void *)pThisView + sizeof(ChainView));
hipMemset((int *)(void *)pThisView->Reserved, 0, sizeof(int));
_CUDA_THROW_ERR_;
WISE_ALLOC(pCurv, 2 * sizeof(int) * (width + height));
pCurvX = pCurv;
pCurvY = pCurv + width;
pZCurvX = pCurvY + height;
pZCurvY = pZCurvX + width;
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(width / pThis->m_Prop.maxThreadsPerBlock + 1, 1, 1);
hipLaunchKernelGGL(( curvaturemap_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pCurvX, pZCurvX, width, (IC.XYCurvature * (1 << XY_CURVATURE_SHIFT) * (cf.PixMicronX() * cf.PixMicronX()) / (cf.Scale() * cf.Scale())), (IC.ZCurvature * (1 << (Z_CURVATURE_SHIFT + Z_SCALE_SHIFT)) * (cf.PixMicronX() * cf.PixMicronX()) / (cf.Scale() * cf.Scale())) );
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(height / pThis->m_Prop.maxThreadsPerBlock + 1, 1, 1);
hipLaunchKernelGGL(( curvaturemap_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pCurvY, pZCurvY, height, (IC.XYCurvature * (1 << XY_CURVATURE_SHIFT) * (cf.PixMicronY() * cf.PixMicronY()) / (cf.Scale() * cf.Scale())), (IC.ZCurvature * (1 << (Z_CURVATURE_SHIFT + Z_SCALE_SHIFT)) * (cf.PixMicronX() * cf.PixMicronX()) / (cf.Scale() * cf.Scale())) );
_CUDA_THROW_ERR_
}
int deltasX = (C.ClusterMapMaxXOffset / C.ClusterMapCoarseTolerance * 2 + 1);
int deltasY = (C.ClusterMapMaxYOffset / C.ClusterMapCoarseTolerance * 2 + 1);
int deltas2 = deltasX * deltasY;
int refinedeltas = 2 * C.ClusterMapCoarseTolerance / C.ClusterMapFineTolerance + 1;
int refinedeltas2 = refinedeltas * refinedeltas;
WISE_ALLOC(pDeltas, sizeof(int) * ((2 * refinedeltas + deltasX + deltasY)));
WISE_ALLOC(pMapCounts, sizeof(int) * max(deltas2, refinedeltas2) * (pThis->m_Prop.multiProcessorCount * pThis->m_Prop.maxThreadsPerBlock + 1) );
int *pBest = pMapCounts + max(deltas2, refinedeltas2) * (pThis->m_Prop.multiProcessorCount * pThis->m_Prop.maxThreadsPerBlock);
WISE_ALLOC(pStagePos, sizeof(short) * cf.Images() * 4);
short *pStagePosX = pStagePos;
short *pStagePosY = pStagePosX + cf.Images();
short *pDeltaStagePosX = pStagePosY + cf.Images();
short *pDeltaStagePosY = pDeltaStagePosX + cf.Images();
HOST_WISE_ALLOC(pHostStagePos, sizeof(short) * cf.Images() * 4);
for (img = 0; img < cf.Images(); img++)
{
pHostStagePos[img] =
(int)(((cf.StageX(img) - cf.StageX(0)) / cf.PixMicronX()) * cf.Scale());
pHostStagePos[img + cf.Images()] =
(int)(((cf.StageY(img) - cf.StageY(0)) / cf.PixMicronY()) * cf.Scale());
if (img == 0)
{
pHostStagePos[img + 2 * cf.Images()] = 0;
pHostStagePos[img + 3 * cf.Images()] = 0;
}
else
{
pHostStagePos[img + 2 * cf.Images()] = pHostStagePos[img] - pHostStagePos[img - 1];
pHostStagePos[img + 3 * cf.Images()] = pHostStagePos[img + cf.Images()] - pHostStagePos[img - 1 + cf.Images()];
}
}
THROW_ON_CUDA_ERR(hipMemcpy(pStagePos, pHostStagePos, sizeof(short) * 4 * cf.Images(), hipMemcpyHostToDevice));
THROW_ON_CUDA_ERR(hipMemset(pCells, 0, sizeof(Cell) * ncellsx * ncellsy));
int demagDZ1M;
int id = 0;
IntCluster *pImagesBase1 = pImagesBase;
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(totalclusters / pThis->m_Prop.maxThreadsPerBlock + 1, 1, 1);
hipLaunchKernelGGL(( correctcurvature_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pImagesBase, pClusterZs, sin(IC.CameraRotation) * (1 << FRACT_RESCALE_SHIFT), (cos(IC.CameraRotation) - 1) * (1 << FRACT_RESCALE_SHIFT), pCurvX, pCurvY, pZCurvX, pZCurvY, IC.DMagDX * (1 << XY_MAGNIFICATION_SHIFT), IC.DMagDY * (1 << XY_MAGNIFICATION_SHIFT), totalclusters, width / 2, height / 2 );
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(cf.ImageClusterCounts(0) / pThis->m_Prop.maxThreadsPerBlock + 1, 1, 1);
hipLaunchKernelGGL(( setXYZs_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pClusterXs, pClusterYs, pClusterZs, cf.ImageClusterCounts(0), 0, pStagePosX, pStagePosY, 0);
_CUDA_THROW_ERR_
}
int bestclustermapcount = -1;
int bestclustermapcount_img = 0;
for (img = 0; img < cf.Images() - 1; img++)
{
pImageNext = pImagesBase + cf.ImageClusterCounts(img);
demagDZ1M = IC.DMagDZ * (pFixedZs[img + 1] - pFixedZs[img]) * (1 << DEMAG_SHIFT);
int launches;
// BEGIN COARSE MAPPING
{
hipLaunchKernelGGL(( makedeltas_kernel), dim3(dim3(1, 1, 1)), dim3(dim3(1, 1, 1)), 0, 0, pDeltas, C.ClusterMapCoarseTolerance, deltasX, deltasY, pDeltaStagePosX, pDeltaStagePosY, img + 1);
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(deltas2 / ithreads.x + 1, 1, 1);
hipLaunchKernelGGL(( resetcounts_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pMapCounts, deltas2, pBest);
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
hipLaunchKernelGGL(( maphash_minarea_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pImagesBase, cf.ImageClusterCounts(img), cf.ImageClusterCounts(img) / (pThis->m_Prop.maxThreadsPerBlock + pThis->m_Prop.multiProcessorCount) + 1, pCells, pCellContents, cellsize, C.MaxCellContent, ncellsx, ncellsy, C.ClusterMapMinSize);
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
SySal::GPU::InterruptibleKernels::IntKernel<trymap_kernel_args, trymap_kernel_status, trymap2_Ikernel> Launcher;
Launcher.Arguments.pC = pImageNext;
Launcher.Arguments.nc = cf.ImageClusterCounts(img + 1);
Launcher.Arguments.clusterblocksize = cf.ImageClusterCounts(img + 1) / (pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount) + 1;
Launcher.Arguments.pCell = pCells;
Launcher.Arguments.pCellContent = pCellContents;
Launcher.Arguments.maxcellcontent = C.MaxCellContent;
Launcher.Arguments.pDeltas = pDeltas;
Launcher.Arguments.deltasx = deltasX;
Launcher.Arguments.deltasy = deltasY;
Launcher.Arguments.cellsize = cellsize;
Launcher.Arguments.minclustersize = C.ClusterMapMinSize;
Launcher.Arguments.tol = C.ClusterMapCoarseTolerance;
Launcher.Arguments.w = width;
Launcher.Arguments.h = height;
Launcher.Arguments.demag = demagDZ1M;
Launcher.Arguments.nx = ncellsx;
Launcher.Arguments.ny = ncellsy;
Launcher.Arguments.pMapCounts = pMapCounts;
Launcher.Arguments.sampledivider = C.ClusterMapSampleDivider;
Launcher.Arguments.clustermapmin = C.MinClusterMapsValid;
Launcher.Arguments.pBest = pBest;
launches = Launcher.Launch(iblocks, ithreads, 5);
_CUDA_THROW_ERR_
}
{
THROW_ON_CUDA_ERR(hipMemcpy(&best_matches, pBest, sizeof(int), hipMemcpyDeviceToHost))
best_matches = best_matches >> 16;
}
// END COARSE MAPPING
// BEGIN FINE MAPPING
if (best_matches < C.MinClusterMapsValid)
{
hipLaunchKernelGGL(( makedeltas_kernel), dim3(dim3(1, 1, 1)), dim3(dim3(1, 1, 1)), 0, 0, pDeltas, 0, deltasX, deltasY, pDeltaStagePosX, pDeltaStagePosY, img + 1);
_CUDA_THROW_ERR_
}
{
hipLaunchKernelGGL(( makedeltas_fromshift_kernel), dim3(dim3(1, 1, 1)), dim3(dim3(1, 1, 1)), 0, 0, pDeltas + deltasX + deltasY, (best_matches < C.MinClusterMapsValid) ? 0 : C.ClusterMapFineTolerance, refinedeltas, refinedeltas, pDeltas, pBest, deltasX);
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(refinedeltas2 / ithreads.x + 1, 1, 1);
hipLaunchKernelGGL(( resetcounts_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pMapCounts, refinedeltas2, pBest);
_CUDA_THROW_ERR_
}
{
if (best_matches > bestclustermapcount)
{
bestclustermapcount = best_matches;
bestclustermapcount_img = img;
}
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
SySal::GPU::InterruptibleKernels::IntKernel<refinemap_kernel_args, refinemap_kernel_status, refinemap_Ikernel> Launcher;
Launcher.Arguments.pC = pImageNext;
Launcher.Arguments.nc = cf.ImageClusterCounts(img + 1);
Launcher.Arguments.clusterblocksize = cf.ImageClusterCounts(img + 1) / (pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount) + 1;
Launcher.Arguments.pCell = pCells;
Launcher.Arguments.pCellContent = pCellContents;
Launcher.Arguments.maxcellcontent = C.MaxCellContent;
Launcher.Arguments.cellsize = cellsize;
Launcher.Arguments.tol = C.ClusterMapCoarseTolerance;
Launcher.Arguments.w = width;
Launcher.Arguments.h = height;
Launcher.Arguments.demag = demagDZ1M;
Launcher.Arguments.nx = ncellsx;
Launcher.Arguments.ny = ncellsy;
Launcher.Arguments.pMapCounts = pMapCounts;
Launcher.Arguments.pClusterChain = pClusterChains;
Launcher.Arguments.pBase = pImagesBase1;
Launcher.Arguments.pDeltas = pDeltas + deltasX + deltasY;
Launcher.Arguments.deltas = refinedeltas;
Launcher.Arguments.refinebin = C.ClusterMapFineTolerance;
Launcher.Arguments.pBest = pBest;
launches = Launcher.Launch(iblocks, ithreads, 5);
_CUDA_THROW_ERR_
}
{
THROW_ON_CUDA_ERR(hipMemcpy(&best_matches, pBest, sizeof(int), hipMemcpyDeviceToHost));
best_matches = best_matches >> 16;
//printf("\n%d %d", img, best_matches);
}
// END FINE MAPPING
// BEGIN FINAL MAPPING
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
hipLaunchKernelGGL(( clearhash_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pImagesBase, cf.ImageClusterCounts(img), cf.ImageClusterCounts(img) / (pThis->m_Prop.maxThreadsPerBlock + pThis->m_Prop.multiProcessorCount) + 1, pCells, cellsize, ncellsx, ncellsy);
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
hipLaunchKernelGGL(( maphash_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pImagesBase, cf.ImageClusterCounts(img), cf.ImageClusterCounts(img) / (pThis->m_Prop.maxThreadsPerBlock + pThis->m_Prop.multiProcessorCount) + 1, pCells, pCellContents, cellsize, C.MaxCellContent, ncellsx, ncellsy);
_CUDA_THROW_ERR_
}
{
hipLaunchKernelGGL(( makefinaldeltas_fromshift_kernel), dim3(dim3(1,1,1)), dim3(dim3(1,1,1)), 0, 0, pDeltas, (best_matches > C.MinClusterMapsValid) ? C.ClusterMapFineTolerance : 0, pDeltas + deltasX + deltasY, pBest, refinedeltas, pStagePosX, pStagePosY, pDeltaStagePosX, pDeltaStagePosY, img + 1, cf.Images());
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(cf.ImageClusterCounts(img + 1) / pThis->m_Prop.maxThreadsPerBlock + 1, 1, 1);
id = pImageNext - pImagesBase1;
hipLaunchKernelGGL(( setXYZs_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pClusterXs + id, pClusterYs + id, pClusterZs + id, cf.ImageClusterCounts(img + 1), img + 1, pStagePosX, pStagePosY, ((pFixedZs[img + 1] /*- pFixedZs[0]*/ + dz) * (1 << Z_SCALE_SHIFT)) );
}
{
dim3 ithreads = dim3(1, 1, 1);
dim3 iblocks = dim3(1, 1, 1);
hipLaunchKernelGGL(( resetcounts_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pMapCounts, 1, pBest);
_CUDA_THROW_ERR_
}
#if 1
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
SySal::GPU::InterruptibleKernels::IntKernel<finalmap_kernel_args, finalmap_kernel_status, finalmap_Ikernel> Launcher;
Launcher.Arguments.pC = pImageNext;
Launcher.Arguments.nc = cf.ImageClusterCounts(img + 1);
Launcher.Arguments.clusterblocksize = cf.ImageClusterCounts(img + 1) / (pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount) + 1;
Launcher.Arguments.pCell = pCells;
Launcher.Arguments.pCellContent = pCellContents;
Launcher.Arguments.maxcellcontent = C.MaxCellContent;
Launcher.Arguments.cellsize = cellsize;
Launcher.Arguments.tol = C.ClusterMapFineAcceptance;
Launcher.Arguments.w = width;
Launcher.Arguments.h = height;
Launcher.Arguments.demag = demagDZ1M;
Launcher.Arguments.nx = ncellsx;
Launcher.Arguments.ny = ncellsy;
Launcher.Arguments.pMapCounts = pMapCounts;
Launcher.Arguments.pClusterChain = pClusterChains;
Launcher.Arguments.img = img + 1;
Launcher.Arguments.pBase = pImagesBase1;
Launcher.Arguments.pDX = pDeltaStagePosX;
Launcher.Arguments.pDY = pDeltaStagePosY;
int launches = Launcher.Launch(iblocks, ithreads, 5);
_CUDA_THROW_ERR_
}
#endif
{
int step;
for (step = 1; step <= (pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount / 2); step <<= 1)
{
int thr = (pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount / 2) / step;
dim3 ithreads = dim3(max(1, thr / pThis->m_Prop.multiProcessorCount), 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
hipLaunchKernelGGL(( sumcounts_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pMapCounts, 1, pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount, step);
}
_CUDA_THROW_ERR_
}
// END FINAL MAPPING
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
hipLaunchKernelGGL(( clearhash_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pImagesBase, cf.ImageClusterCounts(img), cf.ImageClusterCounts(img) / (pThis->m_Prop.maxThreadsPerBlock + pThis->m_Prop.multiProcessorCount) + 1, pCells, cellsize, ncellsx, ncellsy);
_CUDA_THROW_ERR_
}
pImagesBase = pImageNext;
}
if (CurrentView > 0)
{
hipLaunchKernelGGL(( makechainwindow_kernel), dim3(dim3(1,1,1)), dim3(dim3(1,1,1)), 0, 0, pChMapWnd, pStagePosX, pStagePosY, cf.Images(), width, height, cf.PixMicronX() / cf.Scale() * (1 << XY_SCALE_SHIFT), cf.PixMicronY() / cf.Scale() * (1 << XY_SCALE_SHIFT), ncellsx * ncellsy, C.ChainMapXYCoarseTolerance, pLastView, pCells, pCellContents, C.MaxCellContent, cf.StageX(0) * (1 << XY_SCALE_SHIFT), cf.StageY(0) * (1 << XY_SCALE_SHIFT), pLastView);
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
SySal::GPU::InterruptibleKernels::IntKernel<makechain_kernel_args, makechain_kernel_status, makechain_Ikernel> Launcher;
Launcher.Arguments.pC = pImagesBase1;
Launcher.Arguments.pClusterChains = pClusterChains;
Launcher.Arguments.pClusterXs = pClusterXs;
Launcher.Arguments.pClusterYs = pClusterYs;
Launcher.Arguments.pClusterZs = pClusterZs;
Launcher.Arguments.pChain = pChains;
Launcher.Arguments.pChainCounts = pChainCounts;
Launcher.Arguments.totalclusters = totalclusters;
Launcher.Arguments.clusterblocksize = totalclusters / (pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount) + 1;
Launcher.Arguments.minvol = C.MinVolumePerChain;
Launcher.Arguments.minclusters = C.MinClustersPerChain;
Launcher.Arguments.xtomicron = cf.PixMicronX() / cf.Scale();
Launcher.Arguments.ytomicron = cf.PixMicronY() / cf.Scale();
Launcher.Arguments.width = width;
Launcher.Arguments.height = height;
Launcher.Arguments.stagex = cf.StageX(0) * (1 << XY_SCALE_SHIFT);
Launcher.Arguments.stagey = cf.StageY(0) * (1 << XY_SCALE_SHIFT);
Launcher.Arguments.xslant = IC.XSlant * cf.PixMicronX() / cf.Scale() * (1 << (Z_SCALE_SHIFT + SLOPE_SHIFT));
Launcher.Arguments.yslant = IC.YSlant * cf.PixMicronY() / cf.Scale() * (1 << (Z_SCALE_SHIFT + SLOPE_SHIFT));
Launcher.Arguments.viewtag = CurrentView;
int launches = Launcher.Launch(iblocks, ithreads, 5);
//printf("\nLaunches %d", launches);
_CUDA_THROW_ERR_
}
if (CurrentView > 0)
{
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
hipLaunchKernelGGL(( maphashchain_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pLastView, pChMapWnd, pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount);
_CUDA_THROW_ERR_
}
// BEGIN COARSE CHAIN MAPPING
deltasX = (2 * C.ChainMapMaxXOffset / C.ChainMapXYCoarseTolerance + 1);
deltasY = (2 * C.ChainMapMaxYOffset / C.ChainMapXYCoarseTolerance + 1);
deltasZ = (2 * C.ChainMapMaxZOffset / C.ChainMapZCoarseTolerance + 1);
deltasXYZ = deltasX * deltasY * deltasZ;
refinedeltasXY = (2 * C.ChainMapXYCoarseTolerance / C.ChainMapXYFineTolerance) + 1;
refinedeltasZ = (2 * C.ChainMapZCoarseTolerance / C.ChainMapZFineTolerance) + 1;
refinedeltasXYZ = refinedeltasXY * refinedeltasXY * refinedeltasZ;
WISE_ALLOC(pDeltas, sizeof(int) * ((deltasX + deltasY + 3 * deltasZ + 2 * refinedeltasXY + refinedeltasZ)));
WISE_ALLOC(pMapCounts, sizeof(int) * max(deltasXYZ, refinedeltasXYZ) * (pThis->m_Prop.multiProcessorCount * pThis->m_Prop.maxThreadsPerBlock + 1) );
THROW_ON_CUDA_ERR(hipMemset(pMapCounts, 0, _MEM_(pMapCounts)));
pBest = (int *)(void *)(&pThisView->Reserved);
{
hipLaunchKernelGGL(( makechaindeltas_kernel), dim3(dim3(1, 1, 1)), dim3(dim3(1, 1, 1)), 0, 0, pDeltas, C.ChainMapXYCoarseTolerance, C.ChainMapZCoarseTolerance, deltasX, deltasY, deltasZ, pLastView, cf.StageX(0) * (1 << XY_SCALE_SHIFT), cf.StageY(0) * (1 << XY_SCALE_SHIFT), IC.XSlant, IC.YSlant, 0, 0/*dxdz, dydz*/);
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(deltasXYZ / ithreads.x + 1, 1, 1);
hipLaunchKernelGGL(( resetcounts_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pMapCounts, deltasXYZ, pBest);
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
//printf("\nDeltas %d %d %d %d", deltasX, deltasY, deltasZ, deltasXYZ);
SySal::GPU::InterruptibleKernels::IntKernel<trymapchain_kernel_args, trymapchain_kernel_status, trymapchaindxydz_Ikernel> Launcher;
Launcher.Arguments.pChains = pChains;
Launcher.Arguments.pChainCounts = pChainCounts;
Launcher.Arguments.nc = totalclusters;
Launcher.Arguments.chainblocksize = totalclusters / (pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount) + 1;
Launcher.Arguments.pChMapWnd = pChMapWnd;
Launcher.Arguments.pDeltas = pDeltas;
Launcher.Arguments.deltasX = deltasX;
Launcher.Arguments.deltasY = deltasY;
Launcher.Arguments.deltasZ = deltasZ;
Launcher.Arguments.xytol = C.ChainMapXYCoarseTolerance;
Launcher.Arguments.ztol = C.ChainMapZCoarseTolerance;
Launcher.Arguments.minchainsize = C.ChainMapMinVolume;
Launcher.Arguments.pMapCounts = pMapCounts;
Launcher.Arguments.sampledivider = C.ChainMapSampleDivider;
Launcher.Arguments.pBest = pBest;
int launches = Launcher.Launch(iblocks, ithreads, 2);
//printf("\nTryMapChain Launches: %d", launches);
_CUDA_THROW_ERR_
}
{
int mapc, best;
THROW_ON_CUDA_ERR(hipMemcpy(&best_matches, (int *)(void *)(&pThisView->Reserved), sizeof(int), hipMemcpyDeviceToHost));
//printf("\nDEBUG %08X", best_matches);
mapc = best_matches >> 16;
if (mapc < C.MinChainMapsValid) best = deltasX / 2 + (deltasY / 2) * deltasX + (deltasZ / 2) * deltasX * deltasY;
else best = best_matches & 0xffff;
best_matches = mapc;
}
// END COARSE CHAIN MAPPING
// BEGIN FINE CHAIN MAPPING
if (best_matches < C.MinChainMapsValid)
{
printf("\nBad chain mapping (%d/%d), switching to default", best_matches, C.MinChainMapsValid);
hipLaunchKernelGGL(( makechaindeltas_kernel), dim3(dim3(1, 1, 1)), dim3(dim3(1, 1, 1)), 0, 0, pDeltas, 0, 0, deltasX, deltasY, deltasZ, pLastView, cf.StageX(0) * (1 << XY_SCALE_SHIFT), cf.StageY(0) * (1 << XY_SCALE_SHIFT), IC.XSlant, IC.YSlant, dxdz, dydz);
_CUDA_THROW_ERR_
}
{
hipLaunchKernelGGL(( makechaindeltas_fromshift_kernel), dim3(dim3(1, 1, 1)), dim3(dim3(1, 1, 1)), 0, 0, pDeltas + deltasX + deltasY + 3 * deltasZ, (best_matches < C.MinChainMapsValid) ? 0 : C.ChainMapXYFineTolerance, 0, refinedeltasXY, refinedeltasXY, refinedeltasZ, pDeltas, pBest, deltasX, deltasY, deltasZ);
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(refinedeltasXYZ / ithreads.x + 1, 1, 1);
hipLaunchKernelGGL(( resetcounts_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pMapCounts, refinedeltasXYZ, (int *)(void *)(&pThisView->Reserved));
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
SySal::GPU::InterruptibleKernels::IntKernel<trymapchain_kernel_args, trymapchain_kernel_status, trymapchain_Ikernel> Launcher;
Launcher.Arguments.pChains = pChains;
Launcher.Arguments.pChainCounts = pChainCounts;
Launcher.Arguments.nc = totalclusters;
Launcher.Arguments.chainblocksize = totalclusters / (pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount) + 1;
Launcher.Arguments.pChMapWnd = pChMapWnd;
Launcher.Arguments.pDeltas = pDeltas + deltasX + deltasY + 3 * deltasZ;
Launcher.Arguments.deltasX = refinedeltasXY;
Launcher.Arguments.deltasY = refinedeltasXY;
Launcher.Arguments.deltasZ = refinedeltasZ;
Launcher.Arguments.xytol = C.ChainMapXYFineAcceptance;
Launcher.Arguments.ztol = C.ChainMapZFineAcceptance;
Launcher.Arguments.minchainsize = C.ChainMapMinVolume;
Launcher.Arguments.pMapCounts = pMapCounts;
Launcher.Arguments.sampledivider = C.ChainMapSampleDivider;
Launcher.Arguments.pBest = pBest;
int launches = Launcher.Launch(iblocks, ithreads, 2);
//printf("\nRefineMapChain Launches: %d", launches);
_CUDA_THROW_ERR_
}
{
int mapc, best;
THROW_ON_CUDA_ERR(hipMemcpy(&best_matches, pBest, sizeof(int), hipMemcpyDeviceToHost))
printf("\nDEBUG %08X", best_matches);
mapc = best_matches >> 16;
best = best_matches & 0xffff;
best_matches = mapc;
}
// END FINE CHAIN MAPPING
// BEGIN FINAL CHAIN MAPPING
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
hipLaunchKernelGGL(( finalmapchain_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pChains, pChainCounts, totalclusters, totalclusters / (pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount) + 1, pChMapWnd, pDeltas + deltasX + deltasY + 3 * deltasZ, refinedeltasXY, refinedeltasZ, C.ChainMapXYFineAcceptance, C.ChainMapZFineAcceptance, pBest);
_CUDA_THROW_ERR_
}
// END FINAL CHAIN MAPPING
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
hipLaunchKernelGGL(( clearhashchain_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pLastView, pChMapWnd, pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount);
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
hipLaunchKernelGGL(( negshift_viewchains_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pLastView, pDeltas + deltasX + deltasY + 3 * deltasZ, refinedeltasXY, refinedeltasZ, pBest);
_CUDA_THROW_ERR_
}
}
{
hipLaunchKernelGGL(( setchainbase_kernel), dim3(dim3(1,1,1)), dim3(dim3(1,1,1)), 0, 0, pCompactChainCounts, pChainCounts, pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount, pChainMapHeader, pThisView, cf.StageX(0) * (1 << XY_SCALE_SHIFT), cf.StageY(0) * (1 << XY_SCALE_SHIFT), cf.StageZ(0) * (1 << Z_SCALE_SHIFT), (((int)width) << XY_SCALE_SHIFT) / cf.Scale() * fabs(cf.PixMicronX()), (((int)height) << XY_SCALE_SHIFT) / cf.Scale() * fabs(cf.PixMicronY()));
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
hipLaunchKernelGGL(( compactchains_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pCompactChains, pCompactChainCounts, pChains, pChainCounts, totalclusters / (pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount) + 1);
_CUDA_THROW_ERR_
}
{
//if (CurrentView == 0)
hipLaunchKernelGGL(( setchainviewheader_kernel), dim3(dim3(1,1,1)), dim3(dim3(1,1,1)), 0, 0, pChainMapHeader, pThisView, cf.StageX(0) * (1 << XY_SCALE_SHIFT), cf.StageY(0) * (1 << XY_SCALE_SHIFT), cf.StageZ(0) * (1 << Z_SCALE_SHIFT), 0, 0, 0, 0);
//else
//setchainviewheader_kernel<<<dim3(1,1,1), dim3(1,1,1)>>>(pChainMapHeader, pThisView, cf.StageX(0) * (1 << XY_SCALE_SHIFT), cf.StageY(0) * (1 << XY_SCALE_SHIFT), cf.StageZ(0) * (1 << Z_SCALE_SHIFT), pDeltas + deltasX + deltasY + 3 * deltasZ, refinedeltasXY, refinedeltasZ, pBest);
_CUDA_THROW_ERR_
}
if (CurrentView > 0)
pThis->SendViewsToTracker(CurrentView, (((int)width) << XY_SCALE_SHIFT) / cf.Scale() * fabs(cf.PixMicronX()), (((int)height) << XY_SCALE_SHIFT) / cf.Scale() * fabs(cf.PixMicronY()), pLastView, pThisView);
return 0;
}
};
}; | d1f6eec4b57772d333764bb8d85b250ad965f82f.cu | #include "gpu_incremental_map_track.h"
#include "gpu_map_kernels.h"
#include "gpu_defines.h"
#include "cuda_runtime.h"
#include <stdlib.h>
#include <math.h>
namespace SySal
{
namespace GPU
{
int PrismMapTracker::ClusterChainer::FindEdges(EmulsionEdge &t, EmulsionEdge &b, IntClusterFile &cf, int threshold, int &refimg)
{
	// Scans the per-image cluster counts from both ends of the image stack to
	// locate the top (t) and bottom (b) emulsion surfaces: an edge is where the
	// count crosses 'threshold', and its Z is linearly interpolated between the
	// stage positions of the two straddling images. refimg receives the index
	// of the image midway between the two crossings. Returns the number of
	// valid edges found (0..2).
	t.Valid = false;
	b.Valid = false;
	const int nimg = cf.Images();
	int topout = -1;      // last consecutive below-threshold image from the top
	int bottomout = nimg; // last consecutive below-threshold image from the bottom
	for (int i = 0; i < nimg && cf.ImageClusterCounts(i) < threshold; i++)
		topout = i;
	for (int i = nimg - 1; i >= 0 && cf.ImageClusterCounts(i) < threshold; i--)
		bottomout = i;
	// Overlapping scans mean no region ever reached the threshold.
	if (topout >= bottomout) return 0;
	int found = 0;
	if (topout > 0 && topout < nimg - 1)
	{
		// Degenerate case first: identical stage Z on both sides of the crossing.
		if (cf.StageZ(topout) == cf.StageZ(topout + 1))
			t.Z = 0.5f * (cf.StageZ(topout) + cf.StageZ(topout + 1));
		else
			t.Z = cf.StageZ(topout) + (cf.StageZ(topout + 1) - cf.StageZ(topout)) / (cf.ImageClusterCounts(topout + 1) - cf.ImageClusterCounts(topout)) * (threshold - cf.ImageClusterCounts(topout));
		t.Valid = true;
		found++;
	}
	if (bottomout > 0 && bottomout < nimg - 1)
	{
		if (cf.StageZ(bottomout) == cf.StageZ(bottomout - 1))
			b.Z = 0.5f * (cf.StageZ(bottomout) + cf.StageZ(bottomout - 1));
		else
			b.Z = cf.StageZ(bottomout) + (cf.StageZ(bottomout - 1) - cf.StageZ(bottomout)) / (cf.ImageClusterCounts(bottomout - 1) - cf.ImageClusterCounts(bottomout)) * (threshold - cf.ImageClusterCounts(bottomout));
		b.Valid = true;
		found++;
	}
	refimg = (topout + bottomout) / 2;
	return found;
}
// Constructor: puts every owned buffer pointer into its "never allocated"
// state (via CTOR_INIT) so the destructor's DEALLOC/HOST_DEALLOC calls are
// unconditionally safe, and preallocates a small host buffer for emulsion
// thickness samples.
PrismMapTracker::ClusterChainer::ClusterChainer() :
CTOR_INIT(pClusterPos),
CTOR_INIT(pClusterData),
CTOR_INIT(pClusterChains),
CTOR_INIT(pCellContents),
CTOR_INIT(pCells),
CTOR_INIT(pChains),
CTOR_INIT(pCompactChains),
CTOR_INIT(pChainCounts),
CTOR_INIT(pDeltas),
CTOR_INIT(pMapCounts),
CTOR_INIT(pCurv),
CTOR_INIT(pStagePos),
CTOR_INIT(pHostStagePos),
CTOR_INIT(pChainMapHeader),
CTOR_INIT(pFixedZs),
CTOR_INIT(pChMapWnd),
CTOR_INIT(pLastView),
CTOR_INIT(pThisView)
{
// Room for 32 thickness samples; ThicknessSamples counts how many are filled.
ThicknessSamples = 0;
pThicknessSamples = (double *)malloc(sizeof(double) * 32);
// Nominal default values for the cluster/chain mapping configuration.
C.MaxCellContent = 8;
C.CellSize = 160;
C.ClusterMapCoarseTolerance = 16;
C.ClusterMapFineTolerance = 1;
C.ClusterMapFineAcceptance = 8;
C.ClusterMapMaxXOffset = 160;
C.ClusterMapMaxYOffset = 24;
C.MinClustersPerChain = 2;
C.MinVolumePerChain = 8;
C.ClusterMapMinSize = 6;
C.ChainMapXYCoarseTolerance = 8;
C.ChainMapXYFineTolerance = 1;
C.ChainMapXYFineAcceptance = 8;
C.ChainMapZCoarseTolerance = 2 << Z_SCALE_SHIFT;
C.ChainMapZFineTolerance = (1 << Z_SCALE_SHIFT) / 8;
C.ChainMapZFineAcceptance = (1 << Z_SCALE_SHIFT);
C.ChainMapMaxXOffset = 160;
C.ChainMapMaxYOffset = 24;
C.ChainMapMaxZOffset = 10 << Z_SCALE_SHIFT;
C.ChainMapMinVolume = 16;
C.ChainMapSampleDivider = 10;
C.MaxChains = 2000000;
C.ClusterMapSampleDivider = 20;
C.ClusterThreshold = 3000;
// NOTE(review): the defaults above are immediately overwritten below with
// 0 / -1 sentinels — presumably marking the object "unconfigured" until
// Reset() installs a real Configuration/ImageCorrection. Confirm before
// relying on the nominal values above; they are currently dead stores.
C.MaxCellContent = 0;
C.CellSize = 0;
C.ClusterMapCoarseTolerance = 0;
C.ClusterMapFineTolerance = 0;
C.ClusterMapMaxXOffset = 0;
C.ClusterMapMaxYOffset = 0;
C.MinClustersPerChain = 0;
C.MinVolumePerChain = 0;
C.ChainMapXYCoarseTolerance = 0;
C.ChainMapXYFineTolerance = 0;
C.ChainMapZCoarseTolerance = 0;
C.ChainMapZFineTolerance = 0;
C.ChainMapMaxXOffset = 0;
C.ChainMapMaxYOffset = 0;
C.ChainMapMaxZOffset = 0;
C.MaxChains = 0;
IC.DMagDX = IC.DMagDY = IC.DMagDZ = 0.0f;
IC.XYCurvature = IC.ZCurvature = 0.0f;
// Same pattern for the image correction: zeroed, then set to -1 sentinels.
IC.DMagDX = IC.DMagDY = IC.DMagDZ = -1.0f;
IC.XYCurvature = IC.ZCurvature = -1.0f;
}
// Destructor: releases the host thickness-sample buffer and every GPU/host
// buffer owned by this ClusterChainer. DEALLOC / HOST_DEALLOC are applied to
// CTOR_INIT'd pointers, so never-allocated buffers are expected to be handled
// safely by the macros.
PrismMapTracker::ClusterChainer::~ClusterChainer()
{
	if (pThicknessSamples) free(pThicknessSamples);
	cudaError_t err; // required by the THROW_ON_CUDA_ERR / DEALLOC macros
	// NOTE(review): THROW_ON_CUDA_ERR may throw from a destructor, which is
	// unsafe during stack unwinding; left unchanged to preserve behavior.
	THROW_ON_CUDA_ERR(cudaSetDevice(pThis->m_DeviceId))
	DEALLOC(pChMapWnd)
	HOST_DEALLOC(pFixedZs)
	DEALLOC(pChainMapHeader)
	DEALLOC(pCurv)
	HOST_DEALLOC(pHostStagePos)
	DEALLOC(pStagePos)
	DEALLOC(pMapCounts)
	DEALLOC(pDeltas)
	DEALLOC(pChainCounts)
	DEALLOC(pCompactChains)
	DEALLOC(pChains)
	DEALLOC(pCells)
	DEALLOC(pCellContents)
	DEALLOC(pClusterChains)
	DEALLOC(pClusterData)
	DEALLOC(pClusterPos)
	// Fix: the original deallocated pChainCounts a second time here, while
	// pLastView/pThisView — WISE_ALLOC'd in Reset() — were never released,
	// leaking the two chain-view device buffers. Release them instead.
	DEALLOC(pLastView)
	DEALLOC(pThisView)
}
// Installs a new configuration / image-correction pair and rearms the chainer
// for a fresh acquisition sequence on one emulsion side.
//   c     - chaining/mapping parameters (copied into C)
//   ic    - optical/image correction parameters (copied into IC)
//   istop - true when processing the top emulsion side
void PrismMapTracker::ClusterChainer::Reset(SySal::ClusterChainer::Configuration &c, SySal::ImageCorrection &ic, bool istop)
{
C = c;
IC = ic;
IsTop = istop;
// Pre-decremented so the first AddClusters() call (which does CurrentView++)
// processes view 0.
CurrentView = -1;
ThicknessSamples = 0;
cudaError_t err; // required by the THROW_ON_CUDA_ERR / *_ALLOC macros below
THROW_ON_CUDA_ERR(cudaSetDevice(pThis->m_DeviceId))
// Double-buffered chain views: each holds a ChainView header followed by up
// to C.MaxChains IntChain records; AddClusters() swaps the two each view.
WISE_ALLOC(pLastView, sizeof(ChainView) + C.MaxChains * sizeof(IntChain))
WISE_ALLOC(pThisView, sizeof(ChainView) + C.MaxChains * sizeof(IntChain))
EXACT_ALLOC(pChMapWnd, sizeof(ChainMapWindow))
}
int PrismMapTracker::ClusterChainer::AddClusters(SySal::IntClusterFile &cf)
{
cudaError_t err;
THROW_ON_CUDA_ERR(cudaSetDevice(pThis->m_DeviceId))
int refimg = 0;
float dz = 0.0f;
{
EmulsionEdge t, b;
FindEdges(t, b, cf, C.ClusterThreshold, refimg);
if (IsTop)
{
if (b.Valid) dz = -b.Z;
else if (t.Valid)
{
dz = -(t.Z - pThis->GetThickness());
}
else throw "Cannot work out emulsion reference surface.";
}
else
{
if (t.Valid) dz = -t.Z;
else if (b.Valid)
{
dz = -(b.Z + pThis->GetThickness());
}
else throw "Cannot work out emulsion reference surface.";
}
}
CurrentView++;
float dxdz = (cf.StageX(cf.Images() - 1) - cf.StageX(0)) / (cf.StageZ(cf.Images() - 1) - cf.StageZ(0));
float dydz = (cf.StageY(cf.Images() - 1) - cf.StageY(0)) / (cf.StageZ(cf.Images() - 1) - cf.StageZ(0));
//printf("\nZ adjustment: %f\nDXDZ: %f\nDYDZ: %f", dz, dxdz, dydz);
int img;
HOST_WISE_ALLOC(pFixedZs, sizeof(double) * cf.Images());
if (cf.Images() < 5)
{
for (img = 0; img < cf.Images(); img++)
pFixedZs[img] = cf.StageZ(img);
}
else
{
for (img = 1; img < cf.Images(); img++)
pFixedZs[img - 1] = cf.StageZ(img) - cf.StageZ(img - 1);
double maxd = pFixedZs[0];
double mind = pFixedZs[0];
for (img = 1; img < cf.Images() - 1; img++)
if (pFixedZs[img] < mind) mind = pFixedZs[img];
else if (pFixedZs[img] > maxd) maxd = pFixedZs[img];
double sum = 0.0;
for (img = 0; img < cf.Images() - 1; img++)
if (pFixedZs[img] > mind && pFixedZs[img] < maxd)
sum += pFixedZs[img];
sum /= (cf.Images() - 3);
pFixedZs[0] = cf.StageZ(0);
for (img = 1; img < cf.Images(); img++)
pFixedZs[img] = cf.StageZ(0) + img * sum;
}
int width = cf.Width() * cf.Scale();
int height = cf.Height() * cf.Scale();
int cellsize = C.CellSize;
int ncellsx = (width / cellsize) + 1;
int ncellsy = (height / cellsize) + 1;
WISE_ALLOC(pCurv, 2 * sizeof(int) * (width + height));
WISE_ALLOC(pCells, sizeof(Cell) * ncellsx * ncellsy);
WISE_ALLOC(pCellContents, sizeof(IntCluster *) * ncellsx * ncellsy * C.MaxCellContent);
THROW_ON_CUDA_ERR(cudaMemset(pCells, 0, ncellsx * ncellsy * sizeof(Cell)));
THROW_ON_CUDA_ERR(cudaMemset(pCellContents, 0, sizeof(IntCluster *) * ncellsx * ncellsy * C.MaxCellContent));
int totalsize = cf.TotalSize;
WISE_ALLOC(pClusterData, totalsize);
THROW_ON_CUDA_ERR(cudaMemcpy(pClusterData, cf.pData, totalsize, cudaMemcpyHostToDevice));
_CUDA_THROW_ERR_;
int totalclusters = 0;
for (img = 0; img < cf.Images(); img++)
totalclusters += cf.pImageClusterCounts[img];
IntCluster *pImagesBase = (IntCluster *)(void *)(((char *)cf.pClusters - (char *)cf.pData) + (char *)pClusterData);
IntCluster *pImageNext;
WISE_ALLOC(pChains, totalclusters * sizeof(IntChain));
WISE_ALLOC(pClusterPos, totalclusters * 3 * sizeof(short));
short *pClusterXs = pClusterPos;
short *pClusterYs = pClusterXs + totalclusters;
short *pClusterZs = pClusterYs + totalclusters;
WISE_ALLOC(pClusterChains, sizeof(IntCluster *) * totalclusters);
THROW_ON_CUDA_ERR(cudaMemset(pClusterChains, 0, sizeof(IntCluster *) * totalclusters));
WISE_ALLOC(pChainCounts, sizeof(int) * pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount * 2);
int *pCompactChainCounts = pChainCounts + pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount;
int *pCurvX, *pCurvY, *pZCurvX, *pZCurvY;
int deltasZ = 0;
int deltasXYZ = 0;
int refinedeltasXY = 0;
int refinedeltasZ = 0;
int refinedeltasXYZ = 0;
int best_matches = 0;
ChainView *pView = pLastView;
pLastView = pThisView;
pThisView = pView;
IntChain *pCompactChains = (IntChain *)(void *)((char *)(void *)pThisView + sizeof(ChainView));
cudaMemset((int *)(void *)pThisView->Reserved, 0, sizeof(int));
_CUDA_THROW_ERR_;
WISE_ALLOC(pCurv, 2 * sizeof(int) * (width + height));
pCurvX = pCurv;
pCurvY = pCurv + width;
pZCurvX = pCurvY + height;
pZCurvY = pZCurvX + width;
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(width / pThis->m_Prop.maxThreadsPerBlock + 1, 1, 1);
curvaturemap_kernel<<<iblocks, ithreads>>>(pCurvX, pZCurvX, width, (IC.XYCurvature * (1 << XY_CURVATURE_SHIFT) * (cf.PixMicronX() * cf.PixMicronX()) / (cf.Scale() * cf.Scale())), (IC.ZCurvature * (1 << (Z_CURVATURE_SHIFT + Z_SCALE_SHIFT)) * (cf.PixMicronX() * cf.PixMicronX()) / (cf.Scale() * cf.Scale())) );
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(height / pThis->m_Prop.maxThreadsPerBlock + 1, 1, 1);
curvaturemap_kernel<<<iblocks, ithreads>>>(pCurvY, pZCurvY, height, (IC.XYCurvature * (1 << XY_CURVATURE_SHIFT) * (cf.PixMicronY() * cf.PixMicronY()) / (cf.Scale() * cf.Scale())), (IC.ZCurvature * (1 << (Z_CURVATURE_SHIFT + Z_SCALE_SHIFT)) * (cf.PixMicronX() * cf.PixMicronX()) / (cf.Scale() * cf.Scale())) );
_CUDA_THROW_ERR_
}
int deltasX = (C.ClusterMapMaxXOffset / C.ClusterMapCoarseTolerance * 2 + 1);
int deltasY = (C.ClusterMapMaxYOffset / C.ClusterMapCoarseTolerance * 2 + 1);
int deltas2 = deltasX * deltasY;
int refinedeltas = 2 * C.ClusterMapCoarseTolerance / C.ClusterMapFineTolerance + 1;
int refinedeltas2 = refinedeltas * refinedeltas;
WISE_ALLOC(pDeltas, sizeof(int) * ((2 * refinedeltas + deltasX + deltasY)));
WISE_ALLOC(pMapCounts, sizeof(int) * max(deltas2, refinedeltas2) * (pThis->m_Prop.multiProcessorCount * pThis->m_Prop.maxThreadsPerBlock + 1) );
int *pBest = pMapCounts + max(deltas2, refinedeltas2) * (pThis->m_Prop.multiProcessorCount * pThis->m_Prop.maxThreadsPerBlock);
WISE_ALLOC(pStagePos, sizeof(short) * cf.Images() * 4);
short *pStagePosX = pStagePos;
short *pStagePosY = pStagePosX + cf.Images();
short *pDeltaStagePosX = pStagePosY + cf.Images();
short *pDeltaStagePosY = pDeltaStagePosX + cf.Images();
HOST_WISE_ALLOC(pHostStagePos, sizeof(short) * cf.Images() * 4);
for (img = 0; img < cf.Images(); img++)
{
pHostStagePos[img] =
(int)(((cf.StageX(img) - cf.StageX(0)) / cf.PixMicronX()) * cf.Scale());
pHostStagePos[img + cf.Images()] =
(int)(((cf.StageY(img) - cf.StageY(0)) / cf.PixMicronY()) * cf.Scale());
if (img == 0)
{
pHostStagePos[img + 2 * cf.Images()] = 0;
pHostStagePos[img + 3 * cf.Images()] = 0;
}
else
{
pHostStagePos[img + 2 * cf.Images()] = pHostStagePos[img] - pHostStagePos[img - 1];
pHostStagePos[img + 3 * cf.Images()] = pHostStagePos[img + cf.Images()] - pHostStagePos[img - 1 + cf.Images()];
}
}
THROW_ON_CUDA_ERR(cudaMemcpy(pStagePos, pHostStagePos, sizeof(short) * 4 * cf.Images(), cudaMemcpyHostToDevice));
THROW_ON_CUDA_ERR(cudaMemset(pCells, 0, sizeof(Cell) * ncellsx * ncellsy));
int demagDZ1M;
int id = 0;
IntCluster *pImagesBase1 = pImagesBase;
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(totalclusters / pThis->m_Prop.maxThreadsPerBlock + 1, 1, 1);
correctcurvature_kernel<<<iblocks, ithreads>>>(pImagesBase, pClusterZs, sin(IC.CameraRotation) * (1 << FRACT_RESCALE_SHIFT), (cos(IC.CameraRotation) - 1) * (1 << FRACT_RESCALE_SHIFT), pCurvX, pCurvY, pZCurvX, pZCurvY, IC.DMagDX * (1 << XY_MAGNIFICATION_SHIFT), IC.DMagDY * (1 << XY_MAGNIFICATION_SHIFT), totalclusters, width / 2, height / 2 );
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(cf.ImageClusterCounts(0) / pThis->m_Prop.maxThreadsPerBlock + 1, 1, 1);
setXYZs_kernel<<<iblocks, ithreads>>>(pClusterXs, pClusterYs, pClusterZs, cf.ImageClusterCounts(0), 0, pStagePosX, pStagePosY, 0);
_CUDA_THROW_ERR_
}
int bestclustermapcount = -1;
int bestclustermapcount_img = 0;
for (img = 0; img < cf.Images() - 1; img++)
{
pImageNext = pImagesBase + cf.ImageClusterCounts(img);
demagDZ1M = IC.DMagDZ * (pFixedZs[img + 1] - pFixedZs[img]) * (1 << DEMAG_SHIFT);
int launches;
// BEGIN COARSE MAPPING
{
makedeltas_kernel<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(pDeltas, C.ClusterMapCoarseTolerance, deltasX, deltasY, pDeltaStagePosX, pDeltaStagePosY, img + 1);
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(deltas2 / ithreads.x + 1, 1, 1);
resetcounts_kernel<<<iblocks, ithreads>>>(pMapCounts, deltas2, pBest);
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
maphash_minarea_kernel<<<iblocks, ithreads>>>(pImagesBase, cf.ImageClusterCounts(img), cf.ImageClusterCounts(img) / (pThis->m_Prop.maxThreadsPerBlock + pThis->m_Prop.multiProcessorCount) + 1, pCells, pCellContents, cellsize, C.MaxCellContent, ncellsx, ncellsy, C.ClusterMapMinSize);
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
SySal::GPU::InterruptibleKernels::IntKernel<trymap_kernel_args, trymap_kernel_status, trymap2_Ikernel> Launcher;
Launcher.Arguments.pC = pImageNext;
Launcher.Arguments.nc = cf.ImageClusterCounts(img + 1);
Launcher.Arguments.clusterblocksize = cf.ImageClusterCounts(img + 1) / (pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount) + 1;
Launcher.Arguments.pCell = pCells;
Launcher.Arguments.pCellContent = pCellContents;
Launcher.Arguments.maxcellcontent = C.MaxCellContent;
Launcher.Arguments.pDeltas = pDeltas;
Launcher.Arguments.deltasx = deltasX;
Launcher.Arguments.deltasy = deltasY;
Launcher.Arguments.cellsize = cellsize;
Launcher.Arguments.minclustersize = C.ClusterMapMinSize;
Launcher.Arguments.tol = C.ClusterMapCoarseTolerance;
Launcher.Arguments.w = width;
Launcher.Arguments.h = height;
Launcher.Arguments.demag = demagDZ1M;
Launcher.Arguments.nx = ncellsx;
Launcher.Arguments.ny = ncellsy;
Launcher.Arguments.pMapCounts = pMapCounts;
Launcher.Arguments.sampledivider = C.ClusterMapSampleDivider;
Launcher.Arguments.clustermapmin = C.MinClusterMapsValid;
Launcher.Arguments.pBest = pBest;
launches = Launcher.Launch(iblocks, ithreads, 5);
_CUDA_THROW_ERR_
}
{
THROW_ON_CUDA_ERR(cudaMemcpy(&best_matches, pBest, sizeof(int), cudaMemcpyDeviceToHost))
best_matches = best_matches >> 16;
}
// END COARSE MAPPING
// BEGIN FINE MAPPING
if (best_matches < C.MinClusterMapsValid)
{
makedeltas_kernel<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(pDeltas, 0, deltasX, deltasY, pDeltaStagePosX, pDeltaStagePosY, img + 1);
_CUDA_THROW_ERR_
}
{
makedeltas_fromshift_kernel<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(pDeltas + deltasX + deltasY, (best_matches < C.MinClusterMapsValid) ? 0 : C.ClusterMapFineTolerance, refinedeltas, refinedeltas, pDeltas, pBest, deltasX);
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(refinedeltas2 / ithreads.x + 1, 1, 1);
resetcounts_kernel<<<iblocks, ithreads>>>(pMapCounts, refinedeltas2, pBest);
_CUDA_THROW_ERR_
}
{
if (best_matches > bestclustermapcount)
{
bestclustermapcount = best_matches;
bestclustermapcount_img = img;
}
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
SySal::GPU::InterruptibleKernels::IntKernel<refinemap_kernel_args, refinemap_kernel_status, refinemap_Ikernel> Launcher;
Launcher.Arguments.pC = pImageNext;
Launcher.Arguments.nc = cf.ImageClusterCounts(img + 1);
Launcher.Arguments.clusterblocksize = cf.ImageClusterCounts(img + 1) / (pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount) + 1;
Launcher.Arguments.pCell = pCells;
Launcher.Arguments.pCellContent = pCellContents;
Launcher.Arguments.maxcellcontent = C.MaxCellContent;
Launcher.Arguments.cellsize = cellsize;
Launcher.Arguments.tol = C.ClusterMapCoarseTolerance;
Launcher.Arguments.w = width;
Launcher.Arguments.h = height;
Launcher.Arguments.demag = demagDZ1M;
Launcher.Arguments.nx = ncellsx;
Launcher.Arguments.ny = ncellsy;
Launcher.Arguments.pMapCounts = pMapCounts;
Launcher.Arguments.pClusterChain = pClusterChains;
Launcher.Arguments.pBase = pImagesBase1;
Launcher.Arguments.pDeltas = pDeltas + deltasX + deltasY;
Launcher.Arguments.deltas = refinedeltas;
Launcher.Arguments.refinebin = C.ClusterMapFineTolerance;
Launcher.Arguments.pBest = pBest;
launches = Launcher.Launch(iblocks, ithreads, 5);
_CUDA_THROW_ERR_
}
{
THROW_ON_CUDA_ERR(cudaMemcpy(&best_matches, pBest, sizeof(int), cudaMemcpyDeviceToHost));
best_matches = best_matches >> 16;
//printf("\n%d %d", img, best_matches);
}
// END FINE MAPPING
// BEGIN FINAL MAPPING
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
clearhash_kernel<<<iblocks, ithreads>>>(pImagesBase, cf.ImageClusterCounts(img), cf.ImageClusterCounts(img) / (pThis->m_Prop.maxThreadsPerBlock + pThis->m_Prop.multiProcessorCount) + 1, pCells, cellsize, ncellsx, ncellsy);
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
maphash_kernel<<<iblocks, ithreads>>>(pImagesBase, cf.ImageClusterCounts(img), cf.ImageClusterCounts(img) / (pThis->m_Prop.maxThreadsPerBlock + pThis->m_Prop.multiProcessorCount) + 1, pCells, pCellContents, cellsize, C.MaxCellContent, ncellsx, ncellsy);
_CUDA_THROW_ERR_
}
{
makefinaldeltas_fromshift_kernel<<<dim3(1,1,1), dim3(1,1,1)>>>(pDeltas, (best_matches > C.MinClusterMapsValid) ? C.ClusterMapFineTolerance : 0, pDeltas + deltasX + deltasY, pBest, refinedeltas, pStagePosX, pStagePosY, pDeltaStagePosX, pDeltaStagePosY, img + 1, cf.Images());
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(cf.ImageClusterCounts(img + 1) / pThis->m_Prop.maxThreadsPerBlock + 1, 1, 1);
id = pImageNext - pImagesBase1;
setXYZs_kernel<<<iblocks, ithreads>>>(pClusterXs + id, pClusterYs + id, pClusterZs + id, cf.ImageClusterCounts(img + 1), img + 1, pStagePosX, pStagePosY, ((pFixedZs[img + 1] /*- pFixedZs[0]*/ + dz) * (1 << Z_SCALE_SHIFT)) );
}
{
dim3 ithreads = dim3(1, 1, 1);
dim3 iblocks = dim3(1, 1, 1);
resetcounts_kernel<<<iblocks, ithreads>>>(pMapCounts, 1, pBest);
_CUDA_THROW_ERR_
}
#if 1
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
SySal::GPU::InterruptibleKernels::IntKernel<finalmap_kernel_args, finalmap_kernel_status, finalmap_Ikernel> Launcher;
Launcher.Arguments.pC = pImageNext;
Launcher.Arguments.nc = cf.ImageClusterCounts(img + 1);
Launcher.Arguments.clusterblocksize = cf.ImageClusterCounts(img + 1) / (pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount) + 1;
Launcher.Arguments.pCell = pCells;
Launcher.Arguments.pCellContent = pCellContents;
Launcher.Arguments.maxcellcontent = C.MaxCellContent;
Launcher.Arguments.cellsize = cellsize;
Launcher.Arguments.tol = C.ClusterMapFineAcceptance;
Launcher.Arguments.w = width;
Launcher.Arguments.h = height;
Launcher.Arguments.demag = demagDZ1M;
Launcher.Arguments.nx = ncellsx;
Launcher.Arguments.ny = ncellsy;
Launcher.Arguments.pMapCounts = pMapCounts;
Launcher.Arguments.pClusterChain = pClusterChains;
Launcher.Arguments.img = img + 1;
Launcher.Arguments.pBase = pImagesBase1;
Launcher.Arguments.pDX = pDeltaStagePosX;
Launcher.Arguments.pDY = pDeltaStagePosY;
int launches = Launcher.Launch(iblocks, ithreads, 5);
_CUDA_THROW_ERR_
}
#endif
{
int step;
for (step = 1; step <= (pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount / 2); step <<= 1)
{
int thr = (pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount / 2) / step;
dim3 ithreads = dim3(max(1, thr / pThis->m_Prop.multiProcessorCount), 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
sumcounts_kernel<<<iblocks, ithreads>>>(pMapCounts, 1, pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount, step);
}
_CUDA_THROW_ERR_
}
// END FINAL MAPPING
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
clearhash_kernel<<<iblocks, ithreads>>>(pImagesBase, cf.ImageClusterCounts(img), cf.ImageClusterCounts(img) / (pThis->m_Prop.maxThreadsPerBlock + pThis->m_Prop.multiProcessorCount) + 1, pCells, cellsize, ncellsx, ncellsy);
_CUDA_THROW_ERR_
}
pImagesBase = pImageNext;
}
if (CurrentView > 0)
{
makechainwindow_kernel<<<dim3(1,1,1), dim3(1,1,1)>>>(pChMapWnd, pStagePosX, pStagePosY, cf.Images(), width, height, cf.PixMicronX() / cf.Scale() * (1 << XY_SCALE_SHIFT), cf.PixMicronY() / cf.Scale() * (1 << XY_SCALE_SHIFT), ncellsx * ncellsy, C.ChainMapXYCoarseTolerance, pLastView, pCells, pCellContents, C.MaxCellContent, cf.StageX(0) * (1 << XY_SCALE_SHIFT), cf.StageY(0) * (1 << XY_SCALE_SHIFT), pLastView);
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
SySal::GPU::InterruptibleKernels::IntKernel<makechain_kernel_args, makechain_kernel_status, makechain_Ikernel> Launcher;
Launcher.Arguments.pC = pImagesBase1;
Launcher.Arguments.pClusterChains = pClusterChains;
Launcher.Arguments.pClusterXs = pClusterXs;
Launcher.Arguments.pClusterYs = pClusterYs;
Launcher.Arguments.pClusterZs = pClusterZs;
Launcher.Arguments.pChain = pChains;
Launcher.Arguments.pChainCounts = pChainCounts;
Launcher.Arguments.totalclusters = totalclusters;
Launcher.Arguments.clusterblocksize = totalclusters / (pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount) + 1;
Launcher.Arguments.minvol = C.MinVolumePerChain;
Launcher.Arguments.minclusters = C.MinClustersPerChain;
Launcher.Arguments.xtomicron = cf.PixMicronX() / cf.Scale();
Launcher.Arguments.ytomicron = cf.PixMicronY() / cf.Scale();
Launcher.Arguments.width = width;
Launcher.Arguments.height = height;
Launcher.Arguments.stagex = cf.StageX(0) * (1 << XY_SCALE_SHIFT);
Launcher.Arguments.stagey = cf.StageY(0) * (1 << XY_SCALE_SHIFT);
Launcher.Arguments.xslant = IC.XSlant * cf.PixMicronX() / cf.Scale() * (1 << (Z_SCALE_SHIFT + SLOPE_SHIFT));
Launcher.Arguments.yslant = IC.YSlant * cf.PixMicronY() / cf.Scale() * (1 << (Z_SCALE_SHIFT + SLOPE_SHIFT));
Launcher.Arguments.viewtag = CurrentView;
int launches = Launcher.Launch(iblocks, ithreads, 5);
//printf("\nLaunches %d", launches);
_CUDA_THROW_ERR_
}
if (CurrentView > 0)
{
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
maphashchain_kernel<<<iblocks, ithreads>>>(pLastView, pChMapWnd, pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount);
_CUDA_THROW_ERR_
}
// BEGIN COARSE CHAIN MAPPING
deltasX = (2 * C.ChainMapMaxXOffset / C.ChainMapXYCoarseTolerance + 1);
deltasY = (2 * C.ChainMapMaxYOffset / C.ChainMapXYCoarseTolerance + 1);
deltasZ = (2 * C.ChainMapMaxZOffset / C.ChainMapZCoarseTolerance + 1);
deltasXYZ = deltasX * deltasY * deltasZ;
refinedeltasXY = (2 * C.ChainMapXYCoarseTolerance / C.ChainMapXYFineTolerance) + 1;
refinedeltasZ = (2 * C.ChainMapZCoarseTolerance / C.ChainMapZFineTolerance) + 1;
refinedeltasXYZ = refinedeltasXY * refinedeltasXY * refinedeltasZ;
WISE_ALLOC(pDeltas, sizeof(int) * ((deltasX + deltasY + 3 * deltasZ + 2 * refinedeltasXY + refinedeltasZ)));
WISE_ALLOC(pMapCounts, sizeof(int) * max(deltasXYZ, refinedeltasXYZ) * (pThis->m_Prop.multiProcessorCount * pThis->m_Prop.maxThreadsPerBlock + 1) );
THROW_ON_CUDA_ERR(cudaMemset(pMapCounts, 0, _MEM_(pMapCounts)));
pBest = (int *)(void *)(&pThisView->Reserved);
{
makechaindeltas_kernel<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(pDeltas, C.ChainMapXYCoarseTolerance, C.ChainMapZCoarseTolerance, deltasX, deltasY, deltasZ, pLastView, cf.StageX(0) * (1 << XY_SCALE_SHIFT), cf.StageY(0) * (1 << XY_SCALE_SHIFT), IC.XSlant, IC.YSlant, 0, 0/*dxdz, dydz*/);
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(deltasXYZ / ithreads.x + 1, 1, 1);
resetcounts_kernel<<<iblocks, ithreads>>>(pMapCounts, deltasXYZ, pBest);
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
//printf("\nDeltas %d %d %d %d", deltasX, deltasY, deltasZ, deltasXYZ);
SySal::GPU::InterruptibleKernels::IntKernel<trymapchain_kernel_args, trymapchain_kernel_status, trymapchaindxydz_Ikernel> Launcher;
Launcher.Arguments.pChains = pChains;
Launcher.Arguments.pChainCounts = pChainCounts;
Launcher.Arguments.nc = totalclusters;
Launcher.Arguments.chainblocksize = totalclusters / (pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount) + 1;
Launcher.Arguments.pChMapWnd = pChMapWnd;
Launcher.Arguments.pDeltas = pDeltas;
Launcher.Arguments.deltasX = deltasX;
Launcher.Arguments.deltasY = deltasY;
Launcher.Arguments.deltasZ = deltasZ;
Launcher.Arguments.xytol = C.ChainMapXYCoarseTolerance;
Launcher.Arguments.ztol = C.ChainMapZCoarseTolerance;
Launcher.Arguments.minchainsize = C.ChainMapMinVolume;
Launcher.Arguments.pMapCounts = pMapCounts;
Launcher.Arguments.sampledivider = C.ChainMapSampleDivider;
Launcher.Arguments.pBest = pBest;
int launches = Launcher.Launch(iblocks, ithreads, 2);
//printf("\nTryMapChain Launches: %d", launches);
_CUDA_THROW_ERR_
}
{
int mapc, best;
THROW_ON_CUDA_ERR(cudaMemcpy(&best_matches, (int *)(void *)(&pThisView->Reserved), sizeof(int), cudaMemcpyDeviceToHost));
//printf("\nDEBUG %08X", best_matches);
mapc = best_matches >> 16;
if (mapc < C.MinChainMapsValid) best = deltasX / 2 + (deltasY / 2) * deltasX + (deltasZ / 2) * deltasX * deltasY;
else best = best_matches & 0xffff;
best_matches = mapc;
}
// END COARSE CHAIN MAPPING
// BEGIN FINE CHAIN MAPPING
if (best_matches < C.MinChainMapsValid)
{
printf("\nBad chain mapping (%d/%d), switching to default", best_matches, C.MinChainMapsValid);
makechaindeltas_kernel<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(pDeltas, 0, 0, deltasX, deltasY, deltasZ, pLastView, cf.StageX(0) * (1 << XY_SCALE_SHIFT), cf.StageY(0) * (1 << XY_SCALE_SHIFT), IC.XSlant, IC.YSlant, dxdz, dydz);
_CUDA_THROW_ERR_
}
{
makechaindeltas_fromshift_kernel<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(pDeltas + deltasX + deltasY + 3 * deltasZ, (best_matches < C.MinChainMapsValid) ? 0 : C.ChainMapXYFineTolerance, 0, refinedeltasXY, refinedeltasXY, refinedeltasZ, pDeltas, pBest, deltasX, deltasY, deltasZ);
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(refinedeltasXYZ / ithreads.x + 1, 1, 1);
resetcounts_kernel<<<iblocks, ithreads>>>(pMapCounts, refinedeltasXYZ, (int *)(void *)(&pThisView->Reserved));
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
SySal::GPU::InterruptibleKernels::IntKernel<trymapchain_kernel_args, trymapchain_kernel_status, trymapchain_Ikernel> Launcher;
Launcher.Arguments.pChains = pChains;
Launcher.Arguments.pChainCounts = pChainCounts;
Launcher.Arguments.nc = totalclusters;
Launcher.Arguments.chainblocksize = totalclusters / (pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount) + 1;
Launcher.Arguments.pChMapWnd = pChMapWnd;
Launcher.Arguments.pDeltas = pDeltas + deltasX + deltasY + 3 * deltasZ;
Launcher.Arguments.deltasX = refinedeltasXY;
Launcher.Arguments.deltasY = refinedeltasXY;
Launcher.Arguments.deltasZ = refinedeltasZ;
Launcher.Arguments.xytol = C.ChainMapXYFineAcceptance;
Launcher.Arguments.ztol = C.ChainMapZFineAcceptance;
Launcher.Arguments.minchainsize = C.ChainMapMinVolume;
Launcher.Arguments.pMapCounts = pMapCounts;
Launcher.Arguments.sampledivider = C.ChainMapSampleDivider;
Launcher.Arguments.pBest = pBest;
int launches = Launcher.Launch(iblocks, ithreads, 2);
//printf("\nRefineMapChain Launches: %d", launches);
_CUDA_THROW_ERR_
}
{
int mapc, best;
THROW_ON_CUDA_ERR(cudaMemcpy(&best_matches, pBest, sizeof(int), cudaMemcpyDeviceToHost))
printf("\nDEBUG %08X", best_matches);
mapc = best_matches >> 16;
best = best_matches & 0xffff;
best_matches = mapc;
}
// END FINE CHAIN MAPPING
// BEGIN FINAL CHAIN MAPPING
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
finalmapchain_kernel<<<iblocks, ithreads>>>(pChains, pChainCounts, totalclusters, totalclusters / (pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount) + 1, pChMapWnd, pDeltas + deltasX + deltasY + 3 * deltasZ, refinedeltasXY, refinedeltasZ, C.ChainMapXYFineAcceptance, C.ChainMapZFineAcceptance, pBest);
_CUDA_THROW_ERR_
}
// END FINAL CHAIN MAPPING
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
clearhashchain_kernel<<<iblocks, ithreads>>>(pLastView, pChMapWnd, pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount);
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
negshift_viewchains_kernel<<<iblocks, ithreads>>>(pLastView, pDeltas + deltasX + deltasY + 3 * deltasZ, refinedeltasXY, refinedeltasZ, pBest);
_CUDA_THROW_ERR_
}
}
{
setchainbase_kernel<<<dim3(1,1,1), dim3(1,1,1)>>>(pCompactChainCounts, pChainCounts, pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount, pChainMapHeader, pThisView, cf.StageX(0) * (1 << XY_SCALE_SHIFT), cf.StageY(0) * (1 << XY_SCALE_SHIFT), cf.StageZ(0) * (1 << Z_SCALE_SHIFT), (((int)width) << XY_SCALE_SHIFT) / cf.Scale() * fabs(cf.PixMicronX()), (((int)height) << XY_SCALE_SHIFT) / cf.Scale() * fabs(cf.PixMicronY()));
_CUDA_THROW_ERR_
}
{
dim3 ithreads = dim3(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks = dim3(pThis->m_Prop.multiProcessorCount, 1, 1);
compactchains_kernel<<<iblocks, ithreads>>>(pCompactChains, pCompactChainCounts, pChains, pChainCounts, totalclusters / (pThis->m_Prop.maxThreadsPerBlock * pThis->m_Prop.multiProcessorCount) + 1);
_CUDA_THROW_ERR_
}
{
//if (CurrentView == 0)
setchainviewheader_kernel<<<dim3(1,1,1), dim3(1,1,1)>>>(pChainMapHeader, pThisView, cf.StageX(0) * (1 << XY_SCALE_SHIFT), cf.StageY(0) * (1 << XY_SCALE_SHIFT), cf.StageZ(0) * (1 << Z_SCALE_SHIFT), 0, 0, 0, 0);
//else
//setchainviewheader_kernel<<<dim3(1,1,1), dim3(1,1,1)>>>(pChainMapHeader, pThisView, cf.StageX(0) * (1 << XY_SCALE_SHIFT), cf.StageY(0) * (1 << XY_SCALE_SHIFT), cf.StageZ(0) * (1 << Z_SCALE_SHIFT), pDeltas + deltasX + deltasY + 3 * deltasZ, refinedeltasXY, refinedeltasZ, pBest);
_CUDA_THROW_ERR_
}
if (CurrentView > 0)
pThis->SendViewsToTracker(CurrentView, (((int)width) << XY_SCALE_SHIFT) / cf.Scale() * fabs(cf.PixMicronX()), (((int)height) << XY_SCALE_SHIFT) / cf.Scale() * fabs(cf.PixMicronY()), pLastView, pThisView);
return 0;
}
};
}; |
d0227e6bf1ca4979b2991c7edf9ff7aaf30b41bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "MM2chainKernel.h"
__global__ void multiply(const float* A, float* partial_mult ){
int block = blockIdx.x;
int thread = threadIdx.x;
/*
if(thread == 1){
for( int i = 0; i<240; i++){
printf("%f\n", *(A+i));
}
}*/
/*
if(block==32 && thread == 1) {
for( int i = 0; i<240; i++){
printf("%f\n", *(A+i+31*240+((BLOCK_SIZE)*(blockIdx.x))));
}
}*/
__shared__ float shared_A[B*C*4];
const float* AStart = A + ((B)*(blockIdx.x)*C*4);
//if(threadIdx.x == 1) printf("A %p Astart %p \n ", A, AStart);
for( int i = thread; i<C*4*B; i += B){
shared_A[i] = AStart[i];
//if(block==32) printf("%f \n", shared_A[i+240*threadIdx.x]);
}
__syncthreads();
/*
if(block==32 && thread == 1) {
for( int i = 0; i<240; i++){
printf("%f %f\n", shared_A[i + 240*31], *(A+i+31*240+(BLOCK_SIZE)*(blockIdx.x)));
}
}*/
float current[] = { 1.0, 0.0, 0.0, 1.0} ;
float temp[4];
for(int i = 0; i<C*4; i += 4){
float mat00 = shared_A[i + C*4*threadIdx.x];
float mat01 = shared_A[i+1 + C*4*threadIdx.x];
float mat10 = shared_A[i+2 + C*4*threadIdx.x];
float mat11 = shared_A[i+3 + C*4*threadIdx.x];
//if(threadIdx.x == 2) printf("next matrix %f %f %f %f \n", mat00, mat01, mat10, mat11);
//if(threadIdx.x == 2) printf("should be %f %f %f %f \n", *(A+i+threadIdx.x*240), *(A+i+1+threadIdx.x*240),*(A+i+2+threadIdx.x*240),*(A+i+3+threadIdx.x*240));
//if(threadIdx.x == 2) printf("this one \n %f %f \n %f %f \n", current[0], current[1], current[2], current[3]);
//if(threadIdx.x == 2) printf("times this \n %f %f \n %f %f \n", mat00, mat01, mat10, mat11);
temp[0] = current[0] * mat00 + current[1]*mat10;
temp[1] = current[0] * mat01 + current[1]*mat11;
temp[2] = current[2] * mat00 + current[3]*mat10;
temp[3] = current[2] * mat01 + current[3]*mat11;
current[0] = temp[0];
current[1] = temp[1];
current[2] = temp[2];
current[3] = temp[3];
//if(threadIdx.x == 2) printf("equals \n %f %f \n %f %f \n", current[0], current[1], current[2], current[3]);
}
//if(block==32)printf("writing shared results \n %d %d \n %d %d \n", current[0], current[1], current[2], current[3]);
__shared__ float shared_results[B*4];
shared_results[threadIdx.x*4] = current[0];
shared_results[threadIdx.x*4+1] = current[1];
shared_results[threadIdx.x*4+2] = current[2];
shared_results[threadIdx.x*4+3] = current[3];
__syncthreads();
if(threadIdx.x == 0){
float current[] = { 1.0, 0.0, 0.0, 1.0} ;
for(int i = 0; i<B; i += 4){
temp[0] = current[0] * shared_results[threadIdx.x*4] + current[1] * shared_results[threadIdx.x*4 + 2];
temp[1] = current[0] * shared_results[threadIdx.x*4+1] + current[1] * shared_results[threadIdx.x*4 + 3];
temp[2] = current[2] * shared_results[threadIdx.x*4] + current[3] * shared_results[threadIdx.x*4 + 2];
temp[3] = current[2] * shared_results[threadIdx.x*4+1] + current[3] * shared_results[threadIdx.x*4 + 3];
current[0] = temp[0];
current[1] = temp[1];
current[2] = temp[2];
current[3] = temp[3];
//printf(" shared results \n %d %d \n %d %d \n", shared_results[threadIdx.x*4], shared_results[threadIdx.x*4+1], shared_results[threadIdx.x*4+2], shared_results[threadIdx.x*4+3]);
}
//if(block==32)printf("block partial \n %f %f \n %f %f\n", current[0], current[1], current[2], current[3]);
float* partial_mult_start = partial_mult + 4 * block;
partial_mult_start[0] = current[0];
partial_mult_start[1] = current[1];
partial_mult_start[2] = current[2];
partial_mult_start[3] = current[3];
}
}
| d0227e6bf1ca4979b2991c7edf9ff7aaf30b41bf.cu | #include <stdio.h>
#include "MM2chainKernel.h"
__global__ void multiply(const float* A, float* partial_mult ){
int block = blockIdx.x;
int thread = threadIdx.x;
/*
if(thread == 1){
for( int i = 0; i<240; i++){
printf("%f\n", *(A+i));
}
}*/
/*
if(block==32 && thread == 1) {
for( int i = 0; i<240; i++){
printf("%f\n", *(A+i+31*240+((BLOCK_SIZE)*(blockIdx.x))));
}
}*/
__shared__ float shared_A[B*C*4];
const float* AStart = A + ((B)*(blockIdx.x)*C*4);
//if(threadIdx.x == 1) printf("A %p Astart %p \n ", A, AStart);
for( int i = thread; i<C*4*B; i += B){
shared_A[i] = AStart[i];
//if(block==32) printf("%f \n", shared_A[i+240*threadIdx.x]);
}
__syncthreads();
/*
if(block==32 && thread == 1) {
for( int i = 0; i<240; i++){
printf("%f %f\n", shared_A[i + 240*31], *(A+i+31*240+(BLOCK_SIZE)*(blockIdx.x)));
}
}*/
float current[] = { 1.0, 0.0, 0.0, 1.0} ;
float temp[4];
for(int i = 0; i<C*4; i += 4){
float mat00 = shared_A[i + C*4*threadIdx.x];
float mat01 = shared_A[i+1 + C*4*threadIdx.x];
float mat10 = shared_A[i+2 + C*4*threadIdx.x];
float mat11 = shared_A[i+3 + C*4*threadIdx.x];
//if(threadIdx.x == 2) printf("next matrix %f %f %f %f \n", mat00, mat01, mat10, mat11);
//if(threadIdx.x == 2) printf("should be %f %f %f %f \n", *(A+i+threadIdx.x*240), *(A+i+1+threadIdx.x*240),*(A+i+2+threadIdx.x*240),*(A+i+3+threadIdx.x*240));
//if(threadIdx.x == 2) printf("this one \n %f %f \n %f %f \n", current[0], current[1], current[2], current[3]);
//if(threadIdx.x == 2) printf("times this \n %f %f \n %f %f \n", mat00, mat01, mat10, mat11);
temp[0] = current[0] * mat00 + current[1]*mat10;
temp[1] = current[0] * mat01 + current[1]*mat11;
temp[2] = current[2] * mat00 + current[3]*mat10;
temp[3] = current[2] * mat01 + current[3]*mat11;
current[0] = temp[0];
current[1] = temp[1];
current[2] = temp[2];
current[3] = temp[3];
//if(threadIdx.x == 2) printf("equals \n %f %f \n %f %f \n", current[0], current[1], current[2], current[3]);
}
//if(block==32)printf("writing shared results \n %d %d \n %d %d \n", current[0], current[1], current[2], current[3]);
__shared__ float shared_results[B*4];
shared_results[threadIdx.x*4] = current[0];
shared_results[threadIdx.x*4+1] = current[1];
shared_results[threadIdx.x*4+2] = current[2];
shared_results[threadIdx.x*4+3] = current[3];
__syncthreads();
if(threadIdx.x == 0){
float current[] = { 1.0, 0.0, 0.0, 1.0} ;
for(int i = 0; i<B; i += 4){
temp[0] = current[0] * shared_results[threadIdx.x*4] + current[1] * shared_results[threadIdx.x*4 + 2];
temp[1] = current[0] * shared_results[threadIdx.x*4+1] + current[1] * shared_results[threadIdx.x*4 + 3];
temp[2] = current[2] * shared_results[threadIdx.x*4] + current[3] * shared_results[threadIdx.x*4 + 2];
temp[3] = current[2] * shared_results[threadIdx.x*4+1] + current[3] * shared_results[threadIdx.x*4 + 3];
current[0] = temp[0];
current[1] = temp[1];
current[2] = temp[2];
current[3] = temp[3];
//printf(" shared results \n %d %d \n %d %d \n", shared_results[threadIdx.x*4], shared_results[threadIdx.x*4+1], shared_results[threadIdx.x*4+2], shared_results[threadIdx.x*4+3]);
}
//if(block==32)printf("block partial \n %f %f \n %f %f\n", current[0], current[1], current[2], current[3]);
float* partial_mult_start = partial_mult + 4 * block;
partial_mult_start[0] = current[0];
partial_mult_start[1] = current[1];
partial_mult_start[2] = current[2];
partial_mult_start[3] = current[3];
}
}
|
3a9e11c05885a5a516358d4c75702127655484da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <set>
#include <cmath>
#include <cfloat>
#include <csignal>
#include <algorithm>
#include <boost/iterator/counting_iterator.hpp>
#include "BBoxUtil.h"
#include "MathFunctions.h"
#include "SysLog.h"
#include "StdOutLog.h"
//using namespace std;
bool SortBBoxAscend(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2) {
return bbox1.score < bbox2.score;
}
bool SortBBoxDescend(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2) {
return bbox1.score > bbox2.score;
}
template <typename T>
bool SortScorePairAscend(const std::pair<float, T>& pair1, const std::pair<float, T>& pair2) {
return pair1.first < pair2.first;
}
template bool SortScorePairAscend(const std::pair<float, int>& pair1,
const std::pair<float, int>& pair2);
template bool SortScorePairAscend(const std::pair<float, std::pair<int, int>>& pair1,
const std::pair<float, std::pair<int, int>>& pair2);
template <typename T>
bool SortScorePairDescend(const std::pair<float, T>& pair1, const std::pair<float, T>& pair2) {
return pair1.first > pair2.first;
}
template bool SortScorePairDescend(const std::pair<float, int>& pair1,
const std::pair<float, int>& pair2);
template bool SortScorePairDescend(const std::pair<float, std::pair<int, int>>& pair1,
const std::pair<float, std::pair<int, int>>& pair2);
template <typename Dtype>
__device__ Dtype Min(const Dtype x, const Dtype y) {
return x < y ? x : y;
}
/**
* Max MaxDevice
* (Max )
*/
template <typename Dtype>
__device__ Dtype MaxDevice(const Dtype x, const Dtype y) {
return x > y ? x : y;
}
template <typename Dtype>
__device__ void ClipBBoxGPU(const Dtype* bbox, Dtype* clip_bbox) {
for (int i = 0; i < 4; ++i) {
clip_bbox[i] = MaxDevice(Min(bbox[i], Dtype(1.)), Dtype(0.));
}
}
template __device__ void ClipBBoxGPU(const float* bbox, float* clip_bbox);
template <typename Dtype>
void GetGroundTruth(const Dtype* gtData, const int numGt, const int backgroundLabelId,
const bool useDifficultGt, std::map<int, std::vector<NormalizedBBox>>* allGtBboxes) {
allGtBboxes->clear();
for (int i = 0; i < numGt; i++) {
int startIdx = i * 8;
int itemId = gtData[startIdx];
if (itemId == -1) {
continue;
}
int label = gtData[startIdx + 1];
SASSERT(backgroundLabelId != label,
"Found background label in the dataset\nbackground label id: %d\nlabel:%d",
backgroundLabelId, label);
bool difficult = static_cast<bool>(gtData[startIdx + 7]);
if (!useDifficultGt && difficult) {
// Skip reading difficult ground truth
continue;
}
NormalizedBBox bbox;
bbox.label = label;
bbox.xmin = gtData[startIdx + 3];
bbox.ymin = gtData[startIdx + 4];
bbox.xmax = gtData[startIdx + 5];
bbox.ymax = gtData[startIdx + 6];
bbox.difficult = difficult;
float bboxSize = BBoxSize(bbox);
bbox.size = bboxSize;
(*allGtBboxes)[itemId].push_back(bbox);
}
}
template void GetGroundTruth(const float* gtData, const int numGt,
const int backgroundLabelId, const bool useDifficultGt,
std::map<int, std::vector<NormalizedBBox>>* allGtBboxes);
template <typename Dtype>
void GetGroundTruth(const Dtype* gtData, const int numGt, const int backgroundLabelId,
const bool useDifficultGt, std::map<int, LabelBBox>* allGtBBoxes) {
allGtBBoxes->clear();
for (int i = 0; i < numGt; i++) {
int startIdx = i * 8;
int itemId = gtData[startIdx];
if (itemId == -1) {
continue;
}
NormalizedBBox bbox;
int label = gtData[startIdx + 1];
if (backgroundLabelId == label) {
SASSERT(backgroundLabelId != label, "Found background label in the dataset.");
}
bool difficult = static_cast<bool>(gtData[startIdx + 7]);
if (!useDifficultGt && difficult) {
// Skip reading difficult ground truth.
continue;
}
bbox.xmin = gtData[startIdx + 3];
bbox.ymin = gtData[startIdx + 4];
bbox.xmax = gtData[startIdx + 5];
bbox.ymax = gtData[startIdx + 6];
bbox.difficult = difficult;
float bboxSize = BBoxSize(bbox);
bbox.size = bboxSize;
(*allGtBBoxes)[itemId][label].push_back(bbox);
}
}
template void GetGroundTruth(const float* gtData, const int numGt,
const int backgroundLabelId, const bool useDifficultGt,
std::map<int, LabelBBox>* allGtBBoxes);
template <typename Dtype>
void GetPriorBBoxes(const Dtype* priorData, const int numPriors,
std::vector<NormalizedBBox>* priorBBoxes,
std::vector<std::vector<float>>* priorVariances) {
priorBBoxes->clear();
priorVariances->clear();
for (int i = 0; i < numPriors; i++) {
int startIdx = i * 4;
NormalizedBBox bbox;
bbox.xmin = priorData[startIdx];
bbox.ymin = priorData[startIdx + 1];
bbox.xmax = priorData[startIdx + 2];
bbox.ymax = priorData[startIdx + 3];
float bboxSize = BBoxSize(bbox);
bbox.size = bboxSize;
priorBBoxes->push_back(bbox);
}
for (int i = 0; i < numPriors; i++) {
int startIdx = (numPriors + i) * 4;
std::vector<float> var;
for (int j = 0; j < 4; j++) {
var.push_back(priorData[startIdx + j]);
}
priorVariances->push_back(var);
}
}
template void GetPriorBBoxes(const float* priorData, const int numPriors,
std::vector<NormalizedBBox>* priorBBoxes, std::vector<std::vector<float>>* priorVariances);
template <typename Dtype>
void GetLocPredictions(const Dtype* locData, const int num, const int numPredsPerClass,
const int numLocClasses, const bool shareLocation, std::vector<LabelBBox>* locPreds) {
locPreds->clear();
if (shareLocation) {
SASSERT0(numLocClasses == 1);
}
locPreds->resize(num);
for (int i = 0; i < num; i++) {
LabelBBox& labelBBox = (*locPreds)[i];
for (int p = 0; p < numPredsPerClass; p++) {
int startIdx = p * numLocClasses * 4;
for (int c = 0; c < numLocClasses; c++) {
int label = shareLocation ? -1 : c;
if (labelBBox.find(label) == labelBBox.end()) {
labelBBox[label].resize(numPredsPerClass);
}
labelBBox[label][p].xmin = locData[startIdx + c * 4];
labelBBox[label][p].ymin = locData[startIdx + c * 4 + 1];
labelBBox[label][p].xmax = locData[startIdx + c * 4 + 2];
labelBBox[label][p].ymax = locData[startIdx + c * 4 + 3];
}
}
locData += numPredsPerClass * numLocClasses * 4;
}
}
template void GetLocPredictions(const float* locData, const int num,
const int numPredsPerClass, const int numLocClasses, const bool shareLocation,
std::vector<LabelBBox>* locPreds);
/*
* allLocPreds: batch prediction
* allGtBBoxes: batch gt bboxes
* prioBBoxes: scale prior boxes
* priorVariances: scale prior variances
*
* allMatchOverlaps
*/
void FindMatches(const std::vector<LabelBBox>& allLocPreds,
const std::map<int, std::vector<NormalizedBBox>>& allGtBBoxes,
const std::vector<NormalizedBBox>& priorBBoxes,
const std::vector<std::vector<float>>& priorVariances,
const int numClasses, const bool shareLocation, const MatchType matchType,
const float overlapThreshold, const bool usePriorForMatching,
const int backgroundLabelId, const CodeType codeType,
const bool encodeVarianceInTarget, const bool ignoreCrossBoundaryBBox,
std::vector<std::map<int, std::vector<float>>>* allMatchOverlaps,
std::vector<std::map<int, std::vector<int>>>* allMatchIndices) {
SASSERT(numClasses > 0, "numClasses should not be less than 1.");
const int locClasses = shareLocation ? 1 : numClasses;
// Find the matches.
// num batch size
int num = allLocPreds.size();
// batch
for (int i = 0; i < num; i++) {
// (label : pred bbox overlap gt index)
std::map<int, std::vector<int>> matchIndices;
std::map<int, std::vector<float>> matchOverlpas;
// Check if there is ground truth for current image.
if (allGtBBoxes.find(i) == allGtBBoxes.end()) {
// There is no gt for current image. All predictions are negative.
allMatchIndices->push_back(matchIndices);
allMatchOverlaps->push_back(matchOverlpas);
continue;
}
// Find match between predictions and ground truth.
// gtBBoxes: batch gt boxes
const std::vector<NormalizedBBox>& gtBBoxes = allGtBBoxes.find(i)->second;
// . usePriorForMatching = true.
if (!usePriorForMatching) {
for (int c = 0; c < locClasses; c++) {
int label = shareLocation ? -1 : c;
if (!shareLocation && label == backgroundLabelId) {
// Ignore background loc predictions.
continue;
}
// Decode the prediction into bbox first.
std::vector<NormalizedBBox> locBBoxes;
bool clipBBox = false;
DecodeBBoxes(priorBBoxes, priorVariances, codeType, encodeVarianceInTarget,
clipBBox, allLocPreds[i].find(label)->second, &locBBoxes);
MatchBBox(gtBBoxes, locBBoxes, label, matchType, overlapThreshold,
ignoreCrossBoundaryBBox, &matchIndices[label], &matchOverlpas[label]);
}
} else {
// Use prior bboxes to match against all ground truth.
std::vector<int> tempMatchIndices;
std::vector<float> tempMatchOverlaps;
const int label = -1;
// batch gt prior box match
// for break point ...
MatchBBox(gtBBoxes, priorBBoxes, label, matchType, overlapThreshold,
ignoreCrossBoundaryBBox, &tempMatchIndices, &tempMatchOverlaps);
// label == -1 , class
if (shareLocation) {
matchIndices[label] = tempMatchIndices;
matchOverlpas[label] = tempMatchOverlaps;
} else {
// Get ground truth label for each ground truth bbox.
std::vector<int> gtLabels;
for (int g = 0; g < gtBBoxes.size(); g++) {
gtLabels.push_back(gtBBoxes[g].label);
}
// Distribute the matching results to different locClass.
for (int c = 0; c < locClasses; c++) {
if (c == backgroundLabelId) {
// Ignore background loc predictions.
continue;
}
matchIndices[c].resize(tempMatchIndices.size(), -1);
matchOverlpas[c] = tempMatchOverlaps;
for (int m = 0; m < tempMatchIndices.size(); m++) {
if (tempMatchIndices[m] > -1) {
const int gtIdx = tempMatchIndices[m];
SASSERT0(gtIdx < gtLabels.size());
if (c == gtLabels[gtIdx]) {
matchIndices[c][m] = gtIdx;
}
}
}
}
}
}
allMatchIndices->push_back(matchIndices);
allMatchOverlaps->push_back(matchOverlpas);
}
}
void DecodeBBoxes(const std::vector<NormalizedBBox>& priorBBoxes,
const std::vector<std::vector<float>>& priorVariances,
const CodeType codeType, const bool varianceEncodedInTarget,
const bool clipBBox, const std::vector<NormalizedBBox>& bboxes,
std::vector<NormalizedBBox>* decodeBBoxes) {
SASSERT0(priorBBoxes.size() == priorVariances.size());
SASSERT0(priorBBoxes.size() == bboxes.size());
int numBBoxes = priorBBoxes.size();
if (numBBoxes >= 1) {
SASSERT0(priorVariances[0].size() == 4);
}
decodeBBoxes->clear();
for (int i = 0; i < numBBoxes; i++) {
NormalizedBBox decodeBBox;
DecodeBBox(priorBBoxes[i], priorVariances[i], codeType, varianceEncodedInTarget,
clipBBox, bboxes[i], &decodeBBox);
decodeBBoxes->push_back(decodeBBox);
}
}
void DecodeBBox(const NormalizedBBox& priorBBox, const std::vector<float>& priorVariances,
const CodeType codeType, const bool varianceEncodedInTarget,
const bool clipBBox, const NormalizedBBox& bbox, NormalizedBBox* decodeBBox) {
if (codeType == CodeType::CORNER) {
if (varianceEncodedInTarget) {
// variance is encoded intarget, we simply need to add the offset predictions.
decodeBBox->xmin = priorBBox.xmin + bbox.xmin;
decodeBBox->ymin = priorBBox.ymin + bbox.ymin;
decodeBBox->xmax = priorBBox.xmax + bbox.xmax;
decodeBBox->ymax = priorBBox.ymax + bbox.ymax;
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
decodeBBox->xmin = priorBBox.xmin + priorVariances[0] * bbox.xmin;
decodeBBox->ymin = priorBBox.ymin + priorVariances[1] * bbox.ymin;
decodeBBox->xmax = priorBBox.xmax + priorVariances[2] * bbox.xmax;
decodeBBox->ymax = priorBBox.ymax + priorVariances[3] * bbox.ymax;
}
} else if (codeType == CodeType::CENTER_SIZE) {
float priorWidth = priorBBox.xmax - priorBBox.xmin;
SASSERT0(priorWidth > 0);
float priorHeight = priorBBox.ymax - priorBBox.ymin;
SASSERT0(priorHeight > 0);
float priorCenterX = (priorBBox.xmin + priorBBox.xmax) / 2.f;
float priorCenterY = (priorBBox.ymin + priorBBox.ymax) / 2.f;
float decodeBBoxCenterX;
float decodeBBoxCenterY;
float decodeBBoxWidth;
float decodeBBoxHeight;
if (varianceEncodedInTarget) {
// variance is encoded in target, we simply need to restore the offset
// predictions.
decodeBBoxCenterX = bbox.xmin * priorWidth + priorCenterX;
decodeBBoxCenterY = bbox.ymin * priorHeight + priorCenterY;
decodeBBoxWidth = ::exp(bbox.xmax) * priorWidth;
decodeBBoxHeight = ::exp(bbox.ymax) * priorHeight;
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
decodeBBoxCenterX = priorVariances[0] * bbox.xmin * priorWidth + priorCenterX;
decodeBBoxCenterY = priorVariances[1] * bbox.ymin * priorHeight + priorCenterY;
decodeBBoxWidth = ::exp(priorVariances[2] * bbox.xmax) * priorWidth;
decodeBBoxHeight = ::exp(priorVariances[3] * bbox.ymax) * priorHeight;
}
decodeBBox->xmin = decodeBBoxCenterX - decodeBBoxWidth / 2.f;
decodeBBox->ymin = decodeBBoxCenterY - decodeBBoxHeight / 2.f;
decodeBBox->xmax = decodeBBoxCenterX + decodeBBoxWidth / 2.f;
decodeBBox->ymax = decodeBBoxCenterY + decodeBBoxHeight / 2.f;
} else if (codeType == CodeType::CORNER_SIZE) {
float priorWidth = priorBBox.xmax - priorBBox.xmin;
SASSERT0(priorWidth > 0);
float priorHeight = priorBBox.ymax - priorBBox.ymin;
SASSERT0(priorHeight > 0);
if (varianceEncodedInTarget) {
// variance is encoded in target, we simply need to add the offset predictions.
decodeBBox->xmin = priorBBox.xmin + bbox.xmin * priorWidth;
decodeBBox->ymin = priorBBox.ymin + bbox.ymin * priorHeight;
decodeBBox->xmax = priorBBox.xmax + bbox.xmax * priorWidth;
decodeBBox->ymax = priorBBox.ymax + bbox.ymax * priorHeight;
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
decodeBBox->xmin = priorBBox.xmin + priorVariances[0] * bbox.xmin * priorWidth;
decodeBBox->ymin = priorBBox.ymin + priorVariances[1] * bbox.ymin * priorHeight;
decodeBBox->xmax = priorBBox.xmax + priorVariances[2] * bbox.xmax * priorWidth;
decodeBBox->ymax = priorBBox.ymax + priorVariances[3] * bbox.ymax * priorHeight;
}
} else {
SASSERT(false, "Unknown LocLossType: %s", codeType);
}
float bboxSize = BBoxSize(*decodeBBox);
decodeBBox->size = bboxSize;
if (clipBBox) {
ClipBBox(*decodeBBox, decodeBBox);
}
}
// gtBBoxes: gt boxes
// predBBoxes: prior boxes
// label
// matchIndices: predBBox match gt box index
// matchOverlaps: predBBox match gt box overlap,
// eps
//
// Match prediction (prior) boxes against ground-truth boxes for one image.
//
// gtBBoxes:      ground-truth boxes.
// predBBoxes:    prior (prediction) boxes.
// label:         class label to restrict matching to; -1 matches against all
//                ground truths regardless of label.
// matchType:     BIPARTITE pairs each gt with its single best prediction;
//                PER_PREDICTION additionally matches every remaining
//                prediction whose best overlap reaches overlapThreshold.
// ignoreCrossBoundaryBBox: predictions crossing the [0, 1] image boundary are
//                marked -2 and excluded from matching.
// matchIndices:  output; per prediction, matched gt index (-1 = unmatched,
//                -2 = ignored).
// matchOverlaps: output; per prediction, best overlap seen with any gt.
void MatchBBox(const std::vector<NormalizedBBox>& gtBBoxes,
        const std::vector<NormalizedBBox>& predBBoxes, const int label,
        const MatchType matchType, const float overlapThreshold,
        const bool ignoreCrossBoundaryBBox, std::vector<int>* matchIndices,
        std::vector<float>* matchOverlaps) {
    int numPred = predBBoxes.size();
    matchIndices->clear();
    matchIndices->resize(numPred, -1);
    matchOverlaps->clear();
    matchOverlaps->resize(numPred, 0.);
    int numGt = 0;
    std::vector<int> gtIndices;
    if (label == -1) {
        // label -1 means comparing against all ground truth.
        numGt = gtBBoxes.size();
        for (int i = 0; i < numGt; i++) {
            gtIndices.push_back(i);
        }
    } else {
        // Count number of ground truth boxes which has the desired label.
        for (int i = 0; i < gtBBoxes.size(); i++) {
            if (gtBBoxes[i].label == label) {
                numGt++;
                gtIndices.push_back(i);
            }
        }
    }
    if (numGt == 0) {
        return;
    }
    // Store the positive overlap between predictions and ground truth:
    // pred box index -> (gt index -> overlap). Pairs whose overlap does not
    // exceed 1e-6 get no map entry at all.
    std::map<int, std::map<int, float>> overlaps;
    for (int i = 0; i < numPred; i++) {
        // Optionally ignore predictions crossing the image boundary.
        if (ignoreCrossBoundaryBBox && IsCrossBoundaryBBox(predBBoxes[i])) {
            (*matchIndices)[i] = -2;
            continue;
        }
        for (int j = 0; j < numGt; j++) {
            // Overlap between the i-th pred box and the j-th eligible gt box.
            float overlap = JaccardOverlap(predBBoxes[i], gtBBoxes[gtIndices[j]]);
            if (overlap > 1e-6) {
                // Track the best overlap seen so far for the i-th pred box.
                (*matchOverlaps)[i] = ::max((*matchOverlaps)[i], overlap);
                overlaps[i][j] = overlap;
            }
        }
    }
    // Bipartite matching: greedily pair each gt with the best still-unmatched
    // prediction, removing gts from the pool as they get matched.
    std::vector<int> gtPool;
    for (int i = 0; i < numGt; i++) {
        gtPool.push_back(i);
    }
    while (gtPool.size() > 0) {
        // Find the most overlapped gt and corresponding predictions.
        int maxIdx = -1;
        int maxGtIdx = -1;
        float maxOverlap = -1;
        for (std::map<int, std::map<int, float>>::iterator it = overlaps.begin();
                it != overlaps.end(); it++) {
            // Index of the prediction this overlap row belongs to.
            int i = it->first;
            if ((*matchIndices)[i] != -1) {
                // The prediction already has matched ground truth or is ignored.
                continue;
            }
            for (int p = 0; p < gtPool.size(); p++) {
                int j = gtPool[p];
                // Pairs with overlap < eps were never inserted into the map.
                if (it->second.find(j) == it->second.end()) {
                    // No overlap between the i-th prediction and j-th ground truth.
                    continue;
                }
                // Find the maximum overlapped pair.
                if (it->second[j] > maxOverlap) {
                    // If the prediction has not been matched to any ground truth,
                    // and the overlap is larger than maximum overlap, update.
                    maxIdx = i;
                    maxGtIdx = j;
                    maxOverlap = it->second[j];
                }
            }
        }
        if (maxIdx == -1) {
            // Cannot find good match.
            break;
        } else {
            // Record the match.
            SASSERT0((*matchIndices)[maxIdx] == -1);
            (*matchIndices)[maxIdx] = gtIndices[maxGtIdx];
            (*matchOverlaps)[maxIdx] = maxOverlap;
            // Erase the ground truth from the pool so that each gt is matched
            // with at most one prediction in this phase.
            gtPool.erase(std::find(gtPool.begin(), gtPool.end(), maxGtIdx));
        }
    }
    switch (matchType) {
    case BIPARTITE:
        // Already done.
        break;
    case PER_PREDICTION:
        // Get most overlapped gt for the rest of the prediction bboxes; accept
        // the match only when the overlap reaches overlapThreshold.
        for (std::map<int, std::map<int, float>>::iterator it = overlaps.begin();
                it != overlaps.end(); it++) {
            int i = it->first;
            if ((*matchIndices)[i] != -1) {
                // The prediction already has matched ground truth or is ignored.
                continue;
            }
            int maxGtIdx = -1;
            float maxOverlap = -1;
            for (int j = 0; j < numGt; j++) {
                if (it->second.find(j) == it->second.end()) {
                    // No overlap between the i-th prediction and j-th ground truth.
                    continue;
                }
                // Find the maximum overlapped pair.
                float overlap = it->second[j];
                if (overlap >= overlapThreshold && overlap > maxOverlap) {
                    // If the prediction has not been matched to any ground truth,
                    // and the overlap is larger than maximum overlap, update.
                    maxGtIdx = j;
                    maxOverlap = overlap;
                }
            }
            if (maxGtIdx != -1) {
                // Found a matched ground truth.
                SASSERT0((*matchIndices)[i] == -1);
                (*matchIndices)[i] = gtIndices[maxGtIdx];
                (*matchOverlaps)[i] = maxOverlap;
            }
        }
        break;
    default:
        SASSERT(false, "Unknown matching type: %s", matchType);
        break;
    }
    return;
}
// Area of a bbox. Invalid boxes (xmax < xmin or ymax < ymin) have size 0.
// A cached positive bbox.size is returned as-is; otherwise the area is
// computed from the corners, using the +1 convention for non-normalized
// (pixel-coordinate) boxes.
float BBoxSize(const NormalizedBBox& bbox, const bool normalized) {
    // Degenerate box: report zero area.
    if (bbox.xmax < bbox.xmin || bbox.ymax < bbox.ymin) {
        return 0;
    }
    // Reuse the precomputed size when one is cached.
    if (bbox.size > 0) {
        return bbox.size;
    }
    const float width = bbox.xmax - bbox.xmin;
    const float height = bbox.ymax - bbox.ymin;
    return normalized ? (width * height) : ((width + 1) * (height + 1));
}
// Area of a bbox stored as a flat array [xmin, ymin, xmax, ymax].
// Returns 0 for invalid boxes; non-normalized boxes use the +1 pixel
// convention.
template <typename Dtype>
Dtype BBoxSize(const Dtype* bbox, const bool normalized) {
    const Dtype xmin = bbox[0];
    const Dtype ymin = bbox[1];
    const Dtype xmax = bbox[2];
    const Dtype ymax = bbox[3];
    // Degenerate box: zero area.
    if (xmax < xmin || ymax < ymin) {
        return Dtype(0.);
    }
    const Dtype width = xmax - xmin;
    const Dtype height = ymax - ymin;
    return normalized ? (width * height) : ((width + 1) * (height + 1));
}
template float BBoxSize(const float* bbox, const bool normalized);
void ClipBBox(const NormalizedBBox& bbox, NormalizedBBox* clipBBox) {
clipBBox->xmin = ::max(::min(bbox.xmin, 1.f), 0.f);
clipBBox->ymin = ::max(::min(bbox.ymin, 1.f), 0.f);
clipBBox->xmax = ::max(::min(bbox.xmax, 1.f), 0.f);
clipBBox->ymax = ::max(::min(bbox.ymax, 1.f), 0.f);
clipBBox->size = BBoxSize(*clipBBox);
clipBBox->difficult = bbox.difficult;
}
// True when any corner of the box lies outside the normalized [0, 1] image
// region.
bool IsCrossBoundaryBBox(const NormalizedBBox& bbox) {
    const bool xOutside = bbox.xmin < 0 || bbox.xmin > 1 ||
            bbox.xmax < 0 || bbox.xmax > 1;
    const bool yOutside = bbox.ymin < 0 || bbox.ymin > 1 ||
            bbox.ymax < 0 || bbox.ymax > 1;
    return xOutside || yOutside;
}
float JaccardOverlap(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2,
const bool normalized) {
NormalizedBBox intersectBBox;
IntersectBBox(bbox1, bbox2, &intersectBBox);
float intersectWidth;
float intersectHeight;
if (normalized) {
intersectWidth = intersectBBox.xmax - intersectBBox.xmin;
intersectHeight = intersectBBox.ymax - intersectBBox.ymin;
} else {
intersectWidth = intersectBBox.xmax - intersectBBox.xmin + 1;
intersectHeight = intersectBBox.ymax - intersectBBox.ymin + 1;
}
if (intersectWidth > 0 && intersectHeight > 0) {
float intersectSize = intersectWidth * intersectHeight;
float bbox1Size = BBoxSize(bbox1);
float bbox2Size = BBoxSize(bbox2);
return intersectSize / (bbox1Size + bbox2Size - intersectSize);
} else {
return 0.;
}
}
template <typename Dtype>
Dtype JaccardOverlap(const Dtype* bbox1, const Dtype* bbox2) {
if (bbox2[0] > bbox1[2] || bbox2[2] < bbox1[0] ||
bbox2[1] > bbox1[3] || bbox2[3] < bbox1[1]) {
return Dtype(0.);
} else {
const Dtype interXMin = ::max(bbox1[0], bbox2[0]);
const Dtype interYMin = ::max(bbox1[1], bbox2[1]);
const Dtype interXMax = ::min(bbox1[2], bbox2[2]);
const Dtype interYMax = ::min(bbox1[3], bbox2[3]);
const Dtype interWidth = interXMax - interXMin;
const Dtype interHeight = interYMax - interYMin;
const Dtype interSize = interWidth * interHeight;
const Dtype bbox1Size = BBoxSize(bbox1);
const Dtype bbox2Size = BBoxSize(bbox2);
return interSize / (bbox1Size + bbox2Size - interSize);
}
}
template float JaccardOverlap(const float* bbox1, const float* bbox2);
// Write the intersection of bbox1 and bbox2 into *intersectBBox.
// Disjoint boxes produce the degenerate box [0, 0, 0, 0].
void IntersectBBox(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2,
        NormalizedBBox* intersectBBox) {
    const bool disjoint = bbox2.xmin > bbox1.xmax || bbox2.xmax < bbox1.xmin ||
            bbox2.ymin > bbox1.ymax || bbox2.ymax < bbox1.ymin;
    if (disjoint) {
        // Return [0, 0, 0, 0] if there is no intersection.
        intersectBBox->xmin = 0;
        intersectBBox->ymin = 0;
        intersectBBox->xmax = 0;
        intersectBBox->ymax = 0;
        return;
    }
    intersectBBox->xmin = ::max(bbox1.xmin, bbox2.xmin);
    intersectBBox->ymin = ::max(bbox1.ymin, bbox2.ymin);
    intersectBBox->xmax = ::min(bbox1.xmax, bbox2.xmax);
    intersectBBox->ymax = ::min(bbox1.ymax, bbox2.ymax);
}
// Whether prior box `matchIdx` may be picked as a mining candidate.
// MAX_NEGATIVE: only unmatched priors whose best overlap stays below
// negOverlap. HARD_EXAMPLE: every prior is a candidate. Other mining types
// select nothing.
inline bool IsEligibleMining(const MiningType miningType, const int matchIdx,
        const float matchOverlap, const float negOverlap) {
    switch (miningType) {
        case MiningType::MAX_NEGATIVE:
            return matchIdx == -1 && matchOverlap < negOverlap;
        case MiningType::HARD_EXAMPLE:
            return true;
        default:
            return false;
    }
}
int CountNumMatches(const std::vector<std::map<int, std::vector<int>>>& allMatchIndices,
const int num) {
int numMatches = 0;
for (int i = 0; i < num; i++) {
const std::map<int, std::vector<int>>& matchIndices = allMatchIndices[i];
for (std::map<int, std::vector<int>>::const_iterator it = matchIndices.begin();
it != matchIndices.end(); it++) {
const std::vector<int>& matchIndex = it->second;
for (int m = 0; m < matchIndex.size(); m++) {
if (matchIndex[m] > -1) {
numMatches++;
}
}
}
}
return numMatches;
}
void GetTopKScoreIndex(const std::vector<float>& scores, const std::vector<int>& indices,
const int topK, std::vector<std::pair<float, int>>* scoreIndexVec) {
SASSERT0(scores.size() == indices.size());
// Generate index score pairs.
for (int i = 0; i < scores.size(); i++) {
scoreIndexVec->push_back(std::make_pair(scores[i], indices[i]));
}
// Sort the score pair according to the scores in descending order
std::stable_sort(scoreIndexVec->begin(), scoreIndexVec->end(), SortScorePairDescend<int>);
// Keep topK scores if needed.
if (topK > -1 && topK < scoreIndexVec->size()) {
scoreIndexVec->resize(topK);
}
}
// Greedy non-maximum suppression over scored boxes.
//
// bboxes/scores: parallel arrays (must have the same length).
// threshold:     boxes whose Jaccard overlap with an already-kept box exceeds
//                this value are suppressed.
// topK:          if > -1, only the topK highest-scoring boxes are considered
//                and at most topK indices are kept.
// reuseOverlaps: if true, pairwise overlaps are cached in / read from
//                *overlaps so repeated calls can share the work.
// indices:       output; indices of the kept boxes, highest score first.
// Boxes with near-zero area (< 1e-5) are silently dropped.
void ApplyNMS(const std::vector<NormalizedBBox>& bboxes, const std::vector<float>& scores,
        const float threshold, const int topK, const bool reuseOverlaps,
        std::map<int, std::map<int, float>>* overlaps, std::vector<int>* indices) {
    // Sanity check.
    SASSERT(bboxes.size() == scores.size(), "bboxes and scores have different size.");
    // Get topK scores (with corresponding indices).
    std::vector<int> idx(boost::counting_iterator<int>(0),
            boost::counting_iterator<int>(scores.size()));
    std::vector<std::pair<float, int>> scoreIndexVec;
    GetTopKScoreIndex(scores, idx, topK, &scoreIndexVec);
    // Do nms.
    indices->clear();
    while (scoreIndexVec.size() != 0) {
        // Get the current highest score box.
        int bestIdx = scoreIndexVec.front().second;
        const NormalizedBBox& bestBBox = bboxes[bestIdx];
        if (BBoxSize(bestBBox) < 1e-5) {
            // Erase small box.
            scoreIndexVec.erase(scoreIndexVec.begin());
            continue;
        }
        indices->push_back(bestIdx);
        // Erase the best box.
        scoreIndexVec.erase(scoreIndexVec.begin());
        if (topK > -1 && indices->size() >= topK) {
            // Stop if finding enough bboxes for nms.
            break;
        }
        // Compute overlap between bestBBox and other remaining bboxes.
        // Remove a bbox if the overlap with bestBBox is larger than nmsThreshold.
        for (std::vector<std::pair<float, int>>::iterator it = scoreIndexVec.begin();
                it != scoreIndexVec.end(); ) {
            int curIdx = it->second;
            const NormalizedBBox& curBBox = bboxes[curIdx];
            if (BBoxSize(curBBox) < 1e-5) {
                // Erase small box.
                it = scoreIndexVec.erase(it);
                continue;
            }
            float curOverlap = 0.;
            if (reuseOverlaps) {
                // Look the pair up in both orders before recomputing.
                if (overlaps->find(bestIdx) != overlaps->end() &&
                        overlaps->find(bestIdx)->second.find(curIdx) !=
                        (*overlaps)[bestIdx].end()) {
                    // Use the computed overlap.
                    curOverlap = (*overlaps)[bestIdx][curIdx];
                } else if (overlaps->find(curIdx) != overlaps->end() &&
                        overlaps->find(curIdx)->second.find(bestIdx) !=
                        (*overlaps)[curIdx].end()) {
                    // Use the computed overlap.
                    curOverlap = (*overlaps)[curIdx][bestIdx];
                } else {
                    curOverlap = JaccardOverlap(bestBBox, curBBox);
                    // Store the overlap for future use.
                    (*overlaps)[bestIdx][curIdx] = curOverlap;
                }
            } else {
                curOverlap = JaccardOverlap(bestBBox, curBBox);
            }
            // Remove it if necessary.
            if (curOverlap > threshold) {
                it = scoreIndexVec.erase(it);
            } else {
                it++;
            }
        }
    }
}
void ApplyNMS(const std::vector<NormalizedBBox>& bboxes, const std::vector<float>& scores,
const float threshold, const int topK, std::vector<int>* indices) {
bool reuseOverlap = false;
std::map<int, std::map<int, float>> overlaps;
ApplyNMS(bboxes, scores, threshold, topK, reuseOverlap, &overlaps, indices);
}
// Greedy NMS on a precomputed pairwise overlap matrix.
//
// overlapped: row-major num x num boolean matrix; overlapped[i * num + j]
//             true means box i suppresses box j. Boxes are assumed to be
//             pre-sorted so that lower indices have higher scores.
// num:        number of boxes (non-positive num keeps indices empty).
// indices:    output; indices of the kept boxes, in selection order.
void ApplyNMS(const bool* overlapped, const int num, std::vector<int>* indices) {
    // Candidate indices 0..num-1. A plain fill loop replaces the previous
    // boost::counting_iterator construction, keeping this function free of a
    // third-party dependency; the guard also makes negative num a no-op.
    std::vector<int> indexVec(num > 0 ? num : 0);
    for (int i = 0; i < (int)indexVec.size(); i++) {
        indexVec[i] = i;
    }
    // Do nms.
    indices->clear();
    while (!indexVec.empty()) {
        // Get the current highest score box.
        int bestIdx = indexVec.front();
        indices->push_back(bestIdx);
        // Erase the best box.
        indexVec.erase(indexVec.begin());
        // Drop every remaining box that the best box suppresses.
        for (std::vector<int>::iterator it = indexVec.begin(); it != indexVec.end(); ) {
            int curIdx = *it;
            // Remove it if necessary.
            if (overlapped[bestIdx * num + curIdx]) {
                it = indexVec.erase(it);
            } else {
                it++;
            }
        }
    }
}
// Per-prior confidence loss; one thread per (image, prior) pair.
// conf_data:  (num, num_preds_per_class, num_classes) class scores —
//             softmax probabilities for loss_type 0, raw logits for
//             loss_type 1.
// match_data: per (image, prior), the target class label (stored as Dtype).
// loss_type:  0 = softmax cross-entropy, -log p(label);
//             1 = per-class logistic (sigmoid) cross-entropy against a
//                 one-hot target, summed over classes.
// conf_loss_data: output, one loss value per (image, prior).
template <typename Dtype>
__global__ void ComputeConfLossKernel(const int nthreads, const Dtype* conf_data,
        const int num_preds_per_class, const int num_classes, const int loss_type,
        const Dtype* match_data, Dtype* conf_loss_data) {
    CUDA_KERNEL_LOOP(index, nthreads) {
        int label = match_data[index];
        int num = index / num_preds_per_class;
        int p = index % num_preds_per_class;
        // Offset of this prior's class-score row.
        int start_idx = (num * num_preds_per_class + p) * num_classes;
        Dtype loss = 0;
        if (loss_type == 0) {
            // Softmax loss: clamp the probability away from 0 before the log.
            Dtype prob = conf_data[start_idx + label];
            loss = -log(MaxDevice(prob, Dtype(FLT_MIN)));
        } else if (loss_type == 1) {
            // Logistic loss summed over classes; one-hot target at `label`.
            int target = 0;
            for (int c = 0; c < num_classes; ++c) {
                if (c == label) {
                    target = 1;
                } else {
                    target = 0;
                }
                Dtype input = conf_data[start_idx + c];
                // Numerically stable form of
                // -(target*log(sigmoid(x)) + (1-target)*log(1-sigmoid(x))).
                loss -= input * (target - (input >= 0)) -
                        log(1 + exp(input - 2 * input * (input >= 0)));
            }
        }
        conf_loss_data[index] = loss;
    }
}
// Per-(image n, spatial position s) maximum over the channel axis of `data`
// laid out as (num, channels, spatial_dim); one thread per (n, s).
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels, const int spatial_dim,
        const Dtype* data, Dtype* out) {
    CUDA_KERNEL_LOOP(idx, num * spatial_dim) {
        const int n = idx / spatial_dim;
        const int s = idx % spatial_dim;
        // Base of the channel column for (n, s); consecutive channels are
        // spatial_dim elements apart.
        const Dtype* column = data + n * channels * spatial_dim + s;
        Dtype best = -FLT_MAX;
        for (int c = 0; c < channels; ++c) {
            best = max(column[c * spatial_dim], best);
        }
        out[idx] = best;
    }
}
// data[i] = channel_data[i] - channel_max broadcast over the channel axis of
// a (num, channels, spatial_dim) tensor. `num` is unused but kept so the
// softmax helper kernels share a uniform signature.
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count, const int num, const int channels,
        const int spatial_dim, const Dtype* channel_data, const Dtype* channel_max,
        Dtype* data) {
    CUDA_KERNEL_LOOP(idx, count) {
        const int n = idx / (channels * spatial_dim);
        const int s = idx % spatial_dim;
        data[idx] = channel_data[idx] - channel_max[n * spatial_dim + s];
    }
}
// Elementwise exponential: out[i] = exp(data[i]).
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
    CUDA_KERNEL_LOOP(i, count) {
        out[i] = exp(data[i]);
    }
}
// Per-(image n, spatial position s) sum over the channel axis of `data`
// laid out as (num, channels, spatial_dim); one thread per (n, s).
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim,
        const Dtype* data, Dtype* channel_sum) {
    CUDA_KERNEL_LOOP(idx, num * spatial_dim) {
        const int n = idx / spatial_dim;
        const int s = idx % spatial_dim;
        // Base of the channel column for (n, s).
        const Dtype* column = data + n * channels * spatial_dim + s;
        Dtype total = 0;
        for (int c = 0; c < channels; ++c) {
            total += column[c * spatial_dim];
        }
        channel_sum[idx] = total;
    }
}
// Normalize in place: divide each element of a (num, channels, spatial_dim)
// tensor by its (n, s) channel sum. `num` is unused but kept so the softmax
// helper kernels share a uniform signature.
template <typename Dtype>
__global__ void kernel_channel_div(const int count, const int num, const int channels,
        const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
    CUDA_KERNEL_LOOP(idx, count) {
        const int n = idx / (channels * spatial_dim);
        const int s = idx % spatial_dim;
        data[idx] /= channel_sum[n * spatial_dim + s];
    }
}
// Channel-wise softmax on the GPU:
//   prob[n, c, s] = exp(data[n, c, s] - max_c data[n, :, s]) / sum_c exp(...)
// data/prob are device buffers shaped (outerNum, channels, innerNum); the
// per-(n, s) max is subtracted first for numerical stability. The five
// kernels below must run in this exact order; they are launched on the
// default (null) stream, so they serialize correctly.
template <typename Dtype>
void SoftMaxGPU(const Dtype* data, const int outerNum, const int channels, const int innerNum,
        Dtype* prob) {
    std::vector<uint32_t> shape(4, 1);
    shape[0] = outerNum;
    shape[1] = channels;
    shape[2] = innerNum;
    // Scratch buffer: holds the per-(n, s) max first, then the per-(n, s) sum.
    Data<Dtype> scale("scale", shape);
    Dtype* scaleData = scale.mutable_device_data();
    int count = outerNum * channels * innerNum;
    // We need to subtract the max to avoid numerical issues, compute the exp,
    // and then normalize.
    // compute max
    hipLaunchKernelGGL(( kernel_channel_max<Dtype>), dim3(SOOOA_GET_BLOCKS(outerNum * innerNum)),
            dim3( SOOOA_CUDA_NUM_THREADS), 0, 0, outerNum, channels, innerNum, data, scaleData);
    // subtract
    hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(SOOOA_GET_BLOCKS(count)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
            count, outerNum, channels, innerNum, data, scaleData, prob);
    // exponentiate
    hipLaunchKernelGGL(( kernel_exp<Dtype>), dim3(SOOOA_GET_BLOCKS(count)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0, count, prob, prob);
    // sum after exp
    hipLaunchKernelGGL(( kernel_channel_sum<Dtype>), dim3(SOOOA_GET_BLOCKS(outerNum * innerNum)),
            dim3( SOOOA_CUDA_NUM_THREADS), 0, 0, outerNum, channels, innerNum, prob, scaleData);
    // divide
    hipLaunchKernelGGL(( kernel_channel_div<Dtype>), dim3(SOOOA_GET_BLOCKS(count)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
            count, outerNum, channels, innerNum, scaleData, prob);
}
// confData:    (numBatches, numPriors * numClasses)-shaped confidence data.
// num:         number of images in the batch.
// allConfLoss: output; per image, the confidence loss of every prior box.
// Compute the confidence loss of every prior box in the batch on the GPU.
// Each prior is scored against the label of its matched gt box; unmatched
// priors are scored against the background class.
template <typename Dtype>
void ComputeConfLossGPU(Data<Dtype>& confData, const int num,
        const int numPredsPerClass, const int numClasses, const int backgroundLabelId,
        const ConfLossType lossType,
        const std::vector<std::map<int, std::vector<int>>>& allMatchIndices,
        const std::map<int, std::vector<NormalizedBBox>>& allGtBBoxes,
        std::vector<std::vector<float>>* allConfLoss) {
    SASSERT0(backgroundLabelId < numClasses);
    // match: per (image, prior), the label of the matched gt box. Priors with
    // no match keep backgroundLabelId as their target.
    Data<Dtype> match("match", {uint32_t(num), uint32_t(numPredsPerClass), 1, 1});
    Dtype* matchData = match.mutable_host_data();
    for (int i = 0; i < num; i++) {
        const std::map<int, std::vector<int>>& matchIndices = allMatchIndices[i];
        // For each prior box p of image i.
        for (int p = 0; p < numPredsPerClass; p++) {
            // Get the label index.
            int label = backgroundLabelId;
            // With shared locations there is a single entry keyed -1, so this
            // loop typically runs once.
            for (std::map<int, std::vector<int>>::const_iterator it = matchIndices.begin();
                    it != matchIndices.end(); it++) {
                const std::vector<int>& matchIndex = it->second;
                SASSERT0(matchIndex.size() == numPredsPerClass);
                // Prior p is matched to gt box matchIndex[p].
                if (matchIndex[p] > -1) {
                    SASSERT0(allGtBBoxes.find(i) != allGtBBoxes.end());
                    const std::vector<NormalizedBBox>& gtBBoxes = allGtBBoxes.find(i)->second;
                    SASSERT0(matchIndex[p] < gtBBoxes.size());
                    label = gtBBoxes[matchIndex[p]].label;
                    SASSERT0(label >= 0);
                    SASSERT0(label != backgroundLabelId);
                    SASSERT0(label < numClasses);
                    // A prior can only be matched to one gt bbox.
                    break;
                }
            }
            // Record the target label of prior p.
            matchData[i * numPredsPerClass + p] = label;
        }
    }
    // Get probability data.
    const Dtype* confGpuData = confData.device_data();
    Data<Dtype> prob("prob");
    prob.reshapeLike(&confData);
    // For SOFTMAX loss, turn the raw confidences into probabilities first.
    if (lossType == ConfLossType::SOFTMAX) {
        Dtype* probData = prob.mutable_device_data();
        SoftMaxGPU(confData.device_data(), num * numPredsPerClass, numClasses, 1, probData);
        confGpuData = prob.device_data();
    }
    // Compute the loss.
    Data<Dtype> confLoss("confLoss", {uint32_t(num), uint32_t(numPredsPerClass), 1, 1});
    Dtype* confLossData = confLoss.mutable_device_data();
    const int numThreads = num * numPredsPerClass;
    // Map the enum onto the kernel's integer loss selector.
    int intLossType = 0;
    if (lossType == ConfLossType::SOFTMAX) intLossType = 0;
    else if (lossType == ConfLossType::LOGISTIC) intLossType = 1;
    else SASSERT0(false);
    // One thread per (image, prior); matched priors are scored against their
    // gt label, the rest against the background class.
    hipLaunchKernelGGL(( ComputeConfLossKernel<Dtype>), dim3(SOOOA_GET_BLOCKS(numThreads)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
            numThreads, confGpuData, numPredsPerClass, numClasses, intLossType,
            match.device_data(), confLossData);
    // Save the loss (host_data() synchronizes the device results back).
    allConfLoss->clear();
    const Dtype* lossData = confLoss.host_data();
    for (int i = 0; i < num; i++) {
        std::vector<float> confLoss(lossData, lossData + numPredsPerClass);
        allConfLoss->push_back(confLoss);
        lossData += numPredsPerClass;
    }
}
template void ComputeConfLossGPU(Data<float>& confData, const int num,
        const int numPredsPerClass, const int numClasses, const int backgroundLabelId,
        const ConfLossType lossType,
        const std::vector<std::map<int, std::vector<int>>>& allMatchIndices,
        const std::map<int, std::vector<NormalizedBBox>>& allGtBBoxes,
        std::vector<std::vector<float>>* allConfLoss);
// confData:         mbox_conf confidence predictions.
// allLocPreds:      per-image location predictions.
// allGtBBoxes:      per-image ground-truth boxes.
// priorBBoxes:      prior boxes shared by every image.
// priorVariances:   per-prior encoding variances.
// allMatchOverlaps: per-image, per-label overlap of each prior with its gt.
// allMatchIndices:  in/out; per-image, per-label match index of each prior.
// allNegIndices:    output; per-image indices of the selected negative priors.
// Select training examples for the batch: hard negatives (MAX_NEGATIVE) or
// hard examples (HARD_EXAMPLE) ranked by per-prior loss. Updates
// *allMatchIndices in place (HARD_EXAMPLE may un-match positives) and fills
// *allNegIndices; *numMatches / *numNegs receive the final counts.
template <typename Dtype>
void MineHardExamples(Data<Dtype>& confData,
        const std::vector<LabelBBox>& allLocPreds,
        const std::map<int, std::vector<NormalizedBBox>>& allGtBBoxes,
        const std::vector<NormalizedBBox>& priorBBoxes,
        const std::vector<std::vector<float>>& priorVariances,
        const std::vector<std::map<int, std::vector<float>>>& allMatchOverlaps,
        const int numClasses, const int backgroundLabelId, const bool usePriorForNms,
        const ConfLossType confLossType, const MiningType miningType,
        const LocLossType locLossType, const float negPosRatio, const float negOverlap,
        const CodeType codeType, const bool encodeVarianceInTarget,
        const float nmsThreshold, const int topK, const int sampleSize, const bool bpInside,
        const bool usePriorForMatching, int* numMatches, int* numNegs,
        std::vector<std::map<int, std::vector<int>>>* allMatchIndices,
        std::vector<std::vector<int>>* allNegIndices) {
    // num images in batch.
    int num = allLocPreds.size();
    // Number of priors matched to a gt box (match index > -1) in the batch.
    *numMatches = CountNumMatches(*allMatchIndices, num);
    *numNegs = 0;
    int numPriors = priorBBoxes.size();
    SASSERT0(numPriors == priorVariances.size());
    SASSERT(numClasses >= 1, "numClasses should not be less than 1.");
    if (miningType == MiningType::MINING_NONE) {
        return;
    }
    // A non-positive topK disables the NMS-based selection path below.
    bool hasNmsParam = true;
    if (topK <= 0) {
        hasNmsParam = false;
    }
    // Compute confidence losses based on matching results.
    // Per-image, per-prior confidence loss: [num][num_priors].
    std::vector<std::vector<float>> allConfLoss;
    ComputeConfLossGPU(confData, num, numPriors, numClasses, backgroundLabelId, confLossType,
            *allMatchIndices, allGtBBoxes, &allConfLoss);
    // Per-image, per-prior localization loss: [num][num_priors].
    std::vector<std::vector<float>> allLocLoss;
    if (miningType == MiningType::HARD_EXAMPLE) {
        // Compute localization losses based on matching results.
        Data<Dtype> locPred("locPred");
        Data<Dtype> locGt("locGt");
        if (*numMatches != 0) {
            std::vector<uint32_t> locShape(4, 1);
            locShape[3] = *numMatches * 4;
            locPred.reshape(locShape);
            locGt.reshape(locShape);
            Dtype* locPredData = locPred.mutable_host_data();
            Dtype* locGtData = locGt.mutable_host_data();
            EncodeLocPrediction(allLocPreds, allGtBBoxes, *allMatchIndices, priorBBoxes,
                    priorVariances, codeType, encodeVarianceInTarget, bpInside,
                    usePriorForMatching, locPredData, locGtData);
        }
        ComputeLocLoss(locPred, locGt, *allMatchIndices, num, numPriors, locLossType,
                &allLocLoss);
    } else {
        // No localization loss: fill with zeros.
        for (int i = 0; i < num; i++) {
            std::vector<float> locLoss(numPriors, 0.f);
            allLocLoss.push_back(locLoss);
        }
    }
    for (int i = 0; i < num; i++) {
        std::map<int, std::vector<int>>& matchIndices = (*allMatchIndices)[i];
        const std::map<int, std::vector<float>>& matchOverlaps = allMatchOverlaps[i];
        // Total mining loss per prior: conf loss + loc loss.
        const std::vector<float>& confLoss = allConfLoss[i];
        const std::vector<float>& locLoss = allLocLoss[i];
        std::vector<float> loss;
        std::transform(confLoss.begin(), confLoss.end(), locLoss.begin(),
                std::back_inserter(loss), std::plus<float>());
        // Pick negatives or hard examples based on loss.
        std::set<int> selIndices;
        std::vector<int> negIndices;
        for (std::map<int, std::vector<int>>::iterator it = matchIndices.begin();
                it != matchIndices.end(); it++) {
            const int label = it->first;
            int numSel = 0;
            // Get potential (loss, prior index) candidate pairs.
            std::vector<std::pair<float, int>> lossIndices;
            for (int m = 0; m < matchIndices[label].size(); m++) {
                // MAX_NEGATIVE: eligible iff unmatched and overlap < negOverlap.
                if (IsEligibleMining(miningType, matchIndices[label][m],
                        matchOverlaps.find(label)->second[m], negOverlap)) {
                    lossIndices.push_back(std::make_pair(loss[m], m));
                    numSel++;
                }
            }
            if (miningType == MiningType::MAX_NEGATIVE) {
                // Cap negatives at negPosRatio times the number of positives.
                int numPos = 0;
                for (int m = 0; m < matchIndices[label].size(); m++) {
                    if (matchIndices[label][m] > -1) {
                        numPos++;
                    }
                }
                numSel = ::min(static_cast<int>(numPos * negPosRatio), numSel);
            } else if (miningType == MiningType::HARD_EXAMPLE) {
                SASSERT0(sampleSize > 0);
                numSel = ::min(sampleSize, numSel);
            }
            // XXX: nmsThreshold
            // Select samples.
            if (hasNmsParam && nmsThreshold > 0) {
                // Do nms before selecting samples.
                std::vector<float> selLoss;
                std::vector<NormalizedBBox> selBBoxes;
                if (usePriorForNms) {
                    // Run NMS on the prior boxes of the eligible candidates.
                    for (int m = 0; m < matchIndices[label].size(); m++) {
                        if (IsEligibleMining(miningType, matchIndices[label][m],
                                matchOverlaps.find(label)->second[m], negOverlap)) {
                            selLoss.push_back(loss[m]);
                            selBBoxes.push_back(priorBBoxes[m]);
                        }
                    }
                } else {
                    // Decode the prediction into bbox first.
                    std::vector<NormalizedBBox> locBBoxes;
                    bool clipBBox = false;
                    DecodeBBoxes(priorBBoxes, priorVariances, codeType,
                            encodeVarianceInTarget, clipBBox,
                            allLocPreds[i].find(label)->second, &locBBoxes);
                    for (int m = 0; m < matchIndices[label].size(); m++) {
                        if (IsEligibleMining(miningType, matchIndices[label][m],
                                matchOverlaps.find(label)->second[m], negOverlap)) {
                            selLoss.push_back(loss[m]);
                            selBBoxes.push_back(locBBoxes[m]);
                        }
                    }
                }
                // Do non-maximum suppression based on the loss.
                std::vector<int> nmsIndices;
                ApplyNMS(selBBoxes, selLoss, nmsThreshold, topK, &nmsIndices);
                if (nmsIndices.size() < numSel) {
                    STDOUT_LOG("not enought sample after nms: %d", nmsIndices.size());
                }
                // Pick top example indices after nms.
                numSel = ::min(static_cast<int>(nmsIndices.size()), numSel);
                for (int n = 0; n < numSel; n++) {
                    selIndices.insert(lossIndices[nmsIndices[n]].second);
                }
            } else {
                // Pick top example indices based on loss.
                std::sort(lossIndices.begin(), lossIndices.end(),
                        SortScorePairDescend<int>);
                for (int n = 0; n < numSel; n++) {
                    selIndices.insert(lossIndices[n].second);
                }
            }
            // Update the matchIndices and select negIndices.
            for (int m = 0; m < matchIndices[label].size(); m++) {
                if (matchIndices[label][m] > -1) {
                    // HARD_EXAMPLE: un-match positives that were not selected.
                    if (miningType == MiningType::HARD_EXAMPLE &&
                            selIndices.find(m) == selIndices.end()) {
                        matchIndices[label][m] = -1;
                        *numMatches -= 1;
                    }
                } else if (matchIndices[label][m] == -1) {
                    // Selected unmatched priors become negatives.
                    if (selIndices.find(m) != selIndices.end()) {
                        negIndices.push_back(m);
                        *numNegs += 1;
                    }
                }
            }
        }
        allNegIndices->push_back(negIndices);
    }
}
template void MineHardExamples(Data<float>& confData,
        const std::vector<LabelBBox>& allLocPreds,
        const std::map<int, std::vector<NormalizedBBox>>& allGtBBoxes,
        const std::vector<NormalizedBBox>& priorBBoxes,
        const std::vector<std::vector<float>>& priorVariances,
        const std::vector<std::map<int, std::vector<float>>>& allMatchOverlaps,
        const int numClasses, const int backgroundLabelId, const bool usePriorForNms,
        const ConfLossType confLossType, const MiningType miningType,
        const LocLossType locLossType, const float negPosRatio, const float negOverlap,
        const CodeType codeType, const bool encodeVarianceInTarget, const float nmsThresh,
        const int topK, const int sampleSize, const bool bpInside,
        const bool usePriorForMatching, int* numMatches, int* numNegs,
        std::vector<std::map<int, std::vector<int>>>* allMatchIndices,
        std::vector<std::vector<int>>* allNegIndices);
// Gather, for every matched prior, the encoded ground-truth regression
// target and the corresponding location prediction, packed 4 values per
// match (xmin, ymin, xmax, ymax) into locGtData / locPredData.
//
// allLocPreds:     per-image, per-label location predictions.
// allGtBBoxes:     per-image ground-truth boxes.
// allMatchIndices: per-image, per-label match index of each prior (-1 = none).
// priorBBoxes / priorVariances: prior boxes and their encoding variances.
// codeType / encodeVarianceInTarget: bbox encoding scheme.
// bpInside:        when true, dimensions of the matched box falling outside
//                  [0, 1] copy the gt target instead, simulating a zero
//                  gradient for that dimension.
// usePriorForMatching: when false (with bpInside), the prediction is decoded
//                  first to decide which dimensions lie outside the image.
template <typename Dtype>
void EncodeLocPrediction(const std::vector<LabelBBox>& allLocPreds,
        const std::map<int, std::vector<NormalizedBBox>>& allGtBBoxes,
        const std::vector<std::map<int, std::vector<int>>>& allMatchIndices,
        const std::vector<NormalizedBBox>& priorBBoxes,
        const std::vector<std::vector<float>>& priorVariances,
        const CodeType codeType, const bool encodeVarianceInTarget,
        const bool bpInside, const bool usePriorForMatching,
        Dtype* locPredData, Dtype* locGtData) {
    int num = allLocPreds.size();
    int count = 0;
    for (int i = 0; i < num; i++) {
        for (std::map<int, std::vector<int>>::const_iterator it = allMatchIndices[i].begin();
                it != allMatchIndices[i].end(); it++) {
            const int label = it->first;
            const std::vector<int>& matchIndex = it->second;
            SASSERT0(allLocPreds[i].find(label) != allLocPreds[i].end());
            const std::vector<NormalizedBBox>& locPred = allLocPreds[i].find(label)->second;
            for (int j = 0; j < matchIndex.size(); j++) {
                if (matchIndex[j] <= -1) {
                    continue;
                }
                // Store encoded ground truth.
                const int gtIdx = matchIndex[j];
                SASSERT0(allGtBBoxes.find(i) != allGtBBoxes.end());
                SASSERT0(gtIdx < allGtBBoxes.find(i)->second.size());
                const NormalizedBBox& gtBBox = allGtBBoxes.find(i)->second[gtIdx];
                NormalizedBBox gtEncode;
                SASSERT0(j < priorBBoxes.size());
                EncodeBBox(priorBBoxes[j], priorVariances[j], codeType,
                        encodeVarianceInTarget, gtBBox, &gtEncode);
                locGtData[count * 4] = gtEncode.xmin;
                locGtData[count * 4 + 1] = gtEncode.ymin;
                locGtData[count * 4 + 2] = gtEncode.xmax;
                locGtData[count * 4 + 3] = gtEncode.ymax;
                // Store location prediction.
                SASSERT0(j < locPred.size());
                if (bpInside) {
                    NormalizedBBox matchBBox = priorBBoxes[j];
                    if (!usePriorForMatching) {
                        const bool clipBBox = false;
                        DecodeBBox(priorBBoxes[j], priorVariances[j], codeType,
                                encodeVarianceInTarget, clipBBox, locPred[j], &matchBBox);
                    }
                    // When a dimension of matchBBox is outside of the image
                    // region, use gtEncode to simulate zero gradient.
                    // BUGFIX: the xmin slot previously copied locPred[j].ymin.
                    locPredData[count * 4] = (matchBBox.xmin < 0 || matchBBox.xmin > 1) ?
                            gtEncode.xmin : locPred[j].xmin;
                    locPredData[count * 4 + 1] = (matchBBox.ymin < 0 || matchBBox.ymin > 1) ?
                            gtEncode.ymin : locPred[j].ymin;
                    locPredData[count * 4 + 2] = (matchBBox.xmax < 0 || matchBBox.xmax > 1) ?
                            gtEncode.xmax : locPred[j].xmax;
                    locPredData[count * 4 + 3] = (matchBBox.ymax < 0 || matchBBox.ymax > 1) ?
                            gtEncode.ymax : locPred[j].ymax;
                } else {
                    locPredData[count * 4] = locPred[j].xmin;
                    locPredData[count * 4 + 1] = locPred[j].ymin;
                    locPredData[count * 4 + 2] = locPred[j].xmax;
                    locPredData[count * 4 + 3] = locPred[j].ymax;
                }
                if (encodeVarianceInTarget) {
                    // Fold the prior variance into both target and prediction.
                    for (int k = 0; k < 4; k++) {
                        SASSERT0(priorVariances[j][k] > 0);
                        locPredData[count * 4 + k] /= priorVariances[j][k];
                        locGtData[count * 4 + k] /= priorVariances[j][k];
                    }
                }
                count++;
            }
        }
    }
}
template void EncodeLocPrediction(const std::vector<LabelBBox>& allLocPreds,
        const std::map<int, std::vector<NormalizedBBox>>& allGtBBoxes,
        const std::vector<std::map<int, std::vector<int>>>& allMatchIndices,
        const std::vector<NormalizedBBox>& priorBBoxes,
        const std::vector<std::vector<float>>& priorVariances,
        const CodeType codeType, const bool encodeVarianceInTarget,
        const bool bpInside, const bool usePriorForMatching,
        float* locPredData, float* locGtData);
// Encodes a ground truth bbox against a prior (anchor) bbox, producing the
// regression target in encodeBBox.
// priorBBox: the prior box; for CENTER_SIZE/CORNER_SIZE it must have strictly
//     positive width and height (asserted).
// priorVariance: the 4 variances attached to the prior. When
//     encodeVarianceInTarget is false the offsets are divided by these values,
//     so the vector must hold exactly 4 strictly positive entries (asserted).
// codeType: CORNER (corner offsets), CENTER_SIZE (center offset + log size
//     ratio) or CORNER_SIZE (corner offsets normalized by the prior size).
// bbox: the ground truth box being encoded.
void EncodeBBox(const NormalizedBBox& priorBBox, const std::vector<float>& priorVariance,
		const CodeType codeType, const bool encodeVarianceInTarget,
		const NormalizedBBox& bbox, NormalizedBBox* encodeBBox) {
	if (codeType == CodeType::CORNER) {
		if (encodeVarianceInTarget) {
			encodeBBox->xmin = bbox.xmin - priorBBox.xmin;
			encodeBBox->ymin = bbox.ymin - priorBBox.ymin;
			encodeBBox->xmax = bbox.xmax - priorBBox.xmax;
			encodeBBox->ymax = bbox.ymax - priorBBox.ymax;
		} else {
			// Encode variance in bbox.
			SASSERT0(priorVariance.size() == 4);
			for (int i = 0; i < priorVariance.size(); i++) {
				SASSERT0(priorVariance[i] > 0);
			}
			encodeBBox->xmin = (bbox.xmin - priorBBox.xmin) / priorVariance[0];
			encodeBBox->ymin = (bbox.ymin - priorBBox.ymin) / priorVariance[1];
			encodeBBox->xmax = (bbox.xmax - priorBBox.xmax) / priorVariance[2];
			encodeBBox->ymax = (bbox.ymax - priorBBox.ymax) / priorVariance[3];
		}
	} else if (codeType == CodeType::CENTER_SIZE) {
		float priorWidth = priorBBox.xmax - priorBBox.xmin;
		SASSERT0(priorWidth > 0);
		float priorHeight = priorBBox.ymax - priorBBox.ymin;
		SASSERT0(priorHeight > 0);
		float priorCenterX = (priorBBox.xmin + priorBBox.xmax) / 2.;
		float priorCenterY = (priorBBox.ymin + priorBBox.ymax) / 2.;
		float bboxWidth = bbox.xmax - bbox.xmin;
		SASSERT0(bboxWidth > 0);
		float bboxHeight = bbox.ymax - bbox.ymin;
		SASSERT0(bboxHeight > 0);
		float bboxCenterX = (bbox.xmin + bbox.xmax) / 2.;
		float bboxCenterY = (bbox.ymin + bbox.ymax) / 2.;
		if (encodeVarianceInTarget) {
			encodeBBox->xmin = (bboxCenterX - priorCenterX) / priorWidth;
			encodeBBox->ymin = (bboxCenterY - priorCenterY) / priorHeight;
			encodeBBox->xmax = ::log(bboxWidth / priorWidth);
			encodeBBox->ymax = ::log(bboxHeight / priorHeight);
		} else {
			// Encode variance in bbox.
			// Fix: this branch previously indexed priorVariance[0..3] without
			// the size/positivity checks the other two code types perform, so
			// a short variance vector caused an out-of-bounds read instead of
			// a clean assertion failure.
			SASSERT0(priorVariance.size() == 4);
			for (int i = 0; i < priorVariance.size(); i++) {
				SASSERT0(priorVariance[i] > 0);
			}
			encodeBBox->xmin = (bboxCenterX - priorCenterX) / priorWidth / priorVariance[0];
			encodeBBox->ymin = (bboxCenterY - priorCenterY) / priorHeight / priorVariance[1];
			encodeBBox->xmax = ::log(bboxWidth / priorWidth) / priorVariance[2];
			encodeBBox->ymax = ::log(bboxHeight / priorHeight) / priorVariance[3];
		}
	} else if (codeType == CodeType::CORNER_SIZE) {
		float priorWidth = priorBBox.xmax - priorBBox.xmin;
		SASSERT0(priorWidth > 0);
		float priorHeight = priorBBox.ymax - priorBBox.ymin;
		SASSERT0(priorHeight > 0);
		if (encodeVarianceInTarget) {
			encodeBBox->xmin = (bbox.xmin - priorBBox.xmin) / priorWidth;
			encodeBBox->ymin = (bbox.ymin - priorBBox.ymin) / priorHeight;
			encodeBBox->xmax = (bbox.xmax - priorBBox.xmax) / priorWidth;
			encodeBBox->ymax = (bbox.ymax - priorBBox.ymax) / priorHeight;
		} else {
			// Encode variance in bbox.
			SASSERT0(priorVariance.size() == 4);
			for (int i = 0; i < priorVariance.size(); i++) {
				SASSERT0(priorVariance[i] > 0);
			}
			encodeBBox->xmin = (bbox.xmin - priorBBox.xmin) / priorWidth / priorVariance[0];
			encodeBBox->ymin = (bbox.ymin - priorBBox.ymin) / priorHeight / priorVariance[1];
			encodeBBox->xmax = (bbox.xmax - priorBBox.xmax) / priorWidth / priorVariance[2];
			encodeBBox->ymax = (bbox.ymax - priorBBox.ymax) / priorHeight / priorVariance[3];
		}
	} else {
		SASSERT(false, "Unknown LocLossType.");
	}
}
// Computes the per-prior localization loss for every image from the already
// encoded predictions and targets.
// locPred/locGt: flat encoded location predictions/targets; their elements
//     are laid out in the same matched-prior order that `count` walks below.
// allMatchIndices: per image, per label, the prior -> gt match indices
//     (values <= -1 mean "unmatched").
// locLossType: SMOOTH_L1 or L2.
// allLocLoss: output; one numPriors-sized loss vector appended per image
//     (unmatched priors keep loss 0).
template <typename Dtype>
void ComputeLocLoss(Data<Dtype>& locPred, Data<Dtype>& locGt,
		const std::vector<std::map<int, std::vector<int>>>& allMatchIndices, const int num,
		const int numPriors, const LocLossType locLossType,
		std::vector<std::vector<float>>* allLocLoss) {
	int locCount = locPred.getCount();
	SASSERT0(locCount == locGt.getCount());
	// Compute (pred - gt) once on the GPU, then read the result back on host.
	Data<Dtype> diff("diff");
	const Dtype* diffData = NULL;
	if (locCount != 0) {
		diff.reshape(locPred.getShape());
		soooa_gpu_sub(locCount, locPred.device_data(), locGt.device_data(), diff.mutable_device_data());
		diffData = diff.host_data();
	}
	// count indexes matched priors in encoding order (4 values per match).
	int count = 0;
	for (int i = 0; i < num; i++) {
		std::vector<float> locLoss(numPriors, 0.f);
		for (std::map<int, std::vector<int>>::const_iterator it = allMatchIndices[i].begin();
				it != allMatchIndices[i].end(); it++) {
			const std::vector<int>& matchIndex = it->second;
			SASSERT0(numPriors == matchIndex.size());
			for (int j = 0; j < matchIndex.size(); j++) {
				if (matchIndex[j] <= -1) {
					// Unmatched prior: contributes no loss and no diff slot.
					continue;
				}
				Dtype loss = 0;
				// Accumulate over the 4 box coordinates of this match.
				for (int k = 0; k < 4; k++) {
					Dtype val = diffData[count * 4 + k];
					if (locLossType == LocLossType::SMOOTH_L1) {
						Dtype absVal = fabs(val);
						if (absVal < 1.) {
							loss += 0.5 * val * val;
						} else {
							loss += absVal - 0.5;
						}
					} else if (locLossType == LocLossType::L2) {
						loss += 0.5 * val * val;
					} else {
						SASSERT(false, "Unknown loc loss type.");
					}
				}
				locLoss[j] = loss;
				count++;
			}
		}
		allLocLoss->push_back(locLoss);
	}
}
template void ComputeLocLoss(Data<float>& locPred, Data<float>& locGt,
		const std::vector<std::map<int, std::vector<int>>>& allMatchIndices, const int num,
		const int numPriors, const LocLossType locLossType,
		std::vector<std::vector<float>>* allLocLoss);
// Builds the confidence (classification) targets — and, when hard negative
// mining is enabled, the matching prediction scores — for the conf loss.
// confData: per image, [numPriors x numClasses] scores; the base pointer is
//     advanced one image at a time at the bottom of the loop.
// mapObjectToAgnostic: collapse all object classes onto a single foreground
//     label (backgroundLabelId + 1).
// allMatchIndices / allNegIndices: per-image positive matches and mined
//     negative prior indices.
// confGtData: target labels. One label index per slot for SOFTMAX, a one-hot
//     row of numClasses per slot for LOGISTIC.
// confPredData: only written when mining is enabled; packed scores of the
//     selected positive and negative priors, in `count` order.
template <typename Dtype>
void EncodeConfPrediction(const Dtype* confData, const int num, const int numPriors,
		const int numClasses, const int backgroundLabelId, const bool mapObjectToAgnostic,
		const MiningType miningType, const ConfLossType confLossType,
		const std::vector<std::map<int, std::vector<int>>>& allMatchIndices,
		const std::vector<std::vector<int>>& allNegIndices,
		const std::map<int, std::vector<NormalizedBBox>>& allGtBBoxes,
		Dtype* confPredData, Dtype* confGtData) {
	SASSERT(numClasses > 1, "numClasses should not be less than 1.");
	if (mapObjectToAgnostic) {
		// Class-agnostic mode only leaves room for foreground (+ background).
		if (backgroundLabelId >= 0) {
			SASSERT0(numClasses == 2);
		} else {
			SASSERT0(numClasses == 1);
		}
	}
	bool doNegMining = (miningType != MiningType::MINING_NONE);
	// count: running output slot index when mining packs selected priors.
	int count = 0;
	for (int i = 0; i < num; i++) {
		if (allGtBBoxes.find(i) != allGtBBoxes.end()) {
			// Save matched (positive) bboxes scores and labels.
			const std::map<int, std::vector<int>>& matchIndices = allMatchIndices[i];
			for (std::map<int, std::vector<int>>::const_iterator it = matchIndices.begin();
					it != matchIndices.end(); it++) {
				const std::vector<int>& matchIndex = it->second;
				SASSERT0(matchIndex.size() == numPriors);
				for (int j = 0; j < numPriors; j++) {
					if (matchIndex[j] <= -1) {
						// Unmatched prior: not a positive sample.
						continue;
					}
					const int gtLabel = mapObjectToAgnostic ?
							backgroundLabelId + 1 :
							allGtBBoxes.find(i)->second[matchIndex[j]].label;
					// Without mining, targets stay aligned to the prior index.
					int idx = doNegMining ? count : j;
					if (confLossType == ConfLossType::SOFTMAX) {
						confGtData[idx] = gtLabel;
					} else if (confLossType == ConfLossType::LOGISTIC) {
						confGtData[idx * numClasses + gtLabel] = 1;
					} else {
						SASSERT(false, "Unknown conf loss type.");
					}
					if (doNegMining) {
						// Copy scores for matched bboxes.
						soooa_copy<Dtype>(numClasses, confData + j * numClasses,
								confPredData + count * numClasses);
						count++;
					}
				}
			}
			// Go to next image
			if (doNegMining) {
				// Save negative bboxes scores and labels.
				for (int n = 0; n < allNegIndices[i].size(); n++) {
					int j = allNegIndices[i][n];
					SASSERT0(j < numPriors);
					soooa_copy<Dtype>(numClasses, confData + j * numClasses,
							confPredData + count * numClasses);
					if (confLossType == ConfLossType::SOFTMAX) {
						confGtData[count] = backgroundLabelId;
					} else if (confLossType == ConfLossType::LOGISTIC) {
						if (backgroundLabelId >= 0 && backgroundLabelId < numClasses) {
							confGtData[count * numClasses + backgroundLabelId] = 1;
						}
					} else {
						SASSERT(false, "Unknown conf loss type.");
					}
					count++;
				}
			}
		}
		// Advance the per-image base pointers: confData when mining consumed
		// it via copies above, otherwise the per-prior target buffer.
		// NOTE(review): with LOGISTIC targets and no mining, confGtData only
		// advances numPriors (not numPriors * numClasses) per image; this
		// mirrors the upstream implementation — verify it is intended.
		if (doNegMining) {
			confData += numPriors * numClasses;
		} else {
			confGtData += numPriors;
		}
	}
}
template void EncodeConfPrediction(const float* confData, const int num, const int numPriors,
		const int numClasses, const int backgroundLabelId, const bool mapObjectToAgnostic,
		const MiningType miningType, const ConfLossType confLossType,
		const std::vector<std::map<int, std::vector<int>>>& allMatchIndices,
		const std::vector<std::vector<int>>& allNegIndices,
		const std::map<int, std::vector<NormalizedBBox>>& allGtBBoxes,
		float* confPredData, float* confGtData);
// Re-buckets the flat [num x numPredsPerClass x numClasses] confidence array
// into one per-image map from class label to that class's per-prior scores.
template <typename Dtype>
void GetConfidenceScores(const Dtype* confData, const int num, const int numPredsPerClass,
		const int numClasses, std::vector<std::map<int, std::vector<float>>>* confPreds) {
	confPreds->clear();
	confPreds->resize(num);
	const Dtype* cursor = confData;
	for (int imgIdx = 0; imgIdx < num; imgIdx++) {
		std::map<int, std::vector<float>>& labelScores = (*confPreds)[imgIdx];
		for (int pred = 0; pred < numPredsPerClass; pred++) {
			// Each prediction row holds one score per class.
			const Dtype* row = cursor + pred * numClasses;
			for (int label = 0; label < numClasses; label++) {
				labelScores[label].push_back(static_cast<float>(row[label]));
			}
		}
		// Step to the next image's block of scores.
		cursor += numPredsPerClass * numClasses;
	}
}
template void GetConfidenceScores(const float* confData, const int num,
		const int numPredsPerClass, const int numClasses,
		std::vector<std::map<int, std::vector<float>>>* confPreds);
template void GetConfidenceScores(const double* confData, const int num,
		const int numPredsPerClass, const int numClasses,
		std::vector<std::map<int, std::vector<float>>>* confPreds);
void DecodeBBoxesAll(const std::vector<LabelBBox>& allLocPreds,
const std::vector<NormalizedBBox>& priorBBoxes,
const std::vector<std::vector<float>>& priorVariances,
const int num, const bool shareLocation,
const int numLocClasses, const int backgroundLabelId,
const CodeType codeType, const bool varianceEncodedInTarget,
const bool clip, std::vector<LabelBBox>* allDecodeBBoxes) {
SASSERT0(allLocPreds.size() == num);
allDecodeBBoxes->clear();
allDecodeBBoxes->resize(num);
for (int i = 0; i < num; i++) {
// Decode predictions into bboxes.
LabelBBox& decodeBBoxes = (*allDecodeBBoxes)[i];
for (int c = 0; c < numLocClasses; c++) {
int label = shareLocation ? -1 : c;
if (label == backgroundLabelId) {
// Ignore background class.
continue;
}
if (allLocPreds[i].find(label) == allLocPreds[i].end()) {
// Something bad happend if there are no predictions for current label.
SASSERT(false, "Could not find location predictions for label %d.", label);
}
const std::vector<NormalizedBBox>& labelLocPreds =
allLocPreds[i].find(label)->second;
DecodeBBoxes(priorBBoxes, priorVariances, codeType, varianceEncodedInTarget, clip,
labelLocPreds, &(decodeBBoxes[label]));
}
}
}
// Collects (score, index) pairs for every score above threshold, sorted by
// descending score; truncated to at most topK entries when topK > -1.
void GetMaxScoreIndex(const std::vector<float>& scores, const float threshold, const int topK,
		std::vector<std::pair<float, int>>* scoreIndexVec) {
	const int numScores = static_cast<int>(scores.size());
	for (int idx = 0; idx < numScores; idx++) {
		if (scores[idx] > threshold) {
			scoreIndexVec->push_back(std::make_pair(scores[idx], idx));
		}
	}
	// Highest score first; stable so equal scores keep their index order.
	std::stable_sort(scoreIndexVec->begin(), scoreIndexVec->end(),
			SortScorePairDescend<int>);
	// Drop everything past the topK best pairs when requested.
	if (topK > -1 && topK < scoreIndexVec->size()) {
		scoreIndexVec->resize(topK);
	}
}
// Collects (score, index) pairs for every score above threshold from a raw
// score array, sorted by descending score; truncated to at most topK entries
// when topK > -1.
template <typename Dtype>
void GetMaxScoreIndex(const Dtype* scores, const int num, const float threshold,
		const int topK, std::vector<std::pair<Dtype, int>>* scoreIndexVec) {
	// Generate index score pairs.
	for (int i = 0; i < num; i++) {
		if (scores[i] > threshold) {
			scoreIndexVec->push_back(std::make_pair(scores[i], i));
		}
	}
	// Sort by score in descending order. Compare in Dtype precision instead
	// of funneling through SortScorePairDescend<int> (pair<float, int>),
	// which silently narrowed double scores to float for each comparison and
	// built converted temporaries per call.
	std::stable_sort(scoreIndexVec->begin(), scoreIndexVec->end(),
			[](const std::pair<Dtype, int>& a, const std::pair<Dtype, int>& b) {
				return a.first > b.first;
			});
	// Keep topK scores if needed.
	if (topK > -1 && topK < (int)scoreIndexVec->size()) {
		scoreIndexVec->resize(topK);
	}
}
template void GetMaxScoreIndex(const float* scores, const int num, const float threshold,
		const int topK, std::vector<std::pair<float, int>>* scoreIndexVec);
void ApplyNMSFast(const std::vector<NormalizedBBox>& bboxes, const std::vector<float>& scores,
const float scoreThreshold, const float nmsThreshold,
const float eta, const int topK, std::vector<int>* indices) {
// Sanity check.
SASSERT(bboxes.size() == scores.size(), "bboxes and scores have different size.");
// Get topK scores (with corresponding indices).
std::vector<std::pair<float, int>> scoreIndexVec;
GetMaxScoreIndex(scores, scoreThreshold, topK, &scoreIndexVec);
// Do nms.
float adaptiveThreshold = nmsThreshold;
indices->clear();
while (scoreIndexVec.size() != 0) {
const int idx = scoreIndexVec.front().second;
bool keep = true;
for (int k = 0; k < indices->size(); k++) {
if (keep) {
const int keptIdx = (*indices)[k];
float overlap = JaccardOverlap(bboxes[idx], bboxes[keptIdx]);
keep = overlap <= adaptiveThreshold;
} else {
break;
}
}
if (keep) {
indices->push_back(idx);
}
scoreIndexVec.erase(scoreIndexVec.begin());
if (keep && eta < 1 && adaptiveThreshold > 0.5) {
adaptiveThreshold *= eta;
}
}
}
// Greedy non-maximum suppression over raw box/score arrays; bboxes holds 4
// values per box. Same algorithm as the NormalizedBBox overload above.
template <typename Dtype>
void ApplyNMSFast(const Dtype* bboxes, const Dtype* scores, const int num,
		const float scoreThreshold, const float nmsThreshold, const float eta,
		const int topK, std::vector<int>* indices) {
	// Rank the candidates by score (optionally truncated to topK).
	std::vector<std::pair<float, int>> scoreIndexVec;
	GetMaxScoreIndex(scores, num, scoreThreshold, topK, &scoreIndexVec);
	float adaptiveThreshold = nmsThreshold;
	indices->clear();
	while (!scoreIndexVec.empty()) {
		const int idx = scoreIndexVec.front().second;
		bool keep = true;
		for (int k = 0; k < (int)indices->size(); k++) {
			const int keptIdx = (*indices)[k];
			const Dtype overlap = JaccardOverlap(bboxes + idx * 4, bboxes + keptIdx * 4);
			if (overlap > adaptiveThreshold) {
				keep = false;
				break;
			}
		}
		if (keep) {
			indices->push_back(idx);
		}
		scoreIndexVec.erase(scoreIndexVec.begin());
		// Decay the overlap threshold when eta < 1.
		if (keep && eta < 1 && adaptiveThreshold > 0.5) {
			adaptiveThreshold *= eta;
		}
	}
}
template void ApplyNMSFast(const float* bboxes, const float* scores, const int num,
		const float scoreThreshold, const float nmsThreshold, const float eta,
		const int topK, std::vector<int>* indices);
// Maps a bbox into a (width x height) coordinate system and refreshes its
// cached size; difficult flag is carried over unchanged.
void ScaleBBox(const NormalizedBBox& bbox, const int height, const int width,
		NormalizedBBox* scaleBBox) {
	// Coordinates are still "normalized" only when both dims are <= 1.
	const bool stillNormalized = (width <= 1 && height <= 1);
	scaleBBox->xmin = bbox.xmin * width;
	scaleBBox->ymin = bbox.ymin * height;
	scaleBBox->xmax = bbox.xmax * width;
	scaleBBox->ymax = bbox.ymax * height;
	scaleBBox->difficult = bbox.difficult;
	// Recompute the cached size in the scaled coordinate system.
	scaleBBox->size = BBoxSize(*scaleBBox, stillNormalized);
}
// Converts a network-space bbox into output coordinates for the original
// image. imgSize is (height, width). Resize handling is not implemented:
// hasResize must be false (asserted), so the bbox is simply clipped to [0, 1]
// and scaled to the image dimensions.
void OutputBBox(const NormalizedBBox& bbox, const std::pair<int, int>& imgSize,
		const bool hasResize, NormalizedBBox* outBBox) {
	// resize .
	SASSERT0(!hasResize);
	const int height = imgSize.first;
	const int width = imgSize.second;
	NormalizedBBox tempBBox = bbox;
	if (hasResize) {
		// NOTE(review): unreachable while the assert above holds; the resize
		// path appears stripped out. TODO: implement or remove this branch.
	} else {
		// Clip the normalized bbox first.
		ClipBBox(tempBBox, &tempBBox);
		// Scale the bbox according to the original image size.
		ScaleBBox(tempBBox, height, width, outBBox);
	}
}
// Converts HSV (components in [0, 1]) to an RGB cv::Scalar scaled to [0, 255].
cv::Scalar HSV2RGB(const float h, const float s, const float v) {
	// Standard sector-based HSV -> RGB conversion.
	const int sector = static_cast<int>(h * 6);
	const float f = h * 6 - sector;
	const float p = v * (1 - s);
	const float q = v * (1 - f * s);
	const float t = v * (1 - (1 - f) * s);
	float r = 1, g = 1, b = 1;	// white fallback for out-of-range sectors
	if (sector == 0)      { r = v; g = t; b = p; }
	else if (sector == 1) { r = q; g = v; b = p; }
	else if (sector == 2) { r = p; g = v; b = t; }
	else if (sector == 3) { r = p; g = q; b = v; }
	else if (sector == 4) { r = t; g = p; b = v; }
	else if (sector == 5) { r = v; g = p; b = q; }
	return cv::Scalar(r * 255, g * 255, b * 255);
}
std::vector<cv::Scalar> GetColors(const int n) {
std::vector<cv::Scalar> colors;
cv::RNG rng(12345);
const float goldenRatioConjugate = 0.618033988749895;
const float s = 0.3;
const float v = 0.99;
for (int i = 0; i < n; i++) {
const float h = ::fmod(rng.uniform(0.f, 1.f) + goldenRatioConjugate, 1.f);
colors.push_back(HSV2RGB(h, s, v));
}
return colors;
}
// Wall-clock reference used by VisualizeBBox to estimate the displayed FPS;
// reset at the end of each VisualizeBBox call.
static clock_t startClock = clock();
// Lazily opened writer used by VisualizeBBox to append frames to saveFile.
static cv::VideoWriter capOut;
// Draws the detections onto the images, overlays an FPS estimate, shows the
// result in an OpenCV window ("detections") and optionally appends each frame
// to a video file. Pressing ESC in the window raises SIGINT.
// detections: detections->width() must be 7; each of the detections->height()
//     rows is (imgIdx, label, score, xmin, ymin, xmax, ymax); coordinates are
//     scaled below by the first image's size, so they are assumed normalized
//     to [0, 1] — TODO confirm with the producing layer.
// threshold: detections scoring below this are not drawn.
// colors: per-label drawing colors; must cover every drawn label (asserted).
template <typename Dtype>
void VisualizeBBox(const std::vector<cv::Mat>& images, Data<Dtype>* detections,
		const float threshold, const std::vector<cv::Scalar>& colors,
		const std::map<int, std::string>& labelToDisplayName, const std::string& saveFile) {
	// Retrieve detections.
	SASSERT0(detections->width() == 7);
	const int numDet = detections->height();
	const int numImg = images.size();
	if (numDet == 0 || numImg == 0) {
		return;
	}
	// Compute FPS.
	float fps = numImg / (static_cast<double>(clock() - startClock) / CLOCKS_PER_SEC);
	const Dtype* detectionsData = detections->host_data();
	const int width = images[0].cols;
	const int height = images[0].rows;
	// Bucket the rows by image and label, dropping low-scoring detections.
	std::vector<LabelBBox> allDetections(numImg);
	for (int i = 0; i < numDet; i++) {
		const int imgIdx = detectionsData[i * 7];
		SASSERT0(imgIdx < numImg);
		const int label = detectionsData[i * 7 + 1];
		const float score = detectionsData[i * 7 + 2];
		if (score < threshold) {
			continue;
		}
		NormalizedBBox bbox;
		bbox.xmin = detectionsData[i * 7 + 3] * width;
		bbox.ymin = detectionsData[i * 7 + 4] * height;
		bbox.xmax = detectionsData[i * 7 + 5] * width;
		bbox.ymax = detectionsData[i * 7 + 6] * height;
		bbox.score = score;
		allDetections[imgIdx][label].push_back(bbox);
	}
	int fontface = cv::FONT_HERSHEY_SIMPLEX;
	double scale = 1;
	int thickness = 2;
	int baseline = 0;
	char buffer[50];
	for (int i = 0; i < numImg; i++) {
		cv::Mat image = images[i];
		// Show FPS
		snprintf(buffer, sizeof(buffer), "FPS: %.2f", fps);
		cv::Size text = cv::getTextSize(buffer, fontface, scale, thickness, &baseline);
		cv::rectangle(image, cv::Point(0, 0), cv::Point(text.width, text.height + baseline),
				CV_RGB(255, 255, 255), CV_FILLED);
		cv::putText(image, buffer, cv::Point(0, text.height + baseline / 2.),
				fontface, scale, CV_RGB(0, 0, 0), thickness, 8);
		// Draw bboxes.
		for (std::map<int, std::vector<NormalizedBBox>>::iterator it = allDetections[i].begin();
				it != allDetections[i].end(); it++) {
			int label = it->first;
			std::string labelName = "Unknown";
			if (labelToDisplayName.find(label) != labelToDisplayName.end()) {
				labelName = labelToDisplayName.find(label)->second;
			}
			SASSERT0(label < colors.size());
			const cv::Scalar& color = colors[label];
			const std::vector<NormalizedBBox>& bboxes = it->second;
			for (int j = 0; j < bboxes.size(); j++) {
				cv::Point topLeftPt(bboxes[j].xmin, bboxes[j].ymin);
				cv::Point bottomRightPt(bboxes[j].xmax, bboxes[j].ymax);
				cv::rectangle(image, topLeftPt, bottomRightPt, color, 4);
				// "label: score" caption anchored at the bottom-left corner.
				cv::Point bottomLeftPt(bboxes[j].xmin, bboxes[j].ymax);
				snprintf(buffer, sizeof(buffer), "%s: %.2f", labelName.c_str(),
						bboxes[j].score);
				cv::Size text = cv::getTextSize(buffer, fontface, scale, thickness,
						&baseline);
				cv::rectangle(image, bottomLeftPt + cv::Point(0, 0),
						bottomLeftPt + cv::Point(text.width, -text.height - baseline),
						color, CV_FILLED);
				cv::putText(image, buffer, bottomLeftPt - cv::Point(0, baseline),
						fontface, scale, CV_RGB(0, 0, 0), thickness, 8);
			}
		}
		// Save result if required.
		if (!saveFile.empty()) {
			if (!capOut.isOpened()) {
				// Lazily open the output video on the first saved frame.
				cv::Size size(image.size().width, image.size().height);
				cv::VideoWriter outputVideo(saveFile, CV_FOURCC('D', 'I', 'V', 'X'),
						30, size, true);
				capOut = outputVideo;
			}
			capOut.write(image);
		}
		cv::imshow("detections", image);
		if (cv::waitKey(1) == 27) {
			// ESC pressed: request shutdown.
			raise(SIGINT);
		}
	}
	// Reset the FPS reference for the next batch.
	startClock = clock();
}
template void VisualizeBBox(const std::vector<cv::Mat>& images, Data<float>* detections,
		const float threshold, const std::vector<cv::Scalar>& colors,
		const std::map<int, std::string>& labelToDisplayName, const std::string& saveFile);
template <typename Dtype>
void GetDetectionResults(const Dtype* detData, const int numDet, const int backgroundLabelId,
std::map<int, LabelBBox>* allDetections) {
allDetections->clear();
for (int i = 0; i < numDet; i++) {
int startIdx = i * 7;
int itemId = detData[startIdx];
if (itemId == -1) {
continue;
}
int label = detData[startIdx + 1];
SASSERT(backgroundLabelId != label,
"Found background label in the detection results.");
NormalizedBBox bbox;
bbox.score = detData[startIdx + 2];
bbox.xmin = detData[startIdx + 3];
bbox.ymin = detData[startIdx + 4];
bbox.xmax = detData[startIdx + 5];
bbox.ymax = detData[startIdx + 6];
float bboxSize = BBoxSize(bbox);
bbox.size = bboxSize;
(*allDetections)[itemId][label].push_back(bbox);
}
}
template void GetDetectionResults(const float* detData, const int numDet,
const int backgroundLabelId, std::map<int, LabelBBox>* allDetections);
// Decodes location predictions into bounding box coordinates on the GPU.
// One thread handles one bbox coordinate: the flat index decomposes into
// i (coordinate 0..3), c (location class) and d (prior index).
// prior_data layout: num_priors * 4 prior corner values first, followed by
// num_priors * 4 variances (the vi offset below).
// code_type: 0 = CORNER, 1 = CENTER_SIZE, 2 = CORNER_SIZE (see the host-side
// mapping in DecodeBBoxesGPU); any other value decodes nothing (only the
// optional final clip runs).
template <typename Dtype>
__global__ void DecodeBBoxesKernel(const int nthreads, const Dtype* loc_data,
		const Dtype* prior_data, const int code_type,
		const bool variance_encoded_in_target, const int num_priors,
		const bool share_location, const int num_loc_classes, const int background_label_id,
		const bool clip_bbox, Dtype* bbox_data) {
	CUDA_KERNEL_LOOP(index, nthreads) {
		const int i = index % 4;
		const int c = (index / 4) % num_loc_classes;
		const int d = (index / 4 / num_loc_classes) % num_priors;
		if (!share_location && c == background_label_id) {
			// Ignore background class if not share_location.
			return;
		}
		// pi: base offset of this prior's corners; vi: of its variances.
		const int pi = d * 4;
		const int vi = pi + num_priors * 4;
		//if (code_type == PriorBoxParameter_CodeType_CORNER) {
		if (code_type == 0) {
			if (variance_encoded_in_target) {
				// variance is encoded in target, we simply need to add the offset
				// predictions.
				bbox_data[index] = prior_data[pi + i] + loc_data[index];
			} else {
				// variance is encoded in bbox, we need to scale the offset accordingly.
				bbox_data[index] =
						prior_data[pi + i] + loc_data[index] * prior_data[vi + i];
			}
		//} else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) {
		} else if (code_type == 1) {
			const Dtype p_xmin = prior_data[pi];
			const Dtype p_ymin = prior_data[pi + 1];
			const Dtype p_xmax = prior_data[pi + 2];
			const Dtype p_ymax = prior_data[pi + 3];
			const Dtype prior_width = p_xmax - p_xmin;
			const Dtype prior_height = p_ymax - p_ymin;
			const Dtype prior_center_x = (p_xmin + p_xmax) / 2.;
			const Dtype prior_center_y = (p_ymin + p_ymax) / 2.;
			// index - i is the first of the 4 predictions for this box, so all
			// four offsets are available regardless of which coordinate this
			// thread produces.
			const Dtype xmin = loc_data[index - i];
			const Dtype ymin = loc_data[index - i + 1];
			const Dtype xmax = loc_data[index - i + 2];
			const Dtype ymax = loc_data[index - i + 3];
			Dtype decode_bbox_center_x, decode_bbox_center_y;
			Dtype decode_bbox_width, decode_bbox_height;
			if (variance_encoded_in_target) {
				// variance is encoded in target, we simply need to retore the offset
				// predictions.
				decode_bbox_center_x = xmin * prior_width + prior_center_x;
				decode_bbox_center_y = ymin * prior_height + prior_center_y;
				decode_bbox_width = exp(xmax) * prior_width;
				decode_bbox_height = exp(ymax) * prior_height;
			} else {
				// variance is encoded in bbox, we need to scale the offset accordingly.
				decode_bbox_center_x =
						prior_data[vi] * xmin * prior_width + prior_center_x;
				decode_bbox_center_y =
						prior_data[vi + 1] * ymin * prior_height + prior_center_y;
				decode_bbox_width =
						exp(prior_data[vi + 2] * xmax) * prior_width;
				decode_bbox_height =
						exp(prior_data[vi + 3] * ymax) * prior_height;
			}
			// Emit the corner coordinate this thread is responsible for.
			switch (i) {
				case 0:
					bbox_data[index] = decode_bbox_center_x - decode_bbox_width / 2.;
					break;
				case 1:
					bbox_data[index] = decode_bbox_center_y - decode_bbox_height / 2.;
					break;
				case 2:
					bbox_data[index] = decode_bbox_center_x + decode_bbox_width / 2.;
					break;
				case 3:
					bbox_data[index] = decode_bbox_center_y + decode_bbox_height / 2.;
					break;
			}
		//} else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) {
		} else if (code_type == 2) {
			const Dtype p_xmin = prior_data[pi];
			const Dtype p_ymin = prior_data[pi + 1];
			const Dtype p_xmax = prior_data[pi + 2];
			const Dtype p_ymax = prior_data[pi + 3];
			const Dtype prior_width = p_xmax - p_xmin;
			const Dtype prior_height = p_ymax - p_ymin;
			// Offsets for x coordinates scale with width, y with height.
			Dtype p_size;
			if (i == 0 || i == 2) {
				p_size = prior_width;
			} else {
				p_size = prior_height;
			}
			if (variance_encoded_in_target) {
				// variance is encoded in target, we simply need to add the offset
				// predictions.
				bbox_data[index] = prior_data[pi + i] + loc_data[index] * p_size;
			} else {
				// variance is encoded in bbox, we need to scale the offset accordingly.
				bbox_data[index] =
						prior_data[pi + i] + loc_data[index] * prior_data[vi + i] * p_size;
			}
		} else {
			// Unknown code type.
		}
		if (clip_bbox) {
			// Clamp the decoded coordinate into [0, 1].
			bbox_data[index] = max(min(bbox_data[index], Dtype(1.)), Dtype(0.));
		}
	}
}
// Host wrapper for DecodeBBoxesKernel: maps the CodeType enum onto the
// integer code the kernel branches on, then launches one thread per bbox
// coordinate (nthreads total).
// NOTE(review): an unrecognized codeType leaves codeTypeInt at -1, so the
// kernel decodes nothing for those elements (only the optional clip runs) —
// confirm whether this should assert instead of failing silently.
template <typename Dtype>
void DecodeBBoxesGPU(const int nthreads, const Dtype* locData, const Dtype* priorData,
		const CodeType codeType, const bool varianceEncodedInTarget,
		const int numPriors, const bool shareLocation,
		const int numLocClasses, const int backgroundLabelId,
		const bool clipBBox, Dtype* bboxData) {
	int codeTypeInt = -1;
	if (codeType == CodeType::CORNER) codeTypeInt = 0;
	else if (codeType == CodeType::CENTER_SIZE) codeTypeInt = 1;
	else if (codeType == CodeType::CORNER_SIZE) codeTypeInt = 2;
	hipLaunchKernelGGL(( DecodeBBoxesKernel<Dtype>), dim3(SOOOA_GET_BLOCKS(nthreads)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0, 
			nthreads, locData, priorData, codeTypeInt, varianceEncodedInTarget, numPriors,
			shareLocation, numLocClasses, backgroundLabelId, clipBBox, bboxData);
	CUDA_POST_KERNEL_CHECK;
}
template void DecodeBBoxesGPU(const int nthreads, const float* locData, const float* priorData,
		const CodeType codeType, const bool varianceEncodedInTarget,
		const int numPriors, const bool shareLocation,
		const int numLocClasses, const int backgroundLabelId,
		const bool clipBBox, float* bboxData);
// Copies data into new_data while swapping the data and class axes:
// source layout [num][num_data][num_classes][num_dim] ->
// destination layout [num][num_classes][num_data][num_dim].
// One thread moves one element.
template <typename Dtype>
__global__ void PermuteDataKernel(const int nthreads, const Dtype* data,
		const int num_classes, const int num_data, const int num_dim, Dtype* new_data) {
	CUDA_KERNEL_LOOP(index, nthreads) {
		// Decompose the flat source index (num_dim is the fastest axis).
		const int i = index % num_dim;
		const int c = (index / num_dim) % num_classes;
		const int d = (index / num_dim / num_classes) % num_data;
		const int n = index / num_dim / num_classes / num_data;
		const int new_index = ((n * num_classes + c) * num_data + d) * num_dim + i;
		new_data[new_index] = data[index];
	}
}
// Host wrapper for PermuteDataKernel. nthreads is expected to be the total
// element count (num * numClasses * numData * numDim) so every element is
// moved — each thread handles exactly one element.
template <typename Dtype>
void PermuteDataGPU(const int nthreads,
		const Dtype* data, const int numClasses, const int numData,
		const int numDim, Dtype* newData) {
	hipLaunchKernelGGL(( PermuteDataKernel<Dtype>), dim3(SOOOA_GET_BLOCKS(nthreads)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0, 
			nthreads, data, numClasses, numData, numDim, newData);
	CUDA_POST_KERNEL_CHECK;
}
template void PermuteDataGPU(const int nthreads,
		const float* data, const int numClasses, const int numData,
		const int numDim, float* newData);
// Project bbox onto the coordinate system defined by src_bbox.
// Returns false when the boxes do not overlap at all, or when the projected
// box clips down to zero area; otherwise fills projBBox and returns true.
bool ProjectBBox(const NormalizedBBox& srcBBox, const NormalizedBBox& bbox,
		NormalizedBBox* projBBox) {
	const bool disjoint = bbox.xmin >= srcBBox.xmax || bbox.xmax <= srcBBox.xmin ||
			bbox.ymin >= srcBBox.ymax || bbox.ymax <= srcBBox.ymin;
	if (disjoint) {
		return false;
	}
	// Express each corner relative to srcBBox's origin and extent.
	const float srcWidth = srcBBox.xmax - srcBBox.xmin;
	const float srcHeight = srcBBox.ymax - srcBBox.ymin;
	projBBox->xmin = (bbox.xmin - srcBBox.xmin) / srcWidth;
	projBBox->ymin = (bbox.ymin - srcBBox.ymin) / srcHeight;
	projBBox->xmax = (bbox.xmax - srcBBox.xmin) / srcWidth;
	projBBox->ymax = (bbox.ymax - srcBBox.ymin) / srcHeight;
	projBBox->difficult = bbox.difficult;
	ClipBBox(*projBBox, projBBox);
	return BBoxSize(*projBBox) > 0;
}
// Maps a bbox given in srcBBox-relative coordinates back into the coordinate
// system srcBBox itself lives in (the inverse mapping of ProjectBBox).
void LocateBBox(const NormalizedBBox& srcBBox, const NormalizedBBox& bbox,
		NormalizedBBox* locBBox) {
	const float w = srcBBox.xmax - srcBBox.xmin;
	const float h = srcBBox.ymax - srcBBox.ymin;
	locBBox->xmin = srcBBox.xmin + bbox.xmin * w;
	locBBox->ymin = srcBBox.ymin + bbox.ymin * h;
	locBBox->xmax = srcBBox.xmin + bbox.xmax * w;
	locBBox->ymax = srcBBox.ymin + bbox.ymax * h;
	locBBox->difficult = bbox.difficult;
}
float BBoxCoverage(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2) {
NormalizedBBox intersectBBox;
IntersectBBox(bbox1, bbox2, &intersectBBox);
float intersectSize = BBoxSize(intersectBBox);
if (intersectSize > 0) {
float bbox1Size = BBoxSize(bbox1);
return intersectSize / bbox1Size;
} else {
return 0.f;
}
}
bool MeetEmitConstraint(const NormalizedBBox& srcBBox, const NormalizedBBox& bbox,
const EmitConstraint& emitConstraint) {
EmitType emitType = emitConstraint.emitType;
if (emitType == EmitType::CENTER) {
float xcenter = (bbox.xmin + bbox.xmax) / 2;
float ycenter = (bbox.ymin + bbox.ymax) / 2;
if (xcenter >= srcBBox.xmin && xcenter <= srcBBox.xmax &&
ycenter >= srcBBox.ymin && ycenter <= srcBBox.ymax) {
return true;
} else {
return false;
}
} else if (emitType == EmitType::MIN_OVERLAP) {
float bboxCoverage = BBoxCoverage(bbox, srcBBox);
return bboxCoverage > emitConstraint.emitOverlap;
} else {
SASSERT(false, "Unknown emit type.");
return false;
}
}
// Rescales bbox in place when FIT_SMALL_SIZE resizing with positive scale
// factors was applied; any other configuration leaves bbox untouched.
void ExtrapolateBBox(const ResizeParam& param, const int height, const int width,
		const NormalizedBBox& cropBBox, NormalizedBBox* bbox) {
	const float heightScale = param.heightScale;
	const float widthScale = param.widthScale;
	if (!(heightScale > 0 && widthScale > 0 &&
			param.resizeMode == ResizeMode::FIT_SMALL_SIZE)) {
		return;	// nothing to do for other resize modes / scales
	}
	// Recover the resized dimensions that preserve the original aspect ratio.
	const float origAspect = static_cast<float>(width) / height;
	float resizeHeight = param.height;
	float resizeWidth = param.width;
	const float resizeAspect = resizeWidth / resizeHeight;
	if (origAspect < resizeAspect) {
		resizeHeight = resizeWidth / origAspect;
	} else {
		resizeWidth = resizeHeight * origAspect;
	}
	// Size of the crop region in resized-image pixels.
	const float cropHeight = resizeHeight * (cropBBox.ymax - cropBBox.ymin);
	const float cropWidth = resizeWidth * (cropBBox.xmax - cropBBox.xmin);
	SASSERT0(cropWidth >= widthScale);
	SASSERT0(cropHeight >= heightScale);
	bbox->xmin = bbox->xmin * cropWidth / widthScale;
	bbox->xmax = bbox->xmax * cropWidth / widthScale;
	bbox->ymin = bbox->ymin * cropHeight / heightScale;
	bbox->ymax = bbox->ymax * cropHeight / heightScale;
}
// Emits the running total of each pair's second field, visited in order of
// descending first field (score). The input is not modified.
void CumSum(const std::vector<std::pair<float, int>>& pairs, std::vector<int>* cumSum) {
	std::vector<std::pair<float, int>> sorted = pairs;
	std::stable_sort(sorted.begin(), sorted.end(), SortScorePairDescend<int>);
	cumSum->clear();
	int total = 0;
	for (int i = 0; i < (int)sorted.size(); i++) {
		total += sorted[i].second;
		cumSum->push_back(total);
	}
}
// Computes the precision/recall curve and the average precision for a class.
// tp/fp: per-detection (score, 0/1) pairs; at every index the tp and fp flags
//     are complementary and the scores match (asserted below).
// numPos: number of ground truth positives (recall denominator).
// apVersion: "11point" (VOC2007 style), "MaxIntegral" (VOC2012/ILSVRC style)
//     or "Integral" (natural integration); anything else only logs a warning
//     and leaves *ap at 0.
void ComputeAP(const std::vector<std::pair<float, int>>& tp, const int numPos,
		const std::vector<std::pair<float, int>>& fp, const std::string apVersion,
		std::vector<float>* prec, std::vector<float>* rec, float* ap) {
	const float eps = 1e-6;
	SASSERT(tp.size() == fp.size(), "tp must have same size as fp.");
	const int num = tp.size();
	// Make sure that tp and fp have complement value.
	for (int i = 0; i < num; i++) {
		SASSERT0(::fabs(tp[i].first - fp[i].first) <= eps);
		SASSERT0(tp[i].second == 1 - fp[i].second);
	}
	prec->clear();
	rec->clear();
	*ap = 0;
	if (tp.size() == 0 || numPos == 0) {
		return;
	}
	// Compute cumSum of tp.
	std::vector<int> tpCumSum;
	CumSum(tp, &tpCumSum);
	SASSERT0(tpCumSum.size() == num);
	// Compute cumSum of fp.
	std::vector<int> fpCumSum;
	CumSum(fp, &fpCumSum);
	SASSERT0(fpCumSum.size() == num);
	// Compute precision.
	for (int i = 0; i < num; i++) {
		prec->push_back(static_cast<float>(tpCumSum[i]) / (tpCumSum[i] + fpCumSum[i]));
	}
	// Compute recall.
	for (int i = 0; i < num; i++) {
		SASSERT0(tpCumSum[i] <= numPos);
		rec->push_back(static_cast<float>(tpCumSum[i]) / numPos);
	}
	if (apVersion == "11point") {
		// VOC2007 style for computing AP: average the maximum precision at
		// the 11 recall levels 0.0, 0.1, ..., 1.0.
		std::vector<float> maxPrecs(11, 0.f);
		int startIdx = num - 1;
		for (int j = 10; j >= 0; j--) {
			// Scan right to left so maxPrecs[j] ends up holding the max
			// precision over all points with recall >= j/10.
			for (int i = startIdx; i >= 0; i--) {
				if ((*rec)[i] < j / 10.f) {
					startIdx = i;
					if (j > 0) {
						// Seed the next (lower) recall level with this max.
						maxPrecs[j - 1] = maxPrecs[j];
					}
					break;
				} else {
					if (maxPrecs[j] < (*prec)[i]) {
						maxPrecs[j] = (*prec)[i];
					}
				}
			}
		}
		for (int j = 10; j >= 0; j--) {
			*ap += maxPrecs[j] / 11;
		}
	} else if (apVersion == "MaxIntegral") {
		// VOC2012 or ILSVRC style for computing AP: integrate the running
		// maximum of precision over recall, right to left.
		float curRec = rec->back();
		float curPrec = prec->back();
		for (int i = num - 2; i >= 0; i--) {
			curPrec = std::max<float>((*prec)[i], curPrec);
			if (fabs(curRec - (*rec)[i]) > eps) {
				*ap += curPrec * fabs(curRec - (*rec)[i]);
			}
			curRec = (*rec)[i];
		}
		*ap += curRec * curPrec;
	} else if (apVersion == "Integral") {
		// Natural integral of the raw precision/recall curve.
		float prevRec = 0.f;
		for (int i = 0; i < num; i++) {
			if (fabs((*rec)[i] - prevRec) > eps) {
				*ap += (*prec)[i] * fabs((*rec)[i] - prevRec);
			}
			prevRec = (*rec)[i];
		}
	} else {
		STDOUT_LOG("Unknown apVersion: %s", apVersion.c_str());
	}
}
| 3a9e11c05885a5a516358d4c75702127655484da.cu | #include <set>
#include <cmath>
#include <cfloat>
#include <csignal>
#include <algorithm>
#include <boost/iterator/counting_iterator.hpp>
#include "BBoxUtil.h"
#include "MathFunctions.h"
#include "SysLog.h"
#include "StdOutLog.h"
//using namespace std;
// Strict-weak-ordering comparator: lower-scoring bboxes sort first.
bool SortBBoxAscend(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2) {
	const float lhs = bbox1.score;
	const float rhs = bbox2.score;
	return lhs < rhs;
}
// Strict-weak-ordering comparator: higher-scoring bboxes sort first.
bool SortBBoxDescend(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2) {
	const float lhs = bbox1.score;
	const float rhs = bbox2.score;
	return lhs > rhs;
}
// Comparator ordering (score, payload) pairs by increasing score.
template <typename T>
bool SortScorePairAscend(const std::pair<float, T>& pair1, const std::pair<float, T>& pair2) {
	const float lhs = pair1.first;
	const float rhs = pair2.first;
	return lhs < rhs;
}
template bool SortScorePairAscend(const std::pair<float, int>& pair1,
		const std::pair<float, int>& pair2);
template bool SortScorePairAscend(const std::pair<float, std::pair<int, int>>& pair1,
		const std::pair<float, std::pair<int, int>>& pair2);
// Comparator for (score, payload) pairs: descending by score; payload ignored.
template <typename T>
bool SortScorePairDescend(const std::pair<float, T>& pair1, const std::pair<float, T>& pair2) {
    return pair2.first < pair1.first;
}

// Explicit instantiations for the payload types used in this file.
template bool SortScorePairDescend(const std::pair<float, int>& pair1,
        const std::pair<float, int>& pair2);
template bool SortScorePairDescend(const std::pair<float, std::pair<int, int>>& pair1,
        const std::pair<float, std::pair<int, int>>& pair2);
// Device-side minimum of two values (same tie/NaN behavior as x < y ? x : y).
template <typename Dtype>
__device__ Dtype Min(const Dtype x, const Dtype y) {
    if (x < y) {
        return x;
    }
    return y;
}
/**
 * Device-side maximum of two values (same tie/NaN behavior as x > y ? x : y).
 * Named MaxDevice instead of Max to avoid a clash with an existing Max symbol
 * (original author's note: the clashing definition's location is unknown).
 */
template <typename Dtype>
__device__ Dtype MaxDevice(const Dtype x, const Dtype y) {
    if (x > y) {
        return x;
    }
    return y;
}
// Clamp each of the 4 bbox coordinates [xmin, ymin, xmax, ymax] into [0, 1].
template <typename Dtype>
__device__ void ClipBBoxGPU(const Dtype* bbox, Dtype* clip_bbox) {
    for (int c = 0; c < 4; ++c) {
        clip_bbox[c] = MaxDevice(Min(bbox[c], Dtype(1.)), Dtype(0.));
    }
}

template __device__ void ClipBBoxGPU(const float* bbox, float* clip_bbox);
// Parse the flat ground-truth blob into per-image bbox lists.
// gtData layout: numGt records of 8 floats:
//   [itemId, label, ?, xmin, ymin, xmax, ymax, difficult]
// Records with itemId == -1 are padding and skipped; difficult boxes are
// skipped unless useDifficultGt is set. Asserts that no record carries the
// background label.
template <typename Dtype>
void GetGroundTruth(const Dtype* gtData, const int numGt, const int backgroundLabelId,
        const bool useDifficultGt, std::map<int, std::vector<NormalizedBBox>>* allGtBboxes) {
    allGtBboxes->clear();
    for (int g = 0; g < numGt; g++) {
        const Dtype* rec = gtData + g * 8;
        const int itemId = rec[0];
        if (itemId == -1) {
            // Padding record.
            continue;
        }
        const int label = rec[1];
        SASSERT(backgroundLabelId != label,
                "Found background label in the dataset\nbackground label id: %d\nlabel:%d",
                backgroundLabelId, label);
        const bool difficult = static_cast<bool>(rec[7]);
        if (difficult && !useDifficultGt) {
            // Difficult ground truth is excluded unless explicitly requested.
            continue;
        }
        NormalizedBBox bbox;
        bbox.label = label;
        bbox.xmin = rec[3];
        bbox.ymin = rec[4];
        bbox.xmax = rec[5];
        bbox.ymax = rec[6];
        bbox.difficult = difficult;
        bbox.size = BBoxSize(bbox);
        (*allGtBboxes)[itemId].push_back(bbox);
    }
}

template void GetGroundTruth(const float* gtData, const int numGt,
        const int backgroundLabelId, const bool useDifficultGt,
        std::map<int, std::vector<NormalizedBBox>>* allGtBboxes);
// Parse the flat ground-truth blob into per-image, per-label bbox lists.
// Same 8-float record layout as the std::vector overload above; results are
// grouped as (*allGtBBoxes)[itemId][label].
template <typename Dtype>
void GetGroundTruth(const Dtype* gtData, const int numGt, const int backgroundLabelId,
        const bool useDifficultGt, std::map<int, LabelBBox>* allGtBBoxes) {
    allGtBBoxes->clear();
    for (int i = 0; i < numGt; i++) {
        int startIdx = i * 8;
        int itemId = gtData[startIdx];
        if (itemId == -1) {
            // Padding record.
            continue;
        }
        NormalizedBBox bbox;
        int label = gtData[startIdx + 1];
        // The dataset must never label a box as background. (The original
        // wrapped this assert in `if (backgroundLabelId == label)`, which is
        // redundant: the assertion fires exactly in that case.)
        SASSERT(backgroundLabelId != label, "Found background label in the dataset.");
        bool difficult = static_cast<bool>(gtData[startIdx + 7]);
        if (!useDifficultGt && difficult) {
            // Skip difficult ground truth unless explicitly requested.
            continue;
        }
        bbox.xmin = gtData[startIdx + 3];
        bbox.ymin = gtData[startIdx + 4];
        bbox.xmax = gtData[startIdx + 5];
        bbox.ymax = gtData[startIdx + 6];
        bbox.difficult = difficult;
        float bboxSize = BBoxSize(bbox);
        bbox.size = bboxSize;
        (*allGtBBoxes)[itemId][label].push_back(bbox);
    }
}

template void GetGroundTruth(const float* gtData, const int numGt,
        const int backgroundLabelId, const bool useDifficultGt,
        std::map<int, LabelBBox>* allGtBBoxes);
// Unpack prior boxes and their variances from the prior blob.
// Layout: the first numPriors * 4 floats are box corners; the following
// numPriors * 4 floats are the per-coordinate variances.
template <typename Dtype>
void GetPriorBBoxes(const Dtype* priorData, const int numPriors,
        std::vector<NormalizedBBox>* priorBBoxes,
        std::vector<std::vector<float>>* priorVariances) {
    priorBBoxes->clear();
    priorVariances->clear();
    for (int p = 0; p < numPriors; p++) {
        const Dtype* box = priorData + p * 4;
        NormalizedBBox prior;
        prior.xmin = box[0];
        prior.ymin = box[1];
        prior.xmax = box[2];
        prior.ymax = box[3];
        prior.size = BBoxSize(prior);
        priorBBoxes->push_back(prior);
    }
    // Variances start right after the numPriors boxes.
    const Dtype* varBase = priorData + numPriors * 4;
    for (int p = 0; p < numPriors; p++) {
        const Dtype* var = varBase + p * 4;
        std::vector<float> variance;
        for (int k = 0; k < 4; k++) {
            variance.push_back(var[k]);
        }
        priorVariances->push_back(variance);
    }
}

template void GetPriorBBoxes(const float* priorData, const int numPriors,
        std::vector<NormalizedBBox>* priorBBoxes, std::vector<std::vector<float>>* priorVariances);
// Unpack location predictions into one LabelBBox per batch image.
// locData holds num images, each with numPredsPerClass predictions of
// numLocClasses boxes (4 floats each). When shareLocation is set, the single
// class is stored under the sentinel label -1.
template <typename Dtype>
void GetLocPredictions(const Dtype* locData, const int num, const int numPredsPerClass,
        const int numLocClasses, const bool shareLocation, std::vector<LabelBBox>* locPreds) {
    locPreds->clear();
    if (shareLocation) {
        SASSERT0(numLocClasses == 1);
    }
    locPreds->resize(num);
    const Dtype* cursor = locData;
    for (int n = 0; n < num; n++) {
        LabelBBox& perLabel = (*locPreds)[n];
        for (int p = 0; p < numPredsPerClass; p++) {
            for (int c = 0; c < numLocClasses; c++) {
                const int label = shareLocation ? -1 : c;
                if (perLabel.find(label) == perLabel.end()) {
                    perLabel[label].resize(numPredsPerClass);
                }
                const Dtype* box = cursor + (p * numLocClasses + c) * 4;
                perLabel[label][p].xmin = box[0];
                perLabel[label][p].ymin = box[1];
                perLabel[label][p].xmax = box[2];
                perLabel[label][p].ymax = box[3];
            }
        }
        // Advance to the next image's predictions.
        cursor += numPredsPerClass * numLocClasses * 4;
    }
}

template void GetLocPredictions(const float* locData, const int num,
        const int numPredsPerClass, const int numLocClasses, const bool shareLocation,
        std::vector<LabelBBox>* locPreds);
/*
 * Match location predictions (or the shared prior boxes) against ground truth
 * for every image in the batch.
 *
 * allLocPreds:    per-image location predictions.
 * allGtBBoxes:    per-image ground-truth boxes, keyed by batch index.
 * priorBBoxes:    prior boxes (shared across the whole batch).
 * priorVariances: variances matching priorBBoxes.
 *
 * Outputs (one entry per image, keyed by label; -1 when location is shared):
 * allMatchOverlaps: per-prediction best overlap with any ground truth.
 * allMatchIndices:  per-prediction matched ground-truth index (-1 = unmatched).
 */
void FindMatches(const std::vector<LabelBBox>& allLocPreds,
        const std::map<int, std::vector<NormalizedBBox>>& allGtBBoxes,
        const std::vector<NormalizedBBox>& priorBBoxes,
        const std::vector<std::vector<float>>& priorVariances,
        const int numClasses, const bool shareLocation, const MatchType matchType,
        const float overlapThreshold, const bool usePriorForMatching,
        const int backgroundLabelId, const CodeType codeType,
        const bool encodeVarianceInTarget, const bool ignoreCrossBoundaryBBox,
        std::vector<std::map<int, std::vector<float>>>* allMatchOverlaps,
        std::vector<std::map<int, std::vector<int>>>* allMatchIndices) {
    SASSERT(numClasses > 0, "numClasses should not be less than 1.");
    const int locClasses = shareLocation ? 1 : numClasses;
    // Find the matches.
    // num is the batch size.
    int num = allLocPreds.size();
    // For each image in the batch:
    for (int i = 0; i < num; i++) {
        // label -> per-prediction matched gt index / overlap.
        // (NOTE: "matchOverlpas" is a long-standing local-name typo.)
        std::map<int, std::vector<int>> matchIndices;
        std::map<int, std::vector<float>> matchOverlpas;
        // Check if there is ground truth for current image.
        if (allGtBBoxes.find(i) == allGtBBoxes.end()) {
            // There is no gt for current image. All predictions are negative.
            allMatchIndices->push_back(matchIndices);
            allMatchOverlaps->push_back(matchOverlpas);
            continue;
        }
        // Find match between predictions and ground truth.
        // gtBBoxes: all ground-truth boxes of this single image.
        const std::vector<NormalizedBBox>& gtBBoxes = allGtBBoxes.find(i)->second;
        // Currently unused path: callers pass usePriorForMatching = true.
        if (!usePriorForMatching) {
            for (int c = 0; c < locClasses; c++) {
                int label = shareLocation ? -1 : c;
                if (!shareLocation && label == backgroundLabelId) {
                    // Ignore background loc predictions.
                    continue;
                }
                // Decode the prediction into bbox first.
                std::vector<NormalizedBBox> locBBoxes;
                bool clipBBox = false;
                DecodeBBoxes(priorBBoxes, priorVariances, codeType, encodeVarianceInTarget,
                        clipBBox, allLocPreds[i].find(label)->second, &locBBoxes);
                MatchBBox(gtBBoxes, locBBoxes, label, matchType, overlapThreshold,
                        ignoreCrossBoundaryBBox, &matchIndices[label], &matchOverlpas[label]);
            }
        } else {
            // Use prior bboxes to match against all ground truth.
            std::vector<int> tempMatchIndices;
            std::vector<float> tempMatchOverlaps;
            const int label = -1;
            // Match this image's gt boxes against the shared prior boxes.
            MatchBBox(gtBBoxes, priorBBoxes, label, matchType, overlapThreshold,
                    ignoreCrossBoundaryBBox, &tempMatchIndices, &tempMatchOverlaps);
            // shareLocation case: only the sentinel label -1 exists, so no
            // per-class distribution is needed.
            if (shareLocation) {
                matchIndices[label] = tempMatchIndices;
                matchOverlpas[label] = tempMatchOverlaps;
            } else {
                // Get ground truth label for each ground truth bbox.
                std::vector<int> gtLabels;
                for (int g = 0; g < gtBBoxes.size(); g++) {
                    gtLabels.push_back(gtBBoxes[g].label);
                }
                // Distribute the matching results to different locClass.
                for (int c = 0; c < locClasses; c++) {
                    if (c == backgroundLabelId) {
                        // Ignore background loc predictions.
                        continue;
                    }
                    matchIndices[c].resize(tempMatchIndices.size(), -1);
                    matchOverlpas[c] = tempMatchOverlaps;
                    for (int m = 0; m < tempMatchIndices.size(); m++) {
                        if (tempMatchIndices[m] > -1) {
                            const int gtIdx = tempMatchIndices[m];
                            SASSERT0(gtIdx < gtLabels.size());
                            // Record the match only under the class that gt
                            // box actually belongs to.
                            if (c == gtLabels[gtIdx]) {
                                matchIndices[c][m] = gtIdx;
                            }
                        }
                    }
                }
            }
        }
        allMatchIndices->push_back(matchIndices);
        allMatchOverlaps->push_back(matchOverlpas);
    }
}
// Decode a whole vector of predictions against their priors; thin per-element
// wrapper around DecodeBBox().
void DecodeBBoxes(const std::vector<NormalizedBBox>& priorBBoxes,
        const std::vector<std::vector<float>>& priorVariances,
        const CodeType codeType, const bool varianceEncodedInTarget,
        const bool clipBBox, const std::vector<NormalizedBBox>& bboxes,
        std::vector<NormalizedBBox>* decodeBBoxes) {
    SASSERT0(priorBBoxes.size() == priorVariances.size());
    SASSERT0(priorBBoxes.size() == bboxes.size());
    const int count = priorBBoxes.size();
    if (count >= 1) {
        // All variance vectors share the same layout; spot-check the first.
        SASSERT0(priorVariances[0].size() == 4);
    }
    decodeBBoxes->clear();
    for (int b = 0; b < count; b++) {
        NormalizedBBox decoded;
        DecodeBBox(priorBBoxes[b], priorVariances[b], codeType, varianceEncodedInTarget,
                clipBBox, bboxes[b], &decoded);
        decodeBBoxes->push_back(decoded);
    }
}
// Decode one predicted box offset against its prior box.
//
// priorBBox:      the anchor/prior box.
// priorVariances: 4 per-coordinate variances for this prior.
// codeType:       encoding scheme (CORNER, CENTER_SIZE, or CORNER_SIZE).
// varianceEncodedInTarget: when true the variance was folded into the encoded
//                 target, so the raw offsets are applied directly; otherwise
//                 each offset is scaled by the corresponding variance.
// clipBBox:       clamp the decoded box into [0, 1] when set.
// bbox:           the encoded prediction (offsets).
// decodeBBox:     out; its size field is refreshed after decoding.
void DecodeBBox(const NormalizedBBox& priorBBox, const std::vector<float>& priorVariances,
        const CodeType codeType, const bool varianceEncodedInTarget,
        const bool clipBBox, const NormalizedBBox& bbox, NormalizedBBox* decodeBBox) {
    if (codeType == CodeType::CORNER) {
        if (varianceEncodedInTarget) {
            // Variance is encoded in target: simply add the offset predictions.
            decodeBBox->xmin = priorBBox.xmin + bbox.xmin;
            decodeBBox->ymin = priorBBox.ymin + bbox.ymin;
            decodeBBox->xmax = priorBBox.xmax + bbox.xmax;
            decodeBBox->ymax = priorBBox.ymax + bbox.ymax;
        } else {
            // Variance is encoded in bbox: scale each offset accordingly.
            decodeBBox->xmin = priorBBox.xmin + priorVariances[0] * bbox.xmin;
            decodeBBox->ymin = priorBBox.ymin + priorVariances[1] * bbox.ymin;
            decodeBBox->xmax = priorBBox.xmax + priorVariances[2] * bbox.xmax;
            decodeBBox->ymax = priorBBox.ymax + priorVariances[3] * bbox.ymax;
        }
    } else if (codeType == CodeType::CENTER_SIZE) {
        // Offsets are (center shift in prior units, log-scale size factors).
        float priorWidth = priorBBox.xmax - priorBBox.xmin;
        SASSERT0(priorWidth > 0);
        float priorHeight = priorBBox.ymax - priorBBox.ymin;
        SASSERT0(priorHeight > 0);
        float priorCenterX = (priorBBox.xmin + priorBBox.xmax) / 2.f;
        float priorCenterY = (priorBBox.ymin + priorBBox.ymax) / 2.f;
        float decodeBBoxCenterX;
        float decodeBBoxCenterY;
        float decodeBBoxWidth;
        float decodeBBoxHeight;
        if (varianceEncodedInTarget) {
            // Variance is encoded in target: restore the offsets directly.
            decodeBBoxCenterX = bbox.xmin * priorWidth + priorCenterX;
            decodeBBoxCenterY = bbox.ymin * priorHeight + priorCenterY;
            decodeBBoxWidth = std::exp(bbox.xmax) * priorWidth;
            decodeBBoxHeight = std::exp(bbox.ymax) * priorHeight;
        } else {
            // Variance is encoded in bbox: scale each offset accordingly.
            decodeBBoxCenterX = priorVariances[0] * bbox.xmin * priorWidth + priorCenterX;
            decodeBBoxCenterY = priorVariances[1] * bbox.ymin * priorHeight + priorCenterY;
            decodeBBoxWidth = std::exp(priorVariances[2] * bbox.xmax) * priorWidth;
            decodeBBoxHeight = std::exp(priorVariances[3] * bbox.ymax) * priorHeight;
        }
        decodeBBox->xmin = decodeBBoxCenterX - decodeBBoxWidth / 2.f;
        decodeBBox->ymin = decodeBBoxCenterY - decodeBBoxHeight / 2.f;
        decodeBBox->xmax = decodeBBoxCenterX + decodeBBoxWidth / 2.f;
        decodeBBox->ymax = decodeBBoxCenterY + decodeBBoxHeight / 2.f;
    } else if (codeType == CodeType::CORNER_SIZE) {
        // Offsets are corner shifts measured in units of the prior's size.
        float priorWidth = priorBBox.xmax - priorBBox.xmin;
        SASSERT0(priorWidth > 0);
        float priorHeight = priorBBox.ymax - priorBBox.ymin;
        SASSERT0(priorHeight > 0);
        if (varianceEncodedInTarget) {
            // Variance is encoded in target: simply add the scaled offsets.
            decodeBBox->xmin = priorBBox.xmin + bbox.xmin * priorWidth;
            decodeBBox->ymin = priorBBox.ymin + bbox.ymin * priorHeight;
            decodeBBox->xmax = priorBBox.xmax + bbox.xmax * priorWidth;
            decodeBBox->ymax = priorBBox.ymax + bbox.ymax * priorHeight;
        } else {
            // Variance is encoded in bbox: scale each offset accordingly.
            decodeBBox->xmin = priorBBox.xmin + priorVariances[0] * bbox.xmin * priorWidth;
            decodeBBox->ymin = priorBBox.ymin + priorVariances[1] * bbox.ymin * priorHeight;
            decodeBBox->xmax = priorBBox.xmax + priorVariances[2] * bbox.xmax * priorWidth;
            decodeBBox->ymax = priorBBox.ymax + priorVariances[3] * bbox.ymax * priorHeight;
        }
    } else {
        // BUGFIX: the original message said "LocLossType" (wrong type) and
        // formatted the enum with %s, which is undefined behavior for a
        // non-string argument; report the code type as an integer instead.
        SASSERT(false, "Unknown CodeType: %d", static_cast<int>(codeType));
    }
    decodeBBox->size = BBoxSize(*decodeBBox);
    if (clipBBox) {
        ClipBBox(*decodeBBox, decodeBBox);
    }
}
// Match prediction boxes (in practice: prior boxes) of one image against its
// ground-truth boxes.
//
// gtBBoxes:      ground-truth boxes of the current image.
// predBBoxes:    prediction/prior boxes.
// label:         class to match against; -1 matches against all gt boxes.
// matchIndices:  out, per prediction: matched gt index, -1 = unmatched,
//                -2 = ignored cross-boundary box.
// matchOverlaps: out, per prediction: best Jaccard overlap with any eligible
//                gt box (only overlaps > 1e-6 are considered).
void MatchBBox(const std::vector<NormalizedBBox>& gtBBoxes,
        const std::vector<NormalizedBBox>& predBBoxes, const int label,
        const MatchType matchType, const float overlapThreshold,
        const bool ignoreCrossBoundaryBBox, std::vector<int>* matchIndices,
        std::vector<float>* matchOverlaps) {
    int numPred = predBBoxes.size();
    matchIndices->clear();
    matchIndices->resize(numPred, -1);
    matchOverlaps->clear();
    matchOverlaps->resize(numPred, 0.);
    int numGt = 0;
    std::vector<int> gtIndices;
    if (label == -1) {
        // label -1 means comparing against all ground truth.
        numGt = gtBBoxes.size();
        for (int i = 0; i < numGt; i++) {
            gtIndices.push_back(i);
        }
    } else {
        // Count number of ground truth boxes which has the desired label.
        for (int i = 0; i < gtBBoxes.size(); i++) {
            if (gtBBoxes[i].label == label) {
                numGt++;
                gtIndices.push_back(i);
            }
        }
    }
    if (numGt == 0) {
        return;
    }
    // Store the positive overlap between predictions and ground truth.
    // Map layout: pred box index -> (gt index -> overlap). Only pairs whose
    // overlap exceeds 1e-6 are stored, so a missing entry means "effectively
    // no overlap".
    std::map<int, std::map<int, float>> overlaps;
    for (int i = 0; i < numPred; i++) {
        // ignoreCrossBoundaryBBox is false at the current call sites.
        if (ignoreCrossBoundaryBBox && IsCrossBoundaryBBox(predBBoxes[i])) {
            (*matchIndices)[i] = -2;
            continue;
        }
        for (int j = 0; j < numGt; j++) {
            // Overlap between the i-th pred box and the j-th eligible gt box.
            float overlap = JaccardOverlap(predBBoxes[i], gtBBoxes[gtIndices[j]]);
            if (overlap > 1e-6) {
                // Track the best overlap seen so far for pred box i.
                (*matchOverlaps)[i] = std::max((*matchOverlaps)[i], overlap);
                overlaps[i][j] = overlap;
            }
        }
    }
    // Bipartite matching: repeatedly pick the globally best (pred, gt) pair,
    // once per gt box. gtPool holds the gt indices still unmatched.
    std::vector<int> gtPool;
    for (int i = 0; i < numGt; i++) {
        gtPool.push_back(i);
    }
    // Find, for every gt box, its best-matching prediction.
    while (gtPool.size() > 0) {
        // Find the most overlapped gt and coresponding predictions.
        int maxIdx = -1;
        int maxGtIdx = -1;
        float maxOverlap = -1;
        for (std::map<int, std::map<int, float>>::iterator it = overlaps.begin();
                it != overlaps.end(); it++) {
            // i is the pred box index.
            int i= it->first;
            if ((*matchIndices)[i] != -1) {
                // The prediction already has matched ground truth or is ignored.
                continue;
            }
            for (int p = 0; p < gtPool.size(); p++) {
                int j = gtPool[p];
                // Pairs with overlap <= 1e-6 were never stored in the map.
                if (it->second.find(j) == it->second.end()) {
                    // No overlap between the i-th predcition and j-th ground truth.
                    continue;
                }
                // Find the maximum overlapped pair.
                if (it->second[j] > maxOverlap) {
                    // If the predction has not been matched to any ground truth,
                    // and the overlap is larger than maximum overlap, update.
                    maxIdx = i;
                    maxGtIdx = j;
                    maxOverlap = it->second[j];
                }
            }
        }
        // No remaining prediction overlaps any remaining gt box.
        if (maxIdx == -1) {
            // Cannot find good match.
            break;
        } else {
            // The chosen prediction must not have been matched elsewhere.
            SASSERT0((*matchIndices)[maxIdx] == -1);
            (*matchIndices)[maxIdx] = gtIndices[maxGtIdx];
            (*matchOverlaps)[maxIdx] = maxOverlap;
            // Erase the ground truth: once a gt box has found its best-overlap
            // prediction it must not be matched to another one.
            gtPool.erase(std::find(gtPool.begin(), gtPool.end(), maxGtIdx));
        }
    }
    switch (matchType) {
        case BIPARTITE:
            // Already done.
            break;
        case PER_PREDICTION:
            // In addition to the bipartite matches, match every remaining
            // prediction to its best-overlapping gt box, provided that overlap
            // reaches overlapThreshold.
            // Get most overlapped for the rest prediction bboxes.
            for (std::map<int, std::map<int, float>>::iterator it = overlaps.begin();
                    it != overlaps.end(); it++) {
                int i = it->first;
                // Skip predictions already matched by the bipartite pass.
                if ((*matchIndices)[i] != -1) {
                    // Thre predction already has matched ground truth or is ignored.
                    continue;
                }
                int maxGtIdx = -1;
                float maxOverlap = -1;
                for (int j = 0; j < numGt; j++) {
                    if (it->second.find(j) == it->second.end()) {
                        // No overlap between the i-th predction and j-th ground truth.
                        continue;
                    }
                    // Find the maximum overlapped pair
                    float overlap = it->second[j];
                    if (overlap >= overlapThreshold && overlap > maxOverlap) {
                        // If the predcition has not been matched to any ground truth,
                        // and the overlap is larger than maximum overlap, update.
                        maxGtIdx = j;
                        maxOverlap = overlap;
                    }
                }
                if (maxGtIdx != -1) {
                    // Found a matched ground truth.
                    SASSERT0((*matchIndices)[i] == -1);
                    (*matchIndices)[i] = gtIndices[maxGtIdx];
                    (*matchOverlaps)[i] = maxOverlap;
                }
            }
            break;
        default:
            // NOTE(review): matchType is an enum formatted with %s here — this
            // looks like it should be %d (or a name lookup); confirm SASSERT's
            // formatting behavior.
            SASSERT(false, "Unknown matching type: %s", matchType);
            break;
    }
    return;
}
// Area of a normalized bbox. Invalid boxes (max < min) have size 0; a cached
// positive bbox.size is returned as-is. When normalized is false the box is
// treated as pixel coordinates and the inclusive +1 convention applies.
float BBoxSize(const NormalizedBBox& bbox, const bool normalized) {
    if (bbox.xmax < bbox.xmin || bbox.ymax < bbox.ymin) {
        // Degenerate box: treat as empty.
        return 0;
    }
    if (bbox.size > 0) {
        // Reuse the cached size.
        return bbox.size;
    }
    const float width = bbox.xmax - bbox.xmin;
    const float height = bbox.ymax - bbox.ymin;
    if (normalized) {
        return width * height;
    }
    // Pixel-coordinate boxes count both endpoints.
    return (width + 1) * (height + 1);
}
// Area of a raw [xmin, ymin, xmax, ymax] box. Invalid boxes (max < min) yield
// 0; the inclusive +1 convention applies to non-normalized (pixel) boxes.
template <typename Dtype>
Dtype BBoxSize(const Dtype* bbox, const bool normalized) {
    if (bbox[2] < bbox[0] || bbox[3] < bbox[1]) {
        // Degenerate box: treat as empty.
        return Dtype(0.);
    }
    const Dtype w = bbox[2] - bbox[0];
    const Dtype h = bbox[3] - bbox[1];
    return normalized ? w * h : (w + 1) * (h + 1);
}

template float BBoxSize(const float* bbox, const bool normalized);
// Clamp a bbox into [0, 1], recompute its size, and carry the difficult flag.
void ClipBBox(const NormalizedBBox& bbox, NormalizedBBox* clipBBox) {
    clipBBox->xmin = std::min(std::max(bbox.xmin, 0.f), 1.f);
    clipBBox->ymin = std::min(std::max(bbox.ymin, 0.f), 1.f);
    clipBBox->xmax = std::min(std::max(bbox.xmax, 0.f), 1.f);
    clipBBox->ymax = std::min(std::max(bbox.ymax, 0.f), 1.f);
    clipBBox->size = BBoxSize(*clipBBox);
    clipBBox->difficult = bbox.difficult;
}
// True if any coordinate of the bbox lies outside the unit square [0, 1]^2.
bool IsCrossBoundaryBBox(const NormalizedBBox& bbox) {
    const bool xOut = bbox.xmin < 0 || bbox.xmin > 1 || bbox.xmax < 0 || bbox.xmax > 1;
    const bool yOut = bbox.ymin < 0 || bbox.ymin > 1 || bbox.ymax < 0 || bbox.ymax > 1;
    return xOut || yOut;
}
// Intersection-over-union of two boxes. Non-normalized (pixel) boxes use the
// inclusive +1 width/height convention; disjoint boxes yield 0.
float JaccardOverlap(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2,
        const bool normalized) {
    NormalizedBBox inter;
    IntersectBBox(bbox1, bbox2, &inter);
    const float pad = normalized ? 0.f : 1.f;
    const float interWidth = inter.xmax - inter.xmin + pad;
    const float interHeight = inter.ymax - inter.ymin + pad;
    if (interWidth > 0 && interHeight > 0) {
        const float interSize = interWidth * interHeight;
        const float size1 = BBoxSize(bbox1);
        const float size2 = BBoxSize(bbox2);
        return interSize / (size1 + size2 - interSize);
    }
    return 0.;
}
// Intersection-over-union of two raw [xmin, ymin, xmax, ymax] boxes.
// Disjoint boxes return 0; otherwise the intersection rectangle is computed
// directly from the clamped corners.
template <typename Dtype>
Dtype JaccardOverlap(const Dtype* bbox1, const Dtype* bbox2) {
    const bool disjoint = bbox2[0] > bbox1[2] || bbox2[2] < bbox1[0] ||
            bbox2[1] > bbox1[3] || bbox2[3] < bbox1[1];
    if (disjoint) {
        return Dtype(0.);
    }
    const Dtype interW = std::min(bbox1[2], bbox2[2]) - std::max(bbox1[0], bbox2[0]);
    const Dtype interH = std::min(bbox1[3], bbox2[3]) - std::max(bbox1[1], bbox2[1]);
    const Dtype interSize = interW * interH;
    return interSize / (BBoxSize(bbox1) + BBoxSize(bbox2) - interSize);
}

template float JaccardOverlap(const float* bbox1, const float* bbox2);
// Intersection rectangle of two boxes; [0, 0, 0, 0] when they are disjoint.
void IntersectBBox(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2,
        NormalizedBBox* intersectBBox) {
    const bool disjoint = bbox2.xmin > bbox1.xmax || bbox2.xmax < bbox1.xmin ||
            bbox2.ymin > bbox1.ymax || bbox2.ymax < bbox1.ymin;
    if (disjoint) {
        // Return [0, 0, 0, 0] if there is no intersection.
        intersectBBox->xmin = 0;
        intersectBBox->ymin = 0;
        intersectBBox->xmax = 0;
        intersectBBox->ymax = 0;
        return;
    }
    intersectBBox->xmin = std::max(bbox1.xmin, bbox2.xmin);
    intersectBBox->ymin = std::max(bbox1.ymin, bbox2.ymin);
    intersectBBox->xmax = std::min(bbox1.xmax, bbox2.xmax);
    intersectBBox->ymax = std::min(bbox1.ymax, bbox2.ymax);
}
// Whether a prediction may be considered by the given hard-example mining mode.
inline bool IsEligibleMining(const MiningType miningType, const int matchIdx,
        const float matchOverlap, const float negOverlap) {
    switch (miningType) {
        case MiningType::MAX_NEGATIVE:
            // Only unmatched predictions whose best overlap stays low.
            return matchIdx == -1 && matchOverlap < negOverlap;
        case MiningType::HARD_EXAMPLE:
            return true;
        default:
            return false;
    }
}
// Count matched predictions (index > -1) over the first num images, summed
// across all labels of each image.
int CountNumMatches(const std::vector<std::map<int, std::vector<int>>>& allMatchIndices,
        const int num) {
    int numMatches = 0;
    for (int i = 0; i < num; i++) {
        for (const auto& labelEntry : allMatchIndices[i]) {
            for (const int matchIdx : labelEntry.second) {
                if (matchIdx > -1) {
                    numMatches++;
                }
            }
        }
    }
    return numMatches;
}
// Pair each score with its index, sort by descending score, and keep at most
// topK entries (topK == -1 keeps everything).
void GetTopKScoreIndex(const std::vector<float>& scores, const std::vector<int>& indices,
        const int topK, std::vector<std::pair<float, int>>* scoreIndexVec) {
    SASSERT0(scores.size() == indices.size());
    // Generate (score, index) pairs.
    for (size_t i = 0; i < scores.size(); i++) {
        scoreIndexVec->push_back(std::make_pair(scores[i], indices[i]));
    }
    // stable_sort keeps the relative order of equal scores deterministic.
    std::stable_sort(scoreIndexVec->begin(), scoreIndexVec->end(), SortScorePairDescend<int>);
    // Truncate to the topK best if requested.
    if (topK > -1 && topK < scoreIndexVec->size()) {
        scoreIndexVec->resize(topK);
    }
}
// Greedy non-maximum suppression over normalized boxes.
//
// bboxes/scores: parallel arrays of candidate boxes and confidences.
// threshold:     Jaccard-overlap threshold above which a box is suppressed.
// topK:          keep at most topK boxes (-1 = unlimited); also passed down to
//                pre-truncate the sorted candidate list.
// reuseOverlaps: when true, pairwise overlaps are cached in *overlaps.
// indices:       out, kept box indices in descending score order. Boxes with
//                near-zero area (< 1e-5) are dropped outright.
void ApplyNMS(const std::vector<NormalizedBBox>& bboxes, const std::vector<float>& scores,
        const float threshold, const int topK, const bool reuseOverlaps,
        std::map<int, std::map<int, float>>* overlaps, std::vector<int>* indices) {
    // Sanity check.
    SASSERT(bboxes.size() == scores.size(), "bboxes and scores have different size.");
    // Get topK scores (with coreesponding indices).
    std::vector<int> idx(boost::counting_iterator<int>(0),
            boost::counting_iterator<int>(scores.size()));
    std::vector<std::pair<float, int>> scoreIndexVec;
    GetTopKScoreIndex(scores, idx, topK, &scoreIndexVec);
    // Do nms.
    indices->clear();
    while (scoreIndexVec.size() != 0) {
        // Get the current highest score box.
        int bestIdx = scoreIndexVec.front().second;
        const NormalizedBBox& bestBBox = bboxes[bestIdx];
        if (BBoxSize(bestBBox) < 1e-5) {
            // Erase small box.
            scoreIndexVec.erase(scoreIndexVec.begin());
            continue;
        }
        indices->push_back(bestIdx);
        // Erase the best box.
        scoreIndexVec.erase(scoreIndexVec.begin());
        // NOTE(review): signed/unsigned comparison (size_t vs int); safe here
        // because topK > -1 guarantees a non-negative right operand.
        if (topK > -1 && indices->size() >= topK) {
            // Stop if finding enough bboxes for nms.
            break;
        }
        // Compute overlap between bestBBox and other remaining bboxes.
        // Remove a bbox if the overlap with bestBBox is larger than nmsThreshold.
        for (std::vector<std::pair<float, int>>::iterator it = scoreIndexVec.begin();
                it != scoreIndexVec.end(); ) {
            int curIdx = it->second;
            const NormalizedBBox& curBBox = bboxes[curIdx];
            if (BBoxSize(curBBox) < 1e-5) {
                // Erase small box.
                it = scoreIndexVec.erase(it);
                continue;
            }
            float curOverlap = 0.;
            if (reuseOverlaps) {
                // Look the pair up in the cache under either key order before
                // computing it fresh.
                if (overlaps->find(bestIdx) != overlaps->end() &&
                        overlaps->find(bestIdx)->second.find(curIdx) !=
                        (*overlaps)[bestIdx].end()) {
                    // Use the computed overlap.
                    curOverlap = (*overlaps)[bestIdx][curIdx];
                } else if (overlaps->find(curIdx) != overlaps->end() &&
                        overlaps->find(curIdx)->second.find(bestIdx) !=
                        (*overlaps)[curIdx].end()) {
                    // Use the computed overlap.
                    curOverlap = (*overlaps)[curIdx][bestIdx];
                } else {
                    curOverlap = JaccardOverlap(bestBBox, curBBox);
                    // Store the overlap for future use.
                    (*overlaps)[bestIdx][curIdx] = curOverlap;
                }
            } else {
                curOverlap = JaccardOverlap(bestBBox, curBBox);
            }
            // Remove it if necessary
            if (curOverlap > threshold) {
                it = scoreIndexVec.erase(it);
            } else {
                it++;
            }
        }
    }
}
// Convenience overload: NMS without caching pairwise overlaps.
void ApplyNMS(const std::vector<NormalizedBBox>& bboxes, const std::vector<float>& scores,
        const float threshold, const int topK, std::vector<int>* indices) {
    std::map<int, std::map<int, float>> scratchOverlaps;
    ApplyNMS(bboxes, scores, threshold, topK, /* reuseOverlaps = */ false,
            &scratchOverlaps, indices);
}
// Greedy NMS over a precomputed num x num boolean overlap matrix: repeatedly
// keep the front-most remaining candidate and drop every later candidate that
// overlaps it. (Candidates appear to be pre-sorted by descending score per the
// original comments — confirm at the call sites.)
void ApplyNMS(const bool* overlapped, const int num, std::vector<int>* indices) {
    indices->clear();
    std::vector<int> remaining;
    for (int i = 0; i < num; i++) {
        remaining.push_back(i);
    }
    while (!remaining.empty()) {
        // The front candidate is the current best; keep it.
        const int keep = remaining.front();
        indices->push_back(keep);
        remaining.erase(remaining.begin());
        // Drop every remaining candidate that overlaps the kept one.
        for (std::vector<int>::iterator it = remaining.begin(); it != remaining.end(); ) {
            if (overlapped[keep * num + *it]) {
                it = remaining.erase(it);
            } else {
                ++it;
            }
        }
    }
}
// Per-prediction confidence loss.
// One loop iteration per prediction (nthreads = num * num_preds_per_class);
// match_data holds the target label for each prediction.
// loss_type 0: softmax cross-entropy — conf_data is expected to already hold
//              probabilities; loss = -log(p[label]).
// loss_type 1: logistic (sigmoid) loss summed over all classes, computed from
//              raw logits in a numerically stable form.
template <typename Dtype>
__global__ void ComputeConfLossKernel(const int nthreads, const Dtype* conf_data,
        const int num_preds_per_class, const int num_classes, const int loss_type,
        const Dtype* match_data, Dtype* conf_loss_data) {
    CUDA_KERNEL_LOOP(index, nthreads) {
        int label = match_data[index];
        int num = index / num_preds_per_class;
        int p = index % num_preds_per_class;
        int start_idx = (num * num_preds_per_class + p) * num_classes;
        Dtype loss = 0;
        if (loss_type == 0) {
            // Compute softmax probability.
            // Clamp to FLT_MIN so log() never sees 0.
            Dtype prob = conf_data[start_idx + label];
            loss = -log(MaxDevice(prob, Dtype(FLT_MIN)));
        } else if (loss_type == 1) {
            int target = 0;
            for (int c = 0; c < num_classes; ++c) {
                // One-vs-all target: 1 for the matched label, else 0.
                if (c == label) {
                    target = 1;
                } else {
                    target = 0;
                }
                // Numerically stable sigmoid cross-entropy on the raw logit:
                // equivalent to input*((input>=0)-target) + log(1+exp(-|input|)).
                Dtype input = conf_data[start_idx + c];
                loss -= input * (target - (input >= 0)) -
                        log(1 + exp(input - 2 * input * (input >= 0)));
            }
        }
        conf_loss_data[index] = loss;
    }
}
// Per-(n, s) maximum over the channel axis of a (num, channels, spatial_dim)
// tensor; one loop iteration handles one (n, s) position, out is (num, spatial_dim).
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels, const int spatial_dim,
        const Dtype* data, Dtype* out) {
    CUDA_KERNEL_LOOP(idx, num * spatial_dim) {
        const int n = idx / spatial_dim;
        const int s = idx % spatial_dim;
        // Pointer to element (n, 0, s); channels are spatial_dim apart.
        const Dtype* col = data + n * channels * spatial_dim + s;
        Dtype peak = -FLT_MAX;
        for (int c = 0; c < channels; ++c) {
            peak = max(col[c * spatial_dim], peak);
        }
        out[idx] = peak;
    }
}
// data[i] = channel_data[i] - channel_max[(n, s)]: broadcast subtraction of
// the per-(n, s) channel maximum, used to stabilize the softmax.
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count, const int num, const int channels,
        const int spatial_dim, const Dtype* channel_data, const Dtype* channel_max,
        Dtype* data) {
    CUDA_KERNEL_LOOP(idx, count) {
        const int n = idx / (channels * spatial_dim);
        const int s = idx % spatial_dim;
        data[idx] = channel_data[idx] - channel_max[n * spatial_dim + s];
    }
}
// Elementwise exponential: out[i] = exp(data[i]). In-place use (out == data)
// is safe since each element is read once before being written.
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
    CUDA_KERNEL_LOOP(index, count) {
        out[index] = exp(data[index]);
    }
}
// Per-(n, s) sum over the channel axis of a (num, channels, spatial_dim)
// tensor; channel_sum is (num, spatial_dim).
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim,
        const Dtype* data, Dtype* channel_sum) {
    CUDA_KERNEL_LOOP(idx, num * spatial_dim) {
        const int n = idx / spatial_dim;
        const int s = idx % spatial_dim;
        // Pointer to element (n, 0, s); channels are spatial_dim apart.
        const Dtype* col = data + n * channels * spatial_dim + s;
        Dtype total = 0;
        for (int c = 0; c < channels; ++c) {
            total += col[c * spatial_dim];
        }
        channel_sum[idx] = total;
    }
}
// data[i] /= channel_sum[(n, s)]: broadcast division by the per-(n, s) channel
// sum — the final normalization step of the softmax.
template <typename Dtype>
__global__ void kernel_channel_div(const int count, const int num, const int channels,
        const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
    CUDA_KERNEL_LOOP(idx, count) {
        const int n = idx / (channels * spatial_dim);
        const int s = idx % spatial_dim;
        data[idx] /= channel_sum[n * spatial_dim + s];
    }
}
// Channel-wise softmax on device memory:
//   prob[n, c, s] = exp(data[n, c, s] - max_c) / sum_c exp(data[n, c, s] - max_c)
// data/prob are laid out as (outerNum, channels, innerNum); "scale" is a
// temporary that first holds the per-(n, s) max and is then reused for the
// per-(n, s) sum.
// NOTE(review): no cudaGetLastError() after the kernel launches — launch
// failures would only surface at a later synchronizing call.
template <typename Dtype>
void SoftMaxGPU(const Dtype* data, const int outerNum, const int channels, const int innerNum,
        Dtype* prob) {
    std::vector<uint32_t> shape(4, 1);
    shape[0] = outerNum;
    shape[1] = channels;
    shape[2] = innerNum;
    Data<Dtype> scale("scale", shape);
    Dtype* scaleData = scale.mutable_device_data();
    int count = outerNum * channels * innerNum;
    // We need to subtract the max to avoid numerical issues, compute the exp,
    // and the normalize.
    // compute max
    kernel_channel_max<Dtype><<<SOOOA_GET_BLOCKS(outerNum * innerNum),
            SOOOA_CUDA_NUM_THREADS>>>(outerNum, channels, innerNum, data, scaleData);
    // subtract
    kernel_channel_subtract<Dtype><<<SOOOA_GET_BLOCKS(count), SOOOA_CUDA_NUM_THREADS>>>(
            count, outerNum, channels, innerNum, data, scaleData, prob);
    // exponentiate
    kernel_exp<Dtype><<<SOOOA_GET_BLOCKS(count), SOOOA_CUDA_NUM_THREADS>>>(count, prob, prob);
    // sum after exp
    kernel_channel_sum<Dtype><<<SOOOA_GET_BLOCKS(outerNum * innerNum),
            SOOOA_CUDA_NUM_THREADS>>>(outerNum, channels, innerNum, prob, scaleData);
    // divide
    kernel_channel_div<Dtype><<<SOOOA_GET_BLOCKS(count), SOOOA_CUDA_NUM_THREADS>>>(
            count, outerNum, channels, innerNum, scaleData, prob);
}
// Computes, on the GPU, the confidence loss of every prior box in the batch.
// confData: confidence predictions, shape (numBatches, numPriors * numClasses).
// num: number of images in the batch.
// allMatchIndices: per-image map of label -> matched gt index for each prior.
// allConfLoss: output; per-image list of conf losses, one per prior box.
template <typename Dtype>
void ComputeConfLossGPU(Data<Dtype>& confData, const int num,
const int numPredsPerClass, const int numClasses, const int backgroundLabelId,
const ConfLossType lossType,
const std::vector<std::map<int, std::vector<int>>>& allMatchIndices,
const std::map<int, std::vector<NormalizedBBox>>& allGtBBoxes,
std::vector<std::vector<float>>* allConfLoss) {
SASSERT0(backgroundLabelId < numClasses);
// match: for every prior box of every image, the label of the gt box matched
// to it (the target label). Defaults to 0 when no gt box matched, i.e. a
// value of 0 means "unmatched".
Data<Dtype> match("match", {uint32_t(num), uint32_t(numPredsPerClass), 1, 1});
Dtype* matchData = match.mutable_host_data();
for (int i = 0; i < num; i++) {
const std::map<int, std::vector<int>>& matchIndices = allMatchIndices[i];
// For the p-th prior box...
for (int p = 0; p < numPredsPerClass; p++) {
// Get the label index.
int label = backgroundLabelId;
// When shareLocation == true the map has the single key -1, so this
// loop effectively runs once rather than truly iterating.
for (std::map<int, std::vector<int>>::const_iterator it = matchIndices.begin();
it != matchIndices.end(); it++) {
const std::vector<int>& matchIndex = it->second;
SASSERT0(matchIndex.size() == numPredsPerClass);
// Check whether a gt box was matched to the p-th prior box.
if (matchIndex[p] > -1) {
SASSERT0(allGtBBoxes.find(i) != allGtBBoxes.end());
const std::vector<NormalizedBBox>& gtBBoxes = allGtBBoxes.find(i)->second;
SASSERT0(matchIndex[p] < gtBBoxes.size());
label = gtBBoxes[matchIndex[p]].label;
SASSERT0(label >= 0);
SASSERT0(label != backgroundLabelId);
SASSERT0(label < numClasses);
// A prior can only be matched to one gt bbox.
break;
}
}
// Store the target label of the p-th prior box.
matchData[i * numPredsPerClass + p] = label;
}
}
// Get probability data.
// Per-prior, per-class probabilities predicted by the network.
const Dtype* confGpuData = confData.device_data();
Data<Dtype> prob("prob");
prob.reshapeLike(&confData);
// When lossType is SOFTMAX, use the softmax of the raw conf data instead.
if (lossType == ConfLossType::SOFTMAX) {
Dtype* probData = prob.mutable_device_data();
SoftMaxGPU(confData.device_data(), num * numPredsPerClass, numClasses, 1, probData);
confGpuData = prob.device_data();
}
// Compute the loss.
Data<Dtype> confLoss("confLoss", {uint32_t(num), uint32_t(numPredsPerClass), 1, 1});
Dtype* confLossData = confLoss.mutable_device_data();
const int numThreads = num * numPredsPerClass;
// Encode the loss type as an int for the kernel: 0 = SOFTMAX, 1 = LOGISTIC.
int intLossType = 0;
if (lossType == ConfLossType::SOFTMAX) intLossType = 0;
else if (lossType == ConfLossType::LOGISTIC) intLossType = 1;
else SASSERT0(false);
// If a gt box was matched, the loss is computed only for its label;
// otherwise it is computed against the background class.
ComputeConfLossKernel<Dtype><<<SOOOA_GET_BLOCKS(numThreads), SOOOA_CUDA_NUM_THREADS>>>(
numThreads, confGpuData, numPredsPerClass, numClasses, intLossType,
match.device_data(), confLossData);
// Save the loss.
allConfLoss->clear();
const Dtype* lossData = confLoss.host_data();
for (int i = 0; i < num; i++) {
// NOTE: this local vector shadows the Data<Dtype> confLoss declared above.
std::vector<float> confLoss(lossData, lossData + numPredsPerClass);
allConfLoss->push_back(confLoss);
lossData += numPredsPerClass;
}
}
template void ComputeConfLossGPU(Data<float>& confData, const int num,
const int numPredsPerClass, const int numClasses, const int backgroundLabelId,
const ConfLossType lossType,
const std::vector<std::map<int, std::vector<int>>>& allMatchIndices,
const std::map<int, std::vector<NormalizedBBox>>& allGtBBoxes,
std::vector<std::vector<float>>* allConfLoss);
// Mines negative / hard training examples based on the combined loss.
// confData: mbox_conf confidence predictions.
// allLocPreds: per-image location predictions.
// allGtBBoxes: per-image ground-truth box map.
// priorBBoxes / priorVariances: all prior bboxes and their encoding variances.
// allMatchOverlaps: per-image overlap of each prior with its matched gt.
// Outputs:
//   numMatches: recomputed positive count (HARD_EXAMPLE may demote matches).
//   numNegs: number of selected negatives.
//   allMatchIndices: updated in place.
//   allNegIndices: per-image list of selected negative prior indices.
template <typename Dtype>
void MineHardExamples(Data<Dtype>& confData,
        const std::vector<LabelBBox>& allLocPreds,
        const std::map<int, std::vector<NormalizedBBox>>& allGtBBoxes,
        const std::vector<NormalizedBBox>& priorBBoxes,
        const std::vector<std::vector<float>>& priorVariances,
        const std::vector<std::map<int, std::vector<float>>>& allMatchOverlaps,
        const int numClasses, const int backgroundLabelId, const bool usePriorForNms,
        const ConfLossType confLossType, const MiningType miningType,
        const LocLossType locLossType, const float negPosRatio, const float negOverlap,
        const CodeType codeType, const bool encodeVarianceInTarget,
        const float nmsThreshold, const int topK, const int sampleSize, const bool bpInside,
        const bool usePriorForMatching, int* numMatches, int* numNegs,
        std::vector<std::map<int, std::vector<int>>>* allMatchIndices,
        std::vector<std::vector<int>>* allNegIndices) {
    // num images in batch
    int num = allLocPreds.size();
    // Number of priors across the batch matched to a gt box (label > -1).
    *numMatches = CountNumMatches(*allMatchIndices, num);
    *numNegs = 0;
    int numPriors = priorBBoxes.size();
    SASSERT0(numPriors == priorVariances.size());
    SASSERT(numClasses >= 1, "numClasses should not be less than 1.");
    if (miningType == MiningType::MINING_NONE) {
        return;
    }
    bool hasNmsParam = true;
    if (topK <= 0) {
        hasNmsParam = false;
    }
    // Compute confidence losses based on matching results.
    // Per-image conf loss of every prior box: [num][num_priors].
    std::vector<std::vector<float>> allConfLoss;
    ComputeConfLossGPU(confData, num, numPriors, numClasses, backgroundLabelId, confLossType,
            *allMatchIndices, allGtBBoxes, &allConfLoss);
    // Per-image loc loss of every prior box: [num][num_priors].
    std::vector<std::vector<float>> allLocLoss;
    if (miningType == MiningType::HARD_EXAMPLE) {
        // Compute localization losses based on matching results.
        Data<Dtype> locPred("locPred");
        Data<Dtype> locGt("locGt");
        if (*numMatches != 0) {
            std::vector<uint32_t> locShape(4, 1);
            locShape[3] = *numMatches * 4;
            locPred.reshape(locShape);
            locGt.reshape(locShape);
            Dtype* locPredData = locPred.mutable_host_data();
            Dtype* locGtData = locGt.mutable_host_data();
            EncodeLocPrediction(allLocPreds, allGtBBoxes, *allMatchIndices, priorBBoxes,
                    priorVariances, codeType, encodeVarianceInTarget, bpInside,
                    usePriorForMatching, locPredData, locGtData);
        }
        ComputeLocLoss(locPred, locGt, *allMatchIndices, num, numPriors, locLossType,
                &allLocLoss);
    } else {
        // No localization loss: initialize every loc loss to 0.f.
        for (int i = 0; i < num; i++) {
            std::vector<float> locLoss(numPriors, 0.f);
            allLocLoss.push_back(locLoss);
        }
    }
    for (int i = 0; i < num; i++) {
        std::map<int, std::vector<int>>& matchIndices = (*allMatchIndices)[i];
        const std::map<int, std::vector<float>>& matchOverlaps = allMatchOverlaps[i];
        // Total loss per prior: conf loss + loc loss.
        const std::vector<float>& confLoss = allConfLoss[i];
        const std::vector<float>& locLoss = allLocLoss[i];
        std::vector<float> loss;
        std::transform(confLoss.begin(), confLoss.end(), locLoss.begin(),
                std::back_inserter(loss), std::plus<float>());
        // Pick negatives or hard examples based on loss.
        std::set<int> selIndices;
        std::vector<int> negIndices;
        for (std::map<int, std::vector<int>>::iterator it = matchIndices.begin();
                it != matchIndices.end(); it++) {
            const int label = it->first;
            int numSel = 0;
            // Get potential indices and loss pairs.
            // (loss, prior index) pairs of the eligible candidates.
            std::vector<std::pair<float, int>> lossIndices;
            for (int m = 0; m < matchIndices[label].size(); m++) {
                // True when matchIdx == -1 && matchOverlap < negOverlap.
                if (IsEligibleMining(miningType, matchIndices[label][m],
                        matchOverlaps.find(label)->second[m], negOverlap)) {
                    lossIndices.push_back(std::make_pair(loss[m], m));
                    numSel++;
                }
            }
            if (miningType == MiningType::MAX_NEGATIVE) {
                // Cap negatives at negPosRatio times the positive count.
                int numPos = 0;
                for (int m = 0; m < matchIndices[label].size(); m++) {
                    if (matchIndices[label][m] > -1) {
                        numPos++;
                    }
                }
                numSel = std::min(static_cast<int>(numPos * negPosRatio), numSel);
            } else if (miningType == MiningType::HARD_EXAMPLE) {
                SASSERT0(sampleSize > 0);
                numSel = std::min(sampleSize, numSel);
            }
            // XXX: verify that the nmsThreshold path still passes the tests.
            // Select samples.
            if (hasNmsParam && nmsThreshold > 0) {
                // Do nms before selecting samples.
                std::vector<float> selLoss;
                std::vector<NormalizedBBox> selBBoxes;
                if (usePriorForNms) {
                    for (int m = 0; m < matchIndices[label].size(); m++) {
                        if (IsEligibleMining(miningType, matchIndices[label][m],
                                matchOverlaps.find(label)->second[m], negOverlap)) {
                            selLoss.push_back(loss[m]);
                            selBBoxes.push_back(priorBBoxes[m]);
                        }
                    }
                } else {
                    // Decode the prediction into bbox first.
                    std::vector<NormalizedBBox> locBBoxes;
                    bool clipBBox = false;
                    DecodeBBoxes(priorBBoxes, priorVariances, codeType,
                            encodeVarianceInTarget, clipBBox,
                            allLocPreds[i].find(label)->second, &locBBoxes);
                    for (int m = 0; m < matchIndices[label].size(); m++) {
                        if (IsEligibleMining(miningType, matchIndices[label][m],
                                matchOverlaps.find(label)->second[m], negOverlap)) {
                            selLoss.push_back(loss[m]);
                            selBBoxes.push_back(locBBoxes[m]);
                        }
                    }
                }
                // Do non-maximum suppression based on the loss.
                std::vector<int> nmsIndices;
                ApplyNMS(selBBoxes, selLoss, nmsThreshold, topK, &nmsIndices);
                // BUGFIX: size() is unsigned, so cast for a signed comparison;
                // also %d requires an int argument — passing a size_t through
                // the varargs of a printf-style logger is undefined behavior
                // on 64-bit builds.
                if (static_cast<int>(nmsIndices.size()) < numSel) {
                    STDOUT_LOG("not enough samples after nms: %d",
                            static_cast<int>(nmsIndices.size()));
                }
                // Pick top example indices after nms.
                numSel = std::min(static_cast<int>(nmsIndices.size()), numSel);
                for (int n = 0; n < numSel; n++) {
                    selIndices.insert(lossIndices[nmsIndices[n]].second);
                }
            } else {
                // Pick top example indices based on loss.
                std::sort(lossIndices.begin(), lossIndices.end(),
                        SortScorePairDescend<int>);
                for (int n = 0; n < numSel; n++) {
                    selIndices.insert(lossIndices[n].second);
                }
            }
            // Update the matchIndices and select negIndices.
            for (int m = 0; m < matchIndices[label].size(); m++) {
                if (matchIndices[label][m] > -1) {
                    // HARD_EXAMPLE: positives not selected as hard examples
                    // are demoted to unmatched.
                    if (miningType == MiningType::HARD_EXAMPLE &&
                            selIndices.find(m) == selIndices.end()) {
                        matchIndices[label][m] = -1;
                        *numMatches -= 1;
                    }
                } else if (matchIndices[label][m] == -1) {
                    if (selIndices.find(m) != selIndices.end()) {
                        negIndices.push_back(m);
                        *numNegs += 1;
                    }
                }
            }
        }
        allNegIndices->push_back(negIndices);
    }
}
template void MineHardExamples(Data<float>& confData,
        const std::vector<LabelBBox>& allLocPreds,
        const std::map<int, std::vector<NormalizedBBox>>& allGtBBoxes,
        const std::vector<NormalizedBBox>& priorBBoxes,
        const std::vector<std::vector<float>>& priorVariances,
        const std::vector<std::map<int, std::vector<float>>>& allMatchOverlaps,
        const int numClasses, const int backgroundLabelId, const bool usePriorForNms,
        const ConfLossType confLossType, const MiningType miningType,
        const LocLossType locLossType, const float negPosRatio, const float negOverlap,
        const CodeType codeType, const bool encodeVarianceInTarget, const float nmsThresh,
        const int topK, const int sampleSize, const bool bpInside,
        const bool usePriorForMatching, int* numMatches, int* numNegs,
        std::vector<std::map<int, std::vector<int>>>* allMatchIndices,
        std::vector<std::vector<int>>* allNegIndices);
// Writes, for every matched (prior, gt) pair in the batch, the encoded
// ground-truth regression target and the matching location prediction into
// the flat arrays locGtData / locPredData (4 coords per match, in match
// order). Both buffers must hold 4 * numMatches values.
template <typename Dtype>
void EncodeLocPrediction(const std::vector<LabelBBox>& allLocPreds,
        const std::map<int, std::vector<NormalizedBBox>>& allGtBBoxes,
        const std::vector<std::map<int, std::vector<int>>>& allMatchIndices,
        const std::vector<NormalizedBBox>& priorBBoxes,
        const std::vector<std::vector<float>>& priorVariances,
        const CodeType codeType, const bool encodeVarianceInTarget,
        const bool bpInside, const bool usePriorForMatching,
        Dtype* locPredData, Dtype* locGtData) {
    int num = allLocPreds.size();
    int count = 0;
    for (int i = 0; i < num; i++) {
        for (std::map<int, std::vector<int>>::const_iterator it = allMatchIndices[i].begin();
                it != allMatchIndices[i].end(); it++) {
            const int label = it->first;
            const std::vector<int>& matchIndex = it->second;
            SASSERT0(allLocPreds[i].find(label) != allLocPreds[i].end());
            const std::vector<NormalizedBBox>& locPred = allLocPreds[i].find(label)->second;
            for (int j = 0; j < matchIndex.size(); j++) {
                if (matchIndex[j] <= -1) {
                    continue;
                }
                // Store encoded ground truth.
                const int gtIdx = matchIndex[j];
                SASSERT0(allGtBBoxes.find(i) != allGtBBoxes.end());
                SASSERT0(gtIdx < allGtBBoxes.find(i)->second.size());
                const NormalizedBBox& gtBBox = allGtBBoxes.find(i)->second[gtIdx];
                NormalizedBBox gtEncode;
                SASSERT0(j < priorBBoxes.size());
                // BUGFIX: the output argument was a garbled ">Encode" token;
                // pass the address of gtEncode.
                EncodeBBox(priorBBoxes[j], priorVariances[j], codeType,
                        encodeVarianceInTarget, gtBBox, &gtEncode);
                locGtData[count * 4] = gtEncode.xmin;
                locGtData[count * 4 + 1] = gtEncode.ymin;
                locGtData[count * 4 + 2] = gtEncode.xmax;
                locGtData[count * 4 + 3] = gtEncode.ymax;
                // Store location prediction.
                SASSERT0(j < locPred.size());
                if (bpInside) {
                    NormalizedBBox matchBBox = priorBBoxes[j];
                    if (!usePriorForMatching) {
                        const bool clipBBox = false;
                        DecodeBBox(priorBBoxes[j], priorVariances[j], codeType,
                                encodeVarianceInTarget, clipBBox, locPred[j], &matchBBox);
                    }
                    // When a dimension of matchBBox is outside of image region, use
                    // gtEncode to simulate zero gradient.
                    // BUGFIX: the xmin slot previously copied locPred[j].ymin.
                    locPredData[count * 4] = (matchBBox.xmin < 0 || matchBBox.xmin > 1) ?
                            gtEncode.xmin : locPred[j].xmin;
                    locPredData[count * 4 + 1] = (matchBBox.ymin < 0 || matchBBox.ymin > 1) ?
                            gtEncode.ymin : locPred[j].ymin;
                    locPredData[count * 4 + 2] = (matchBBox.xmax < 0 || matchBBox.xmax > 1) ?
                            gtEncode.xmax : locPred[j].xmax;
                    locPredData[count * 4 + 3] = (matchBBox.ymax < 0 || matchBBox.ymax > 1) ?
                            gtEncode.ymax : locPred[j].ymax;
                } else {
                    locPredData[count * 4] = locPred[j].xmin;
                    locPredData[count * 4 + 1] = locPred[j].ymin;
                    locPredData[count * 4 + 2] = locPred[j].xmax;
                    locPredData[count * 4 + 3] = locPred[j].ymax;
                }
                if (encodeVarianceInTarget) {
                    // Fold the prior variance into both target and prediction.
                    for (int k = 0; k < 4; k++) {
                        SASSERT0(priorVariances[j][k] > 0);
                        locPredData[count * 4 + k] /= priorVariances[j][k];
                        locGtData[count * 4 + k] /= priorVariances[j][k];
                    }
                }
                count++;
            }
        }
    }
}
template void EncodeLocPrediction(const std::vector<LabelBBox>& allLocPreds,
        const std::map<int, std::vector<NormalizedBBox>>& allGtBBoxes,
        const std::vector<std::map<int, std::vector<int>>>& allMatchIndices,
        const std::vector<NormalizedBBox>& priorBBoxes,
        const std::vector<std::vector<float>>& priorVariances,
        const CodeType codeType, const bool encodeVarianceInTarget,
        const bool bpInside, const bool usePriorForMatching,
        float* locPredData, float* locGtData);
// Encodes `bbox` relative to `priorBBox` according to codeType
// (CORNER, CENTER_SIZE or CORNER_SIZE).
// encodeVarianceInTarget == true: the regression target carries the variance,
// so no division happens here; false: each coordinate is additionally divided
// by the corresponding priorVariance entry.
void EncodeBBox(const NormalizedBBox& priorBBox, const std::vector<float>& priorVariance,
        const CodeType codeType, const bool encodeVarianceInTarget,
        const NormalizedBBox& bbox, NormalizedBBox* encodeBBox) {
    if (codeType == CodeType::CORNER) {
        if (encodeVarianceInTarget) {
            encodeBBox->xmin = bbox.xmin - priorBBox.xmin;
            encodeBBox->ymin = bbox.ymin - priorBBox.ymin;
            encodeBBox->xmax = bbox.xmax - priorBBox.xmax;
            encodeBBox->ymax = bbox.ymax - priorBBox.ymax;
        } else {
            // Encode variance in bbox.
            SASSERT0(priorVariance.size() == 4);
            for (int i = 0; i < priorVariance.size(); i++) {
                SASSERT0(priorVariance[i] > 0);
            }
            encodeBBox->xmin = (bbox.xmin - priorBBox.xmin) / priorVariance[0];
            encodeBBox->ymin = (bbox.ymin - priorBBox.ymin) / priorVariance[1];
            encodeBBox->xmax = (bbox.xmax - priorBBox.xmax) / priorVariance[2];
            encodeBBox->ymax = (bbox.ymax - priorBBox.ymax) / priorVariance[3];
        }
    } else if (codeType == CodeType::CENTER_SIZE) {
        // Offsets of the center, log-ratios of the size.
        float priorWidth = priorBBox.xmax - priorBBox.xmin;
        SASSERT0(priorWidth > 0);
        float priorHeight = priorBBox.ymax - priorBBox.ymin;
        SASSERT0(priorHeight > 0);
        float priorCenterX = (priorBBox.xmin + priorBBox.xmax) / 2.;
        float priorCenterY = (priorBBox.ymin + priorBBox.ymax) / 2.;
        float bboxWidth = bbox.xmax - bbox.xmin;
        SASSERT0(bboxWidth > 0);
        float bboxHeight = bbox.ymax - bbox.ymin;
        SASSERT0(bboxHeight > 0);
        float bboxCenterX = (bbox.xmin + bbox.xmax) / 2.;
        float bboxCenterY = (bbox.ymin + bbox.ymax) / 2.;
        if (encodeVarianceInTarget) {
            encodeBBox->xmin = (bboxCenterX - priorCenterX) / priorWidth;
            encodeBBox->ymin = (bboxCenterY - priorCenterY) / priorHeight;
            encodeBBox->xmax = std::log(bboxWidth / priorWidth);
            encodeBBox->ymax = std::log(bboxHeight / priorHeight);
        } else {
            // Encode variance in bbox.
            encodeBBox->xmin = (bboxCenterX - priorCenterX) / priorWidth / priorVariance[0];
            encodeBBox->ymin = (bboxCenterY - priorCenterY) / priorHeight / priorVariance[1];
            encodeBBox->xmax = std::log(bboxWidth / priorWidth) / priorVariance[2];
            encodeBBox->ymax = std::log(bboxHeight / priorHeight) / priorVariance[3];
        }
    } else if (codeType == CodeType::CORNER_SIZE) {
        // Corner offsets normalized by the prior's size.
        float priorWidth = priorBBox.xmax - priorBBox.xmin;
        SASSERT0(priorWidth > 0);
        float priorHeight = priorBBox.ymax - priorBBox.ymin;
        SASSERT0(priorHeight > 0);
        if (encodeVarianceInTarget) {
            encodeBBox->xmin = (bbox.xmin - priorBBox.xmin) / priorWidth;
            encodeBBox->ymin = (bbox.ymin - priorBBox.ymin) / priorHeight;
            encodeBBox->xmax = (bbox.xmax - priorBBox.xmax) / priorWidth;
            encodeBBox->ymax = (bbox.ymax - priorBBox.ymax) / priorHeight;
        } else {
            // Encode variance in bbox.
            SASSERT0(priorVariance.size() == 4);
            for (int i = 0; i < priorVariance.size(); i++) {
                SASSERT0(priorVariance[i] > 0);
            }
            encodeBBox->xmin = (bbox.xmin - priorBBox.xmin) / priorWidth / priorVariance[0];
            encodeBBox->ymin = (bbox.ymin - priorBBox.ymin) / priorHeight / priorVariance[1];
            encodeBBox->xmax = (bbox.xmax - priorBBox.xmax) / priorWidth / priorVariance[2];
            encodeBBox->ymax = (bbox.ymax - priorBBox.ymax) / priorHeight / priorVariance[3];
        }
    } else {
        // BUGFIX: the message said "Unknown LocLossType.", but this function
        // dispatches on the bbox code type, not the localization loss type.
        SASSERT(false, "Unknown code type.");
    }
}
// Computes the per-prior localization loss from matched predictions.
// locPred / locGt: flat arrays of 4 coords per matched prior (the layout
// produced by EncodeLocPrediction); both must have the same element count.
// allLocLoss: output; per-image loss per prior (0 for unmatched priors).
template <typename Dtype>
void ComputeLocLoss(Data<Dtype>& locPred, Data<Dtype>& locGt,
const std::vector<std::map<int, std::vector<int>>>& allMatchIndices, const int num,
const int numPriors, const LocLossType locLossType,
std::vector<std::vector<float>>* allLocLoss) {
int locCount = locPred.getCount();
SASSERT0(locCount == locGt.getCount());
Data<Dtype> diff("diff");
const Dtype* diffData = NULL;
if (locCount != 0) {
diff.reshape(locPred.getShape());
// diff = locPred - locGt, computed on the GPU; read back on the host below.
soooa_gpu_sub(locCount, locPred.device_data(), locGt.device_data(), diff.mutable_device_data());
diffData = diff.host_data();
}
// count indexes matched priors in the same order EncodeLocPrediction wrote them.
int count = 0;
for (int i = 0; i < num; i++) {
std::vector<float> locLoss(numPriors, 0.f);
for (std::map<int, std::vector<int>>::const_iterator it = allMatchIndices[i].begin();
it != allMatchIndices[i].end(); it++) {
const std::vector<int>& matchIndex = it->second;
SASSERT0(numPriors == matchIndex.size());
for (int j = 0; j < matchIndex.size(); j++) {
if (matchIndex[j] <= -1) {
// Unmatched prior: its loss stays 0.
continue;
}
Dtype loss = 0;
for (int k = 0; k < 4; k++) {
Dtype val = diffData[count * 4 + k];
if (locLossType == LocLossType::SMOOTH_L1) {
// Smooth-L1 (Huber): quadratic for |val| < 1, linear beyond.
Dtype absVal = fabs(val);
if (absVal < 1.) {
loss += 0.5 * val * val;
} else {
loss += absVal - 0.5;
}
} else if (locLossType == LocLossType::L2) {
loss += 0.5 * val * val;
} else {
SASSERT(false, "Unknown loc loss type.");
}
}
locLoss[j] = loss;
count++;
}
}
allLocLoss->push_back(locLoss);
}
}
template void ComputeLocLoss(Data<float>& locPred, Data<float>& locGt,
const std::vector<std::map<int, std::vector<int>>>& allMatchIndices, const int num,
const int numPriors, const LocLossType locLossType,
std::vector<std::vector<float>>* allLocLoss);
// Builds the confidence training targets (and, when mining, the gathered
// confidence predictions) for the confidence loss computation.
// confData: per-image predictions, numPriors * numClasses values per image.
// confGtData: receives ground-truth labels (SOFTMAX) or one-hot rows (LOGISTIC).
// confPredData: receives the gathered scores of selected positives/negatives
// when negative mining is enabled; untouched otherwise.
template <typename Dtype>
void EncodeConfPrediction(const Dtype* confData, const int num, const int numPriors,
        const int numClasses, const int backgroundLabelId, const bool mapObjectToAgnostic,
        const MiningType miningType, const ConfLossType confLossType,
        const std::vector<std::map<int, std::vector<int>>>& allMatchIndices,
        const std::vector<std::vector<int>>& allNegIndices,
        const std::map<int, std::vector<NormalizedBBox>>& allGtBBoxes,
        Dtype* confPredData, Dtype* confGtData) {
    // BUGFIX: the condition was `numClasses > 1`, contradicting both the
    // message and the `numClasses == 1` agnostic case asserted just below.
    SASSERT(numClasses >= 1, "numClasses should not be less than 1.");
    if (mapObjectToAgnostic) {
        if (backgroundLabelId >= 0) {
            SASSERT0(numClasses == 2);
        } else {
            SASSERT0(numClasses == 1);
        }
    }
    bool doNegMining = (miningType != MiningType::MINING_NONE);
    // count indexes the gathered (mined) examples across the whole batch.
    int count = 0;
    for (int i = 0; i < num; i++) {
        if (allGtBBoxes.find(i) != allGtBBoxes.end()) {
            // Save matched (positive) bboxes scores and labels.
            const std::map<int, std::vector<int>>& matchIndices = allMatchIndices[i];
            for (std::map<int, std::vector<int>>::const_iterator it = matchIndices.begin();
                    it != matchIndices.end(); it++) {
                const std::vector<int>& matchIndex = it->second;
                SASSERT0(matchIndex.size() == numPriors);
                for (int j = 0; j < numPriors; j++) {
                    if (matchIndex[j] <= -1) {
                        continue;
                    }
                    const int gtLabel = mapObjectToAgnostic ?
                            backgroundLabelId + 1 :
                            allGtBBoxes.find(i)->second[matchIndex[j]].label;
                    // With mining, targets are densely packed in mining order;
                    // without, they stay at the prior's own slot.
                    int idx = doNegMining ? count : j;
                    if (confLossType == ConfLossType::SOFTMAX) {
                        confGtData[idx] = gtLabel;
                    } else if (confLossType == ConfLossType::LOGISTIC) {
                        confGtData[idx * numClasses + gtLabel] = 1;
                    } else {
                        SASSERT(false, "Unknown conf loss type.");
                    }
                    if (doNegMining) {
                        // Copy scores for matched bboxes.
                        soooa_copy<Dtype>(numClasses, confData + j * numClasses,
                                confPredData + count * numClasses);
                        count++;
                    }
                }
            }
            // Go to next image
            if (doNegMining) {
                // Save negative bboxes scores and labels.
                for (int n = 0; n < allNegIndices[i].size(); n++) {
                    int j = allNegIndices[i][n];
                    SASSERT0(j < numPriors);
                    soooa_copy<Dtype>(numClasses, confData + j * numClasses,
                            confPredData + count * numClasses);
                    if (confLossType == ConfLossType::SOFTMAX) {
                        confGtData[count] = backgroundLabelId;
                    } else if (confLossType == ConfLossType::LOGISTIC) {
                        if (backgroundLabelId >= 0 && backgroundLabelId < numClasses) {
                            confGtData[count * numClasses + backgroundLabelId] = 1;
                        }
                    } else {
                        SASSERT(false, "Unknown conf loss type.");
                    }
                    count++;
                }
            }
        }
        // Advance the per-image pointers: confData is only consumed per image
        // when mining gathers scores; without mining the targets are laid out
        // per prior, so confGtData advances instead.
        if (doNegMining) {
            confData += numPriors * numClasses;
        } else {
            confGtData += numPriors;
        }
    }
}
template void EncodeConfPrediction(const float* confData, const int num, const int numPriors,
        const int numClasses, const int backgroundLabelId, const bool mapObjectToAgnostic,
        const MiningType miningType, const ConfLossType confLossType,
        const std::vector<std::map<int, std::vector<int>>>& allMatchIndices,
        const std::vector<std::vector<int>>& allNegIndices,
        const std::map<int, std::vector<NormalizedBBox>>& allGtBBoxes,
        float* confPredData, float* confGtData);
// Unpacks the flat confidence blob into per-image, per-class score lists.
// confData layout: num images x (numPredsPerClass predictions x numClasses
// scores), class index fastest. (*confPreds)[i][c][p] is the score of class c
// for prediction p of image i.
template <typename Dtype>
void GetConfidenceScores(const Dtype* confData, const int num, const int numPredsPerClass,
        const int numClasses, std::vector<std::map<int, std::vector<float>>>* confPreds) {
    confPreds->clear();
    confPreds->resize(num);
    const int imageStride = numPredsPerClass * numClasses;
    for (int i = 0; i < num; i++) {
        const Dtype* imageData = confData + i * imageStride;
        std::map<int, std::vector<float>>& labelScores = (*confPreds)[i];
        for (int c = 0; c < numClasses; c++) {
            std::vector<float>& scores = labelScores[c];
            scores.reserve(numPredsPerClass);
            for (int p = 0; p < numPredsPerClass; p++) {
                scores.push_back(imageData[p * numClasses + c]);
            }
        }
    }
}
template void GetConfidenceScores(const float* confData, const int num,
        const int numPredsPerClass, const int numClasses,
        std::vector<std::map<int, std::vector<float>>>* confPreds);
template void GetConfidenceScores(const double* confData, const int num,
        const int numPredsPerClass, const int numClasses,
        std::vector<std::map<int, std::vector<float>>>* confPreds);
// Decodes every image's location predictions into bounding boxes.
// When shareLocation is true all classes share one prediction set stored
// under the key -1; otherwise each location class has its own entry.
void DecodeBBoxesAll(const std::vector<LabelBBox>& allLocPreds,
        const std::vector<NormalizedBBox>& priorBBoxes,
        const std::vector<std::vector<float>>& priorVariances,
        const int num, const bool shareLocation,
        const int numLocClasses, const int backgroundLabelId,
        const CodeType codeType, const bool varianceEncodedInTarget,
        const bool clip, std::vector<LabelBBox>* allDecodeBBoxes) {
    SASSERT0(allLocPreds.size() == num);
    allDecodeBBoxes->clear();
    allDecodeBBoxes->resize(num);
    for (int i = 0; i < num; i++) {
        LabelBBox& decoded = (*allDecodeBBoxes)[i];
        for (int c = 0; c < numLocClasses; c++) {
            const int label = shareLocation ? -1 : c;
            // The background class carries no location predictions.
            if (label == backgroundLabelId) {
                continue;
            }
            LabelBBox::const_iterator found = allLocPreds[i].find(label);
            if (found == allLocPreds[i].end()) {
                // Something bad happened if there are no predictions for current label.
                SASSERT(false, "Could not find location predictions for label %d.", label);
            }
            DecodeBBoxes(priorBBoxes, priorVariances, codeType, varianceEncodedInTarget,
                    clip, found->second, &(decoded[label]));
        }
    }
}
// Collects (score, index) pairs whose score exceeds `threshold`, stably
// sorted by descending score, truncated to the topK best when topK > -1.
void GetMaxScoreIndex(const std::vector<float>& scores, const float threshold, const int topK,
        std::vector<std::pair<float, int>>* scoreIndexVec) {
    const int numScores = static_cast<int>(scores.size());
    for (int idx = 0; idx < numScores; idx++) {
        if (scores[idx] > threshold) {
            scoreIndexVec->push_back(std::make_pair(scores[idx], idx));
        }
    }
    // Stable sort preserves the original index order among equal scores.
    std::stable_sort(scoreIndexVec->begin(), scoreIndexVec->end(),
            SortScorePairDescend<int>);
    if (topK > -1 && topK < scoreIndexVec->size()) {
        scoreIndexVec->resize(topK);
    }
}
// Raw-array overload: collects (score, index) pairs above `threshold` from
// `scores[0..num)`, stably sorted by descending score, truncated to topK
// when topK > -1.
template <typename Dtype>
void GetMaxScoreIndex(const Dtype* scores, const int num, const float threshold,
        const int topK, std::vector<std::pair<Dtype, int>>* scoreIndexVec) {
    for (int idx = 0; idx < num; idx++) {
        if (scores[idx] > threshold) {
            scoreIndexVec->push_back(std::make_pair(scores[idx], idx));
        }
    }
    // Stable sort preserves the original index order among equal scores.
    std::stable_sort(scoreIndexVec->begin(), scoreIndexVec->end(),
            SortScorePairDescend<int>);
    if (topK > -1 && topK < scoreIndexVec->size()) {
        scoreIndexVec->resize(topK);
    }
}
template void GetMaxScoreIndex(const float* scores, const int num, const float threshold,
        const int topK, std::vector<std::pair<float, int>>* scoreIndexVec);
// Greedy non-maximum suppression over normalized bboxes.
// A candidate (visited best-score first) is kept when its Jaccard overlap
// with every previously kept box is <= the threshold; with eta < 1 the
// threshold shrinks after each kept box once it exceeds 0.5.
void ApplyNMSFast(const std::vector<NormalizedBBox>& bboxes, const std::vector<float>& scores,
        const float scoreThreshold, const float nmsThreshold,
        const float eta, const int topK, std::vector<int>* indices) {
    // Sanity check.
    SASSERT(bboxes.size() == scores.size(), "bboxes and scores have different size.");
    // Get topK scores (with corresponding indices), best first.
    std::vector<std::pair<float, int>> scoreIndexVec;
    GetMaxScoreIndex(scores, scoreThreshold, topK, &scoreIndexVec);
    float adaptiveThreshold = nmsThreshold;
    indices->clear();
    for (size_t c = 0; c < scoreIndexVec.size(); c++) {
        const int idx = scoreIndexVec[c].second;
        bool keep = true;
        for (size_t k = 0; k < indices->size() && keep; k++) {
            const int keptIdx = (*indices)[k];
            const float overlap = JaccardOverlap(bboxes[idx], bboxes[keptIdx]);
            keep = overlap <= adaptiveThreshold;
        }
        if (keep) {
            indices->push_back(idx);
            // Adaptively tighten the threshold once it is large enough.
            if (eta < 1 && adaptiveThreshold > 0.5) {
                adaptiveThreshold *= eta;
            }
        }
    }
}
// Greedy non-maximum suppression over raw arrays: bboxes holds num boxes as
// 4 consecutive coords each; scores holds num scores. Candidates are visited
// best-score first; eta < 1 shrinks the threshold after each kept box once
// it exceeds 0.5.
template <typename Dtype>
void ApplyNMSFast(const Dtype* bboxes, const Dtype* scores, const int num,
        const float scoreThreshold, const float nmsThreshold, const float eta,
        const int topK, std::vector<int>* indices) {
    // Get topK scores (with corresponding indices), best first.
    std::vector<std::pair<float, int>> scoreIndexVec;
    GetMaxScoreIndex(scores, num, scoreThreshold, topK, &scoreIndexVec);
    float adaptiveThreshold = nmsThreshold;
    indices->clear();
    for (size_t c = 0; c < scoreIndexVec.size(); c++) {
        const int idx = scoreIndexVec[c].second;
        bool keep = true;
        for (size_t k = 0; k < indices->size() && keep; k++) {
            const int keptIdx = (*indices)[k];
            const Dtype overlap = JaccardOverlap(bboxes + idx * 4, bboxes + keptIdx * 4);
            keep = overlap <= adaptiveThreshold;
        }
        if (keep) {
            indices->push_back(idx);
            // Adaptively tighten the threshold once it is large enough.
            if (eta < 1 && adaptiveThreshold > 0.5) {
                adaptiveThreshold *= eta;
            }
        }
    }
}
template void ApplyNMSFast(const float* bboxes, const float* scores, const int num,
        const float scoreThreshold, const float nmsThreshold, const float eta,
        const int topK, std::vector<int>* indices);
// Scales a normalized bbox into width x height image coordinates and
// refreshes its cached size. When both dimensions are <= 1 the scaled box is
// still treated as normalized for the size computation.
void ScaleBBox(const NormalizedBBox& bbox, const int height, const int width,
        NormalizedBBox* scaleBBox) {
    scaleBBox->xmin = bbox.xmin * width;
    scaleBBox->ymin = bbox.ymin * height;
    scaleBBox->xmax = bbox.xmax * width;
    scaleBBox->ymax = bbox.ymax * height;
    scaleBBox->size = 0.f;
    const bool normalized = (width <= 1 && height <= 1);
    scaleBBox->size = BBoxSize(*scaleBBox, normalized);
    scaleBBox->difficult = bbox.difficult;
}
// Maps a normalized bbox into the pixel coordinates of the original image.
// imgSize is (height, width). Resizing is not supported yet; the empty
// hasResize branch below is a placeholder for future resize handling.
void OutputBBox(const NormalizedBBox& bbox, const std::pair<int, int>& imgSize,
const bool hasResize, NormalizedBBox* outBBox) {
// Resize is not supported at the moment.
SASSERT0(!hasResize);
const int height = imgSize.first;
const int width = imgSize.second;
NormalizedBBox tempBBox = bbox;
if (hasResize) {
// Placeholder: resize handling not implemented.
} else {
// Clip the normalized bbox first.
ClipBBox(tempBBox, &tempBBox);
// Scale the bbox according to the original image size.
ScaleBBox(tempBBox, height, width, outBBox);
}
}
// Converts an HSV color (components expected in [0, 1]) to a cv::Scalar
// with channels scaled to [0, 255].
cv::Scalar HSV2RGB(const float h, const float s, const float v) {
    const int h_i = static_cast<int>(h * 6);
    const float f = h * 6 - h_i;
    const float p = v * (1 - s);
    const float q = v * (1 - f * s);
    const float t = v * (1 - (1 - f) * s);
    // One (r, g, b) permutation per 60-degree hue sector; sectors outside
    // 0..5 fall back to white.
    float r = 1, g = 1, b = 1;
    switch (h_i) {
    case 0: r = v; g = t; b = p; break;
    case 1: r = q; g = v; b = p; break;
    case 2: r = p; g = v; b = t; break;
    case 3: r = p; g = q; b = v; break;
    case 4: r = t; g = p; b = v; break;
    case 5: r = v; g = p; b = q; break;
    default: break;
    }
    return cv::Scalar(r * 255, g * 255, b * 255);
}
// Generates n visually distinct colors by stepping the hue with the golden
// ratio conjugate. The RNG seed is fixed, so palettes are reproducible
// across runs.
std::vector<cv::Scalar> GetColors(const int n) {
    const float goldenRatioConjugate = 0.618033988749895;
    const float s = 0.3;
    const float v = 0.99;
    cv::RNG rng(12345);
    std::vector<cv::Scalar> colors;
    for (int i = 0; i < n; i++) {
        const float h = std::fmod(rng.uniform(0.f, 1.f) + goldenRatioConjugate, 1.f);
        colors.push_back(HSV2RGB(h, s, v));
    }
    return colors;
}
// Wall-clock reference used by VisualizeBBox to estimate the display FPS.
static clock_t startClock = clock();
// Lazily-opened writer used by VisualizeBBox to save annotated frames.
static cv::VideoWriter capOut;
// Draws detections (plus an FPS overlay) onto each image, shows the result
// in an OpenCV window, and optionally appends the frames to a video file.
// detections: rows of 7 values [imgIdx, label, score, xmin, ymin, xmax, ymax]
// with normalized coordinates; boxes scoring below `threshold` are skipped.
// Side effects: uses the file-level startClock / capOut globals, opens an
// "detections" window, and raises SIGINT when ESC is pressed in it.
template <typename Dtype>
void VisualizeBBox(const std::vector<cv::Mat>& images, Data<Dtype>* detections,
const float threshold, const std::vector<cv::Scalar>& colors,
const std::map<int, std::string>& labelToDisplayName, const std::string& saveFile) {
// Retrieve detections.
SASSERT0(detections->width() == 7);
const int numDet = detections->height();
const int numImg = images.size();
if (numDet == 0 || numImg == 0) {
return;
}
// Compute FPS.
float fps = numImg / (static_cast<double>(clock() - startClock) / CLOCKS_PER_SEC);
const Dtype* detectionsData = detections->host_data();
const int width = images[0].cols;
const int height = images[0].rows;
// Group detections by image, scaling coordinates into pixels.
std::vector<LabelBBox> allDetections(numImg);
for (int i = 0; i < numDet; i++) {
const int imgIdx = detectionsData[i * 7];
SASSERT0(imgIdx < numImg);
const int label = detectionsData[i * 7 + 1];
const float score = detectionsData[i * 7 + 2];
if (score < threshold) {
continue;
}
NormalizedBBox bbox;
bbox.xmin = detectionsData[i * 7 + 3] * width;
bbox.ymin = detectionsData[i * 7 + 4] * height;
bbox.xmax = detectionsData[i * 7 + 5] * width;
bbox.ymax = detectionsData[i * 7 + 6] * height;
bbox.score = score;
allDetections[imgIdx][label].push_back(bbox);
}
// Text rendering parameters shared by the FPS and label overlays.
int fontface = cv::FONT_HERSHEY_SIMPLEX;
double scale = 1;
int thickness = 2;
int baseline = 0;
char buffer[50];
for (int i = 0; i < numImg; i++) {
cv::Mat image = images[i];
// Show FPS
snprintf(buffer, sizeof(buffer), "FPS: %.2f", fps);
cv::Size text = cv::getTextSize(buffer, fontface, scale, thickness, &baseline);
cv::rectangle(image, cv::Point(0, 0), cv::Point(text.width, text.height + baseline),
CV_RGB(255, 255, 255), CV_FILLED);
cv::putText(image, buffer, cv::Point(0, text.height + baseline / 2.),
fontface, scale, CV_RGB(0, 0, 0), thickness, 8);
// Draw bboxes.
for (std::map<int, std::vector<NormalizedBBox>>::iterator it = allDetections[i].begin();
it != allDetections[i].end(); it++) {
int label = it->first;
std::string labelName = "Unknown";
if (labelToDisplayName.find(label) != labelToDisplayName.end()) {
labelName = labelToDisplayName.find(label)->second;
}
SASSERT0(label < colors.size());
const cv::Scalar& color = colors[label];
const std::vector<NormalizedBBox>& bboxes = it->second;
for (int j = 0; j < bboxes.size(); j++) {
cv::Point topLeftPt(bboxes[j].xmin, bboxes[j].ymin);
cv::Point bottomRightPt(bboxes[j].xmax, bboxes[j].ymax);
cv::rectangle(image, topLeftPt, bottomRightPt, color, 4);
// Label + score tag anchored at the bottom-left corner of the box.
cv::Point bottomLeftPt(bboxes[j].xmin, bboxes[j].ymax);
snprintf(buffer, sizeof(buffer), "%s: %.2f", labelName.c_str(),
bboxes[j].score);
cv::Size text = cv::getTextSize(buffer, fontface, scale, thickness,
&baseline);
cv::rectangle(image, bottomLeftPt + cv::Point(0, 0),
bottomLeftPt + cv::Point(text.width, -text.height - baseline),
color, CV_FILLED);
cv::putText(image, buffer, bottomLeftPt - cv::Point(0, baseline),
fontface, scale, CV_RGB(0, 0, 0), thickness, 8);
}
}
// Save result if required.
if (!saveFile.empty()) {
// Lazily open the output video on the first frame.
if (!capOut.isOpened()) {
cv::Size size(image.size().width, image.size().height);
cv::VideoWriter outputVideo(saveFile, CV_FOURCC('D', 'I', 'V', 'X'),
30, size, true);
capOut = outputVideo;
}
capOut.write(image);
}
cv::imshow("detections", image);
// ESC (27) interrupts the process.
if (cv::waitKey(1) == 27) {
raise(SIGINT);
}
}
// Reset the FPS clock for the next batch.
startClock = clock();
}
template void VisualizeBBox(const std::vector<cv::Mat>& images, Data<float>* detections,
const float threshold, const std::vector<cv::Scalar>& colors,
const std::map<int, std::string>& labelToDisplayName, const std::string& saveFile);
// Unpack a flat detection tensor into per-image, per-label bbox lists.
// Each detection is a row of 7 values: [itemId, label, score, xmin, ymin, xmax, ymax].
// Rows with itemId == -1 are padding and are skipped.
template <typename Dtype>
void GetDetectionResults(const Dtype* detData, const int numDet, const int backgroundLabelId,
		std::map<int, LabelBBox>* allDetections) {
	allDetections->clear();
	for (int det = 0; det < numDet; det++) {
		const Dtype* row = detData + det * 7;
		const int itemId = row[0];
		if (itemId == -1) {
			continue;  // padding row
		}
		const int label = row[1];
		SASSERT(backgroundLabelId != label,
				"Found background label in the detection results.");
		NormalizedBBox bbox;
		bbox.score = row[2];
		bbox.xmin = row[3];
		bbox.ymin = row[4];
		bbox.xmax = row[5];
		bbox.ymax = row[6];
		bbox.size = BBoxSize(bbox);
		(*allDetections)[itemId][label].push_back(bbox);
	}
}
template void GetDetectionResults(const float* detData, const int numDet,
const int backgroundLabelId, std::map<int, LabelBBox>* allDetections);
// Decode predicted location offsets back into bounding-box corners.
// One thread index per output coordinate:
//   index = (d * num_loc_classes + c) * 4 + i, where
//   i = coordinate (0:xmin 1:ymin 2:xmax 3:ymax), c = location class, d = prior.
// code_type: 0 = CORNER, 1 = CENTER_SIZE, 2 = CORNER_SIZE (unknown codes are no-ops).
// prior_data layout: num_priors prior boxes followed by num_priors variance 4-tuples.
template <typename Dtype>
__global__ void DecodeBBoxesKernel(const int nthreads, const Dtype* loc_data,
		const Dtype* prior_data, const int code_type,
		const bool variance_encoded_in_target, const int num_priors,
		const bool share_location, const int num_loc_classes, const int background_label_id,
		const bool clip_bbox, Dtype* bbox_data) {
	CUDA_KERNEL_LOOP(index, nthreads) {
		const int i = index % 4;
		const int c = (index / 4) % num_loc_classes;
		const int d = (index / 4 / num_loc_classes) % num_priors;
		if (!share_location && c == background_label_id) {
			// Ignore background class if not share_location.
			// FIX: use continue instead of return so this thread still processes its
			// remaining grid-stride iterations when nthreads exceeds the launch size.
			continue;
		}
		const int pi = d * 4;                   // offset of this prior's box
		const int vi = pi + num_priors * 4;     // offset of this prior's variances
		if (code_type == 0) {  // CORNER
			if (variance_encoded_in_target) {
				// variance is encoded in target, we simply need to add the offset
				// predictions.
				bbox_data[index] = prior_data[pi + i] + loc_data[index];
			} else {
				// variance is encoded in bbox, we need to scale the offset accordingly.
				bbox_data[index] =
						prior_data[pi + i] + loc_data[index] * prior_data[vi + i];
			}
		} else if (code_type == 1) {  // CENTER_SIZE
			const Dtype p_xmin = prior_data[pi];
			const Dtype p_ymin = prior_data[pi + 1];
			const Dtype p_xmax = prior_data[pi + 2];
			const Dtype p_ymax = prior_data[pi + 3];
			const Dtype prior_width = p_xmax - p_xmin;
			const Dtype prior_height = p_ymax - p_ymin;
			const Dtype prior_center_x = (p_xmin + p_xmax) / 2.;
			const Dtype prior_center_y = (p_ymin + p_ymax) / 2.;
			// index - i is the first of the 4 coordinates of this (prior, class) pair.
			const Dtype xmin = loc_data[index - i];
			const Dtype ymin = loc_data[index - i + 1];
			const Dtype xmax = loc_data[index - i + 2];
			const Dtype ymax = loc_data[index - i + 3];
			Dtype decode_bbox_center_x, decode_bbox_center_y;
			Dtype decode_bbox_width, decode_bbox_height;
			if (variance_encoded_in_target) {
				// variance is encoded in target, we simply need to restore the offset
				// predictions.
				decode_bbox_center_x = xmin * prior_width + prior_center_x;
				decode_bbox_center_y = ymin * prior_height + prior_center_y;
				decode_bbox_width = exp(xmax) * prior_width;
				decode_bbox_height = exp(ymax) * prior_height;
			} else {
				// variance is encoded in bbox, we need to scale the offset accordingly.
				decode_bbox_center_x =
						prior_data[vi] * xmin * prior_width + prior_center_x;
				decode_bbox_center_y =
						prior_data[vi + 1] * ymin * prior_height + prior_center_y;
				decode_bbox_width =
						exp(prior_data[vi + 2] * xmax) * prior_width;
				decode_bbox_height =
						exp(prior_data[vi + 3] * ymax) * prior_height;
			}
			// Convert the decoded center/size back to the corner this thread owns.
			switch (i) {
			case 0:
				bbox_data[index] = decode_bbox_center_x - decode_bbox_width / 2.;
				break;
			case 1:
				bbox_data[index] = decode_bbox_center_y - decode_bbox_height / 2.;
				break;
			case 2:
				bbox_data[index] = decode_bbox_center_x + decode_bbox_width / 2.;
				break;
			case 3:
				bbox_data[index] = decode_bbox_center_y + decode_bbox_height / 2.;
				break;
			}
		} else if (code_type == 2) {  // CORNER_SIZE
			const Dtype p_xmin = prior_data[pi];
			const Dtype p_ymin = prior_data[pi + 1];
			const Dtype p_xmax = prior_data[pi + 2];
			const Dtype p_ymax = prior_data[pi + 3];
			const Dtype prior_width = p_xmax - p_xmin;
			const Dtype prior_height = p_ymax - p_ymin;
			// x-coordinates scale with the prior width, y-coordinates with its height.
			Dtype p_size;
			if (i == 0 || i == 2) {
				p_size = prior_width;
			} else {
				p_size = prior_height;
			}
			if (variance_encoded_in_target) {
				// variance is encoded in target, we simply need to add the offset
				// predictions.
				bbox_data[index] = prior_data[pi + i] + loc_data[index] * p_size;
			} else {
				// variance is encoded in bbox, we need to scale the offset accordingly.
				bbox_data[index] =
						prior_data[pi + i] + loc_data[index] * prior_data[vi + i] * p_size;
			}
		} else {
			// Unknown code type: leave bbox_data[index] untouched.
		}
		if (clip_bbox) {
			// Clamp the decoded coordinate into the normalized [0, 1] range.
			bbox_data[index] = max(min(bbox_data[index], Dtype(1.)), Dtype(0.));
		}
	}
}
// Host-side launcher for DecodeBBoxesKernel: maps the CodeType enum onto the
// integer codes the kernel understands (0 = CORNER, 1 = CENTER_SIZE,
// 2 = CORNER_SIZE; any other enum value leaves -1, which the kernel ignores).
// Launches one thread per output coordinate (nthreads total).
template <typename Dtype>
void DecodeBBoxesGPU(const int nthreads, const Dtype* locData, const Dtype* priorData,
		const CodeType codeType, const bool varianceEncodedInTarget,
		const int numPriors, const bool shareLocation,
		const int numLocClasses, const int backgroundLabelId,
		const bool clipBBox, Dtype* bboxData) {
	int codeTypeInt = -1;
	if (codeType == CodeType::CORNER) codeTypeInt = 0;
	else if (codeType == CodeType::CENTER_SIZE) codeTypeInt = 1;
	else if (codeType == CodeType::CORNER_SIZE) codeTypeInt = 2;
	DecodeBBoxesKernel<Dtype><<<SOOOA_GET_BLOCKS(nthreads), SOOOA_CUDA_NUM_THREADS>>>(
			nthreads, locData, priorData, codeTypeInt, varianceEncodedInTarget, numPriors,
			shareLocation, numLocClasses, backgroundLabelId, clipBBox, bboxData);
	CUDA_POST_KERNEL_CHECK;
}
template void DecodeBBoxesGPU(const int nthreads, const float* locData, const float* priorData,
const CodeType codeType, const bool varianceEncodedInTarget,
const int numPriors, const bool shareLocation,
const int numLocClasses, const int backgroundLabelId,
const bool clipBBox, float* bboxData);
// Permute a [n][d][c][i] tensor into [n][c][d][i] layout (swap the box and
// class axes); one element copied per thread index.
template <typename Dtype>
__global__ void PermuteDataKernel(const int nthreads, const Dtype* data,
		const int num_classes, const int num_data, const int num_dim, Dtype* new_data) {
	CUDA_KERNEL_LOOP(index, nthreads) {
		// Decompose the flat source index into its four coordinates.
		int rem = index;
		const int i = rem % num_dim;      rem /= num_dim;
		const int c = rem % num_classes;  rem /= num_classes;
		const int d = rem % num_data;     rem /= num_data;
		const int n = rem;
		// Re-linearize with c and d swapped.
		const int dst = ((n * num_classes + c) * num_data + d) * num_dim + i;
		new_data[dst] = data[index];
	}
}
// Host-side launcher for PermuteDataKernel: one thread per element (nthreads
// total). See the kernel for the layout transformation performed.
template <typename Dtype>
void PermuteDataGPU(const int nthreads,
		const Dtype* data, const int numClasses, const int numData,
		const int numDim, Dtype* newData) {
	PermuteDataKernel<Dtype><<<SOOOA_GET_BLOCKS(nthreads), SOOOA_CUDA_NUM_THREADS>>>(
			nthreads, data, numClasses, numData, numDim, newData);
	CUDA_POST_KERNEL_CHECK;
}
template void PermuteDataGPU(const int nthreads,
const float* data, const int numClasses, const int numData,
const int numDim, float* newData);
// Project bbox onto the coordinate system defined by src_bbox.
// Returns true iff the (clipped) projection has positive area.
bool ProjectBBox(const NormalizedBBox& srcBBox, const NormalizedBBox& bbox,
		NormalizedBBox* projBBox) {
	// Bail out when the two boxes do not overlap at all.
	const bool disjoint = bbox.xmin >= srcBBox.xmax || bbox.xmax <= srcBBox.xmin ||
			bbox.ymin >= srcBBox.ymax || bbox.ymax <= srcBBox.ymin;
	if (disjoint) {
		return false;
	}
	const float srcWidth = srcBBox.xmax - srcBBox.xmin;
	const float srcHeight = srcBBox.ymax - srcBBox.ymin;
	// Express bbox's corners relative to srcBBox's origin and scale.
	projBBox->xmin = (bbox.xmin - srcBBox.xmin) / srcWidth;
	projBBox->ymin = (bbox.ymin - srcBBox.ymin) / srcHeight;
	projBBox->xmax = (bbox.xmax - srcBBox.xmin) / srcWidth;
	projBBox->ymax = (bbox.ymax - srcBBox.ymin) / srcHeight;
	projBBox->difficult = bbox.difficult;
	ClipBBox(*projBBox, projBBox);
	return BBoxSize(*projBBox) > 0;
}
// Inverse of ProjectBBox: map bbox, expressed in srcBBox's coordinate frame,
// back into absolute (image) coordinates.
void LocateBBox(const NormalizedBBox& srcBBox, const NormalizedBBox& bbox,
		NormalizedBBox* locBBox) {
	const float w = srcBBox.xmax - srcBBox.xmin;
	const float h = srcBBox.ymax - srcBBox.ymin;
	locBBox->xmin = srcBBox.xmin + bbox.xmin * w;
	locBBox->ymin = srcBBox.ymin + bbox.ymin * h;
	locBBox->xmax = srcBBox.xmin + bbox.xmax * w;
	locBBox->ymax = srcBBox.ymin + bbox.ymax * h;
	locBBox->difficult = bbox.difficult;
}
// Fraction of bbox1's area that is covered by bbox2 (0 when they don't overlap).
float BBoxCoverage(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2) {
	NormalizedBBox overlap;
	IntersectBBox(bbox1, bbox2, &overlap);
	const float overlapSize = BBoxSize(overlap);
	if (overlapSize > 0) {
		return overlapSize / BBoxSize(bbox1);
	}
	return 0.f;
}
// Decide whether a box survives a crop (srcBBox) under the given emit policy.
// CENTER: keep iff the box's center lies inside srcBBox.
// MIN_OVERLAP: keep iff enough of the box's area overlaps srcBBox.
bool MeetEmitConstraint(const NormalizedBBox& srcBBox, const NormalizedBBox& bbox,
		const EmitConstraint& emitConstraint) {
	const EmitType emitType = emitConstraint.emitType;
	if (emitType == EmitType::CENTER) {
		const float xcenter = (bbox.xmin + bbox.xmax) / 2;
		const float ycenter = (bbox.ymin + bbox.ymax) / 2;
		return xcenter >= srcBBox.xmin && xcenter <= srcBBox.xmax &&
				ycenter >= srcBBox.ymin && ycenter <= srcBBox.ymax;
	}
	if (emitType == EmitType::MIN_OVERLAP) {
		return BBoxCoverage(bbox, srcBBox) > emitConstraint.emitOverlap;
	}
	SASSERT(false, "Unknown emit type.");
	return false;
}
// Rescale bbox from the crop's coordinate frame to the network-input frame.
// Only applies when FIT_SMALL_SIZE resizing was used with valid scales;
// otherwise bbox is left untouched.
void ExtrapolateBBox(const ResizeParam& param, const int height, const int width,
		const NormalizedBBox& cropBBox, NormalizedBBox* bbox) {
	const float heightScale = param.heightScale;
	const float widthScale = param.widthScale;
	if (!(heightScale > 0 && widthScale > 0 &&
			param.resizeMode == ResizeMode::FIT_SMALL_SIZE)) {
		return;
	}
	// Recompute the resized dimensions so the original aspect ratio is kept
	// (FIT_SMALL_SIZE grows the long side rather than distorting).
	const float origAspect = static_cast<float>(width) / height;
	float resizeHeight = param.height;
	float resizeWidth = param.width;
	const float resizeAspect = resizeWidth / resizeHeight;
	if (origAspect < resizeAspect) {
		resizeHeight = resizeWidth / origAspect;
	} else {
		resizeWidth = resizeHeight * origAspect;
	}
	const float cropHeight = resizeHeight * (cropBBox.ymax - cropBBox.ymin);
	const float cropWidth = resizeWidth * (cropBBox.xmax - cropBBox.xmin);
	SASSERT0(cropWidth >= widthScale);
	SASSERT0(cropHeight >= heightScale);
	// Stretch the normalized coordinates by the crop/scale ratio on each axis.
	bbox->xmin = bbox->xmin * cropWidth / widthScale;
	bbox->xmax = bbox->xmax * cropWidth / widthScale;
	bbox->ymin = bbox->ymin * cropHeight / heightScale;
	bbox->ymax = bbox->ymax * cropHeight / heightScale;
}
// Sort (score, count) pairs by descending score, then write the running sum of
// the counts into cumSum (same length as pairs). The input is not modified.
void CumSum(const std::vector<std::pair<float, int>>& pairs, std::vector<int>* cumSum) {
	std::vector<std::pair<float, int>> sorted = pairs;
	std::stable_sort(sorted.begin(), sorted.end(), SortScorePairDescend<int>);
	cumSum->clear();
	cumSum->reserve(sorted.size());
	int running = 0;
	for (size_t i = 0; i < sorted.size(); i++) {
		running += sorted[i].second;
		cumSum->push_back(running);
	}
}
// Compute precision/recall curves and average precision from true-positive and
// false-positive flags, each paired with its detection score.
//   tp/fp: (score, 0/1) pairs; tp[i] and fp[i] must describe the same detection
//          (same score, complementary flags).
//   numPos: number of ground-truth positives (recall denominator).
//   apVersion: "11point" (VOC2007), "MaxIntegral" (VOC2012/ILSVRC) or "Integral".
// Outputs precision into *prec, recall into *rec and the AP into *ap.
void ComputeAP(const std::vector<std::pair<float, int>>& tp, const int numPos,
		const std::vector<std::pair<float, int>>& fp, const std::string apVersion,
		std::vector<float>* prec, std::vector<float>* rec, float* ap) {
	const float eps = 1e-6;
	SASSERT(tp.size() == fp.size(), "tp must have same size as fp.");
	const int num = tp.size();
	// Make sure that tp and fp have complement value.
	for (int i = 0; i < num; i++) {
		SASSERT0(std::fabs(tp[i].first - fp[i].first) <= eps);
		SASSERT0(tp[i].second == 1 - fp[i].second);
	}
	prec->clear();
	rec->clear();
	*ap = 0;
	if (tp.size() == 0 || numPos == 0) {
		return;
	}
	// Compute cumSum of tp. (CumSum sorts by descending score internally, so
	// prec/rec below are ordered from highest-scoring detection to lowest.)
	std::vector<int> tpCumSum;
	CumSum(tp, &tpCumSum);
	SASSERT0(tpCumSum.size() == num);
	// Compute cumSum of fp.
	std::vector<int> fpCumSum;
	CumSum(fp, &fpCumSum);
	SASSERT0(fpCumSum.size() == num);
	// Compute precision: TP / (TP + FP) at each cutoff.
	for (int i = 0; i < num; i++) {
		prec->push_back(static_cast<float>(tpCumSum[i]) / (tpCumSum[i] + fpCumSum[i]));
	}
	// Compute recall: TP / numPos at each cutoff.
	for (int i = 0; i < num; i++) {
		SASSERT0(tpCumSum[i] <= numPos);
		rec->push_back(static_cast<float>(tpCumSum[i]) / numPos);
	}
	if (apVersion == "11point") {
		// VOC2007 style for computing AP: average the max precision attainable
		// at the 11 recall levels 0.0, 0.1, ..., 1.0. maxPrecs[j] holds the best
		// precision among points with recall >= j/10; the scan runs right-to-left
		// so each level inherits the best precision of all higher levels.
		std::vector<float> maxPrecs(11, 0.f);
		int startIdx = num - 1;
		for (int j = 10; j >= 0; j--) {
			for (int i = startIdx; i >= 0; i--) {
				if ((*rec)[i] < j / 10.f) {
					startIdx = i;
					if (j > 0) {
						maxPrecs[j - 1] = maxPrecs[j];
					}
					break;
				} else {
					if (maxPrecs[j] < (*prec)[i]) {
						maxPrecs[j] = (*prec)[i];
					}
				}
			}
		}
		for (int j = 10; j >= 0; j--) {
			*ap += maxPrecs[j] / 11;
		}
	} else if (apVersion == "MaxIntegral") {
		// VOC2012 or ILSVRC style for computing AP: integrate precision over
		// recall, taking the running maximum of precision from the right.
		float curRec = rec->back();
		float curPrec = prec->back();
		for (int i = num - 2; i >= 0; i--) {
			curPrec = std::max<float>((*prec)[i], curPrec);
			if (fabs(curRec - (*rec)[i]) > eps) {
				*ap += curPrec * fabs(curRec - (*rec)[i]);
			}
			curRec = (*rec)[i];
		}
		*ap += curRec * curPrec;
	} else if (apVersion == "Integral") {
		// Natural integral: sum prec * delta-recall left-to-right.
		float prevRec = 0.f;
		for (int i = 0; i < num; i++) {
			if (fabs((*rec)[i] - prevRec) > eps) {
				*ap += (*prec)[i] * fabs((*rec)[i] - prevRec);
			}
			prevRec = (*rec)[i];
		}
	} else {
		STDOUT_LOG("Unknown apVersion: %s", apVersion.c_str());
	}
}
|
dbb15eaf4d6fd62f505b0255f4a4730acd2fbb8b.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include "Utilities.cuh"
using namespace std;
#define NUM_THREADS 32
#define NUM_BLOCKS 16
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
// Check a HIP runtime API result. In debug builds, print the error string and
// assert; in release builds this is a pass-through. Returns the result either
// way so it can wrap calls inline.
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
	if (result != hipSuccess) {
		fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
		assert(result == hipSuccess);
	}
#endif
	return result;
}
// Functor passed to thrust::transform below: element-wise product of its two inputs.
struct BinaryOp{ __host__ __device__ int operator()(const int& o1, const int& o2) { return o1 * o2; } };
// Assign val to each of the first N elements of a.
void fill(int *a, int N, int val)
{
	int i = 0;
	while (i < N) {
		a[i] = val;
		++i;
	}
}
// Demo: square an N-element array on the GPU, splitting the work across
// NUM_STREAMS streams so H2D copies, thrust transforms and D2H copies can
// overlap. Usage: prog [num_streams]. N is assumed divisible by num_streams;
// a remainder would be silently left unprocessed.
// Fixes vs. original: h_checkResults and the two timing events are now
// released, async memcpys are error-checked, and unused launch-config locals
// (nThreads/nBlocks/subKernelBlock) were removed.
int main(int argc, char **argv)
{
	const int N = 2296960*100;
	int NUM_STREAMS = 1;
	if (argc > 1) NUM_STREAMS = atoi(argv[1]);
	// --- Host side (pinned memory, required for truly asynchronous copies)
	int *h_in, *h_out, *h_checkResults;
	checkCuda( hipHostMalloc((void**)&h_in, N * sizeof(int)) );
	checkCuda( hipHostMalloc((void**)&h_out, N * sizeof(int)) );
	checkCuda( hipHostMalloc((void**)&h_checkResults, N * sizeof(int)) );
	fill(h_in, N, 5);
	fill(h_out, N, 0);
	fill(h_checkResults, N, 25);  // expected output: 5 * 5
	// --- Device side
	int *d_in, *d_out;
	checkCuda(hipMalloc((void **)&d_in, N * sizeof(int)));
	checkCuda(hipMalloc((void **)&d_out, N * sizeof(int)));
	// Per-stream chunk sizes.
	int streamSize = N / NUM_STREAMS;
	size_t streamMemSize = N * sizeof(int) / NUM_STREAMS;
	// --- Create CUDA events to capture time
	hipEvent_t startEvent, stopEvent;
	checkCuda( hipEventCreate(&startEvent) );
	checkCuda( hipEventCreate(&stopEvent) );
	float ms;
	// --- Create CUDA streams
	hipStream_t streams[NUM_STREAMS];
	for (int i = 0; i < NUM_STREAMS; i++)
		checkCuda(hipStreamCreate(&streams[i]));
	checkCuda( hipEventRecord(startEvent, 0) );
	// Stage 1: asynchronously copy each chunk to the device on its own stream.
	for (int i = 0; i < NUM_STREAMS; i++) {
		int offset = i * streamSize;
		checkCuda(hipMemcpyAsync(&d_in[offset], &h_in[offset], streamMemSize,
				hipMemcpyHostToDevice, streams[i]));
	}
	// Stage 2: square each chunk in place (two half-chunk transforms per stream).
	for (int i = 0; i < NUM_STREAMS; i++) {
		int offset = i * streamSize;
		thrust::transform(thrust::hip::par.on(streams[i]),
				thrust::device_pointer_cast(&d_in[offset]),
				thrust::device_pointer_cast(&d_in[offset]) + streamSize / 2,
				thrust::device_pointer_cast(&d_in[offset]),
				thrust::device_pointer_cast(&d_out[offset]),
				BinaryOp());
		thrust::transform(thrust::hip::par.on(streams[i]),
				thrust::device_pointer_cast(&d_in[offset + streamSize / 2]),
				thrust::device_pointer_cast(&d_in[offset + streamSize / 2]) + streamSize / 2,
				thrust::device_pointer_cast(&d_in[offset + streamSize / 2]),
				thrust::device_pointer_cast(&d_out[offset + streamSize / 2]),
				BinaryOp());
	}
	// Stage 3: asynchronously copy the results back to the host.
	for (int i = 0; i < NUM_STREAMS; i ++) {
		int offset = i * streamSize;
		checkCuda(hipMemcpyAsync(&h_out[offset], &d_out[offset], streamMemSize,
				hipMemcpyDeviceToHost, streams[i]));
	}
	checkCuda( hipEventRecord(stopEvent, 0) );
	checkCuda( hipEventSynchronize(stopEvent) );
	checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
	printf("Total Time (ms): %f\n", ms);
	for (int i = 0; i < NUM_STREAMS; i++) {
		checkCuda(hipStreamSynchronize(streams[i]));
	}
	checkCuda(hipDeviceSynchronize());
	// -- GPU output check: sum of per-element differences (0 when all match).
	int sum = 0;
	for (int i = 0; i < N; i++) {
		sum += h_checkResults[i] - h_out[i];
	}
	cout << "Error between CPU and GPU: " << sum << endl;
	// -- Release resources
	checkCuda( hipHostFree(h_in) );
	checkCuda( hipHostFree(h_out) );
	checkCuda( hipHostFree(h_checkResults) );
	checkCuda( hipFree(d_in) );
	checkCuda( hipFree(d_out) );
	checkCuda( hipEventDestroy(startEvent) );
	checkCuda( hipEventDestroy(stopEvent) );
	for (int i = 0; i < NUM_STREAMS; i++)
		checkCuda(hipStreamDestroy(streams[i]));
	hipDeviceReset();
	return 0;
}
| dbb15eaf4d6fd62f505b0255f4a4730acd2fbb8b.cu | #include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include "Utilities.cuh"
using namespace std;
#define NUM_THREADS 32
#define NUM_BLOCKS 16
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
// Check a CUDA runtime API result. In debug builds, print the error string and
// assert; in release builds this is a pass-through. Returns the result either
// way so it can wrap calls inline.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
	if (result != cudaSuccess) {
		fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
		assert(result == cudaSuccess);
	}
#endif
	return result;
}
// Functor passed to thrust::transform below: element-wise product of its two inputs.
struct BinaryOp{ __host__ __device__ int operator()(const int& o1, const int& o2) { return o1 * o2; } };
// Assign val to each of the first N elements of a.
void fill(int *a, int N, int val)
{
	for (int *p = a; p != a + N; ++p) {
		*p = val;
	}
}
// Demo: square an N-element array on the GPU, splitting the work across
// NUM_STREAMS streams so H2D copies, thrust transforms and D2H copies can
// overlap. Usage: prog [num_streams]. N is assumed divisible by num_streams;
// a remainder would be silently left unprocessed.
// Fixes vs. original: h_checkResults and the two timing events are now
// released, async memcpys are error-checked, and unused launch-config locals
// (nThreads/nBlocks/subKernelBlock) were removed.
int main(int argc, char **argv)
{
	const int N = 2296960*100;
	int NUM_STREAMS = 1;
	if (argc > 1) NUM_STREAMS = atoi(argv[1]);
	// --- Host side (pinned memory, required for truly asynchronous copies)
	int *h_in, *h_out, *h_checkResults;
	checkCuda( cudaMallocHost((void**)&h_in, N * sizeof(int)) );
	checkCuda( cudaMallocHost((void**)&h_out, N * sizeof(int)) );
	checkCuda( cudaMallocHost((void**)&h_checkResults, N * sizeof(int)) );
	fill(h_in, N, 5);
	fill(h_out, N, 0);
	fill(h_checkResults, N, 25);  // expected output: 5 * 5
	// --- Device side
	int *d_in, *d_out;
	checkCuda(cudaMalloc((void **)&d_in, N * sizeof(int)));
	checkCuda(cudaMalloc((void **)&d_out, N * sizeof(int)));
	// Per-stream chunk sizes.
	int streamSize = N / NUM_STREAMS;
	size_t streamMemSize = N * sizeof(int) / NUM_STREAMS;
	// --- Create CUDA events to capture time
	cudaEvent_t startEvent, stopEvent;
	checkCuda( cudaEventCreate(&startEvent) );
	checkCuda( cudaEventCreate(&stopEvent) );
	float ms;
	// --- Create CUDA streams
	cudaStream_t streams[NUM_STREAMS];
	for (int i = 0; i < NUM_STREAMS; i++)
		checkCuda(cudaStreamCreate(&streams[i]));
	checkCuda( cudaEventRecord(startEvent, 0) );
	// Stage 1: asynchronously copy each chunk to the device on its own stream.
	for (int i = 0; i < NUM_STREAMS; i++) {
		int offset = i * streamSize;
		checkCuda(cudaMemcpyAsync(&d_in[offset], &h_in[offset], streamMemSize,
				cudaMemcpyHostToDevice, streams[i]));
	}
	// Stage 2: square each chunk in place (two half-chunk transforms per stream).
	for (int i = 0; i < NUM_STREAMS; i++) {
		int offset = i * streamSize;
		thrust::transform(thrust::cuda::par.on(streams[i]),
				thrust::device_pointer_cast(&d_in[offset]),
				thrust::device_pointer_cast(&d_in[offset]) + streamSize / 2,
				thrust::device_pointer_cast(&d_in[offset]),
				thrust::device_pointer_cast(&d_out[offset]),
				BinaryOp());
		thrust::transform(thrust::cuda::par.on(streams[i]),
				thrust::device_pointer_cast(&d_in[offset + streamSize / 2]),
				thrust::device_pointer_cast(&d_in[offset + streamSize / 2]) + streamSize / 2,
				thrust::device_pointer_cast(&d_in[offset + streamSize / 2]),
				thrust::device_pointer_cast(&d_out[offset + streamSize / 2]),
				BinaryOp());
	}
	// Stage 3: asynchronously copy the results back to the host.
	for (int i = 0; i < NUM_STREAMS; i ++) {
		int offset = i * streamSize;
		checkCuda(cudaMemcpyAsync(&h_out[offset], &d_out[offset], streamMemSize,
				cudaMemcpyDeviceToHost, streams[i]));
	}
	checkCuda( cudaEventRecord(stopEvent, 0) );
	checkCuda( cudaEventSynchronize(stopEvent) );
	checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
	printf("Total Time (ms): %f\n", ms);
	for (int i = 0; i < NUM_STREAMS; i++) {
		checkCuda(cudaStreamSynchronize(streams[i]));
	}
	checkCuda(cudaDeviceSynchronize());
	// -- GPU output check: sum of per-element differences (0 when all match).
	int sum = 0;
	for (int i = 0; i < N; i++) {
		sum += h_checkResults[i] - h_out[i];
	}
	cout << "Error between CPU and GPU: " << sum << endl;
	// -- Release resources
	checkCuda( cudaFreeHost(h_in) );
	checkCuda( cudaFreeHost(h_out) );
	checkCuda( cudaFreeHost(h_checkResults) );
	checkCuda( cudaFree(d_in) );
	checkCuda( cudaFree(d_out) );
	checkCuda( cudaEventDestroy(startEvent) );
	checkCuda( cudaEventDestroy(stopEvent) );
	for (int i = 0; i < NUM_STREAMS; i++)
		checkCuda(cudaStreamDestroy(streams[i]));
	cudaDeviceReset();
	return 0;
}
|
d8bc5958a3e0b190c1e871885e232b2c8b5f490b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <chrono>
#include <cstdio>
#include <cmath>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
using FLOAT = float;
#define CHECK(test) if (test != hipSuccess) throw "error";
const int NTHR_PER_BLK = 256; // Number of CUDA threads per block
const int NBLOCK = 56*4; // Number of CUDA blocks (SMs on P100)
const int Npoint = NBLOCK*NTHR_PER_BLK; // No. of independent samples
const int Neq = 100000; // No. of generations to equilibrate
const int Ngen_per_block = 5000; // No. of generations per block
const float DELTA = 2.0; // Random step size
// Explicitly typed constants so can easily work with both floats and floats
const FLOAT FOUR = 4.0;
const FLOAT TWO = 2.0;
const FLOAT ONE = 1.0;
const FLOAT HALF = 0.5;
const FLOAT ZERO = 0.0;
// Specialized/overloaded functions to support both double and float
// rand<T>: draw a uniform sample from the caller's per-thread hiprand state.
template <typename T> __device__ __forceinline__ T rand(hiprandState_t* state);
template<> __device__ __forceinline__ float rand<float>(hiprandState_t* state) {
	return hiprand_uniform(state);
}
template<> __device__ __forceinline__ double rand<double>(hiprandState_t* state) {
	return hiprand_uniform_double(state);
}
// Precision-dispatched math wrappers so the same kernel source works for
// FLOAT = float or double.
__device__ __forceinline__ float EXP(float x) {return expf(x);}
__device__ __forceinline__ double EXP(double x) {return exp(x);}
__device__ __forceinline__ float SQRT(float x) {return sqrtf(x);}
__device__ __forceinline__ double SQRT(double x) {return sqrt(x);}
// Block-wise sum reduction: each block writes the sum of its grid-stride slice
// of data[0..n) into blocksums[blockIdx.x]. Phase 1: every thread accumulates
// a grid-stride partial sum; phase 2: shared-memory binary-tree reduction.
__global__ void SumWithinBlocks(const int n, const FLOAT* data, FLOAT* blocksums) {
	int nthread = blockDim.x*gridDim.x;
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	__shared__ FLOAT sdata[512]; // max threads
	// Every thread in every block computes partial sum over rest of vector
	FLOAT st=ZERO;
	while (i < n) {
		st += data[i];
		i+=nthread;
	}
	sdata[threadIdx.x] = st;
	__syncthreads();
	// Now do binary tree sum within a block
	// NOTE(review): the tree starts at s=128, so this is only correct for
	// blockDim.x <= 256 (true for the launches in this file: 256 and NBLOCK)
	// despite sdata having 512 slots -- confirm before reusing with larger blocks.
	int tid = threadIdx.x;
	for (unsigned int s=128; s>0; s>>=1) {
		if (tid<s && (tid+s)<blockDim.x) {
			sdata[tid] += sdata[tid + s];
		}
		__syncthreads();
	}
	if (tid==0) blocksums[blockIdx.x] = sdata[0];
}
// Reduce the four per-walker statistics (r1, r2, r12, accept count), stored
// contiguously in stats[4*Npoint], into statsum[0..3] via two reduction
// passes. blocksums is NBLOCK-sized device scratch space.
void sum_stats(const int Npoint, const FLOAT* stats, FLOAT* statsum, FLOAT* blocksums) {
	for (int what=0; what<4; what++) {
		hipLaunchKernelGGL(( SumWithinBlocks), dim3(NBLOCK),dim3(NTHR_PER_BLK), 0, 0, Npoint, stats+what*Npoint, blocksums);
		hipLaunchKernelGGL(( SumWithinBlocks), dim3(1),dim3(NBLOCK), 0, 0, NBLOCK, blocksums, statsum+what);
	}
}
// Distances for a two-electron configuration: r1 = |p1|, r2 = |p2| (from the
// nucleus at the origin) and r12 = |p1 - p2| (electron-electron separation).
__device__ __forceinline__ void compute_distances(FLOAT x1, FLOAT y1, FLOAT z1, FLOAT x2, FLOAT y2, FLOAT z2,
		FLOAT& r1, FLOAT& r2, FLOAT& r12) {
	r1 = SQRT(x1*x1 + y1*y1 + z1*z1);
	r2 = SQRT(x2*x2 + y2*y2 + z2*z2);
	const FLOAT dx = x1 - x2;
	const FLOAT dy = y1 - y2;
	const FLOAT dz = z1 - z2;
	r12 = SQRT(dx*dx + dy*dy + dz*dz);
}
// Helium trial wave function: psi = (1 + r12/2) * exp(-2 * (r1 + r2)).
__device__ __forceinline__ FLOAT wave_function(FLOAT x1, FLOAT y1, FLOAT z1, FLOAT x2, FLOAT y2, FLOAT z2) {
	FLOAT r1, r2, r12;
	compute_distances(x1, y1, z1, x2, y2, z2, r1, r2, r12);
	return (ONE + HALF*r12)*EXP(-TWO*(r1 + r2));
}
// Initialize random number generator
// One hiprand state per thread, all seeded from the same seed with distinct
// subsequences so streams are independent.
__global__ void initran(unsigned int seed, hiprandState_t* states) {
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	hiprand_init(seed, i, 0, &states[i]);
}
// ZERO stats counters on the GPU
// stats holds four Npoint-length arrays back to back; each thread clears its
// own slot in all four.
__global__ void zero_stats(int Npoint, FLOAT* stats) {
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	stats[0*Npoint+i] = ZERO; // r1
	stats[1*Npoint+i] = ZERO; // r2
	stats[2*Npoint+i] = ZERO; // r12
	stats[3*Npoint+i] = ZERO; // accept count
}
// initializes samples
// One walker per thread: both electrons start uniformly in [-2, 2]^3 and the
// trial wave function is evaluated at that starting configuration.
__global__ void initialize(FLOAT* x1, FLOAT* y1, FLOAT* z1, FLOAT* x2, FLOAT* y2, FLOAT* z2, FLOAT* psi, hiprandState_t* states) {
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	x1[i] = (rand<FLOAT>(states+i) - HALF)*FOUR;
	y1[i] = (rand<FLOAT>(states+i) - HALF)*FOUR;
	z1[i] = (rand<FLOAT>(states+i) - HALF)*FOUR;
	x2[i] = (rand<FLOAT>(states+i) - HALF)*FOUR;
	y2[i] = (rand<FLOAT>(states+i) - HALF)*FOUR;
	z2[i] = (rand<FLOAT>(states+i) - HALF)*FOUR;
	psi[i] = wave_function(x1[i], y1[i], z1[i], x2[i], y2[i], z2[i]);
}
// Metropolis propagation: each thread advances its own walker nstep
// generations. Per step: propose a uniform displacement of both electrons
// (step size DELTA), accept with probability min(1, |psi_new|^2 / |psi|^2),
// and accumulate r1/r2/r12 and the acceptance count into stats. Walker state
// is staged in registers and written back once at the end.
__global__ void propagate(const int Npoint, const int nstep, FLOAT* X1, FLOAT* Y1, FLOAT* Z1,
		FLOAT* X2, FLOAT* Y2, FLOAT* Z2, FLOAT* P, FLOAT* stats, hiprandState_t* states) {
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	FLOAT x1 = X1[i];
	FLOAT y1 = Y1[i];
	FLOAT z1 = Z1[i];
	FLOAT x2 = X2[i];
	FLOAT y2 = Y2[i];
	FLOAT z2 = Z2[i];
	FLOAT p = P[i];
	for (int step=0; step<nstep; step++) {
		// Propose a symmetric random move for both electrons.
		FLOAT x1new = x1 + (rand<FLOAT>(states+i)-HALF)*DELTA;
		FLOAT y1new = y1 + (rand<FLOAT>(states+i)-HALF)*DELTA;
		FLOAT z1new = z1 + (rand<FLOAT>(states+i)-HALF)*DELTA;
		FLOAT x2new = x2 + (rand<FLOAT>(states+i)-HALF)*DELTA;
		FLOAT y2new = y2 + (rand<FLOAT>(states+i)-HALF)*DELTA;
		FLOAT z2new = z2 + (rand<FLOAT>(states+i)-HALF)*DELTA;
		FLOAT pnew = wave_function(x1new, y1new, z1new, x2new, y2new, z2new);
		// Metropolis test on |psi|^2.
		if (pnew*pnew > p*p*rand<FLOAT>(states+i)) {
			stats[3*Npoint+i]++; //naccept ++;
			p = pnew;
			x1 = x1new;
			y1 = y1new;
			z1 = z1new;
			x2 = x2new;
			y2 = y2new;
			z2 = z2new;
		}
		// Accumulate observables at the (possibly unchanged) current position.
		FLOAT r1, r2, r12;
		compute_distances(x1, y1, z1, x2, y2, z2, r1, r2, r12);
		stats[0*Npoint+i] += r1;
		stats[1*Npoint+i] += r2;
		stats[2*Npoint+i] += r12;
	}
	// Write the walker state back to global memory.
	X1[i] = x1;
	Y1[i] = y1;
	Z1[i] = z1;
	X2[i] = x2;
	Y2[i] = y2;
	Z2[i] = z2;
	P[i] = p;
}
// Variational Monte Carlo driver for the helium trial wave function.
// Usage: prog <Nsample>. Equilibrates all walkers, then runs Nsample blocks of
// Ngen_per_block generations each, averaging <r1>, <r2>, <r12> over blocks and
// reporting the block-to-block standard error plus the acceptance ratio.
int main(int argc, char* argv[]) {
	if (argc != 2) {
		printf("Usage: %s <number of blocks to sample>\n", argv[0]);
		return 1;
	}
	const int Nsample = atoi(argv[1]); // No. of blocks to sample
	FLOAT *x1, *y1, *z1, *x2, *y2, *z2, *psi, *stats, *statsum, *blocksums;
	hiprandState_t *ranstates;
	CHECK(hipMalloc((void **)&x1, Npoint * sizeof(FLOAT)));
	CHECK(hipMalloc((void **)&y1, Npoint * sizeof(FLOAT)));
	CHECK(hipMalloc((void **)&z1, Npoint * sizeof(FLOAT)));
	CHECK(hipMalloc((void **)&x2, Npoint * sizeof(FLOAT)));
	CHECK(hipMalloc((void **)&y2, Npoint * sizeof(FLOAT)));
	CHECK(hipMalloc((void **)&z2, Npoint * sizeof(FLOAT)));
	CHECK(hipMalloc((void **)&psi, Npoint * sizeof(FLOAT)));
	CHECK(hipMalloc((void **)&stats, 4 * Npoint * sizeof(FLOAT)));
	CHECK(hipMalloc((void **)&blocksums, NBLOCK * sizeof(FLOAT))); // workspace for summation
	CHECK(hipMalloc((void **)&statsum, 4 * sizeof(FLOAT))); // workspace for summation
	CHECK(hipMalloc((void **)&ranstates, Npoint*sizeof(hiprandState_t)));
	hipLaunchKernelGGL(( initran), dim3(NBLOCK),dim3(NTHR_PER_BLK), 0, 0, 5551212, ranstates);
	hipLaunchKernelGGL(( initialize), dim3(NBLOCK),dim3(NTHR_PER_BLK), 0, 0, x1, y1, z1, x2, y2, z2, psi, ranstates);
	hipLaunchKernelGGL(( zero_stats), dim3(NBLOCK),dim3(NTHR_PER_BLK), 0, 0, Npoint, stats);
	// Equilibrate
	hipLaunchKernelGGL(( propagate), dim3(NBLOCK),dim3(NTHR_PER_BLK), 0, 0, Npoint, Neq, x1, y1, z1, x2, y2, z2, psi, stats, ranstates);
	// Accumulators for averages over blocks --- use doubles
	double r1_tot = ZERO, r1_sq_tot = ZERO;
	double r2_tot = ZERO, r2_sq_tot = ZERO;
	double r12_tot = ZERO, r12_sq_tot = ZERO;
	double naccept = ZERO; // Keeps track of propagation efficiency
	double time = 0.0; // accumulated kernel wall time, nanoseconds
	for (int sample=0; sample<Nsample; sample++) {
		// Time zero_stats + propagate + reduction for this block.
		hipDeviceSynchronize();
		auto start = std::chrono::steady_clock::now();
		hipLaunchKernelGGL(( zero_stats), dim3(NBLOCK),dim3(NTHR_PER_BLK), 0, 0, Npoint, stats);
		hipLaunchKernelGGL(( propagate), dim3(NBLOCK),dim3(NTHR_PER_BLK), 0, 0, Npoint, Ngen_per_block, x1, y1, z1, x2, y2, z2, psi, stats, ranstates);
		sum_stats(Npoint, stats, statsum, blocksums);
		hipDeviceSynchronize();
		auto end = std::chrono::steady_clock::now();
		time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
		// Fetch the four reduced statistics and convert sums to per-step means.
		struct {FLOAT r1, r2, r12, accept;} s;
		CHECK(hipMemcpy(&s, statsum, sizeof(s), hipMemcpyDeviceToHost));
		naccept += s.accept;
		s.r1 /= Ngen_per_block*Npoint;
		s.r2 /= Ngen_per_block*Npoint;
		s.r12 /= Ngen_per_block*Npoint;
#ifdef DEBUG
		printf(" block %6d  %.6f  %.6f  %.6f\n", sample, s.r1, s.r2, s.r12);
#endif
		r1_tot += s.r1;   r1_sq_tot += s.r1*s.r1;
		r2_tot += s.r2;   r2_sq_tot += s.r2*s.r2;
		r12_tot += s.r12; r12_sq_tot += s.r12*s.r12;
	}
	// Block averages and standard errors of the mean.
	r1_tot /= Nsample; r1_sq_tot /= Nsample;
	r2_tot /= Nsample; r2_sq_tot /= Nsample;
	r12_tot /= Nsample; r12_sq_tot /= Nsample;
	double r1s = sqrt((r1_sq_tot - r1_tot*r1_tot) / Nsample);
	double r2s = sqrt((r2_sq_tot - r2_tot*r2_tot) / Nsample);
	double r12s = sqrt((r12_sq_tot - r12_tot*r12_tot) / Nsample);
	printf(" <r1>  = %.6f +- %.6f\n", r1_tot, r1s);
	printf(" <r2>  = %.6f +- %.6f\n", r2_tot, r2s);
	printf(" <r12> = %.6f +- %.6f\n", r12_tot, r12s);
	// avoid int overflow
	printf(" acceptance ratio=%.1f%%\n",
			100.0*naccept/double(Npoint)/double(Ngen_per_block)/double(Nsample));
	printf("Average execution time of kernels: %f (s)\n", (time * 1e-9f) / Nsample);
	CHECK(hipFree(x1));
	CHECK(hipFree(y1));
	CHECK(hipFree(z1));
	CHECK(hipFree(x2));
	CHECK(hipFree(y2));
	CHECK(hipFree(z2));
	CHECK(hipFree(psi));
	CHECK(hipFree(stats));
	CHECK(hipFree(blocksums));
	CHECK(hipFree(statsum));
	CHECK(hipFree(ranstates));
	return 0;
}
| d8bc5958a3e0b190c1e871885e232b2c8b5f490b.cu | #include <chrono>
#include <cstdio>
#include <cmath>
#include <curand.h>
#include <curand_kernel.h>
// Precision selector: flip to double for a double-precision build.
// The typed constants below and the rand/EXP/SQRT overloads further down
// adapt automatically.
using FLOAT = float;
// Error-check helper for CUDA runtime calls. Wrapped in do/while(0) with
// the argument parenthesized so that `CHECK(x);` behaves as exactly one
// statement even inside an unbraced if/else (the original bare-if form
// had a dangling-else hazard).
#define CHECK(test) do { if ((test) != cudaSuccess) throw "error"; } while (0)
const int NTHR_PER_BLK = 256; // Number of CUDA threads per block
const int NBLOCK = 56*4; // Number of CUDA blocks (SMs on P100)
const int Npoint = NBLOCK*NTHR_PER_BLK; // No. of independent samples
const int Neq = 100000; // No. of generations to equilibrate
const int Ngen_per_block = 5000; // No. of generations per block
const FLOAT DELTA = 2.0; // Random step size (was `float`; FLOAT keeps it consistent with the precision switch)
// Explicitly typed constants so the same kernel source works for both
// float and double builds without accidental double promotion.
const FLOAT FOUR = 4.0;
const FLOAT TWO = 2.0;
const FLOAT ONE = 1.0;
const FLOAT HALF = 0.5;
const FLOAT ZERO = 0.0;
// Specialized/overloaded device helpers so the same kernel source supports
// both single and double precision (selected via the FLOAT alias above).
// Primary template is declared but only defined for float and double.
template <typename T> __device__ __forceinline__ T rand(curandState_t* state);
// float specialization: curand_uniform returns a uniform deviate in (0, 1].
template<> __device__ __forceinline__ float rand<float>(curandState_t* state) {
return curand_uniform(state);
}
// double specialization: same distribution, double precision.
template<> __device__ __forceinline__ double rand<double>(curandState_t* state) {
return curand_uniform_double(state);
}
// Precision-dispatched math wrappers: route float arguments to the float
// overloads (expf/sqrtf) and doubles to the double versions, so kernels
// avoid unintended double-precision math when FLOAT is float.
__device__ __forceinline__ float EXP(float x) {return expf(x);}
__device__ __forceinline__ double EXP(double x) {return exp(x);}
__device__ __forceinline__ float SQRT(float x) {return sqrtf(x);}
__device__ __forceinline__ double SQRT(double x) {return sqrt(x);}
// Partial-sum kernel: each block writes the sum of its share of the
// n-element array `data` into blocksums[blockIdx.x].
// Phase 1: every thread accumulates a grid-stride partial sum in a register.
// Phase 2: a shared-memory binary-tree reduction combines the partials
// within the block.
// Constraints visible in the code: blockDim.x <= 512 (size of sdata), and
// because the tree starts at stride 128, partials at tid >= 256 would never
// be folded in -- callers must launch with blockDim.x <= 256 (this file
// uses 256 for stage one and NBLOCK=224 for stage two).
__global__ void SumWithinBlocks(const int n, const FLOAT* data, FLOAT* blocksums) {
int nthread = blockDim.x*gridDim.x;
int i = blockDim.x * blockIdx.x + threadIdx.x;
__shared__ FLOAT sdata[512]; // max threads
// Every thread in every block computes partial sum over rest of vector
FLOAT st=ZERO;
while (i < n) {
st += data[i];
i+=nthread;
}
sdata[threadIdx.x] = st;
__syncthreads(); // all partials must be visible before the tree reduction
// Now do binary tree sum within a block
int tid = threadIdx.x;
for (unsigned int s=128; s>0; s>>=1) {
// The (tid+s)<blockDim.x guard keeps this correct for non-power-of-two
// block sizes such as the NBLOCK=224 second-stage launch.
if (tid<s && (tid+s)<blockDim.x) {
sdata[tid] += sdata[tid + s];
}
__syncthreads(); // barrier is outside the divergent if: every thread reaches it
}
if (tid==0) blocksums[blockIdx.x] = sdata[0];
}
// Host-side driver: reduces each of the four per-point statistics
// (r1, r2, r12, acceptance count -- stored as four contiguous Npoint-long
// arrays inside `stats`) to a single scalar in statsum[what].
// Two-stage reduction: per-block partials into the `blocksums` scratch
// buffer, then one block folds the NBLOCK partials into the final value.
// Both launches are asynchronous; callers must synchronize (or issue a
// blocking memcpy, as main does) before reading statsum.
void sum_stats(const int Npoint, const FLOAT* stats, FLOAT* statsum, FLOAT* blocksums) {
for (int what=0; what<4; what++) {
SumWithinBlocks<<<NBLOCK,NTHR_PER_BLK>>>(Npoint, stats+what*Npoint, blocksums);
SumWithinBlocks<<<1,NBLOCK>>>(NBLOCK, blocksums, statsum+what);
}
}
// Given two 3-D points (x1,y1,z1) and (x2,y2,z2), returns -- via the
// reference parameters -- the Euclidean distance of each point from the
// origin (r1, r2) and the distance between the two points (r12).
__device__ __forceinline__ void compute_distances(FLOAT x1, FLOAT y1, FLOAT z1, FLOAT x2, FLOAT y2, FLOAT z2,
                                                  FLOAT& r1, FLOAT& r2, FLOAT& r12) {
  // Radial distance of each point from the origin.
  r1 = SQRT(x1*x1 + y1*y1 + z1*z1);
  r2 = SQRT(x2*x2 + y2*y2 + z2*z2);
  // Separation vector between the two points, then its length.
  const FLOAT dx = x1 - x2;
  const FLOAT dy = y1 - y2;
  const FLOAT dz = z1 - z2;
  r12 = SQRT(dx*dx + dy*dy + dz*dz);
}
// Trial wave function evaluated at a two-point configuration:
//   psi = (1 + r12/2) * exp(-2*(r1 + r2))
// where r1 and r2 are the distances of the points from the origin and
// r12 is their separation (see compute_distances).
__device__ __forceinline__ FLOAT wave_function(FLOAT x1, FLOAT y1, FLOAT z1, FLOAT x2, FLOAT y2, FLOAT z2) {
  FLOAT r1, r2, r12;
  compute_distances(x1, y1, z1, x2, y2, z2, r1, r2, r12);
  const FLOAT pair_term = ONE + HALF*r12;   // (1 + r12/2)
  const FLOAT decay = EXP(-TWO*(r1 + r2));  // exp(-2*(r1+r2))
  return pair_term * decay;
}
// Initialize random number generator: gives every thread its own curand
// state. All states share one seed but use a distinct subsequence (the
// global thread index), so the per-thread random streams are independent.
__global__ void initran(unsigned int seed, curandState_t* states) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
curand_init(seed, i, 0, &states[i]);
}
// ZERO stats counters on the GPU.
// `stats` holds four contiguous Npoint-long accumulator arrays: r1, r2,
// r12 and the acceptance count. One thread clears one sample's four slots.
// The bounds guard makes the kernel correct even if the launch grid
// over-covers Npoint (robustness fix; the launches in main cover it
// exactly, so behavior is unchanged for them).
__global__ void zero_stats(int Npoint, FLOAT* stats) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= Npoint) return; // guard the grid tail
stats[0*Npoint+i] = ZERO; // r1
stats[1*Npoint+i] = ZERO; // r2
stats[2*Npoint+i] = ZERO; // r12
stats[3*Npoint+i] = ZERO; // accept count
}
// initializes samples: each thread draws one starting configuration.
// Both 3-D points are placed uniformly in the cube (-2, 2]^3 (uniform
// deviate in (0, 1] shifted by HALF and scaled by FOUR), and the
// wave-function value for that configuration is cached in psi[i].
// NOTE(review): no bounds guard -- correctness relies on the launch grid
// covering the arrays exactly (NBLOCK*NTHR_PER_BLK == Npoint in main).
__global__ void initialize(FLOAT* x1, FLOAT* y1, FLOAT* z1, FLOAT* x2, FLOAT* y2, FLOAT* z2, FLOAT* psi, curandState_t* states) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
x1[i] = (rand<FLOAT>(states+i) - HALF)*FOUR;
y1[i] = (rand<FLOAT>(states+i) - HALF)*FOUR;
z1[i] = (rand<FLOAT>(states+i) - HALF)*FOUR;
x2[i] = (rand<FLOAT>(states+i) - HALF)*FOUR;
y2[i] = (rand<FLOAT>(states+i) - HALF)*FOUR;
z2[i] = (rand<FLOAT>(states+i) - HALF)*FOUR;
psi[i] = wave_function(x1[i], y1[i], z1[i], x2[i], y2[i], z2[i]);
}
// Metropolis random-walk propagation of all Npoint independent walkers.
// Each thread advances one walker: a pair of 3-D points plus the cached
// wave-function value p for the current configuration. Per generation it
//   1. proposes a uniform displacement in [-DELTA/2, DELTA/2) of all six
//      coordinates,
//   2. accepts when pnew^2 > p^2 * U with U uniform in (0, 1] -- i.e. with
//      probability min(1, pnew^2/p^2), so the walk samples |psi|^2,
//   3. accumulates r1, r2, r12 and the acceptance count into `stats`
//      (four contiguous Npoint-long arrays).
// Walker state lives in registers for the whole nstep loop and is written
// back to global memory once at the end.
__global__ void propagate(const int Npoint, const int nstep, FLOAT* X1, FLOAT* Y1, FLOAT* Z1,
FLOAT* X2, FLOAT* Y2, FLOAT* Z2, FLOAT* P, FLOAT* stats, curandState_t* states) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= Npoint) return; // robustness: guard the grid tail (launches in this file cover Npoint exactly)
FLOAT x1 = X1[i];
FLOAT y1 = Y1[i];
FLOAT z1 = Z1[i];
FLOAT x2 = X2[i];
FLOAT y2 = Y2[i];
FLOAT z2 = Z2[i];
FLOAT p = P[i];
for (int step=0; step<nstep; step++) {
// Trial move: displace every coordinate independently.
FLOAT x1new = x1 + (rand<FLOAT>(states+i)-HALF)*DELTA;
FLOAT y1new = y1 + (rand<FLOAT>(states+i)-HALF)*DELTA;
FLOAT z1new = z1 + (rand<FLOAT>(states+i)-HALF)*DELTA;
FLOAT x2new = x2 + (rand<FLOAT>(states+i)-HALF)*DELTA;
FLOAT y2new = y2 + (rand<FLOAT>(states+i)-HALF)*DELTA;
FLOAT z2new = z2 + (rand<FLOAT>(states+i)-HALF)*DELTA;
FLOAT pnew = wave_function(x1new, y1new, z1new, x2new, y2new, z2new);
// Metropolis accept/reject on the probability density |psi|^2.
if (pnew*pnew > p*p*rand<FLOAT>(states+i)) {
stats[3*Npoint+i]++; // acceptance counter
p = pnew;
x1 = x1new;
y1 = y1new;
z1 = z1new;
x2 = x2new;
y2 = y2new;
z2 = z2new;
}
// Accumulate distance statistics for the (possibly unchanged) state.
FLOAT r1, r2, r12;
compute_distances(x1, y1, z1, x2, y2, z2, r1, r2, r12);
stats[0*Npoint+i] += r1;
stats[1*Npoint+i] += r2;
stats[2*Npoint+i] += r12;
}
// Persist walker state for the next launch.
X1[i] = x1;
Y1[i] = y1;
Z1[i] = z1;
X2[i] = x2;
Y2[i] = y2;
Z2[i] = z2;
P[i] = p;
}
// Driver for the GPU Monte Carlo run.
// Usage: prog <Nsample> -- number of statistics blocks to sample after
// equilibration. Prints block-averaged <r1>, <r2>, <r12> with standard
// errors, the overall acceptance ratio, and the mean kernel wall time
// per sampled block.
int main(int argc, char* argv[]) {
if (argc != 2) {
printf("Usage: %s <number of blocks to sample>\n", argv[0]);
return 1;
}
// NOTE(review): atoi yields 0 on non-numeric input; Nsample <= 0 makes the
// sampling loop a no-op and the later divisions produce inf/nan -- consider
// validating the argument.
const int Nsample = atoi(argv[1]); // No. of blocks to sample
FLOAT *x1, *y1, *z1, *x2, *y2, *z2, *psi, *stats, *statsum, *blocksums;
curandState_t *ranstates;
// Device allocations: one slot per walker; `stats` is 4 contiguous
// Npoint-long accumulators (r1, r2, r12, accept count).
CHECK(cudaMalloc((void **)&x1, Npoint * sizeof(FLOAT)));
CHECK(cudaMalloc((void **)&y1, Npoint * sizeof(FLOAT)));
CHECK(cudaMalloc((void **)&z1, Npoint * sizeof(FLOAT)));
CHECK(cudaMalloc((void **)&x2, Npoint * sizeof(FLOAT)));
CHECK(cudaMalloc((void **)&y2, Npoint * sizeof(FLOAT)));
CHECK(cudaMalloc((void **)&z2, Npoint * sizeof(FLOAT)));
CHECK(cudaMalloc((void **)&psi, Npoint * sizeof(FLOAT)));
CHECK(cudaMalloc((void **)&stats, 4 * Npoint * sizeof(FLOAT)));
CHECK(cudaMalloc((void **)&blocksums, NBLOCK * sizeof(FLOAT))); // workspace for summation
CHECK(cudaMalloc((void **)&statsum, 4 * sizeof(FLOAT))); // workspace for summation
CHECK(cudaMalloc((void **)&ranstates, Npoint*sizeof(curandState_t)));
// Seed the per-thread RNG streams, draw starting configurations, and
// clear the accumulators.
initran<<<NBLOCK,NTHR_PER_BLK>>>(5551212, ranstates);
initialize<<<NBLOCK,NTHR_PER_BLK>>>(x1, y1, z1, x2, y2, z2, psi, ranstates);
zero_stats<<<NBLOCK,NTHR_PER_BLK>>>(Npoint, stats);
// Equilibrate (Neq generations; the stats gathered here are discarded
// because zero_stats runs again before each measured block)
propagate<<<NBLOCK,NTHR_PER_BLK>>>(Npoint, Neq, x1, y1, z1, x2, y2, z2, psi, stats, ranstates);
// Accumulators for averages over blocks --- use doubles
double r1_tot = ZERO, r1_sq_tot = ZERO;
double r2_tot = ZERO, r2_sq_tot = ZERO;
double r12_tot = ZERO, r12_sq_tot = ZERO;
double naccept = ZERO; // Keeps track of propagation efficiency
double time = 0.0; // accumulated kernel wall time, nanoseconds
for (int sample=0; sample<Nsample; sample++) {
cudaDeviceSynchronize(); // drain pending work so the timer brackets only this block
auto start = std::chrono::steady_clock::now();
zero_stats<<<NBLOCK,NTHR_PER_BLK>>>(Npoint, stats);
propagate<<<NBLOCK,NTHR_PER_BLK>>>(Npoint, Ngen_per_block, x1, y1, z1, x2, y2, z2, psi, stats, ranstates);
sum_stats(Npoint, stats, statsum, blocksums);
cudaDeviceSynchronize(); // wait for the reductions before stopping the clock
auto end = std::chrono::steady_clock::now();
time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
// Pull back the four reduced statistics for this block.
struct {FLOAT r1, r2, r12, accept;} s;
CHECK(cudaMemcpy(&s, statsum, sizeof(s), cudaMemcpyDeviceToHost));
naccept += s.accept;
// Normalize by generations*walkers. Ngen_per_block*Npoint is int math:
// 5000 * 57344 = 286,720,000, which fits in 32-bit int for these
// constants but would overflow for runs ~7.5x larger.
s.r1 /= Ngen_per_block*Npoint;
s.r2 /= Ngen_per_block*Npoint;
s.r12 /= Ngen_per_block*Npoint;
#ifdef DEBUG
printf(" block %6d %.6f %.6f %.6f\n", sample, s.r1, s.r2, s.r12);
#endif
r1_tot += s.r1; r1_sq_tot += s.r1*s.r1;
r2_tot += s.r2; r2_sq_tot += s.r2*s.r2;
r12_tot += s.r12; r12_sq_tot += s.r12*s.r12;
}
// Block means and standard errors of the mean:
// sigma_mean = sqrt((<x^2> - <x>^2) / Nsample).
r1_tot /= Nsample; r1_sq_tot /= Nsample;
r2_tot /= Nsample; r2_sq_tot /= Nsample;
r12_tot /= Nsample; r12_sq_tot /= Nsample;
double r1s = sqrt((r1_sq_tot - r1_tot*r1_tot) / Nsample);
double r2s = sqrt((r2_sq_tot - r2_tot*r2_tot) / Nsample);
double r12s = sqrt((r12_sq_tot - r12_tot*r12_tot) / Nsample);
printf(" <r1> = %.6f +- %.6f\n", r1_tot, r1s);
printf(" <r2> = %.6f +- %.6f\n", r2_tot, r2s);
printf(" <r12> = %.6f +- %.6f\n", r12_tot, r12s);
// avoid int overflow
printf(" acceptance ratio=%.1f%%\n",
100.0*naccept/double(Npoint)/double(Ngen_per_block)/double(Nsample));
printf("Average execution time of kernels: %f (s)\n", (time * 1e-9f) / Nsample);
// NOTE(review): device memory is leaked if an earlier CHECK throws;
// tolerable for a one-shot benchmark, but an RAII wrapper would be safer.
CHECK(cudaFree(x1));
CHECK(cudaFree(y1));
CHECK(cudaFree(z1));
CHECK(cudaFree(x2));
CHECK(cudaFree(y2));
CHECK(cudaFree(z2));
CHECK(cudaFree(psi));
CHECK(cudaFree(stats));
CHECK(cudaFree(blocksums));
CHECK(cudaFree(statsum));
CHECK(cudaFree(ranstates));
return 0;
}
|
0f5f417f5b15b0ef1726efcaabb9745bbf4b11f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include"../cuda_note.h"
#include"../cuda_debug.h"
#define ROW 1024
#define COL 1024
/*Chapter 3 Problem 1*/
// Exercise driver (HIP port): element-wise addition of two ROW x COL
// matrices on the GPU. Fills two host matrices, uploads them, launches one
// of the addition kernels declared in ../cuda_note.h, downloads the result
// and verifies it with checkAdditionResult.
int main(int argc, char* argv[]){
// Byte count of one ROW x COL float matrix.
// NOTE(review): int is adequate for 1024*1024*4 bytes, but size_t is the
// safer type for byte counts.
int size = sizeof(float) * ROW * COL;
// NOTE(review): malloc results are not checked for NULL.
float *h_firstInput = (float *) malloc(size);
float *h_secondInput = (float *) malloc(size);
float *h_output = (float *) malloc(size);
// Fill both inputs with a fixed pattern (helper from the project headers).
initial2DMatrix<float>(h_firstInput, ROW, COL, 1);
initial2DMatrix<float>(h_secondInput, ROW, COL, 1);
float *d_firstInput, *d_secondInput, *d_output;
hipError_t err;
int debug = 0; // set to 1 to dump the input matrices before the run
if (debug){
peakMatrix<float> (h_firstInput, ROW, COL);
peakMatrix<float> (h_secondInput, ROW, COL);
}
// Allocate device buffers and upload inputs. Errors are reported but
// execution continues -- NOTE(review): consider aborting on failure.
err = hipMalloc(&d_firstInput, size);
if (err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__);
}
err = hipMemcpy(d_firstInput, h_firstInput, size, hipMemcpyHostToDevice);
if (err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__);
}
err = hipMalloc(&d_secondInput, size);
if (err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__);
}
err = hipMemcpy(d_secondInput, h_secondInput, size, hipMemcpyHostToDevice);
if (err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__);
}
err = hipMalloc(&d_output, size);
if (err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__);
}
/*Chapter 3 problem 1.a --> matrixAdd*/
//matrixAdd <<<ceil(ROW * COL/512), 512>>> (d_firstInput, d_secondInput, d_output, ROW * COL);
/*Chapter 3 problem 1.b --> matrixAddRow*/
//matrixAddRow <<<ceil(ROW/ 512), 512>>> (d_firstInput, d_secondInput, d_output, ROW * COL, COL);
/*Chapter 3 problem 1.c --> matrixAddCol*/
// NOTE(review): the comment above says matrixAddCol but the call below is
// matrixAddRow (with ROW as the trailing argument) -- confirm which kernel
// problem 1.c intends.
// NOTE(review): ceil(COL/ 512) divides two ints first, so ceil is a no-op;
// exact here (1024/512 == 2) but would under-launch when COL is not a
// multiple of 512 -- prefer (COL + 511)/512. Kernel launch errors are also
// never checked (no hipGetLastError call).
hipLaunchKernelGGL(( matrixAddRow) , dim3(ceil(COL/ 512)), dim3(512), 0, 0, d_firstInput, d_secondInput, d_output, ROW * COL, ROW);
// Download the result (hipMemcpy is blocking, so it also synchronizes).
err = hipMemcpy(h_output, d_output, size, hipMemcpyDeviceToHost);
if (err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__);
}
// Release device memory (return codes ignored), verify on the host, then
// free the host buffers.
hipFree(d_firstInput);
hipFree(d_secondInput);
hipFree(d_output);
checkAdditionResult<float>(h_firstInput, h_secondInput, h_output, ROW, COL);
free(h_firstInput);
free(h_secondInput);
free(h_output);
} | 0f5f417f5b15b0ef1726efcaabb9745bbf4b11f4.cu | #include<iostream>
#include"../cuda_note.h"
#include"../cuda_debug.h"
#define ROW 1024
#define COL 1024
/*Chapter 3 Problem 1*/
// Exercise driver (CUDA original): element-wise addition of two ROW x COL
// matrices on the GPU. Fills two host matrices, uploads them, launches one
// of the addition kernels declared in ../cuda_note.h, downloads the result
// and verifies it with checkAdditionResult.
int main(int argc, char* argv[]){
// Byte count of one ROW x COL float matrix.
// NOTE(review): int is adequate for 1024*1024*4 bytes, but size_t is the
// safer type for byte counts.
int size = sizeof(float) * ROW * COL;
// NOTE(review): malloc results are not checked for NULL.
float *h_firstInput = (float *) malloc(size);
float *h_secondInput = (float *) malloc(size);
float *h_output = (float *) malloc(size);
// Fill both inputs with a fixed pattern (helper from the project headers).
initial2DMatrix<float>(h_firstInput, ROW, COL, 1);
initial2DMatrix<float>(h_secondInput, ROW, COL, 1);
float *d_firstInput, *d_secondInput, *d_output;
cudaError_t err;
int debug = 0; // set to 1 to dump the input matrices before the run
if (debug){
peakMatrix<float> (h_firstInput, ROW, COL);
peakMatrix<float> (h_secondInput, ROW, COL);
}
// Allocate device buffers and upload inputs. Errors are reported but
// execution continues -- NOTE(review): consider aborting on failure.
err = cudaMalloc(&d_firstInput, size);
if (err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
}
err = cudaMemcpy(d_firstInput, h_firstInput, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
}
err = cudaMalloc(&d_secondInput, size);
if (err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
}
err = cudaMemcpy(d_secondInput, h_secondInput, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
}
err = cudaMalloc(&d_output, size);
if (err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
}
/*Chapter 3 problem 1.a --> matrixAdd*/
//matrixAdd <<<ceil(ROW * COL/512), 512>>> (d_firstInput, d_secondInput, d_output, ROW * COL);
/*Chapter 3 problem 1.b --> matrixAddRow*/
//matrixAddRow <<<ceil(ROW/ 512), 512>>> (d_firstInput, d_secondInput, d_output, ROW * COL, COL);
/*Chapter 3 problem 1.c --> matrixAddCol*/
// NOTE(review): the comment above says matrixAddCol but the call below is
// matrixAddRow (with ROW as the trailing argument) -- confirm which kernel
// problem 1.c intends.
// NOTE(review): ceil(COL/ 512) divides two ints first, so ceil is a no-op;
// exact here (1024/512 == 2) but would under-launch when COL is not a
// multiple of 512 -- prefer (COL + 511)/512. Kernel launch errors are also
// never checked (no cudaGetLastError call).
matrixAddRow <<<ceil(COL/ 512)), 512>>> (d_firstInput, d_secondInput, d_output, ROW * COL, ROW);
// Download the result (cudaMemcpy is blocking, so it also synchronizes).
err = cudaMemcpy(h_output, d_output, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
}
// Release device memory (return codes ignored), verify on the host, then
// free the host buffers.
cudaFree(d_firstInput);
cudaFree(d_secondInput);
cudaFree(d_output);
checkAdditionResult<float>(h_firstInput, h_secondInput, h_output, ROW, COL);
free(h_firstInput);
free(h_secondInput);
free(h_output);
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.