Dataset columns: repo (string, lengths 1-152, nullable) | file (string, lengths 14-221) | code (string, lengths 501-25k) | file_length (int64, 501-25k) | avg_line_length (float64, 20-99.5) | max_line_length (int64, 21-134) | extension_type (string, 2 classes)

file: pytorch-main/c10/util/Bitset.h | repo: null | file_length: 3,435 | avg_line_length: 27.396694 | max_line_length: 80 | extension_type: h
#pragma once
#include <c10/macros/Macros.h>
#include <c10/util/C++17.h>
#include <c10/util/Optional.h>
#if defined(_MSC_VER)
#include <intrin.h>
#endif
namespace c10 {
namespace utils {
/**
* This is a simple bitset class with sizeof(long long int) bits.
* You can set bits, unset bits, query bits by index,
* and query for the first set bit.
* Before using this class, please also take a look at std::bitset,
* which has more functionality and is more generic. It is probably
* a better fit for your use case. The sole reason for c10::utils::bitset
* to exist is that std::bitset lacks a find_first_set() method.
*/
struct bitset final {
private:
#if defined(_MSC_VER)
// MSVC's _BitScanForward64 expects int64_t
using bitset_type = int64_t;
#else
// POSIX ffsll expects long long int
using bitset_type = long long int;
#endif
public:
static constexpr size_t NUM_BITS() {
return 8 * sizeof(bitset_type);
}
constexpr bitset() noexcept = default;
constexpr bitset(const bitset&) noexcept = default;
constexpr bitset(bitset&&) noexcept = default;
// There is an issue with gcc 5.3.0 when a defaulted function is defined as
// constexpr; see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=68754.
bitset& operator=(const bitset&) noexcept = default;
bitset& operator=(bitset&&) noexcept = default;
constexpr void set(size_t index) noexcept {
bitset_ |= (static_cast<long long int>(1) << index);
}
constexpr void unset(size_t index) noexcept {
bitset_ &= ~(static_cast<long long int>(1) << index);
}
constexpr bool get(size_t index) const noexcept {
return bitset_ & (static_cast<long long int>(1) << index);
}
constexpr bool is_entirely_unset() const noexcept {
return 0 == bitset_;
}
// Call the given functor with the index of each bit that is set
template <class Func>
void for_each_set_bit(Func&& func) const {
bitset cur = *this;
size_t index = cur.find_first_set();
while (0 != index) {
// -1 because find_first_set() is one-indexed.
index -= 1;
func(index);
cur.unset(index);
index = cur.find_first_set();
}
}
private:
// Return the index of the first set bit. The returned index is one-indexed
// (i.e. if the very first bit is set, this function returns '1'), and a
// return of '0' means that there was no bit set.
size_t find_first_set() const {
#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64))
unsigned long result;
bool has_bits_set = (0 != _BitScanForward64(&result, bitset_));
if (!has_bits_set) {
return 0;
}
return result + 1;
#elif defined(_MSC_VER) && defined(_M_IX86)
unsigned long result;
if (static_cast<uint32_t>(bitset_) != 0) {
bool has_bits_set =
(0 != _BitScanForward(&result, static_cast<uint32_t>(bitset_)));
if (!has_bits_set) {
return 0;
}
return result + 1;
} else {
bool has_bits_set =
(0 != _BitScanForward(&result, static_cast<uint32_t>(bitset_ >> 32)));
if (!has_bits_set) {
// Neither word contains a set bit; per the contract above, return 0.
return 0;
}
return result + 33;
}
#else
return __builtin_ffsll(bitset_);
#endif
}
friend bool operator==(bitset lhs, bitset rhs) noexcept {
return lhs.bitset_ == rhs.bitset_;
}
bitset_type bitset_{0};
};
inline bool operator!=(bitset lhs, bitset rhs) noexcept {
return !(lhs == rhs);
}
} // namespace utils
} // namespace c10
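// Illustrative usage sketch (not from the original header): exercising the
// bitset API defined above. The indices are arbitrary examples.
namespace c10_bitset_example {
inline size_t count_set_bits_example() {
  c10::utils::bitset bits;
  bits.set(3);
  bits.set(7);
  bits.unset(3);
  size_t count = 0;
  // Only bit 7 is still set, so the functor runs exactly once with index 7.
  bits.for_each_set_bit([&](size_t /*index*/) { ++count; });
  return count; // 1
}
} // namespace c10_bitset_example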
file: pytorch-main/c10/util/C++17.h | repo: null | file_length: 8,287 | avg_line_length: 28.183099 | max_line_length: 95 | extension_type: h
#pragma once
#ifndef C10_UTIL_CPP17_H_
#define C10_UTIL_CPP17_H_
#include <c10/macros/Macros.h>
#include <cstdlib>
#include <functional>
#include <memory>
#include <sstream>
#include <string>
#include <type_traits>
#include <utility>
#if !defined(__clang__) && !defined(_MSC_VER) && defined(__GNUC__) && \
__GNUC__ < 5
#error \
"You're trying to build PyTorch with a too old version of GCC. We need GCC 5 or later."
#endif
#if defined(__clang__) && __clang_major__ < 4
#error \
"You're trying to build PyTorch with a too old version of Clang. We need Clang 4 or later."
#endif
#if (defined(_MSC_VER) && (!defined(_MSVC_LANG) || _MSVC_LANG < 201703L)) || \
(!defined(_MSC_VER) && __cplusplus < 201703L)
#error You need C++17 to compile PyTorch
#endif
#if defined(_WIN32) && (defined(min) || defined(max))
#error Macro clash with min and max -- define NOMINMAX when compiling your program on Windows
#endif
/*
* This header adds some polyfills with C++17 functionality
*/
namespace c10 {
// In C++17, std::result_of has been superseded by std::invoke_result. Since
// C++20, std::result_of is removed.
template <typename F, typename... args>
#if defined(__cpp_lib_is_invocable) && __cpp_lib_is_invocable >= 201703L
using invoke_result = typename std::invoke_result<F, args...>;
#else
using invoke_result = typename std::result_of<F && (args && ...)>;
#endif
template <typename F, typename... args>
using invoke_result_t = typename invoke_result<F, args...>::type;
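// Minimal sketch (illustrative only): invoke_result_t yields the callable's
// return type regardless of which branch of the polyfill above is selected.
static_assert(
    std::is_same<invoke_result_t<int (*)(float), float>, int>::value,
    "invoke_result_t of a function pointer taking float is int");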
// std::is_pod is deprecated in C++20, std::is_standard_layout and
// std::is_trivial are introduced in C++11, std::conjunction has been introduced
// in C++17.
template <typename T>
#if defined(__cpp_lib_logical_traits) && __cpp_lib_logical_traits >= 201510L
using is_pod = std::conjunction<std::is_standard_layout<T>, std::is_trivial<T>>;
#else
using is_pod = std::is_pod<T>;
#endif
template <typename T>
constexpr bool is_pod_v = is_pod<T>::value;
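// Minimal sketch (illustrative only): the polyfilled trait agrees with the
// classic notion of a POD type.
static_assert(is_pod_v<int>, "int is standard-layout and trivial");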
namespace guts {
template <typename Base, typename Child, typename... Args>
typename std::enable_if<
!std::is_array<Base>::value && !std::is_array<Child>::value &&
std::is_base_of<Base, Child>::value,
std::unique_ptr<Base>>::type
make_unique_base(Args&&... args) {
return std::unique_ptr<Base>(new Child(std::forward<Args>(args)...));
}
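// Usage sketch (illustrative only; ExampleBase/ExampleDerived are
// hypothetical types introduced for this example): allocate a derived object
// but hold it through a unique_ptr to the base class.
namespace make_unique_base_example {
struct ExampleBase {
  virtual ~ExampleBase() = default;
};
struct ExampleDerived final : ExampleBase {
  explicit ExampleDerived(int value) : value_(value) {}
  int value_;
};
inline std::unique_ptr<ExampleBase> make_example() {
  return make_unique_base<ExampleBase, ExampleDerived>(42);
}
} // namespace make_unique_base_example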
#if defined(__cpp_lib_logical_traits) && !(defined(_MSC_VER) && _MSC_VER < 1920)
template <class... B>
using conjunction = std::conjunction<B...>;
template <class... B>
using disjunction = std::disjunction<B...>;
template <bool B>
using bool_constant = std::bool_constant<B>;
template <class B>
using negation = std::negation<B>;
#else
// Implementation taken from http://en.cppreference.com/w/cpp/types/conjunction
template <class...>
struct conjunction : std::true_type {};
template <class B1>
struct conjunction<B1> : B1 {};
template <class B1, class... Bn>
struct conjunction<B1, Bn...>
: std::conditional_t<bool(B1::value), conjunction<Bn...>, B1> {};
// Implementation taken from http://en.cppreference.com/w/cpp/types/disjunction
template <class...>
struct disjunction : std::false_type {};
template <class B1>
struct disjunction<B1> : B1 {};
template <class B1, class... Bn>
struct disjunction<B1, Bn...>
: std::conditional_t<bool(B1::value), B1, disjunction<Bn...>> {};
// Implementation taken from
// http://en.cppreference.com/w/cpp/types/integral_constant
template <bool B>
using bool_constant = std::integral_constant<bool, B>;
// Implementation taken from http://en.cppreference.com/w/cpp/types/negation
template <class B>
struct negation : bool_constant<!bool(B::value)> {};
#endif
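// Minimal sketch (illustrative only): the polyfilled traits compose like
// their std:: counterparts on either branch of the #if above.
static_assert(
    conjunction<std::is_integral<int>, std::is_signed<int>>::value,
    "int is an integral, signed type");
static_assert(
    negation<std::is_floating_point<int>>::value,
    "int is not a floating-point type");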
#ifdef __cpp_lib_void_t
template <class T>
using void_t = std::void_t<T>;
#else
// Implementation taken from http://en.cppreference.com/w/cpp/types/void_t
// (it takes CWG1558 into account and also works for older compilers)
template <typename... Ts>
struct make_void {
typedef void type;
};
template <typename... Ts>
using void_t = typename make_void<Ts...>::type;
#endif
#if defined(USE_ROCM)
// ROCm doesn't like C10_HOST_DEVICE
#define CUDA_HOST_DEVICE
#else
#define CUDA_HOST_DEVICE C10_HOST_DEVICE
#endif
#if defined(__cpp_lib_apply) && !defined(__CUDA_ARCH__)
template <class F, class Tuple>
CUDA_HOST_DEVICE inline constexpr decltype(auto) apply(F&& f, Tuple&& t) {
return std::apply(std::forward<F>(f), std::forward<Tuple>(t));
}
#else
// Implementation from http://en.cppreference.com/w/cpp/utility/apply (but
// modified)
// TODO This is an incomplete implementation of std::apply, not working for
// member functions.
namespace detail {
template <class F, class Tuple, std::size_t... INDEX>
#if defined(_MSC_VER)
// MSVC has a problem with the decltype() return type, but it also doesn't need
// it
C10_HOST_DEVICE constexpr auto apply_impl(
F&& f,
Tuple&& t,
std::index_sequence<INDEX...>)
#else
// GCC/Clang need the decltype() return type
CUDA_HOST_DEVICE constexpr decltype(auto) apply_impl(
F&& f,
Tuple&& t,
std::index_sequence<INDEX...>)
#endif
{
return std::forward<F>(f)(std::get<INDEX>(std::forward<Tuple>(t))...);
}
} // namespace detail
template <class F, class Tuple>
CUDA_HOST_DEVICE constexpr decltype(auto) apply(F&& f, Tuple&& t) {
return detail::apply_impl(
std::forward<F>(f),
std::forward<Tuple>(t),
std::make_index_sequence<
std::tuple_size<std::remove_reference_t<Tuple>>::value>{});
}
#endif
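// Usage sketch (illustrative only): apply unpacks a tuple into a callable's
// argument list, mirroring std::apply. std::make_tuple assumes <tuple> is
// reachable through the includes above (the implementation already relies on
// std::get and std::tuple_size).
inline int apply_example_sum() {
  return apply([](int a, int b) { return a + b; }, std::make_tuple(2, 3)); // 5
}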
#undef CUDA_HOST_DEVICE
template <typename Functor, typename... Args>
typename std::enable_if<
std::is_member_pointer<typename std::decay<Functor>::type>::value,
typename c10::invoke_result_t<Functor, Args...>>::type
invoke(Functor&& f, Args&&... args) {
return std::mem_fn(std::forward<Functor>(f))(std::forward<Args>(args)...);
}
template <typename Functor, typename... Args>
typename std::enable_if<
!std::is_member_pointer<typename std::decay<Functor>::type>::value,
typename c10::invoke_result_t<Functor, Args...>>::type
invoke(Functor&& f, Args&&... args) {
return std::forward<Functor>(f)(std::forward<Args>(args)...);
}
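// Usage sketch (illustrative only; ExampleCounter is a hypothetical type
// introduced for this example): invoke dispatches both pointers to members
// and plain callables.
namespace invoke_example {
struct ExampleCounter {
  int value = 0;
  int twice() const {
    return value * 2;
  }
};
inline int run_example() {
  ExampleCounter counter{21};
  // The member-pointer path returns 42; the lambda path returns 0.
  return invoke(&ExampleCounter::twice, counter) + invoke([] { return 0; });
}
} // namespace invoke_example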
namespace detail {
struct _identity final {
template <class T>
using type_identity = T;
template <class T>
decltype(auto) operator()(T&& arg) {
return std::forward<T>(arg);
}
};
template <class Func, class Enable = void>
struct function_takes_identity_argument : std::false_type {};
#if defined(_MSC_VER)
// For some weird reason, MSVC shows a compiler error when using guts::void_t
// instead of std::void_t. But we're only building on MSVC versions that have
// std::void_t, so let's just use that one.
template <class Func>
struct function_takes_identity_argument<
Func,
std::void_t<decltype(std::declval<Func>()(_identity()))>> : std::true_type {
};
#else
template <class Func>
struct function_takes_identity_argument<
Func,
void_t<decltype(std::declval<Func>()(_identity()))>> : std::true_type {};
#endif
} // namespace detail
// GCC 4.8 doesn't define std::to_string, even though that's in C++11. Let's
// define it.
namespace detail {
class DummyClassForToString final {};
} // namespace detail
} // namespace guts
} // namespace c10
namespace std {
// We use SFINAE to detect if std::to_string exists for a type, but that only
// works if the function name is defined. So let's define a std::to_string for a
// dummy type. If you're getting an error here saying that this overload doesn't
// match your std::to_string() call, then you're calling std::to_string() but
// should be calling c10::guts::to_string().
inline std::string to_string(c10::guts::detail::DummyClassForToString) {
return "";
}
} // namespace std
namespace c10 {
namespace guts {
namespace detail {
template <class T, class Enable = void>
struct to_string_ final {
static std::string call(T value) {
std::ostringstream str;
str << value;
return str.str();
}
};
// If a std::to_string exists, use that instead
template <class T>
struct to_string_<T, void_t<decltype(std::to_string(std::declval<T>()))>>
final {
static std::string call(T value) {
return std::to_string(value);
}
};
} // namespace detail
template <class T>
inline std::string to_string(T value) {
return detail::to_string_<T>::call(value);
}
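// Minimal sketch (illustrative only): to_string prefers std::to_string when
// it exists for the argument type and falls back to operator<< streaming
// otherwise.
inline std::string to_string_example() {
  // "42" via std::to_string; "" via the DummyClassForToString overload above.
  return to_string(42) + to_string(detail::DummyClassForToString());
}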
} // namespace guts
} // namespace c10
#endif // C10_UTIL_CPP17_H_
file: pytorch-main/c10/util/CallOnce.h | repo: null | file_length: 1,942 | avg_line_length: 27.15942 | max_line_length: 79 | extension_type: h
#pragma once
#include <atomic>
#include <mutex>
#include <thread>
#include <utility>
#include <c10/macros/Macros.h>
#include <c10/util/C++17.h>
namespace c10 {
// Custom c10 call_once implementation to avoid the deadlock in std::call_once.
// The implementation here is a simplified version of folly's and likely has a
// much higher memory footprint.
template <typename Flag, typename F, typename... Args>
inline void call_once(Flag& flag, F&& f, Args&&... args) {
if (C10_LIKELY(flag.test_once())) {
return;
}
flag.call_once_slow(std::forward<F>(f), std::forward<Args>(args)...);
}
class once_flag {
public:
#ifndef _WIN32
// Running into a build error on MSVC. Can't seem to get a repro locally, so
// I'm just avoiding constexpr.
//
// C:/actions-runner/_work/pytorch/pytorch\c10/util/CallOnce.h(26): error:
// defaulted default constructor cannot be constexpr because the
// corresponding implicitly declared default constructor would not be
// constexpr 1 error detected in the compilation of
// "C:/actions-runner/_work/pytorch/pytorch/aten/src/ATen/cuda/cub.cu".
constexpr
#endif
once_flag() noexcept = default;
once_flag(const once_flag&) = delete;
once_flag& operator=(const once_flag&) = delete;
private:
template <typename Flag, typename F, typename... Args>
friend void call_once(Flag& flag, F&& f, Args&&... args);
template <typename F, typename... Args>
void call_once_slow(F&& f, Args&&... args) {
std::lock_guard<std::mutex> guard(mutex_);
if (init_.load(std::memory_order_relaxed)) {
return;
}
c10::guts::invoke(f, std::forward<Args>(args)...);
init_.store(true, std::memory_order_release);
}
bool test_once() {
return init_.load(std::memory_order_acquire);
}
void reset_once() {
init_.store(false, std::memory_order_release);
}
private:
std::mutex mutex_;
std::atomic<bool> init_{false};
};
} // namespace c10
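// Illustrative usage sketch (not from the original header): c10::call_once
// runs the callable at most once per once_flag, mirroring std::call_once.
namespace c10_call_once_example {
inline int& initialized_value() {
  static c10::once_flag flag;
  static int value = 0;
  c10::call_once(flag, [] { value = 42; });
  return value;
}
} // namespace c10_call_once_example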
file: pytorch-main/c10/util/ConstexprCrc.h | repo: null | file_length: 6,633 | avg_line_length: 49.257576 | max_line_length: 79 | extension_type: h
#pragma once
#include <c10/util/IdWrapper.h>
#include <c10/util/string_view.h>
#include <cstddef>
#include <cstdint>
namespace c10 {
namespace util {
namespace detail {
constexpr uint64_t crc64_table[] = {
0x0000000000000000, 0x7ad870c830358979, 0xf5b0e190606b12f2,
0x8f689158505e9b8b, 0xc038e5739841b68f, 0xbae095bba8743ff6,
0x358804e3f82aa47d, 0x4f50742bc81f2d04, 0xab28ecb46814fe75,
0xd1f09c7c5821770c, 0x5e980d24087fec87, 0x24407dec384a65fe,
0x6b1009c7f05548fa, 0x11c8790fc060c183, 0x9ea0e857903e5a08,
0xe478989fa00bd371, 0x7d08ff3b88be6f81, 0x07d08ff3b88be6f8,
0x88b81eabe8d57d73, 0xf2606e63d8e0f40a, 0xbd301a4810ffd90e,
0xc7e86a8020ca5077, 0x4880fbd87094cbfc, 0x32588b1040a14285,
0xd620138fe0aa91f4, 0xacf86347d09f188d, 0x2390f21f80c18306,
0x594882d7b0f40a7f, 0x1618f6fc78eb277b, 0x6cc0863448deae02,
0xe3a8176c18803589, 0x997067a428b5bcf0, 0xfa11fe77117cdf02,
0x80c98ebf2149567b, 0x0fa11fe77117cdf0, 0x75796f2f41224489,
0x3a291b04893d698d, 0x40f16bccb908e0f4, 0xcf99fa94e9567b7f,
0xb5418a5cd963f206, 0x513912c379682177, 0x2be1620b495da80e,
0xa489f35319033385, 0xde51839b2936bafc, 0x9101f7b0e12997f8,
0xebd98778d11c1e81, 0x64b116208142850a, 0x1e6966e8b1770c73,
0x8719014c99c2b083, 0xfdc17184a9f739fa, 0x72a9e0dcf9a9a271,
0x08719014c99c2b08, 0x4721e43f0183060c, 0x3df994f731b68f75,
0xb29105af61e814fe, 0xc849756751dd9d87, 0x2c31edf8f1d64ef6,
0x56e99d30c1e3c78f, 0xd9810c6891bd5c04, 0xa3597ca0a188d57d,
0xec09088b6997f879, 0x96d1784359a27100, 0x19b9e91b09fcea8b,
0x636199d339c963f2, 0xdf7adabd7a6e2d6f, 0xa5a2aa754a5ba416,
0x2aca3b2d1a053f9d, 0x50124be52a30b6e4, 0x1f423fcee22f9be0,
0x659a4f06d21a1299, 0xeaf2de5e82448912, 0x902aae96b271006b,
0x74523609127ad31a, 0x0e8a46c1224f5a63, 0x81e2d7997211c1e8,
0xfb3aa75142244891, 0xb46ad37a8a3b6595, 0xceb2a3b2ba0eecec,
0x41da32eaea507767, 0x3b024222da65fe1e, 0xa2722586f2d042ee,
0xd8aa554ec2e5cb97, 0x57c2c41692bb501c, 0x2d1ab4dea28ed965,
0x624ac0f56a91f461, 0x1892b03d5aa47d18, 0x97fa21650afae693,
0xed2251ad3acf6fea, 0x095ac9329ac4bc9b, 0x7382b9faaaf135e2,
0xfcea28a2faafae69, 0x8632586aca9a2710, 0xc9622c4102850a14,
0xb3ba5c8932b0836d, 0x3cd2cdd162ee18e6, 0x460abd1952db919f,
0x256b24ca6b12f26d, 0x5fb354025b277b14, 0xd0dbc55a0b79e09f,
0xaa03b5923b4c69e6, 0xe553c1b9f35344e2, 0x9f8bb171c366cd9b,
0x10e3202993385610, 0x6a3b50e1a30ddf69, 0x8e43c87e03060c18,
0xf49bb8b633338561, 0x7bf329ee636d1eea, 0x012b592653589793,
0x4e7b2d0d9b47ba97, 0x34a35dc5ab7233ee, 0xbbcbcc9dfb2ca865,
0xc113bc55cb19211c, 0x5863dbf1e3ac9dec, 0x22bbab39d3991495,
0xadd33a6183c78f1e, 0xd70b4aa9b3f20667, 0x985b3e827bed2b63,
0xe2834e4a4bd8a21a, 0x6debdf121b863991, 0x1733afda2bb3b0e8,
0xf34b37458bb86399, 0x8993478dbb8deae0, 0x06fbd6d5ebd3716b,
0x7c23a61ddbe6f812, 0x3373d23613f9d516, 0x49aba2fe23cc5c6f,
0xc6c333a67392c7e4, 0xbc1b436e43a74e9d, 0x95ac9329ac4bc9b5,
0xef74e3e19c7e40cc, 0x601c72b9cc20db47, 0x1ac40271fc15523e,
0x5594765a340a7f3a, 0x2f4c0692043ff643, 0xa02497ca54616dc8,
0xdafce7026454e4b1, 0x3e847f9dc45f37c0, 0x445c0f55f46abeb9,
0xcb349e0da4342532, 0xb1eceec59401ac4b, 0xfebc9aee5c1e814f,
0x8464ea266c2b0836, 0x0b0c7b7e3c7593bd, 0x71d40bb60c401ac4,
0xe8a46c1224f5a634, 0x927c1cda14c02f4d, 0x1d148d82449eb4c6,
0x67ccfd4a74ab3dbf, 0x289c8961bcb410bb, 0x5244f9a98c8199c2,
0xdd2c68f1dcdf0249, 0xa7f41839ecea8b30, 0x438c80a64ce15841,
0x3954f06e7cd4d138, 0xb63c61362c8a4ab3, 0xcce411fe1cbfc3ca,
0x83b465d5d4a0eece, 0xf96c151de49567b7, 0x76048445b4cbfc3c,
0x0cdcf48d84fe7545, 0x6fbd6d5ebd3716b7, 0x15651d968d029fce,
0x9a0d8ccedd5c0445, 0xe0d5fc06ed698d3c, 0xaf85882d2576a038,
0xd55df8e515432941, 0x5a3569bd451db2ca, 0x20ed197575283bb3,
0xc49581ead523e8c2, 0xbe4df122e51661bb, 0x3125607ab548fa30,
0x4bfd10b2857d7349, 0x04ad64994d625e4d, 0x7e7514517d57d734,
0xf11d85092d094cbf, 0x8bc5f5c11d3cc5c6, 0x12b5926535897936,
0x686de2ad05bcf04f, 0xe70573f555e26bc4, 0x9ddd033d65d7e2bd,
0xd28d7716adc8cfb9, 0xa85507de9dfd46c0, 0x273d9686cda3dd4b,
0x5de5e64efd965432, 0xb99d7ed15d9d8743, 0xc3450e196da80e3a,
0x4c2d9f413df695b1, 0x36f5ef890dc31cc8, 0x79a59ba2c5dc31cc,
0x037deb6af5e9b8b5, 0x8c157a32a5b7233e, 0xf6cd0afa9582aa47,
0x4ad64994d625e4da, 0x300e395ce6106da3, 0xbf66a804b64ef628,
0xc5bed8cc867b7f51, 0x8aeeace74e645255, 0xf036dc2f7e51db2c,
0x7f5e4d772e0f40a7, 0x05863dbf1e3ac9de, 0xe1fea520be311aaf,
0x9b26d5e88e0493d6, 0x144e44b0de5a085d, 0x6e963478ee6f8124,
0x21c640532670ac20, 0x5b1e309b16452559, 0xd476a1c3461bbed2,
0xaeaed10b762e37ab, 0x37deb6af5e9b8b5b, 0x4d06c6676eae0222,
0xc26e573f3ef099a9, 0xb8b627f70ec510d0, 0xf7e653dcc6da3dd4,
0x8d3e2314f6efb4ad, 0x0256b24ca6b12f26, 0x788ec2849684a65f,
0x9cf65a1b368f752e, 0xe62e2ad306bafc57, 0x6946bb8b56e467dc,
0x139ecb4366d1eea5, 0x5ccebf68aecec3a1, 0x2616cfa09efb4ad8,
0xa97e5ef8cea5d153, 0xd3a62e30fe90582a, 0xb0c7b7e3c7593bd8,
0xca1fc72bf76cb2a1, 0x45775673a732292a, 0x3faf26bb9707a053,
0x70ff52905f188d57, 0x0a2722586f2d042e, 0x854fb3003f739fa5,
0xff97c3c80f4616dc, 0x1bef5b57af4dc5ad, 0x61372b9f9f784cd4,
0xee5fbac7cf26d75f, 0x9487ca0fff135e26, 0xdbd7be24370c7322,
0xa10fceec0739fa5b, 0x2e675fb4576761d0, 0x54bf2f7c6752e8a9,
0xcdcf48d84fe75459, 0xb71738107fd2dd20, 0x387fa9482f8c46ab,
0x42a7d9801fb9cfd2, 0x0df7adabd7a6e2d6, 0x772fdd63e7936baf,
0xf8474c3bb7cdf024, 0x829f3cf387f8795d, 0x66e7a46c27f3aa2c,
0x1c3fd4a417c62355, 0x935745fc4798b8de, 0xe98f353477ad31a7,
0xa6df411fbfb21ca3, 0xdc0731d78f8795da, 0x536fa08fdfd90e51,
0x29b7d047efec8728,
};
inline C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA uint64_t
crc64impl(uint64_t accumulator, const char* data, size_t size) {
for (size_t i = 0; i < size; ++i) {
accumulator =
crc64_table[(accumulator ^ data[i]) & 0xFF] ^ (accumulator >> 8);
}
return accumulator;
}
} // namespace detail
struct crc64_t final : IdWrapper<crc64_t, uint64_t> {
constexpr crc64_t(uint64_t checksum) : IdWrapper(checksum) {}
constexpr uint64_t checksum() const {
return this->underlyingId();
}
};
// CRC64 with Jones coefficients and an init value of 0.
inline C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA crc64_t
crc64(const char* str, size_t size) {
return crc64_t{detail::crc64impl(0, str, size)};
}
inline C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA crc64_t crc64(c10::string_view str) {
return crc64(str.data(), str.size());
}
} // namespace util
} // namespace c10
// Allow usage of crc64_t in std::unordered_set
C10_DEFINE_HASH_FOR_IDWRAPPER(c10::util::crc64_t);
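// Illustrative usage sketch (not from the original header). On toolchains
// where C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA expands to constexpr, the checksum
// of a string literal can be evaluated entirely at compile time.
namespace c10_crc64_example {
constexpr c10::util::crc64_t kExampleChecksum = c10::util::crc64("example", 7);
static_assert(
    kExampleChecksum.checksum() == c10::util::crc64("example", 7).checksum(),
    "crc64 is deterministic and usable in constant expressions");
} // namespace c10_crc64_example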
file: pytorch-main/c10/util/Deprecated.h | repo: null | file_length: 3,579 | avg_line_length: 33.757282 | max_line_length: 80 | extension_type: h
#pragma once
/**
* This file provides portable macros for marking declarations
* as deprecated. You should generally use C10_DEPRECATED,
* except when marking 'using' declarations as deprecated,
* in which case you should use C10_DEFINE_DEPRECATED_USING
* (due to portability concerns).
*/
// Sample usage:
//
// C10_DEPRECATED void bad_func();
// struct C10_DEPRECATED BadStruct {
// ...
// };
// NB: __cplusplus doesn't work for MSVC, so for now MSVC always uses
// the "__declspec(deprecated)" implementation and not the C++14
// "[[deprecated]]" attribute. We tried enabling "[[deprecated]]" for C++14 on
// MSVC, but ran into issues with some older MSVC versions.
#if (defined(__cplusplus) && __cplusplus >= 201402L)
#define C10_DEPRECATED [[deprecated]]
#define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]]
#elif defined(__GNUC__)
#define C10_DEPRECATED __attribute__((deprecated))
// TODO Is there some way to implement this?
#define C10_DEPRECATED_MESSAGE(message) __attribute__((deprecated))
#elif defined(_MSC_VER)
#define C10_DEPRECATED __declspec(deprecated)
#define C10_DEPRECATED_MESSAGE(message) __declspec(deprecated(message))
#else
#warning "You need to implement C10_DEPRECATED for this compiler"
#define C10_DEPRECATED
#endif
// Sample usage:
//
// C10_DEFINE_DEPRECATED_USING(BadType, int)
//
// which is the portable version of
//
// using BadType [[deprecated]] = int;
// Technically, the [[deprecated]] syntax is from the C++14 standard, but it
// works in many compilers.
#if defined(__has_cpp_attribute)
#if __has_cpp_attribute(deprecated) && !defined(__CUDACC__)
#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
using TypeName [[deprecated]] = TypeThingy;
#endif
#endif
#if defined(_MSC_VER)
#if defined(__CUDACC__)
// neither [[deprecated]] nor __declspec(deprecated) work on nvcc on Windows;
// you get the error:
//
// error: attribute does not apply to any entity
//
// So we just turn the macro off in this case.
#if defined(C10_DEFINE_DEPRECATED_USING)
#undef C10_DEFINE_DEPRECATED_USING
#endif
#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
using TypeName = TypeThingy;
#else
// [[deprecated]] does work on Windows without nvcc, though MSVC doesn't
// support `__has_cpp_attribute`. We use [[deprecated]] when C++14 is
// supported, and __declspec(deprecated) as the alternative otherwise.
#ifndef C10_DEFINE_DEPRECATED_USING
#if defined(_MSVC_LANG) && _MSVC_LANG >= 201402L
#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
using TypeName [[deprecated]] = TypeThingy;
#else
#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
using TypeName = __declspec(deprecated) TypeThingy;
#endif
#endif
#endif
#endif
#if !defined(C10_DEFINE_DEPRECATED_USING) && defined(__GNUC__)
// nvcc has a bug where it doesn't understand __attribute__((deprecated))
// declarations even when the host compiler supports it. We'll only use this gcc
// attribute when not cuda, and when using a GCC compiler that doesn't support
// the c++14 syntax we checked for above (available in __GNUC__ >= 5)
#if !defined(__CUDACC__)
#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
using TypeName __attribute__((deprecated)) = TypeThingy;
#else
// using cuda + gcc < 5, neither deprecated syntax is available so turning off.
#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
using TypeName = TypeThingy;
#endif
#endif
#if !defined(C10_DEFINE_DEPRECATED_USING)
#warning "You need to implement C10_DEFINE_DEPRECATED_USING for this compiler"
#define C10_DEFINE_DEPRECATED_USING
#endif
file: pytorch-main/c10/util/ExclusivelyOwned.h | repo: null | file_length: 4,494 | avg_line_length: 30.215278 | max_line_length: 80 | extension_type: h
#pragma once
#include <c10/util/in_place.h>
namespace c10 {
// See example implementation in TensorBase.h and TensorBody.h.
// Synopsis:
//
// repr_type -- type to use to store an owned T in ExclusivelyOwned.
//
// pointer_type -- pointer-esque type to return from
// ExclusivelyOwned's get() and operator*() methods.
//
// const_pointer_type -- similar to pointer_type, used for the const methods.
//
// static repr_type nullRepr() -- return a null instance of repr_type.
//
// template <class... Args>
// static repr_type createInPlace(Args&&... args) -- used by the in-place
// ExclusivelyOwned constructor.
//
// static repr_type moveToRepr(T&& x) -- move the given x into an
// instance of repr_type. used by the ExclusivelyOwned(T&&)
// constructor.
//
// static void destroyOwned(repr_type x) -- free memory for a
// known-exclusively-owned instance of x. Replaces calling repr_type's
// destructor. Being able to implement this more efficiently than
// repr_type's destructor is the main reason to use ExclusivelyOwned
// for a type.
//
// static T take(repr_type&) -- move out of the given repr_type into an owned T.
//
// static pointer_type getImpl(const repr_type&) -- return a pointer
// to the given repr_type. May take repr_type by value if that is more
// efficient.
template <typename T>
struct ExclusivelyOwnedTraits;
/// ExclusivelyOwned is a smart-pointer-like wrapper around an
/// exclusively-owned instance of some type T that normally has
/// mandatory reference counting (currently just Tensor). If you have
/// an isolated piece of code that knows that it has sole ownership of
/// an object of one of these types (i.e., because you created it
/// directly or using a factory function) and that object will not
/// escape from that isolated piece of code, then moving the object
/// into an ExclusivelyOwned will avoid an atomic reference count
/// decrement at destruction time.
///
/// If you directly create the Tensor in the first
/// place, you can use the in_place constructor of ExclusivelyOwned to
/// additionally avoid doing any stores to initialize the refcount &
/// weakcount.
template <typename T>
class ExclusivelyOwned {
using EOT = ExclusivelyOwnedTraits<T>;
union {
char dummy_;
typename ExclusivelyOwnedTraits<T>::repr_type repr_;
};
public:
ExclusivelyOwned() : repr_(EOT::nullRepr()) {}
explicit ExclusivelyOwned(T&& t) : repr_(EOT::moveToRepr(std::move(t))) {}
template <class... Args>
explicit ExclusivelyOwned(in_place_t, Args&&... args)
: repr_(EOT::createInPlace(std::forward<Args>(args)...)) {}
ExclusivelyOwned(const ExclusivelyOwned&) = delete;
ExclusivelyOwned(ExclusivelyOwned&& rhs) noexcept
: repr_(std::move(rhs.repr_)) {
rhs.repr_ = EOT::nullRepr();
}
ExclusivelyOwned& operator=(const ExclusivelyOwned&) = delete;
ExclusivelyOwned& operator=(ExclusivelyOwned&& rhs) noexcept {
EOT::destroyOwned(repr_);
repr_ = std::move(rhs.repr_);
rhs.repr_ = EOT::nullRepr();
return *this;
}
ExclusivelyOwned& operator=(T&& rhs) noexcept {
EOT::destroyOwned(repr_);
repr_ = EOT::moveToRepr(std::move(rhs));
return *this;
}
~ExclusivelyOwned() {
EOT::destroyOwned(repr_);
// Don't bother to call the destructor of repr_, since we already
// did specialized destruction for the exclusively-owned case in
// destroyOwned!
}
// We don't provide this because it would require us to be able to
// differentiate an owned-but-empty T from a lack of T. This is
// particularly problematic for Tensor, which wants to use an
// undefined Tensor as its null state.
explicit operator bool() const noexcept = delete;
operator T() && {
return take();
}
// NOTE: the equivalent operation on MaybeOwned is a moving
// operator*. For ExclusivelyOwned, take() and operator*() may well
// have different return types, so they are different functions.
T take() && {
return EOT::take(repr_);
}
typename EOT::const_pointer_type operator->() const {
return get();
}
typename EOT::const_pointer_type get() const {
return EOT::getImpl(repr_);
}
typename EOT::pointer_type operator->() {
return get();
}
typename EOT::pointer_type get() {
return EOT::getImpl(repr_);
}
std::remove_pointer_t<typename EOT::const_pointer_type>& operator*() const {
return *get();
}
std::remove_pointer_t<typename EOT::pointer_type>& operator*() {
return *get();
}
};
} // namespace c10
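// Illustrative sketch (not from the original header): a minimal
// ExclusivelyOwnedTraits specialization for a hypothetical ExampleHandle
// type, following the synopsis above. Real users such as at::Tensor provide a
// destroyOwned that is cheaper than the ordinary destructor.
namespace c10_exclusively_owned_example {
struct ExampleHandle {
  int payload = 0;
};
} // namespace c10_exclusively_owned_example
namespace c10 {
template <>
struct ExclusivelyOwnedTraits<c10_exclusively_owned_example::ExampleHandle> {
  using T = c10_exclusively_owned_example::ExampleHandle;
  using repr_type = T;
  using pointer_type = T*;
  using const_pointer_type = const T*;
  static repr_type nullRepr() {
    return T{};
  }
  template <class... Args>
  static repr_type createInPlace(Args&&... args) {
    return T{std::forward<Args>(args)...};
  }
  static repr_type moveToRepr(T&& x) {
    return std::move(x);
  }
  static void destroyOwned(T&) {
    // Nothing special to do for this toy type; a real implementation frees
    // the owned resource without the usual refcount bookkeeping.
  }
  static T take(repr_type& x) {
    return std::move(x);
  }
  static pointer_type getImpl(repr_type& x) {
    return &x;
  }
  static const_pointer_type getImpl(const repr_type& x) {
    return &x;
  }
};
} // namespace c10
// With the specialization in place, usage looks like:
//   using c10_exclusively_owned_example::ExampleHandle;
//   c10::ExclusivelyOwned<ExampleHandle> owned(ExampleHandle{7});
//   int p = owned->payload;                       // 7
//   ExampleHandle back = std::move(owned).take(); // relinquish ownership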
file: pytorch-main/c10/util/ExclusivelyOwnedTensorTraits.h | repo: null | file_length: 2,152 | avg_line_length: 27.706667 | max_line_length: 80 | extension_type: h
#pragma once
#include <c10/core/TensorImpl.h>
#include <utility>
namespace c10 {
// Shared ExclusivelyOwnedTraits implementation between caffe2::Tensor and
// at::TensorBase.
template <typename TensorType>
struct ExclusivelyOwnedTensorTraits {
using repr_type = TensorType;
using pointer_type = TensorType*;
using const_pointer_type = const TensorType*;
static repr_type nullRepr() {
return TensorType();
}
template <class... Args>
static repr_type createInPlace(Args&&... args) {
return TensorType(std::forward<Args>(args)...);
}
static repr_type moveToRepr(TensorType&& x) {
return std::move(x);
}
static void destroyOwned(TensorType& x) {
TensorImpl* const toDestroy = x.unsafeReleaseTensorImpl();
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
toDestroy != nullptr, "Tensor somehow got null TensorImpl?");
// May be 0 because UndefinedTensorImpl doesn't get its refcount
// incremented.
const bool isUndefined = toDestroy == UndefinedTensorImpl::singleton();
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
toDestroy->refcount_ == 1 || (toDestroy->refcount_ == 0 && isUndefined),
"ExclusivelyOwned<Tensor> destroyed with isUndefined ",
isUndefined,
" and refcount ",
toDestroy->refcount_,
", expected 1 or, if isUndefined, 0!");
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
toDestroy->weakcount_ == 1 ||
(toDestroy->weakcount_ == 0 &&
toDestroy == UndefinedTensorImpl::singleton()),
"ExclusivelyOwned<Tensor> destroyed with isUndefined ",
isUndefined,
" and weakcount ",
toDestroy->weakcount_,
", expected 1 or, if isUndefined, 0!");
if (!isUndefined) {
#ifndef NDEBUG
// Needed to pass the debug assertions in ~intrusive_ptr_target.
toDestroy->refcount_ = 0;
toDestroy->weakcount_ = 0;
#endif
delete toDestroy;
}
}
static TensorType take(TensorType& x) {
return std::move(x);
}
static pointer_type getImpl(repr_type& x) {
return &x;
}
static const_pointer_type getImpl(const repr_type& x) {
return &x;
}
};
} // namespace c10
file: pytorch-main/c10/util/FbcodeMaps.h | repo: null | file_length: 728 | avg_line_length: 23.3 | max_line_length: 69 | extension_type: h
#ifndef C10_UTIL_FBCODEMAPS_H_
#define C10_UTIL_FBCODEMAPS_H_
// Map typedefs so that we can use folly's F14 maps in fbcode without
// taking a folly dependency.
#ifdef FBCODE_CAFFE2
#include <folly/container/F14Map.h>
#include <folly/container/F14Set.h>
#else
#include <unordered_map>
#include <unordered_set>
#endif
namespace c10 {
#ifdef FBCODE_CAFFE2
template <typename Key, typename Value>
using FastMap = folly::F14FastMap<Key, Value>;
template <typename Key>
using FastSet = folly::F14FastSet<Key>;
#else
template <typename Key, typename Value>
using FastMap = std::unordered_map<Key, Value>;
template <typename Key>
using FastSet = std::unordered_set<Key>;
#endif
} // namespace c10
#endif // C10_UTIL_FBCODEMAPS_H_
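// Illustrative usage sketch (not from the original header): FastMap/FastSet
// are drop-in aliases, so call sites look identical under both backends.
namespace c10_fastmap_example {
inline bool contains_example(const c10::FastSet<int>& values, int key) {
  return values.count(key) != 0;
}
} // namespace c10_fastmap_example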
file: pytorch-main/c10/util/Flags.h | repo: null | file_length: 10,054 | avg_line_length: 43.295154 | max_line_length: 80 | extension_type: h
#ifndef C10_UTIL_FLAGS_H_
#define C10_UTIL_FLAGS_H_
/* Commandline flags support for C10.
*
* This is a portable commandline flags tool for c10, so we can optionally
* choose to use gflags or a lightweight custom implementation if gflags is
* not possible on a certain platform. If you have gflags installed, setting
* the macro C10_USE_GFLAGS will seamlessly route everything to gflags.
*
* To define a flag foo of type bool default to true, do the following in the
* *global* namespace:
* C10_DEFINE_bool(foo, true, "An example.");
*
* To use it in another .cc file, you can use C10_DECLARE_* as follows:
* C10_DECLARE_bool(foo);
*
* In both cases, you can then access the flag via FLAGS_foo.
*
* It is recommended that you build with gflags. To learn more about the flags
* usage, refer to the gflags page here:
*
* https://gflags.github.io/gflags/
*
* Note for Python users / devs: gflags is initialized from a C++ function,
* ParseCommandLineFlags, which is usually called from main() in native
* binaries. As Python does not have a modifiable main function, it is usually
* difficult to change the flags after Python starts. Hence, it is recommended
* that one sets the default value of each flag to one that is acceptable in
* general - that will allow Python to run without wrong flags.
*/
#include <string>
#include <c10/macros/Macros.h>
#include <c10/util/Registry.h>
namespace c10 {
/**
* Sets the usage message when a commandline tool is called with "--help".
*/
C10_API void SetUsageMessage(const std::string& str);
/**
* Returns the usage message for the commandline tool set by SetUsageMessage.
*/
C10_API const char* UsageMessage();
/**
* Parses the commandline flags.
*
* This function parses all the commandline arguments passed in via pargc
* and pargv. Once it is finished, pargc and pargv will contain the remaining
* commandline args that c10 does not deal with. Note that following
* convention, argv[0] contains the binary name and is not parsed.
*/
C10_API bool ParseCommandLineFlags(int* pargc, char*** pargv);
/**
* Checks if the commandline flags have already been parsed.
*/
C10_API bool CommandLineFlagsHasBeenParsed();
} // namespace c10
////////////////////////////////////////////////////////////////////////////////
// Below are gflags and non-gflags specific implementations.
// In general, they define the following macros for one to declare (use
// C10_DECLARE) or define (use C10_DEFINE) flags:
// C10_{DECLARE,DEFINE}_{int,int64,double,bool,string}
////////////////////////////////////////////////////////////////////////////////
#ifdef C10_USE_GFLAGS
////////////////////////////////////////////////////////////////////////////////
// Begin gflags section: most functions are basically rerouted to gflags.
////////////////////////////////////////////////////////////////////////////////
#include <gflags/gflags.h>
// C10 uses hidden visibility by default. However, gflags only uses export on
// the Windows platform (with dllexport) and not on linux/mac (where it relies
// on default visibility). As a result, to ensure that we are always exporting
// global variables, we will redefine the GFLAGS_DLL_DEFINE_FLAG macro if we
// are building C10 as a shared library.
// This has to be done after the inclusion of gflags, because some early
// versions of gflags.h (e.g. 2.0 on ubuntu 14.04) directly defines the
// macros, so we need to do definition after gflags is done.
#ifdef GFLAGS_DLL_DEFINE_FLAG
#undef GFLAGS_DLL_DEFINE_FLAG
#endif // GFLAGS_DLL_DEFINE_FLAG
#ifdef GFLAGS_DLL_DECLARE_FLAG
#undef GFLAGS_DLL_DECLARE_FLAG
#endif // GFLAGS_DLL_DECLARE_FLAG
#define GFLAGS_DLL_DEFINE_FLAG C10_EXPORT
#define GFLAGS_DLL_DECLARE_FLAG C10_IMPORT
// gflags before 2.0 uses namespace google and after 2.1 uses namespace gflags.
// Using GFLAGS_GFLAGS_H_ to capture this change.
#ifndef GFLAGS_GFLAGS_H_
namespace gflags = google;
#endif // GFLAGS_GFLAGS_H_
// Motivation about the gflags wrapper:
// (1) We would need to make sure that the gflags version and the non-gflags
// version of C10 are going to expose the same flags abstraction. One should
// explicitly use FLAGS_flag_name to access the flags.
// (2) For flag names, it is recommended to start with c10_ to distinguish it
// from regular gflags flags. For example, do
// C10_DEFINE_bool(c10_my_flag, true, "An example");
// to allow one to use FLAGS_c10_my_flag.
// (3) Gflags has a design issue in that it does not properly expose the global flags
// if one builds the library with -fvisibility=hidden. The current gflags (as of
// Aug 2018) only deals with the Windows case using dllexport, and not the Linux
// counterparts. As a result, we will explicitly use C10_EXPORT to export the
// flags defined in C10. This is done via a global reference, so the flag
// itself is not duplicated - under the hood it is the same global gflags flag.
#define C10_GFLAGS_DEF_WRAPPER(type, real_type, name, default_value, help_str) \
DEFINE_##type(name, default_value, help_str);
#define C10_DEFINE_int(name, default_value, help_str) \
C10_GFLAGS_DEF_WRAPPER(int32, gflags::int32, name, default_value, help_str)
#define C10_DEFINE_int32(name, default_value, help_str) \
C10_DEFINE_int(name, default_value, help_str)
#define C10_DEFINE_int64(name, default_value, help_str) \
C10_GFLAGS_DEF_WRAPPER(int64, gflags::int64, name, default_value, help_str)
#define C10_DEFINE_double(name, default_value, help_str) \
C10_GFLAGS_DEF_WRAPPER(double, double, name, default_value, help_str)
#define C10_DEFINE_bool(name, default_value, help_str) \
C10_GFLAGS_DEF_WRAPPER(bool, bool, name, default_value, help_str)
#define C10_DEFINE_string(name, default_value, help_str) \
C10_GFLAGS_DEF_WRAPPER(string, ::fLS::clstring, name, default_value, help_str)
// DECLARE_typed_var should be used in header files and in the global namespace.
#define C10_GFLAGS_DECLARE_WRAPPER(type, real_type, name) DECLARE_##type(name);
#define C10_DECLARE_int(name) \
C10_GFLAGS_DECLARE_WRAPPER(int32, gflags::int32, name)
#define C10_DECLARE_int32(name) C10_DECLARE_int(name)
#define C10_DECLARE_int64(name) \
C10_GFLAGS_DECLARE_WRAPPER(int64, gflags::int64, name)
#define C10_DECLARE_double(name) \
C10_GFLAGS_DECLARE_WRAPPER(double, double, name)
#define C10_DECLARE_bool(name) C10_GFLAGS_DECLARE_WRAPPER(bool, bool, name)
#define C10_DECLARE_string(name) \
C10_GFLAGS_DECLARE_WRAPPER(string, ::fLS::clstring, name)
////////////////////////////////////////////////////////////////////////////////
// End gflags section.
////////////////////////////////////////////////////////////////////////////////
#else // C10_USE_GFLAGS
////////////////////////////////////////////////////////////////////////////////
// Begin non-gflags section: providing equivalent functionality.
////////////////////////////////////////////////////////////////////////////////
namespace c10 {
class C10_API C10FlagParser {
public:
bool success() {
return success_;
}
protected:
template <typename T>
bool Parse(const std::string& content, T* value);
bool success_{false};
};
C10_DECLARE_REGISTRY(C10FlagsRegistry, C10FlagParser, const std::string&);
} // namespace c10
// The macros are defined outside the c10 namespace. In your code, you should
// write the C10_DEFINE_* and C10_DECLARE_* macros outside any namespace
// as well.
#define C10_DEFINE_typed_var(type, name, default_value, help_str) \
C10_EXPORT type FLAGS_##name = default_value; \
namespace c10 { \
namespace { \
class C10FlagParser_##name : public C10FlagParser { \
public: \
explicit C10FlagParser_##name(const std::string& content) { \
success_ = C10FlagParser::Parse<type>(content, &FLAGS_##name); \
} \
}; \
} \
RegistererC10FlagsRegistry g_C10FlagsRegistry_##name( \
#name, \
C10FlagsRegistry(), \
RegistererC10FlagsRegistry::DefaultCreator<C10FlagParser_##name>, \
"(" #type ", default " #default_value ") " help_str); \
}
#define C10_DEFINE_int(name, default_value, help_str) \
C10_DEFINE_typed_var(int, name, default_value, help_str)
#define C10_DEFINE_int32(name, default_value, help_str) \
C10_DEFINE_int(name, default_value, help_str)
#define C10_DEFINE_int64(name, default_value, help_str) \
C10_DEFINE_typed_var(int64_t, name, default_value, help_str)
#define C10_DEFINE_double(name, default_value, help_str) \
C10_DEFINE_typed_var(double, name, default_value, help_str)
#define C10_DEFINE_bool(name, default_value, help_str) \
C10_DEFINE_typed_var(bool, name, default_value, help_str)
#define C10_DEFINE_string(name, default_value, help_str) \
C10_DEFINE_typed_var(std::string, name, default_value, help_str)
// DECLARE_typed_var should be used in header files and in the global namespace.
#define C10_DECLARE_typed_var(type, name) C10_API extern type FLAGS_##name
#define C10_DECLARE_int(name) C10_DECLARE_typed_var(int, name)
#define C10_DECLARE_int32(name) C10_DECLARE_int(name)
#define C10_DECLARE_int64(name) C10_DECLARE_typed_var(int64_t, name)
#define C10_DECLARE_double(name) C10_DECLARE_typed_var(double, name)
#define C10_DECLARE_bool(name) C10_DECLARE_typed_var(bool, name)
#define C10_DECLARE_string(name) C10_DECLARE_typed_var(std::string, name)
////////////////////////////////////////////////////////////////////////////////
// End non-gflags section.
////////////////////////////////////////////////////////////////////////////////
#endif // C10_USE_GFLAGS
#endif // C10_UTIL_FLAGS_H_
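// Illustrative usage sketch (not from the original header): a hypothetical
// tool defines a flag in its .cc file and parses the command line in main().
// The flag name c10_example_verbose is made up for this example.
C10_DEFINE_bool(c10_example_verbose, false, "Enable verbose example output.");

int main(int argc, char** argv) {
  c10::SetUsageMessage("example_tool [options]");
  if (!c10::ParseCommandLineFlags(&argc, &argv)) {
    return 1;
  }
  if (FLAGS_c10_example_verbose) {
    // ... print extra diagnostics ...
  }
  return 0;
}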
file: pytorch-main/c10/util/FunctionRef.h | repo: null | file_length: 2,293 | avg_line_length: 30.424658 | max_line_length: 80 | extension_type: h
//===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains some templates that are useful if you are working with the
// STL at all.
//
// No library is required when using these functions.
//
//===----------------------------------------------------------------------===//
// c10: modified from llvm::function_ref
// c10: added more SFINAE to enable use in overloaded functions
#pragma once
#include <c10/util/C++17.h> // for c10::invoke_result_t, used below
#include <cstdint>
#include <type_traits>
#include <utility>
namespace c10 {
/// An efficient, type-erasing, non-owning reference to a callable. This is
/// intended for use as the type of a function parameter that is not used
/// after the function in question returns.
///
/// This class does not own the callable, so it is not in general safe to store
/// a function_ref.
template <typename Fn>
class function_ref;
template <typename Ret, typename... Params>
class function_ref<Ret(Params...)> {
Ret (*callback)(intptr_t callable, Params... params) = nullptr;
intptr_t callable;
template <typename Callable>
static Ret callback_fn(intptr_t callable, Params... params) {
return (*reinterpret_cast<Callable*>(callable))(std::forward<Params>(
params)...);
}
public:
function_ref() = default;
function_ref(std::nullptr_t) {}
template <typename Callable>
function_ref(
Callable&& callable,
typename std::enable_if<!std::is_same<
typename std::remove_reference<Callable>::type,
function_ref>::value>::type* = nullptr,
typename std::enable_if<std::is_convertible<
typename c10::invoke_result_t<Callable, Params...>,
Ret>::value>::type* = nullptr)
: callback(callback_fn<typename std::remove_reference<Callable>::type>),
callable(reinterpret_cast<intptr_t>(&callable)) {}
Ret operator()(Params... params) const {
return callback(callable, std::forward<Params>(params)...);
}
operator bool() const {
return callback;
}
};
} // namespace c10
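// Illustrative usage sketch (not from the original header): function_ref
// type-erases a callable without owning it, which makes it a good fit for
// parameters that are only used for the duration of the call.
namespace c10_function_ref_example {
inline int apply_twice(c10::function_ref<int(int)> f, int x) {
  return f(f(x));
}
inline int run_example() {
  // The temporary lambda outlives the call, so the non-owning reference is
  // safe here.
  return apply_twice([](int v) { return v + 1; }, 40); // 42
}
} // namespace c10_function_ref_example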
file: pytorch-main/c10/util/Half-inl.h | repo: null | file_length: 9,355 | avg_line_length: 27.43769 | max_line_length: 80 | extension_type: h
#pragma once
#include <c10/macros/Macros.h>
#include <c10/util/bit_cast.h>
#include <cstring>
#include <limits>
#ifdef __CUDACC__
#include <cuda_fp16.h>
#endif
#ifdef __HIPCC__
#include <hip/hip_fp16.h>
#endif
#if defined(CL_SYCL_LANGUAGE_VERSION)
#include <CL/sycl.hpp> // for SYCL 1.2.1
#elif defined(SYCL_LANGUAGE_VERSION)
#include <sycl/sycl.hpp> // for SYCL 2020
#endif
C10_CLANG_DIAGNOSTIC_PUSH()
#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
#endif
namespace c10 {
/// Constructors
inline C10_HOST_DEVICE Half::Half(float value)
:
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
x(__half_as_short(__float2half(value)))
#elif defined(__SYCL_DEVICE_ONLY__)
x(c10::bit_cast<uint16_t>(sycl::half(value)))
#else
x(detail::fp16_ieee_from_fp32_value(value))
#endif
{
}
/// Implicit conversions
inline C10_HOST_DEVICE Half::operator float() const {
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
return __half2float(*reinterpret_cast<const __half*>(&x));
#elif defined(__SYCL_DEVICE_ONLY__)
return float(c10::bit_cast<sycl::half>(x));
#else
return detail::fp16_ieee_to_fp32_value(x);
#endif
}
#if defined(__CUDACC__) || defined(__HIPCC__)
inline C10_HOST_DEVICE Half::Half(const __half& value) {
x = *reinterpret_cast<const unsigned short*>(&value);
}
inline C10_HOST_DEVICE Half::operator __half() const {
return *reinterpret_cast<const __half*>(&x);
}
#endif
#ifdef SYCL_LANGUAGE_VERSION
inline C10_HOST_DEVICE Half::Half(const sycl::half& value) {
x = *reinterpret_cast<const unsigned short*>(&value);
}
inline C10_HOST_DEVICE Half::operator sycl::half() const {
return *reinterpret_cast<const sycl::half*>(&x);
}
#endif
// CUDA intrinsics
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 350)) || \
(defined(__clang__) && defined(__CUDA__))
inline __device__ Half __ldg(const Half* ptr) {
return __ldg(reinterpret_cast<const __half*>(ptr));
}
#endif
/// Arithmetic
inline C10_HOST_DEVICE Half operator+(const Half& a, const Half& b) {
return static_cast<float>(a) + static_cast<float>(b);
}
inline C10_HOST_DEVICE Half operator-(const Half& a, const Half& b) {
return static_cast<float>(a) - static_cast<float>(b);
}
inline C10_HOST_DEVICE Half operator*(const Half& a, const Half& b) {
return static_cast<float>(a) * static_cast<float>(b);
}
inline C10_HOST_DEVICE Half operator/(const Half& a, const Half& b)
__ubsan_ignore_float_divide_by_zero__ {
return static_cast<float>(a) / static_cast<float>(b);
}
inline C10_HOST_DEVICE Half operator-(const Half& a) {
#if (defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530) || \
defined(__HIP_DEVICE_COMPILE__)
return __hneg(a);
#elif defined(__SYCL_DEVICE_ONLY__)
return -c10::bit_cast<sycl::half>(a);
#else
return -static_cast<float>(a);
#endif
}
inline C10_HOST_DEVICE Half& operator+=(Half& a, const Half& b) {
a = a + b;
return a;
}
inline C10_HOST_DEVICE Half& operator-=(Half& a, const Half& b) {
a = a - b;
return a;
}
inline C10_HOST_DEVICE Half& operator*=(Half& a, const Half& b) {
a = a * b;
return a;
}
inline C10_HOST_DEVICE Half& operator/=(Half& a, const Half& b) {
a = a / b;
return a;
}
/// Arithmetic with floats
inline C10_HOST_DEVICE float operator+(Half a, float b) {
return static_cast<float>(a) + b;
}
inline C10_HOST_DEVICE float operator-(Half a, float b) {
return static_cast<float>(a) - b;
}
inline C10_HOST_DEVICE float operator*(Half a, float b) {
return static_cast<float>(a) * b;
}
inline C10_HOST_DEVICE float operator/(Half a, float b)
__ubsan_ignore_float_divide_by_zero__ {
return static_cast<float>(a) / b;
}
inline C10_HOST_DEVICE float operator+(float a, Half b) {
return a + static_cast<float>(b);
}
inline C10_HOST_DEVICE float operator-(float a, Half b) {
return a - static_cast<float>(b);
}
inline C10_HOST_DEVICE float operator*(float a, Half b) {
return a * static_cast<float>(b);
}
inline C10_HOST_DEVICE float operator/(float a, Half b)
__ubsan_ignore_float_divide_by_zero__ {
return a / static_cast<float>(b);
}
inline C10_HOST_DEVICE float& operator+=(float& a, const Half& b) {
return a += static_cast<float>(b);
}
inline C10_HOST_DEVICE float& operator-=(float& a, const Half& b) {
return a -= static_cast<float>(b);
}
inline C10_HOST_DEVICE float& operator*=(float& a, const Half& b) {
return a *= static_cast<float>(b);
}
inline C10_HOST_DEVICE float& operator/=(float& a, const Half& b) {
return a /= static_cast<float>(b);
}
/// Arithmetic with doubles
inline C10_HOST_DEVICE double operator+(Half a, double b) {
return static_cast<double>(a) + b;
}
inline C10_HOST_DEVICE double operator-(Half a, double b) {
return static_cast<double>(a) - b;
}
inline C10_HOST_DEVICE double operator*(Half a, double b) {
return static_cast<double>(a) * b;
}
inline C10_HOST_DEVICE double operator/(Half a, double b)
__ubsan_ignore_float_divide_by_zero__ {
return static_cast<double>(a) / b;
}
inline C10_HOST_DEVICE double operator+(double a, Half b) {
return a + static_cast<double>(b);
}
inline C10_HOST_DEVICE double operator-(double a, Half b) {
return a - static_cast<double>(b);
}
inline C10_HOST_DEVICE double operator*(double a, Half b) {
return a * static_cast<double>(b);
}
inline C10_HOST_DEVICE double operator/(double a, Half b)
__ubsan_ignore_float_divide_by_zero__ {
return a / static_cast<double>(b);
}
/// Arithmetic with ints
inline C10_HOST_DEVICE Half operator+(Half a, int b) {
return a + static_cast<Half>(b);
}
inline C10_HOST_DEVICE Half operator-(Half a, int b) {
return a - static_cast<Half>(b);
}
inline C10_HOST_DEVICE Half operator*(Half a, int b) {
return a * static_cast<Half>(b);
}
inline C10_HOST_DEVICE Half operator/(Half a, int b) {
return a / static_cast<Half>(b);
}
inline C10_HOST_DEVICE Half operator+(int a, Half b) {
return static_cast<Half>(a) + b;
}
inline C10_HOST_DEVICE Half operator-(int a, Half b) {
return static_cast<Half>(a) - b;
}
inline C10_HOST_DEVICE Half operator*(int a, Half b) {
return static_cast<Half>(a) * b;
}
inline C10_HOST_DEVICE Half operator/(int a, Half b) {
return static_cast<Half>(a) / b;
}
//// Arithmetic with int64_t
inline C10_HOST_DEVICE Half operator+(Half a, int64_t b) {
return a + static_cast<Half>(b);
}
inline C10_HOST_DEVICE Half operator-(Half a, int64_t b) {
return a - static_cast<Half>(b);
}
inline C10_HOST_DEVICE Half operator*(Half a, int64_t b) {
return a * static_cast<Half>(b);
}
inline C10_HOST_DEVICE Half operator/(Half a, int64_t b) {
return a / static_cast<Half>(b);
}
inline C10_HOST_DEVICE Half operator+(int64_t a, Half b) {
return static_cast<Half>(a) + b;
}
inline C10_HOST_DEVICE Half operator-(int64_t a, Half b) {
return static_cast<Half>(a) - b;
}
inline C10_HOST_DEVICE Half operator*(int64_t a, Half b) {
return static_cast<Half>(a) * b;
}
inline C10_HOST_DEVICE Half operator/(int64_t a, Half b) {
return static_cast<Half>(a) / b;
}
/// NOTE: we do not define comparisons directly and instead rely on the implicit
/// conversion from c10::Half to float.
} // namespace c10
namespace std {
template <>
class numeric_limits<c10::Half> {
public:
static constexpr bool is_specialized = true;
static constexpr bool is_signed = true;
static constexpr bool is_integer = false;
static constexpr bool is_exact = false;
static constexpr bool has_infinity = true;
static constexpr bool has_quiet_NaN = true;
static constexpr bool has_signaling_NaN = true;
static constexpr auto has_denorm = numeric_limits<float>::has_denorm;
static constexpr auto has_denorm_loss =
numeric_limits<float>::has_denorm_loss;
static constexpr auto round_style = numeric_limits<float>::round_style;
static constexpr bool is_iec559 = true;
static constexpr bool is_bounded = true;
static constexpr bool is_modulo = false;
static constexpr int digits = 11;
static constexpr int digits10 = 3;
static constexpr int max_digits10 = 5;
static constexpr int radix = 2;
static constexpr int min_exponent = -13;
static constexpr int min_exponent10 = -4;
static constexpr int max_exponent = 16;
static constexpr int max_exponent10 = 4;
static constexpr auto traps = numeric_limits<float>::traps;
static constexpr auto tinyness_before =
numeric_limits<float>::tinyness_before;
static constexpr c10::Half min() {
return c10::Half(0x0400, c10::Half::from_bits());
}
static constexpr c10::Half lowest() {
return c10::Half(0xFBFF, c10::Half::from_bits());
}
static constexpr c10::Half max() {
return c10::Half(0x7BFF, c10::Half::from_bits());
}
static constexpr c10::Half epsilon() {
return c10::Half(0x1400, c10::Half::from_bits());
}
static constexpr c10::Half round_error() {
return c10::Half(0x3800, c10::Half::from_bits());
}
static constexpr c10::Half infinity() {
return c10::Half(0x7C00, c10::Half::from_bits());
}
static constexpr c10::Half quiet_NaN() {
return c10::Half(0x7E00, c10::Half::from_bits());
}
static constexpr c10::Half signaling_NaN() {
return c10::Half(0x7D00, c10::Half::from_bits());
}
static constexpr c10::Half denorm_min() {
return c10::Half(0x0001, c10::Half::from_bits());
}
};
} // namespace std
C10_CLANG_DIAGNOSTIC_POP()
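// Illustrative usage sketch (not from the original headers): Half arithmetic
// is carried out in float and converted back, so mixed expressions compile
// and round the way the equivalent float computation would.
namespace c10_half_example {
inline c10::Half example_axpy(c10::Half a, c10::Half x, float y) {
  // Half * Half yields Half; Half + float promotes to float, and the result
  // is narrowed back to Half on return.
  return a * x + y;
}
} // namespace c10_half_example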
file: pytorch-main/c10/util/Half.h | repo: null
#pragma once
/// Defines the Half type (half-precision floating-point) including conversions
/// to standard C types and basic arithmetic operations. Note that arithmetic
/// operations are implemented by converting to floating point and
/// performing the operation in float32, instead of using CUDA half intrinsics.
/// Most uses of this type within ATen are memory bound, including the
/// element-wise kernels, and the half intrinsics aren't efficient on all GPUs.
/// If you are writing a compute bound kernel, you can use the CUDA half
/// intrinsics directly on the Half type from device code.
#include <c10/macros/Macros.h>
#include <c10/util/C++17.h>
#include <c10/util/TypeSafeSignMath.h>
#include <c10/util/complex.h>
#include <type_traits>
#if defined(__cplusplus) && (__cplusplus >= 201103L)
#include <cmath>
#include <cstdint>
#elif !defined(__OPENCL_VERSION__)
#include <math.h>
#include <stdint.h>
#endif
#ifdef _MSC_VER
#include <intrin.h>
#endif
#include <complex>
#include <cstdint>
#include <cstring>
#include <iosfwd>
#include <limits>
#include <sstream>
#include <stdexcept>
#include <string>
#include <utility>
#ifdef __CUDACC__
#include <cuda_fp16.h>
#endif
#ifdef __HIPCC__
#include <hip/hip_fp16.h>
#endif
#if defined(CL_SYCL_LANGUAGE_VERSION)
#include <CL/sycl.hpp> // for SYCL 1.2.1
#elif defined(SYCL_LANGUAGE_VERSION)
#include <sycl/sycl.hpp> // for SYCL 2020
#endif
// Standard check for compiling CUDA with clang
#if defined(__clang__) && defined(__CUDA__) && defined(__CUDA_ARCH__)
#define C10_DEVICE_HOST_FUNCTION __device__ __host__
#else
#define C10_DEVICE_HOST_FUNCTION
#endif
#include <typeinfo> // operator typeid
namespace c10 {
namespace detail {
C10_DEVICE_HOST_FUNCTION inline float fp32_from_bits(uint32_t w) {
#if defined(__OPENCL_VERSION__)
return as_float(w);
#elif defined(__CUDA_ARCH__)
return __uint_as_float((unsigned int)w);
#elif defined(__INTEL_COMPILER)
return _castu32_f32(w);
#else
union {
uint32_t as_bits;
float as_value;
} fp32 = {w};
return fp32.as_value;
#endif
}
C10_DEVICE_HOST_FUNCTION inline uint32_t fp32_to_bits(float f) {
#if defined(__OPENCL_VERSION__)
return as_uint(f);
#elif defined(__CUDA_ARCH__)
return (uint32_t)__float_as_uint(f);
#elif defined(__INTEL_COMPILER)
return _castf32_u32(f);
#else
union {
float as_value;
uint32_t as_bits;
} fp32 = {f};
return fp32.as_bits;
#endif
}
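// Minimal sketch (illustrative only): the two helpers above round-trip a
// float through its IEEE-754 bit pattern; 0x3F800000 is the single-precision
// encoding of 1.0f.
inline bool fp32_bits_roundtrip_example() {
  return fp32_from_bits(UINT32_C(0x3F800000)) == 1.0f &&
      fp32_to_bits(1.0f) == UINT32_C(0x3F800000);
}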
/*
* Convert a 16-bit floating-point number in IEEE half-precision format, in bit
* representation, to a 32-bit floating-point number in IEEE single-precision
* format, in bit representation.
*
* @note The implementation doesn't use any floating-point operations.
*/
inline uint32_t fp16_ieee_to_fp32_bits(uint16_t h) {
/*
* Extend the half-precision floating-point number to 32 bits and shift to the
* upper part of the 32-bit word:
* +---+-----+------------+-------------------+
* | S |EEEEE|MM MMMM MMMM|0000 0000 0000 0000|
* +---+-----+------------+-------------------+
* Bits 31 26-30 16-25 0-15
*
* S - sign bit, E - bits of the biased exponent, M - bits of the mantissa, 0
* - zero bits.
*/
const uint32_t w = (uint32_t)h << 16;
/*
* Extract the sign of the input number into the high bit of the 32-bit word:
*
* +---+----------------------------------+
* | S |0000000 00000000 00000000 00000000|
* +---+----------------------------------+
* Bits 31 0-31
*/
const uint32_t sign = w & UINT32_C(0x80000000);
/*
* Extract mantissa and biased exponent of the input number into the bits 0-30
* of the 32-bit word:
*
* +---+-----+------------+-------------------+
* | 0 |EEEEE|MM MMMM MMMM|0000 0000 0000 0000|
* +---+-----+------------+-------------------+
* Bits 30 27-31 17-26 0-16
*/
const uint32_t nonsign = w & UINT32_C(0x7FFFFFFF);
/*
* Renorm shift is the number of bits to shift mantissa left to make the
* half-precision number normalized. If the initial number is normalized, some
* of its high 6 bits (sign == 0 and 5-bit exponent) equal one. In this case
* renorm_shift == 0. If the number is denormalized, renorm_shift > 0. Note
* that if we shift denormalized nonsign by renorm_shift, the unit bit of
* mantissa will shift into exponent, turning the biased exponent into 1, and
* making mantissa normalized (i.e. without leading 1).
*/
#ifdef _MSC_VER
unsigned long nonsign_bsr;
_BitScanReverse(&nonsign_bsr, (unsigned long)nonsign);
uint32_t renorm_shift = (uint32_t)nonsign_bsr ^ 31;
#else
uint32_t renorm_shift = __builtin_clz(nonsign);
#endif
renorm_shift = renorm_shift > 5 ? renorm_shift - 5 : 0;
/*
* Iff half-precision number has exponent of 15, the addition overflows
* it into bit 31, and the subsequent shift turns the high 9 bits
* into 1. Thus inf_nan_mask == 0x7F800000 if the half-precision number
* had an exponent of 15 (i.e. was NaN or infinity), and 0x00000000 otherwise.
*/
const int32_t inf_nan_mask =
((int32_t)(nonsign + 0x04000000) >> 8) & INT32_C(0x7F800000);
/*
* Iff nonsign is 0, it overflows into 0xFFFFFFFF, turning bit 31
* into 1. Otherwise, bit 31 remains 0. The signed shift right by 31
* broadcasts bit 31 into all bits of the zero_mask. Thus zero_mask ==
* 0xFFFFFFFF if the half-precision number was zero (+0.0h or -0.0h), and
* 0x00000000 otherwise.
*/
const int32_t zero_mask = (int32_t)(nonsign - 1) >> 31;
/*
* 1. Shift nonsign left by renorm_shift to normalize it (if the input
* was denormal)
* 2. Shift nonsign right by 3 so the exponent (5 bits originally)
* becomes an 8-bit field and 10-bit mantissa shifts into the 10 high
* bits of the 23-bit mantissa of IEEE single-precision number.
* 3. Add 0x70 to the exponent (starting at bit 23) to compensate for the
* difference in exponent bias (0x7F for single-precision number less 0xF
* for half-precision number).
* 4. Subtract renorm_shift from the exponent (starting at bit 23) to
* account for renormalization. As renorm_shift is less than 0x70, this
* can be combined with step 3.
* 5. Binary OR with inf_nan_mask to turn the exponent into 0xFF if the
* input was NaN or infinity.
* 6. Binary ANDNOT with zero_mask to turn the mantissa and exponent
* into zero if the input was zero.
* 7. Combine with the sign of the input number.
*/
return sign |
((((nonsign << renorm_shift >> 3) + ((0x70 - renorm_shift) << 23)) |
inf_nan_mask) &
~zero_mask);
}
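/*
 * Example (a sketch of the expected behavior): 0x3C00 is 1.0 in IEEE
 * half-precision and 0x3F800000 is 1.0f in IEEE single-precision.
 *
 *   uint32_t bits = fp16_ieee_to_fp32_bits(UINT16_C(0x3C00));
 *   // bits == UINT32_C(0x3F800000)
 */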
/*
* Convert a 16-bit floating-point number in IEEE half-precision format, in bit
* representation, to a 32-bit floating-point number in IEEE single-precision
* format.
*
* @note The implementation relies on IEEE-like (no assumption about rounding
* mode and no operations on denormals) floating-point operations and bitcasts
* between integer and floating-point variables.
*/
inline float fp16_ieee_to_fp32_value(uint16_t h) {
/*
* Extend the half-precision floating-point number to 32 bits and shift to the
* upper part of the 32-bit word:
* +---+-----+------------+-------------------+
* | S |EEEEE|MM MMMM MMMM|0000 0000 0000 0000|
* +---+-----+------------+-------------------+
* Bits 31 26-30 16-25 0-15
*
* S - sign bit, E - bits of the biased exponent, M - bits of the mantissa, 0
* - zero bits.
*/
const uint32_t w = (uint32_t)h << 16;
/*
* Extract the sign of the input number into the high bit of the 32-bit word:
*
* +---+----------------------------------+
* | S |0000000 00000000 00000000 00000000|
* +---+----------------------------------+
* Bits 31 0-31
*/
const uint32_t sign = w & UINT32_C(0x80000000);
/*
* Extract mantissa and biased exponent of the input number into the high bits
* of the 32-bit word:
*
* +-----+------------+---------------------+
* |EEEEE|MM MMMM MMMM|0 0000 0000 0000 0000|
* +-----+------------+---------------------+
* Bits 27-31 17-26 0-16
*/
const uint32_t two_w = w + w;
/*
* Shift mantissa and exponent into bits 23-28 and bits 13-22 so they become
* mantissa and exponent of a single-precision floating-point number:
*
* S|Exponent | Mantissa
* +-+---+-----+------------+----------------+
* |0|000|EEEEE|MM MMMM MMMM|0 0000 0000 0000|
* +-+---+-----+------------+----------------+
* Bits | 23-31 | 0-22
*
* Next, there are some adjustments to the exponent:
* - The exponent needs to be corrected by the difference in exponent bias
* between single-precision and half-precision formats (0x7F - 0xF = 0x70)
* - Inf and NaN values in the inputs should become Inf and NaN values after
* conversion to the single-precision number. Therefore, if the biased
* exponent of the half-precision input was 0x1F (max possible value), the
* biased exponent of the single-precision output must be 0xFF (max possible
* value). We do this correction in two steps:
* - First, we adjust the exponent by (0xFF - 0x1F) = 0xE0 (see exp_offset
* below) rather than by 0x70 suggested by the difference in the exponent bias
* (see above).
* - Then we multiply the single-precision result of exponent adjustment by
* 2**(-112) to reverse the effect of exponent adjustment by 0xE0 less the
* necessary exponent adjustment by 0x70 due to difference in exponent bias.
 * The floating-point multiplication hardware would ensure that Inf and
* NaN would retain their value on at least partially IEEE754-compliant
* implementations.
*
* Note that the above operations do not handle denormal inputs (where biased
* exponent == 0). However, they also do not operate on denormal inputs, and
* do not produce denormal results.
*/
constexpr uint32_t exp_offset = UINT32_C(0xE0) << 23;
// const float exp_scale = 0x1.0p-112f;
constexpr uint32_t scale_bits = (uint32_t)15 << 23;
float exp_scale_val;
std::memcpy(&exp_scale_val, &scale_bits, sizeof(exp_scale_val));
const float exp_scale = exp_scale_val;
const float normalized_value =
fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;
/*
* Convert denormalized half-precision inputs into single-precision results
* (always normalized). Zero inputs are also handled here.
*
* In a denormalized number the biased exponent is zero, and mantissa has
 * non-zero bits. First, we shift mantissa into bits 0-9 of the 32-bit word.
*
* zeros | mantissa
* +---------------------------+------------+
* |0000 0000 0000 0000 0000 00|MM MMMM MMMM|
* +---------------------------+------------+
* Bits 10-31 0-9
*
* Now, remember that denormalized half-precision numbers are represented as:
* FP16 = mantissa * 2**(-24).
* The trick is to construct a normalized single-precision number with the
 * same mantissa as the half-precision input and with an exponent which would
* scale the corresponding mantissa bits to 2**(-24). A normalized
* single-precision floating-point number is represented as: FP32 = (1 +
* mantissa * 2**(-23)) * 2**(exponent - 127) Therefore, when the biased
* exponent is 126, a unit change in the mantissa of the input denormalized
* half-precision number causes a change of the constructed single-precision
* number by 2**(-24), i.e. the same amount.
*
* The last step is to adjust the bias of the constructed single-precision
* number. When the input half-precision number is zero, the constructed
* single-precision number has the value of FP32 = 1 * 2**(126 - 127) =
* 2**(-1) = 0.5 Therefore, we need to subtract 0.5 from the constructed
* single-precision number to get the numerical equivalent of the input
* half-precision number.
*/
constexpr uint32_t magic_mask = UINT32_C(126) << 23;
constexpr float magic_bias = 0.5f;
const float denormalized_value =
fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;
/*
* - Choose either results of conversion of input as a normalized number, or
* as a denormalized number, depending on the input exponent. The variable
 * two_w contains the input exponent in bits 27-31, therefore if it is smaller
 * than 2**27, the input is either a denormal number or zero.
* - Combine the result of conversion of exponent and mantissa with the sign
* of the input number.
*/
constexpr uint32_t denormalized_cutoff = UINT32_C(1) << 27;
const uint32_t result = sign |
(two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value)
: fp32_to_bits(normalized_value));
return fp32_from_bits(result);
}
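/*
 * Example (a sketch of the expected behavior):
 *
 *   float one = fp16_ieee_to_fp32_value(UINT16_C(0x3C00)); // 1.0f
 *   float inf = fp16_ieee_to_fp32_value(UINT16_C(0x7C00)); // +infinity
 *   float tiny = fp16_ieee_to_fp32_value(UINT16_C(0x0001)); // 2**(-24), a denormal half
 */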
/*
* Convert a 32-bit floating-point number in IEEE single-precision format to a
* 16-bit floating-point number in IEEE half-precision format, in bit
* representation.
*
* @note The implementation relies on IEEE-like (no assumption about rounding
* mode and no operations on denormals) floating-point operations and bitcasts
* between integer and floating-point variables.
*/
inline uint16_t fp16_ieee_from_fp32_value(float f) {
// const float scale_to_inf = 0x1.0p+112f;
// const float scale_to_zero = 0x1.0p-110f;
constexpr uint32_t scale_to_inf_bits = (uint32_t)239 << 23;
constexpr uint32_t scale_to_zero_bits = (uint32_t)17 << 23;
float scale_to_inf_val, scale_to_zero_val;
std::memcpy(&scale_to_inf_val, &scale_to_inf_bits, sizeof(scale_to_inf_val));
std::memcpy(
&scale_to_zero_val, &scale_to_zero_bits, sizeof(scale_to_zero_val));
const float scale_to_inf = scale_to_inf_val;
const float scale_to_zero = scale_to_zero_val;
#if defined(_MSC_VER) && _MSC_VER == 1916
float base = ((signbit(f) != 0 ? -f : f) * scale_to_inf) * scale_to_zero;
#else
float base = (fabsf(f) * scale_to_inf) * scale_to_zero;
#endif
const uint32_t w = fp32_to_bits(f);
const uint32_t shl1_w = w + w;
const uint32_t sign = w & UINT32_C(0x80000000);
uint32_t bias = shl1_w & UINT32_C(0xFF000000);
if (bias < UINT32_C(0x71000000)) {
bias = UINT32_C(0x71000000);
}
base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
const uint32_t bits = fp32_to_bits(base);
const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
const uint32_t nonsign = exp_bits + mantissa_bits;
return static_cast<uint16_t>(
(sign >> 16) |
(shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign));
}
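/*
 * Example (a sketch of the expected round-trip behavior):
 *
 *   uint16_t h = fp16_ieee_from_fp32_value(1.0f); // UINT16_C(0x3C00)
 *   float back = fp16_ieee_to_fp32_value(h);      // 1.0f
 *
 * Finite inputs far beyond the half-precision range map to infinity (0x7C00),
 * and NaN inputs map to a half-precision NaN (0x7E00), as handled by the
 * final select above.
 */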
} // namespace detail
struct alignas(2) Half {
unsigned short x;
struct from_bits_t {};
C10_HOST_DEVICE static constexpr from_bits_t from_bits() {
return from_bits_t();
}
// HIP wants __host__ __device__ tag, CUDA does not
#if defined(USE_ROCM)
C10_HOST_DEVICE Half() = default;
#else
Half() = default;
#endif
constexpr C10_HOST_DEVICE Half(unsigned short bits, from_bits_t) : x(bits){};
inline C10_HOST_DEVICE Half(float value);
inline C10_HOST_DEVICE operator float() const;
#if defined(__CUDACC__) || defined(__HIPCC__)
inline C10_HOST_DEVICE Half(const __half& value);
inline C10_HOST_DEVICE operator __half() const;
#endif
#ifdef SYCL_LANGUAGE_VERSION
inline C10_HOST_DEVICE Half(const sycl::half& value);
inline C10_HOST_DEVICE operator sycl::half() const;
#endif
};
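// A minimal usage sketch (the float conversions are defined in Half-inl.h):
//
//   c10::Half h = 1.5f; // converts through the Half(float) constructor
//   float f = static_cast<float>(h); // back to float
//   c10::Half one(0x3C00, c10::Half::from_bits()); // construct from raw bits (1.0)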
// TODO : move to complex.h
template <>
struct alignas(4) complex<Half> {
Half real_;
Half imag_;
// Constructors
complex() = default;
// Half constructor is not constexpr so the following constructor can't
// be constexpr
C10_HOST_DEVICE explicit inline complex(const Half& real, const Half& imag)
: real_(real), imag_(imag) {}
C10_HOST_DEVICE inline complex(const c10::complex<float>& value)
: real_(value.real()), imag_(value.imag()) {}
// Conversion operator
inline C10_HOST_DEVICE operator c10::complex<float>() const {
return {real_, imag_};
}
constexpr C10_HOST_DEVICE Half real() const {
return real_;
}
constexpr C10_HOST_DEVICE Half imag() const {
return imag_;
}
C10_HOST_DEVICE complex<Half>& operator+=(const complex<Half>& other) {
real_ = static_cast<float>(real_) + static_cast<float>(other.real_);
imag_ = static_cast<float>(imag_) + static_cast<float>(other.imag_);
return *this;
}
C10_HOST_DEVICE complex<Half>& operator-=(const complex<Half>& other) {
real_ = static_cast<float>(real_) - static_cast<float>(other.real_);
imag_ = static_cast<float>(imag_) - static_cast<float>(other.imag_);
return *this;
}
C10_HOST_DEVICE complex<Half>& operator*=(const complex<Half>& other) {
auto a = static_cast<float>(real_);
auto b = static_cast<float>(imag_);
auto c = static_cast<float>(other.real());
auto d = static_cast<float>(other.imag());
real_ = a * c - b * d;
imag_ = a * d + b * c;
return *this;
}
};
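// A minimal usage sketch (arithmetic is performed in float and rounded back
// to Half by the compound operators above):
//
//   c10::complex<c10::Half> a(c10::Half(1.0f), c10::Half(2.0f));
//   c10::complex<c10::Half> b(c10::Half(3.0f), c10::Half(4.0f));
//   a *= b; // a is now (1*3 - 2*4) + (1*4 + 2*3)i == -5 + 10i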
// In some versions of MSVC, there will be a compiler error when building.
// C4146: unary minus operator applied to unsigned type, result still unsigned
// C4804: unsafe use of type 'bool' in operation
// It can be addressed by disabling the following warning.
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4146)
#pragma warning(disable : 4804)
#pragma warning(disable : 4018)
#endif
// The overflow checks may involve float to int conversion which may
// trigger precision loss warning. Re-enable the warning once the code
// is fixed. See T58053069.
#ifdef __clang__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunknown-warning-option"
#pragma GCC diagnostic ignored "-Wimplicit-int-float-conversion"
#endif
// bool can be converted to any type.
// Without specializing on bool, in pytorch_linux_trusty_py2_7_9_build:
// `error: comparison of constant '255' with boolean expression is always false`
// for `f > limit::max()` below
template <typename To, typename From>
typename std::enable_if<std::is_same<From, bool>::value, bool>::type overflows(
From /*f*/) {
return false;
}
// skip isnan and isinf check for integral types
template <typename To, typename From>
typename std::enable_if<
std::is_integral<From>::value && !std::is_same<From, bool>::value,
bool>::type
overflows(From f) {
using limit = std::numeric_limits<typename scalar_value_type<To>::type>;
if (!limit::is_signed && std::numeric_limits<From>::is_signed) {
// allow for negative numbers to wrap using two's complement arithmetic.
// For example, with uint8, this allows for `a - b` to be treated as
// `a + 255 * b`.
return greater_than_max<To>(f) ||
(c10::is_negative(f) && -static_cast<uint64_t>(f) > limit::max());
} else {
return c10::less_than_lowest<To>(f) || greater_than_max<To>(f);
}
}
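// Example (a sketch of the intended semantics for integral sources):
//
//   overflows<uint8_t>(256); // true: larger than the maximum of uint8_t
//   overflows<uint8_t>(-1); // false: negative values are allowed to wrap
//   overflows<uint8_t>(-300); // true: too large in magnitude even after wrapping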
template <typename To, typename From>
typename std::enable_if<std::is_floating_point<From>::value, bool>::type
overflows(From f) {
using limit = std::numeric_limits<typename scalar_value_type<To>::type>;
if (limit::has_infinity && std::isinf(static_cast<double>(f))) {
return false;
}
if (!limit::has_quiet_NaN && (f != f)) {
return true;
}
return f < limit::lowest() || f > limit::max();
}
#ifdef __clang__
#pragma GCC diagnostic pop
#endif
#ifdef _MSC_VER
#pragma warning(pop)
#endif
template <typename To, typename From>
typename std::enable_if<is_complex<From>::value, bool>::type overflows(From f) {
// casts from complex to real are considered to overflow if the
// imaginary component is non-zero
if (!is_complex<To>::value && f.imag() != 0) {
return true;
}
// Check for overflow componentwise
// (Technically, the imag overflow check is guaranteed to be false
// when !is_complex<To>, but any optimizer worth its salt will be
// able to figure it out.)
return overflows<
typename scalar_value_type<To>::type,
typename From::value_type>(f.real()) ||
overflows<
typename scalar_value_type<To>::type,
typename From::value_type>(f.imag());
}
C10_API std::ostream& operator<<(std::ostream& out, const Half& value);
} // namespace c10
#include <c10/util/Half-inl.h> // IWYU pragma: keep
| 20,342
| 36.122263
| 80
|
h
|
null |
pytorch-main/c10/util/IdWrapper.h
|
#pragma once
#include <c10/macros/Macros.h>
#include <cstddef>
#include <functional>
#include <utility>
namespace c10 {
/**
* This template simplifies generation of simple classes that wrap an id
* in a typesafe way. Namely, you can use it to create a very lightweight
* type that only offers equality comparators and hashing. Example:
*
* struct MyIdType final : IdWrapper<MyIdType, uint32_t> {
* constexpr explicit MyIdType(uint32_t id): IdWrapper(id) {}
* };
*
* Then in the global top level namespace:
*
* C10_DEFINE_HASH_FOR_IDWRAPPER(MyIdType);
*
* That's it - equality operators and hash functions are automatically defined
 * for you, provided the underlying type supports it.
*/
template <class ConcreteType, class UnderlyingType>
class IdWrapper {
public:
using underlying_type = UnderlyingType;
using concrete_type = ConcreteType;
protected:
constexpr explicit IdWrapper(underlying_type id) noexcept(
noexcept(underlying_type(std::declval<underlying_type>())))
: id_(id) {}
constexpr underlying_type underlyingId() const
noexcept(noexcept(underlying_type(std::declval<underlying_type>()))) {
return id_;
}
private:
friend size_t hash_value(const concrete_type& v) {
return std::hash<underlying_type>()(v.id_);
}
// TODO Making operator== noexcept if underlying type is noexcept equality
// comparable doesn't work with GCC 4.8.
// Fix this once we don't need GCC 4.8 anymore.
friend constexpr bool operator==(
const concrete_type& lhs,
const concrete_type& rhs) noexcept {
return lhs.id_ == rhs.id_;
}
// TODO Making operator!= noexcept if operator== is noexcept doesn't work with
// GCC 4.8.
// Fix this once we don't need GCC 4.8 anymore.
friend constexpr bool operator!=(
const concrete_type& lhs,
const concrete_type& rhs) noexcept {
return !(lhs == rhs);
}
underlying_type id_;
};
} // namespace c10
#define C10_DEFINE_HASH_FOR_IDWRAPPER(ClassName) \
namespace std { \
template <> \
struct hash<ClassName> { \
size_t operator()(ClassName x) const { \
return hash_value(x); \
} \
}; \
}
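// A minimal usage sketch, building on the MyIdType example from the class
// comment above (MyIdType is a hypothetical wrapper type):
//
//   std::unordered_set<MyIdType> seen;
//   seen.insert(MyIdType(1));
//   bool found = seen.count(MyIdType(1)) != 0; // true; uses the generated hash and operator==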
| 2,367
| 28.974684
| 80
|
h
|
null |
pytorch-main/c10/util/LeftRight.h
|
#include <c10/macros/Macros.h>
#include <c10/util/C++17.h>
#include <c10/util/Synchronized.h>
#include <array>
#include <atomic>
#include <mutex>
#include <thread>
namespace c10 {
namespace detail {
struct IncrementRAII final {
public:
explicit IncrementRAII(std::atomic<int32_t>* counter) : _counter(counter) {
_counter->fetch_add(1);
}
~IncrementRAII() {
_counter->fetch_sub(1);
}
private:
std::atomic<int32_t>* _counter;
C10_DISABLE_COPY_AND_ASSIGN(IncrementRAII);
};
} // namespace detail
// LeftRight wait-free readers synchronization primitive
// https://hal.archives-ouvertes.fr/hal-01207881/document
//
// LeftRight is quite easy to use (it can make an arbitrary
// data structure permit wait-free reads), but it has some
// particular performance characteristics you should be aware
// of if you're deciding to use it:
//
// - Reads still incur an atomic write (this is how LeftRight
// keeps track of how long it needs to keep around the old
// data structure)
//
// - Writes get executed twice, to keep both the left and right
// versions up to date. So if your write is expensive or
// nondeterministic, this is also an inappropriate structure
//
// LeftRight is used fairly rarely in PyTorch's codebase. If you
// are still not sure if you need it or not, consult your local
// C++ expert.
//
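// A minimal usage sketch:
//
//   c10::LeftRight<std::vector<int>> lr;
//   lr.write([](std::vector<int>& v) { v.push_back(42); }); // executed twice, once per copy
//   int front = lr.read([](const std::vector<int>& v) { return v.front(); }); // 42
//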
template <class T>
class LeftRight final {
public:
template <class... Args>
explicit LeftRight(const Args&... args)
: _counters{{{0}, {0}}},
_foregroundCounterIndex(0),
_foregroundDataIndex(0),
_data{{T{args...}, T{args...}}},
_writeMutex() {}
// Copying and moving would not be threadsafe.
// Needs more thought and careful design to make that work.
LeftRight(const LeftRight&) = delete;
LeftRight(LeftRight&&) noexcept = delete;
LeftRight& operator=(const LeftRight&) = delete;
LeftRight& operator=(LeftRight&&) noexcept = delete;
~LeftRight() {
// wait until any potentially running writers are finished
{ std::unique_lock<std::mutex> lock(_writeMutex); }
// wait until any potentially running readers are finished
while (_counters[0].load() != 0 || _counters[1].load() != 0) {
std::this_thread::yield();
}
}
template <typename F>
auto read(F&& readFunc) const -> typename c10::invoke_result_t<F, const T&> {
detail::IncrementRAII _increment_counter(
&_counters[_foregroundCounterIndex.load()]);
return readFunc(_data[_foregroundDataIndex.load()]);
}
// Throwing an exception in writeFunc is ok but causes the state to be either
// the old or the new state, depending on if the first or the second call to
// writeFunc threw.
template <typename F>
auto write(F&& writeFunc) -> typename c10::invoke_result_t<F, T&> {
std::unique_lock<std::mutex> lock(_writeMutex);
return _write(writeFunc);
}
private:
template <class F>
auto _write(const F& writeFunc) -> typename c10::invoke_result_t<F, T&> {
/*
 * Assume A is in the background and B in the foreground. In simplified terms, we
* want to do the following:
* 1. Write to A (old background)
* 2. Switch A/B
* 3. Write to B (new background)
*
* More detailed algorithm (explanations on why this is important are below
* in code):
* 1. Write to A
* 2. Switch A/B data pointers
* 3. Wait until A counter is zero
* 4. Switch A/B counters
* 5. Wait until B counter is zero
* 6. Write to B
*/
auto localDataIndex = _foregroundDataIndex.load();
// 1. Write to A
_callWriteFuncOnBackgroundInstance(writeFunc, localDataIndex);
// 2. Switch A/B data pointers
localDataIndex = localDataIndex ^ 1;
_foregroundDataIndex = localDataIndex;
/*
* 3. Wait until A counter is zero
*
* In the previous write run, A was foreground and B was background.
* There was a time after switching _foregroundDataIndex (B to foreground)
* and before switching _foregroundCounterIndex, in which new readers could
* have read B but incremented A's counter.
*
* In this current run, we just switched _foregroundDataIndex (A back to
* foreground), but before writing to the new background B, we have to make
* sure A's counter was zero briefly, so all these old readers are gone.
*/
auto localCounterIndex = _foregroundCounterIndex.load();
_waitForBackgroundCounterToBeZero(localCounterIndex);
/*
* 4. Switch A/B counters
*
* Now that we know all readers on B are really gone, we can switch the
* counters and have new readers increment A's counter again, which is the
* correct counter since they're reading A.
*/
localCounterIndex = localCounterIndex ^ 1;
_foregroundCounterIndex = localCounterIndex;
/*
* 5. Wait until B counter is zero
*
* This waits for all the readers on B that came in while both data and
* counter for B was in foreground, i.e. normal readers that happened
* outside of that brief gap between switching data and counter.
*/
_waitForBackgroundCounterToBeZero(localCounterIndex);
// 6. Write to B
return _callWriteFuncOnBackgroundInstance(writeFunc, localDataIndex);
}
template <class F>
auto _callWriteFuncOnBackgroundInstance(
const F& writeFunc,
uint8_t localDataIndex) -> typename c10::invoke_result_t<F, T&> {
try {
return writeFunc(_data[localDataIndex ^ 1]);
} catch (...) {
// recover invariant by copying from the foreground instance
_data[localDataIndex ^ 1] = _data[localDataIndex];
// rethrow
throw;
}
}
void _waitForBackgroundCounterToBeZero(uint8_t counterIndex) {
while (_counters[counterIndex ^ 1].load() != 0) {
std::this_thread::yield();
}
}
mutable std::array<std::atomic<int32_t>, 2> _counters;
std::atomic<uint8_t> _foregroundCounterIndex;
std::atomic<uint8_t> _foregroundDataIndex;
std::array<T, 2> _data;
std::mutex _writeMutex;
};
// RWSafeLeftRightWrapper is API compatible with LeftRight and uses a
// read-write lock to protect T (data).
template <class T>
class RWSafeLeftRightWrapper final {
public:
template <class... Args>
explicit RWSafeLeftRightWrapper(const Args&... args) : data_{args...} {}
// RWSafeLeftRightWrapper is not copyable or moveable since LeftRight
// is not copyable or moveable.
RWSafeLeftRightWrapper(const RWSafeLeftRightWrapper&) = delete;
RWSafeLeftRightWrapper(RWSafeLeftRightWrapper&&) noexcept = delete;
RWSafeLeftRightWrapper& operator=(const RWSafeLeftRightWrapper&) = delete;
RWSafeLeftRightWrapper& operator=(RWSafeLeftRightWrapper&&) noexcept = delete;
template <typename F>
auto read(F&& readFunc) const -> typename c10::invoke_result_t<F, const T&> {
return data_.withLock(
[&readFunc](T const& data) { return readFunc(data); });
}
template <typename F>
auto write(F&& writeFunc) -> typename c10::invoke_result_t<F, T&> {
return data_.withLock([&writeFunc](T& data) { return writeFunc(data); });
}
private:
c10::Synchronized<T> data_;
};
} // namespace c10
| 7,157
| 31.243243
| 80
|
h
|
null |
pytorch-main/c10/util/Load.h
|
#pragma once
#include <c10/macros/Macros.h>
#include <cstring>
namespace c10 {
namespace detail {
template <typename T>
struct LoadImpl {
C10_HOST_DEVICE static T apply(const void* src) {
return *reinterpret_cast<const T*>(src);
}
};
template <>
struct LoadImpl<bool> {
C10_HOST_DEVICE static bool apply(const void* src) {
static_assert(sizeof(bool) == sizeof(char), "");
// NOTE: [Loading boolean values]
// Protect against invalid boolean values by loading as a byte
// first, then converting to bool (see gh-54789).
return *reinterpret_cast<const unsigned char*>(src);
}
};
} // namespace detail
template <typename T>
C10_HOST_DEVICE T load(const void* src) {
return c10::detail::LoadImpl<T>::apply(src);
}
template <typename scalar_t>
C10_HOST_DEVICE scalar_t load(const scalar_t* src) {
return c10::detail::LoadImpl<scalar_t>::apply(src);
}
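// Example (a sketch of why the bool specialization above matters, see gh-54789):
//
//   unsigned char raw = 0xFF; // not a valid object representation of bool
//   bool b = c10::load<bool>(&raw); // loads as a byte first, yields true
//   float x = 1.5f;
//   float y = c10::load(&x); // equivalent to *(&x) for ordinary types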
} // namespace c10
| 908
| 22.307692
| 66
|
h
|
null |
pytorch-main/c10/util/Logging.h
|
#ifndef C10_UTIL_LOGGING_H_
#define C10_UTIL_LOGGING_H_
#include <climits>
#include <exception>
#include <functional>
#include <limits>
#include <sstream>
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#include <c10/util/Flags.h>
#include <c10/util/StringUtil.h>
// CAFFE2_LOG_THRESHOLD is a compile time flag that would allow us to turn off
// logging at compile time so no logging message below that level is produced
// at all. The value should be between INT_MIN and CAFFE_FATAL.
#ifndef CAFFE2_LOG_THRESHOLD
// If we have not defined the compile time log threshold, we keep all the
// log cases.
#define CAFFE2_LOG_THRESHOLD INT_MIN
#endif // CAFFE2_LOG_THRESHOLD
// Below are different implementations for glog and non-glog cases.
#ifdef C10_USE_GLOG
#include <c10/util/logging_is_google_glog.h>
#else // !C10_USE_GLOG
#include <c10/util/logging_is_not_google_glog.h>
#endif // C10_USE_GLOG
C10_DECLARE_int(caffe2_log_level);
C10_DECLARE_bool(caffe2_use_fatal_for_enforce);
// Some versions of GLOG support a less-spammy version of LOG_EVERY_MS. If it's
// not available, just short-circuit to the always-working one.
// We define the C10_ name to avoid confusing other files
#ifdef LOG_EVERY_MS
#define C10_LOG_EVERY_MS(severity, ms) LOG_EVERY_MS(severity, ms)
#else
#define C10_LOG_EVERY_MS(severity, ms) LOG(severity)
#endif
// Same for LOG_FIRST_N
#ifdef LOG_FIRST_N
#define C10_LOG_FIRST_N(severity, n) LOG_FIRST_N(severity, n)
#else
#define C10_LOG_FIRST_N(severity, n) LOG(severity)
#endif
// Same for LOG_EVERY_N
#ifdef LOG_EVERY_N
#define C10_LOG_EVERY_N(severity, n) LOG_EVERY_N(severity, n)
#else
#define C10_LOG_EVERY_N(severity, n) LOG(severity)
#endif
namespace c10 {
using std::string;
// Functions that we use for initialization.
C10_API bool InitCaffeLogging(int* argc, char** argv);
C10_API void UpdateLoggingLevelsFromFlags();
[[noreturn]] C10_API void ThrowEnforceNotMet(
const char* file,
const int line,
const char* condition,
const std::string& msg,
const void* caller = nullptr);
[[noreturn]] C10_API void ThrowEnforceNotMet(
const char* file,
const int line,
const char* condition,
const char* msg,
const void* caller = nullptr);
[[noreturn]] C10_API inline void ThrowEnforceNotMet(
const char* file,
const int line,
const char* condition,
detail::CompileTimeEmptyString /*msg*/,
const void* caller = nullptr) {
ThrowEnforceNotMet(file, line, condition, "", caller);
}
[[noreturn]] C10_API void ThrowEnforceFiniteNotMet(
const char* file,
const int line,
const char* condition,
const std::string& msg,
const void* caller = nullptr);
[[noreturn]] C10_API void ThrowEnforceFiniteNotMet(
const char* file,
const int line,
const char* condition,
const char* msg,
const void* caller = nullptr);
[[noreturn]] C10_API inline void ThrowEnforceFiniteNotMet(
const char* file,
const int line,
const char* condition,
detail::CompileTimeEmptyString /*msg*/,
const void* caller = nullptr) {
ThrowEnforceFiniteNotMet(file, line, condition, "", caller);
}
constexpr bool IsUsingGoogleLogging() {
#ifdef C10_USE_GLOG
return true;
#else
return false;
#endif
}
/**
* A utility to allow one to show log info to stderr after the program starts.
*
* This is similar to calling GLOG's --logtostderr, or setting caffe2_log_level
* to smaller than INFO. You are recommended to only use this in a few sparse
* cases, such as when you want to write a tutorial or something. Normally, use
* the commandline flags to set the log level.
*/
C10_API void ShowLogInfoToStderr();
C10_API void SetStackTraceFetcher(std::function<string(void)> fetcher);
using EnforceNotMet = ::c10::Error;
#define CAFFE_ENFORCE(condition, ...) \
do { \
if (C10_UNLIKELY(!(condition))) { \
::c10::ThrowEnforceNotMet( \
__FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__)); \
} \
} while (false)
#define CAFFE_ENFORCE_FINITE(condition, ...) \
do { \
if (C10_UNLIKELY(!(condition))) { \
::c10::ThrowEnforceFiniteNotMet( \
__FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__)); \
} \
} while (false)
#define CAFFE_ENFORCE_WITH_CALLER(condition, ...) \
do { \
if (C10_UNLIKELY(!(condition))) { \
::c10::ThrowEnforceNotMet( \
__FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__), this); \
} \
} while (false)
#define CAFFE_THROW(...) \
::c10::ThrowEnforceNotMet(__FILE__, __LINE__, "", ::c10::str(__VA_ARGS__))
/**
* Rich logging messages
*
* CAFFE_ENFORCE_THAT can be used with one of the "checker functions" that
* capture input argument values and add it to the exception message. E.g.
* `CAFFE_ENFORCE_THAT(Equals(foo(x), bar(y)), "Optional additional message")`
* would evaluate both foo and bar only once and if the results are not equal -
* include them in the exception message.
*
* Some of the basic checker functions like Equals or Greater are already
 * defined below. Other headers might define customized checkers by adding
* functions to caffe2::enforce_detail namespace. For example:
*
* namespace caffe2 { namespace enforce_detail {
* inline EnforceFailMessage IsVector(const vector<int64_t>& shape) {
* if (shape.size() == 1) { return EnforceOK(); }
* return c10::str("Shape ", shape, " is not a vector");
* }
* }}
*
* With further usages like `CAFFE_ENFORCE_THAT(IsVector(Input(0).dims()))`
*
* Convenient wrappers for binary operations like CAFFE_ENFORCE_EQ are provided
* too. Please use them instead of TORCH_CHECK_EQ and friends for failures in
* user-provided input.
*/
namespace enforce_detail {
template <typename T1, typename T2>
std::string enforceFailMsgImpl(const T1& x, const T2& y) {
return c10::str(x, " vs ", y);
}
template <typename T1, typename T2, typename... Args>
std::string enforceFailMsgImpl(const T1& x, const T2& y, const Args&... args) {
return c10::str(x, " vs ", y, ". ", args...);
}
template <typename Pred, typename T1, typename T2, typename... Args>
void enforceThatImpl(
Pred p,
const T1& lhs,
const T2& rhs,
const char* file,
int line,
const char* expr,
const void* caller,
const Args&... args) {
if (C10_UNLIKELY(!(p(lhs, rhs)))) {
::c10::ThrowEnforceNotMet(
file,
line,
expr,
::c10::enforce_detail::enforceFailMsgImpl(lhs, rhs, args...),
caller);
}
}
#define CAFFE_ENFORCE_THAT_IMPL(op, lhs, rhs, expr, ...) \
::c10::enforce_detail::enforceThatImpl( \
op, lhs, rhs, __FILE__, __LINE__, expr, nullptr, ##__VA_ARGS__)
#define CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER(op, lhs, rhs, expr, ...) \
::c10::enforce_detail::enforceThatImpl( \
op, (lhs), (rhs), __FILE__, __LINE__, expr, this, ##__VA_ARGS__)
} // namespace enforce_detail
#define CAFFE_ENFORCE_THAT(cmp, op, lhs, rhs, ...) \
CAFFE_ENFORCE_THAT_IMPL(cmp, lhs, rhs, #lhs " " #op " " #rhs, ##__VA_ARGS__)
#define CAFFE_ENFORCE_BINARY_OP(cmp, op, x, y, ...) \
CAFFE_ENFORCE_THAT_IMPL(cmp, x, y, #x " " #op " " #y, ##__VA_ARGS__)
#define CAFFE_ENFORCE_EQ(x, y, ...) \
CAFFE_ENFORCE_BINARY_OP(std::equal_to<void>(), ==, x, y, ##__VA_ARGS__)
#define CAFFE_ENFORCE_NE(x, y, ...) \
CAFFE_ENFORCE_BINARY_OP(std::not_equal_to<void>(), !=, x, y, ##__VA_ARGS__)
#define CAFFE_ENFORCE_LE(x, y, ...) \
CAFFE_ENFORCE_BINARY_OP(std::less_equal<void>(), <=, x, y, ##__VA_ARGS__)
#define CAFFE_ENFORCE_LT(x, y, ...) \
CAFFE_ENFORCE_BINARY_OP(std::less<void>(), <, x, y, ##__VA_ARGS__)
#define CAFFE_ENFORCE_GE(x, y, ...) \
CAFFE_ENFORCE_BINARY_OP(std::greater_equal<void>(), >=, x, y, ##__VA_ARGS__)
#define CAFFE_ENFORCE_GT(x, y, ...) \
CAFFE_ENFORCE_BINARY_OP(std::greater<void>(), >, x, y, ##__VA_ARGS__)
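// Example usage of the enforce macros (a sketch with hypothetical variables;
// on failure they throw c10::Error, a.k.a. EnforceNotMet, with the captured
// values in the message):
//
//   CAFFE_ENFORCE(tensor_defined, "tensor must be defined");
//   CAFFE_ENFORCE_EQ(x_size, y_size, "size mismatch between x and y");
//   CAFFE_ENFORCE_GT(batch_size, 0);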
#define CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(cmp, op, x, y, ...) \
CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER( \
cmp, x, y, #x " " #op " " #y, ##__VA_ARGS__)
#define CAFFE_ENFORCE_EQ_WITH_CALLER(x, y, ...) \
CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( \
std::equal_to<void>(), ==, x, y, ##__VA_ARGS__)
#define CAFFE_ENFORCE_NE_WITH_CALLER(x, y, ...) \
CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( \
std::not_equal_to<void>(), !=, x, y, ##__VA_ARGS__)
#define CAFFE_ENFORCE_LE_WITH_CALLER(x, y, ...) \
CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( \
std::less_equal<void>(), <=, x, y, ##__VA_ARGS__)
#define CAFFE_ENFORCE_LT_WITH_CALLER(x, y, ...) \
CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(std::less<void>(), <, x, y, ##__VA_ARGS__)
#define CAFFE_ENFORCE_GE_WITH_CALLER(x, y, ...) \
CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( \
std::greater_equal<void>(), >=, x, y, ##__VA_ARGS__)
#define CAFFE_ENFORCE_GT_WITH_CALLER(x, y, ...) \
CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( \
std::greater<void>(), >, x, y, ##__VA_ARGS__)
/**
 * Very lightweight logging for first-time API usage. It's useful for tracking
 * usage of individual pieces of functionality in larger applications.
 *
 * To keep the logging lightweight, we utilize the static variable trick -
 * LogAPIUsage will be invoked only once and further invocations will just do
 * an atomic check.
*
* Example:
* // Logs caller info with an arbitrary text event, if there is a usage.
* C10_LOG_API_USAGE_ONCE("my_api");
*/
#define C10_LOG_API_USAGE_ONCE(...) \
C10_UNUSED static bool C10_ANONYMOUS_VARIABLE(logFlag) = \
::c10::detail::LogAPIUsageFakeReturn(__VA_ARGS__);
// API usage logging capabilities
C10_API void SetAPIUsageLogger(std::function<void(const std::string&)> logger);
C10_API void LogAPIUsage(const std::string& context);
C10_API void SetAPIUsageMetadataLogger(
std::function<void(
const std::string&,
const std::map<std::string, std::string>& metadata_map)> logger);
C10_API void LogAPIUsageMetadata(
const std::string& context,
const std::map<std::string, std::string>& metadata_map);
// PyTorch ddp usage logging capabilities
// DDPLoggingData holds data that can be logged in applications
// for analysis and debugging. Data structure is defined in
// c10 directory so that it can be easily imported by both c10
// and torch files.
struct DDPLoggingData {
// logging fields that are string types.
std::map<std::string, std::string> strs_map;
// logging fields that are int64_t types.
std::map<std::string, int64_t> ints_map;
};
C10_API void SetPyTorchDDPUsageLogger(
std::function<void(const DDPLoggingData&)> logger);
C10_API void LogPyTorchDDPUsage(const DDPLoggingData& ddpData);
namespace detail {
// Return value is needed to do the static variable initialization trick
C10_API bool LogAPIUsageFakeReturn(const std::string& context);
} // namespace detail
// Initializes the c10 logger.
C10_API void initLogging();
} // namespace c10
#endif // C10_UTIL_LOGGING_H_
| 11,624
| 35.102484
| 80
|
h
|
null |
pytorch-main/c10/util/MathConstants.h
|
#pragma once
#include <c10/macros/Macros.h>
#include <c10/util/BFloat16.h>
#include <c10/util/Half.h>
C10_CLANG_DIAGNOSTIC_PUSH()
#if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion")
C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-float-conversion")
#endif
namespace c10 {
// TODO: Replace me with inline constexpr variable when C++17 becomes available
namespace detail {
template <typename T>
C10_HOST_DEVICE inline constexpr T e() {
return static_cast<T>(2.718281828459045235360287471352662);
}
template <typename T>
C10_HOST_DEVICE inline constexpr T euler() {
return static_cast<T>(0.577215664901532860606512090082402);
}
template <typename T>
C10_HOST_DEVICE inline constexpr T frac_1_pi() {
return static_cast<T>(0.318309886183790671537767526745028);
}
template <typename T>
C10_HOST_DEVICE inline constexpr T frac_1_sqrt_pi() {
return static_cast<T>(0.564189583547756286948079451560772);
}
template <typename T>
C10_HOST_DEVICE inline constexpr T frac_sqrt_2() {
return static_cast<T>(0.707106781186547524400844362104849);
}
template <typename T>
C10_HOST_DEVICE inline constexpr T frac_sqrt_3() {
return static_cast<T>(0.577350269189625764509148780501957);
}
template <typename T>
C10_HOST_DEVICE inline constexpr T golden_ratio() {
return static_cast<T>(1.618033988749894848204586834365638);
}
template <typename T>
C10_HOST_DEVICE inline constexpr T ln_10() {
return static_cast<T>(2.302585092994045684017991454684364);
}
template <typename T>
C10_HOST_DEVICE inline constexpr T ln_2() {
return static_cast<T>(0.693147180559945309417232121458176);
}
template <typename T>
C10_HOST_DEVICE inline constexpr T log_10_e() {
return static_cast<T>(0.434294481903251827651128918916605);
}
template <typename T>
C10_HOST_DEVICE inline constexpr T log_2_e() {
return static_cast<T>(1.442695040888963407359924681001892);
}
template <typename T>
C10_HOST_DEVICE inline constexpr T pi() {
return static_cast<T>(3.141592653589793238462643383279502);
}
template <typename T>
C10_HOST_DEVICE inline constexpr T sqrt_2() {
return static_cast<T>(1.414213562373095048801688724209698);
}
template <typename T>
C10_HOST_DEVICE inline constexpr T sqrt_3() {
return static_cast<T>(1.732050807568877293527446341505872);
}
template <>
C10_HOST_DEVICE inline constexpr BFloat16 pi<BFloat16>() {
// According to
// https://en.wikipedia.org/wiki/Bfloat16_floating-point_format#Special_values
// pi is encoded as 4049
return BFloat16(0x4049, BFloat16::from_bits());
}
template <>
C10_HOST_DEVICE inline constexpr Half pi<Half>() {
return Half(0x4248, Half::from_bits());
}
} // namespace detail
template <typename T>
constexpr T e = c10::detail::e<T>();
template <typename T>
constexpr T euler = c10::detail::euler<T>();
template <typename T>
constexpr T frac_1_pi = c10::detail::frac_1_pi<T>();
template <typename T>
constexpr T frac_1_sqrt_pi = c10::detail::frac_1_sqrt_pi<T>();
template <typename T>
constexpr T frac_sqrt_2 = c10::detail::frac_sqrt_2<T>();
template <typename T>
constexpr T frac_sqrt_3 = c10::detail::frac_sqrt_3<T>();
template <typename T>
constexpr T golden_ratio = c10::detail::golden_ratio<T>();
template <typename T>
constexpr T ln_10 = c10::detail::ln_10<T>();
template <typename T>
constexpr T ln_2 = c10::detail::ln_2<T>();
template <typename T>
constexpr T log_10_e = c10::detail::log_10_e<T>();
template <typename T>
constexpr T log_2_e = c10::detail::log_2_e<T>();
template <typename T>
constexpr T pi = c10::detail::pi<T>();
template <typename T>
constexpr T sqrt_2 = c10::detail::sqrt_2<T>();
template <typename T>
constexpr T sqrt_3 = c10::detail::sqrt_3<T>();
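// A minimal usage sketch (radius is a hypothetical variable):
//
//   float circumference = 2.0f * c10::pi<float> * radius;
//   constexpr double log2e = c10::log_2_e<double>;
//   // c10::pi<c10::Half> and c10::pi<c10::BFloat16> use the specialized bit
//   // patterns defined above.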
} // namespace c10
C10_CLANG_DIAGNOSTIC_POP()
| 3,690
| 24.811189
| 80
|
h
|
null |
pytorch-main/c10/util/MaybeOwned.h
|
#pragma once
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#include <c10/util/in_place.h>
#include <type_traits>
namespace c10 {
/// MaybeOwnedTraits<T> describes how to borrow from T. Here is how we
/// can implement borrowing from an arbitrary type T using a raw
/// pointer to const:
template <typename T>
struct MaybeOwnedTraitsGenericImpl {
using owned_type = T;
using borrow_type = const T*;
static borrow_type createBorrow(const owned_type& from) {
return &from;
}
static void assignBorrow(borrow_type& lhs, borrow_type rhs) {
lhs = rhs;
}
static void destroyBorrow(borrow_type& /*toDestroy*/) {}
static const owned_type& referenceFromBorrow(const borrow_type& borrow) {
return *borrow;
}
static const owned_type* pointerFromBorrow(const borrow_type& borrow) {
return borrow;
}
static bool debugBorrowIsValid(const borrow_type& borrow) {
return borrow != nullptr;
}
};
/// It is possible to eliminate the extra layer of indirection for
/// borrows for some types that we control. For examples, see
/// intrusive_ptr.h and TensorBody.h.
template <typename T>
struct MaybeOwnedTraits;
// Explicitly enable MaybeOwned<shared_ptr<T>>, rather than allowing
// MaybeOwned to be used for any type right away.
template <typename T>
struct MaybeOwnedTraits<std::shared_ptr<T>>
: public MaybeOwnedTraitsGenericImpl<std::shared_ptr<T>> {};
/// A smart pointer around either a borrowed or owned T. When
/// constructed with borrowed(), the caller MUST ensure that the
/// borrowed-from argument outlives this MaybeOwned<T>. Compare to
/// Rust's std::borrow::Cow
/// (https://doc.rust-lang.org/std/borrow/enum.Cow.html), but note
/// that it is probably not suitable for general use because C++ has
/// no borrow checking. Included here to support
/// Tensor::expect_contiguous.
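///
/// A minimal usage sketch (std::shared_ptr is one of the explicitly enabled
/// owned types, see MaybeOwnedTraits above):
///
///   std::shared_ptr<int> owner = std::make_shared<int>(1);
///   auto b = MaybeOwned<std::shared_ptr<int>>::borrowed(owner); // `owner` must outlive `b`
///   auto o = MaybeOwned<std::shared_ptr<int>>::owned(std::make_shared<int>(2));
///   int v = **o; // operator* yields a const std::shared_ptr<int>&, so **o == 2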
template <typename T>
class MaybeOwned final {
using borrow_type = typename MaybeOwnedTraits<T>::borrow_type;
using owned_type = typename MaybeOwnedTraits<T>::owned_type;
bool isBorrowed_;
union {
borrow_type borrow_;
owned_type own_;
};
/// Don't use this; use borrowed() instead.
explicit MaybeOwned(const owned_type& t)
: isBorrowed_(true), borrow_(MaybeOwnedTraits<T>::createBorrow(t)) {}
/// Don't use this; use owned() instead.
explicit MaybeOwned(T&& t) noexcept(
std::is_nothrow_move_constructible<T>::value)
: isBorrowed_(false), own_(std::move(t)) {}
/// Don't use this; use owned() instead.
template <class... Args>
explicit MaybeOwned(in_place_t, Args&&... args)
: isBorrowed_(false), own_(std::forward<Args>(args)...) {}
public:
explicit MaybeOwned() : isBorrowed_(true), borrow_() {}
// Copying a borrow yields another borrow of the original, as with a
// T*. Copying an owned T yields another owned T for safety: no
// chains of borrowing by default! (Note you could get that behavior
// with MaybeOwned<T>::borrowed(*rhs) if you wanted it.)
MaybeOwned(const MaybeOwned& rhs) : isBorrowed_(rhs.isBorrowed_) {
if (C10_LIKELY(rhs.isBorrowed_)) {
MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
} else {
new (&own_) T(rhs.own_);
}
}
MaybeOwned& operator=(const MaybeOwned& rhs) {
if (this == &rhs) {
return *this;
}
if (C10_UNLIKELY(!isBorrowed_)) {
if (rhs.isBorrowed_) {
own_.~T();
MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
isBorrowed_ = true;
} else {
own_ = rhs.own_;
}
} else {
if (C10_LIKELY(rhs.isBorrowed_)) {
MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
} else {
MaybeOwnedTraits<T>::destroyBorrow(borrow_);
new (&own_) T(rhs.own_);
isBorrowed_ = false;
}
}
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(isBorrowed_ == rhs.isBorrowed_);
return *this;
}
MaybeOwned(MaybeOwned&& rhs) noexcept(
std::is_nothrow_move_constructible<T>::value)
: isBorrowed_(rhs.isBorrowed_) {
if (C10_LIKELY(rhs.isBorrowed_)) {
MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
} else {
new (&own_) T(std::move(rhs.own_));
}
}
MaybeOwned& operator=(MaybeOwned&& rhs) noexcept(
std::is_nothrow_move_assignable<T>::value) {
if (this == &rhs) {
return *this;
}
if (C10_UNLIKELY(!isBorrowed_)) {
if (rhs.isBorrowed_) {
own_.~T();
MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
isBorrowed_ = true;
} else {
own_ = std::move(rhs.own_);
}
} else {
if (C10_LIKELY(rhs.isBorrowed_)) {
MaybeOwnedTraits<T>::assignBorrow(borrow_, rhs.borrow_);
} else {
MaybeOwnedTraits<T>::destroyBorrow(borrow_);
new (&own_) T(std::move(rhs.own_));
isBorrowed_ = false;
}
}
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(isBorrowed_ == rhs.isBorrowed_);
return *this;
}
static MaybeOwned borrowed(const T& t) {
return MaybeOwned(t);
}
static MaybeOwned owned(T&& t) noexcept(
std::is_nothrow_move_constructible<T>::value) {
return MaybeOwned(std::move(t));
}
template <class... Args>
static MaybeOwned owned(in_place_t, Args&&... args) {
return MaybeOwned(in_place, std::forward<Args>(args)...);
}
~MaybeOwned() {
if (C10_UNLIKELY(!isBorrowed_)) {
own_.~T();
} else {
MaybeOwnedTraits<T>::destroyBorrow(borrow_);
}
}
// This is an implementation detail! You should know what you're doing
  // if you are testing this. If you just want to guarantee ownership, move
  // this into a T.
bool unsafeIsBorrowed() const {
return isBorrowed_;
}
const T& operator*() const& {
if (isBorrowed_) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
MaybeOwnedTraits<T>::debugBorrowIsValid(borrow_));
}
return C10_LIKELY(isBorrowed_)
? MaybeOwnedTraits<T>::referenceFromBorrow(borrow_)
: own_;
}
const T* operator->() const {
if (isBorrowed_) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
MaybeOwnedTraits<T>::debugBorrowIsValid(borrow_));
}
return C10_LIKELY(isBorrowed_)
? MaybeOwnedTraits<T>::pointerFromBorrow(borrow_)
: &own_;
}
// If borrowed, copy the underlying T. If owned, move from
// it. borrowed/owned state remains the same, and either we
// reference the same borrow as before or we are an owned moved-from
// T.
T operator*() && {
if (isBorrowed_) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
MaybeOwnedTraits<T>::debugBorrowIsValid(borrow_));
return MaybeOwnedTraits<T>::referenceFromBorrow(borrow_);
} else {
return std::move(own_);
}
}
};
} // namespace c10
| 6,737
| 28.423581
| 75
|
h
|
null |
pytorch-main/c10/util/Metaprogramming.h
|
#pragma once
#include <c10/util/Array.h>
#include <c10/util/TypeList.h>
#include <functional>
#include <type_traits>
namespace c10 {
namespace guts {
/**
* Access information about result type or arguments from a function type.
* Example:
* using A = function_traits<int (float, double)>::return_type // A == int
* using A = function_traits<int (float, double)>::parameter_types::tuple_type
* // A == tuple<float, double>
*/
template <class Func>
struct function_traits {
static_assert(
!std::is_same<Func, Func>::value,
"In function_traits<Func>, Func must be a plain function type.");
};
template <class Result, class... Args>
struct function_traits<Result(Args...)> {
using func_type = Result(Args...);
using return_type = Result;
using parameter_types = typelist::typelist<Args...>;
static constexpr auto number_of_parameters = sizeof...(Args);
};
/**
* infer_function_traits: creates a `function_traits` type for a simple
* function (pointer) or functor (lambda/struct). Currently does not support
* class methods.
*/
template <typename Functor>
struct infer_function_traits {
using type = function_traits<
c10::guts::detail::strip_class_t<decltype(&Functor::operator())>>;
};
template <typename Result, typename... Args>
struct infer_function_traits<Result (*)(Args...)> {
using type = function_traits<Result(Args...)>;
};
template <typename Result, typename... Args>
struct infer_function_traits<Result(Args...)> {
using type = function_traits<Result(Args...)>;
};
template <typename T>
using infer_function_traits_t = typename infer_function_traits<T>::type;
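/**
 * Example (a sketch of the expected behavior for a lambda/functor):
 *
 *   auto f = [](int a, float b) -> bool { return a > b; };
 *   using Traits = infer_function_traits_t<decltype(f)>;
 *   // Traits::return_type == bool
 *   // Traits::number_of_parameters == 2
 */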
/**
* make_function_traits: creates a `function_traits` type given a Return type
* and a typelist of Argument types
*
* Example:
* bool f(int, int);
*
* infer_function_traits_t<f> == make_function_traits_t<bool,
* typelist::typelist<int, int>>
*/
template <typename Result, typename ArgList>
struct make_function_traits {
static_assert(
false_t<ArgList>::value,
"In guts::make_function_traits<Result, TypeList>, the ArgList argument must be typelist<...>.");
};
template <typename Result, typename... Args>
struct make_function_traits<Result, typelist::typelist<Args...>> {
using type = function_traits<Result(Args...)>;
};
template <typename Result, typename ArgList>
using make_function_traits_t =
typename make_function_traits<Result, ArgList>::type;
/**
* make_offset_index_sequence<Start, N>
* Like make_index_sequence<N>, but starting from Start instead of 0.
*
* Example:
* make_offset_index_sequence<10, 3> == std::index_sequence<10, 11, 12>
*/
template <size_t Start, size_t N, size_t... Is>
struct make_offset_index_sequence_impl
: make_offset_index_sequence_impl<Start, N - 1, Start + N - 1, Is...> {
static_assert(
static_cast<int>(Start) >= 0,
"make_offset_index_sequence: Start < 0");
static_assert(static_cast<int>(N) >= 0, "make_offset_index_sequence: N < 0");
};
template <size_t Start, size_t... Is>
struct make_offset_index_sequence_impl<Start, 0, Is...> {
typedef std::index_sequence<Is...> type;
};
template <size_t Start, size_t N>
using make_offset_index_sequence =
typename make_offset_index_sequence_impl<Start, N>::type;
/**
* Use tuple_elements to extract a position-indexed subset of elements
* from the argument tuple into a result tuple.
*
* Example:
* std::tuple<int, const char*, double> t = std::make_tuple(0, "HEY", 2.0);
* std::tuple<int, double> result = tuple_elements(t, std::index_sequence<0,
* 2>());
*/
template <class Tuple, size_t... Is>
constexpr auto tuple_elements(Tuple t, std::index_sequence<Is...>) {
return std::tuple<std::tuple_element_t<Is, Tuple>...>(std::get<Is>(t)...);
}
/**
* Use tuple_take to extract the first or last n elements from the argument
* tuple into a result tuple.
*
* Example:
* std::tuple<int, const char*, double> t = std::make_tuple(0, "HEY", 2.0);
* std::tuple<int, const char*> first_two = tuple_take<decltype(t), 2>(t);
* std::tuple<const char*, double> last_two = tuple_take<decltype(t), -2>(t);
*/
template <class Tuple, int N, class Enable = void>
struct TupleTake {};
template <class Tuple, int N>
struct TupleTake<Tuple, N, std::enable_if_t<N >= 0, void>> {
static auto call(Tuple t) {
constexpr size_t size = std::tuple_size<Tuple>();
static_assert(N <= size, "tuple_take: N > size");
return tuple_elements(t, std::make_index_sequence<N>{});
}
};
template <class Tuple, int N>
struct TupleTake<Tuple, N, std::enable_if_t<(N < 0), void>> {
static auto call(Tuple t) {
constexpr size_t size = std::tuple_size<Tuple>();
static_assert(-N <= size, "tuple_take: -N > size");
return tuple_elements(t, make_offset_index_sequence<size + N, -N>{});
}
};
template <class Tuple, int N>
auto tuple_take(Tuple t) {
return TupleTake<Tuple, N>::call(t);
}
/**
* Use tuple_slice to extract a contiguous subtuple from the argument.
*
* Example:
* std::tuple<int, const char*, double, bool> t = std::make_tuple(0,
* "HEY", 2.0, false); std::tuple<int, const char*> middle_two =
* tuple_slice<decltype(t), 1, 2>(t);
*/
template <class Tuple, size_t Start, size_t N>
constexpr auto tuple_slice(Tuple t) {
constexpr size_t size = std::tuple_size<Tuple>();
static_assert(Start + N <= size, "tuple_slice: Start + N > size");
return tuple_elements(t, make_offset_index_sequence<Start, N>{});
}
/**
* Use tuple_map to run a mapping function over a tuple to get a new tuple.
*
* Example 1:
* auto result = tuple_map(std::tuple<int32_t, int32_t, int32_t>(3, 4, 5), []
* (int32_t a) -> int16_t {return a+1;});
* // result == std::tuple<int16_t, int16_t, int16_t>(4, 5, 6)
*
* Example 2:
* struct Mapper {
* std::string operator()(int32_t a) const {
* return std::to_string(a);
* }
* int64_t operator()(const std::string& a) const {
* return atoi(a.c_str());
* }
* };
* auto result = tuple_map(std::tuple<int32_t, std::string>(3, "4"),
* Mapper());
* // result == std::tuple<std::string, int64_t>("3", 4)
*
* Example 3:
* struct A final {
* int32_t func() {
* return 5;
* }
* };
* struct B final {
* std::string func() {
* return "5";
* }
* };
* auto result = tuple_map(std::make_tuple(A(), B()), [] (auto a) { return
* a.func(); });
* // result == std::tuple<int32_t, std::string>(5, "5");
*/
namespace detail {
template <class Mapper, class... Args, size_t... Indices>
auto tuple_map(
std::tuple<Args...>&& tuple,
const Mapper& mapper,
std::index_sequence<Indices...>) {
return std::tuple<decltype(mapper(std::forward<Args>(std::get<Indices>(
tuple))))...>(mapper(std::forward<Args>(std::get<Indices>(tuple)))...);
}
} // namespace detail
template <class Mapper, class... Args>
auto tuple_map(std::tuple<Args...>&& tuple, const Mapper& mapper) {
return detail::tuple_map(
std::move(tuple), mapper, std::index_sequence_for<Args...>());
}
} // namespace guts
} // namespace c10
| 7,037
| 29.868421
| 102
|
h
|
null |
pytorch-main/c10/util/OptionalArrayRef.h
|
// This file defines OptionalArrayRef<T>, a class that has almost the same
// exact functionality as c10::optional<ArrayRef<T>>, except that its
// converting constructor fixes a dangling pointer issue.
//
// The implicit converting constructor of both c10::optional<ArrayRef<T>> and
// std::optional<ArrayRef<T>> can cause the underlying ArrayRef<T> to store
// a dangling pointer. OptionalArrayRef<T> prevents this by wrapping
// a c10::optional<ArrayRef<T>> and fixing the constructor implementation.
//
// See https://github.com/pytorch/pytorch/issues/63645 for more on this.
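//
// A minimal usage sketch (`resize` and `use` are hypothetical functions):
//
//   void resize(c10::OptionalIntArrayRef stride);
//   resize({2, 3}); // initializer_list overload; the data stays alive for the call
//   resize(c10::nullopt); // explicitly no value
//   // Inside resize: if (stride.has_value()) { use(*stride); }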
#pragma once
#include <c10/util/ArrayRef.h>
#include <c10/util/Optional.h>
namespace c10 {
template <typename T>
class OptionalArrayRef final {
public:
// Constructors
constexpr OptionalArrayRef() noexcept = default;
constexpr OptionalArrayRef(nullopt_t) noexcept {}
OptionalArrayRef(const OptionalArrayRef& other) = default;
OptionalArrayRef(OptionalArrayRef&& other) = default;
constexpr OptionalArrayRef(const optional<ArrayRef<T>>& other) noexcept
: wrapped_opt_array_ref(other) {}
constexpr OptionalArrayRef(optional<ArrayRef<T>>&& other) noexcept
: wrapped_opt_array_ref(other) {}
constexpr OptionalArrayRef(const T& value) noexcept
: wrapped_opt_array_ref(value) {}
template <
typename U = ArrayRef<T>,
std::enable_if_t<
!std::is_same<std::decay_t<U>, OptionalArrayRef>::value &&
!std::is_same<std::decay_t<U>, in_place_t>::value &&
std::is_constructible<ArrayRef<T>, U&&>::value &&
std::is_convertible<U&&, ArrayRef<T>>::value &&
!std::is_convertible<U&&, T>::value,
bool> = false>
constexpr OptionalArrayRef(U&& value) noexcept(
std::is_nothrow_constructible<ArrayRef<T>, U&&>::value)
: wrapped_opt_array_ref(value) {}
template <
typename U = ArrayRef<T>,
std::enable_if_t<
!std::is_same<std::decay_t<U>, OptionalArrayRef>::value &&
!std::is_same<std::decay_t<U>, in_place_t>::value &&
std::is_constructible<ArrayRef<T>, U&&>::value &&
!std::is_convertible<U&&, ArrayRef<T>>::value,
bool> = false>
constexpr explicit OptionalArrayRef(U&& value) noexcept(
std::is_nothrow_constructible<ArrayRef<T>, U&&>::value)
: wrapped_opt_array_ref(value) {}
template <typename... Args>
constexpr explicit OptionalArrayRef(in_place_t ip, Args&&... args) noexcept
: wrapped_opt_array_ref(ip, args...) {}
template <typename U, typename... Args>
constexpr explicit OptionalArrayRef(
in_place_t ip,
std::initializer_list<U> il,
Args&&... args)
: wrapped_opt_array_ref(ip, il, args...) {}
constexpr OptionalArrayRef(const std::initializer_list<T>& Vec)
: wrapped_opt_array_ref(ArrayRef<T>(Vec)) {}
// Destructor
~OptionalArrayRef() = default;
// Assignment
constexpr OptionalArrayRef& operator=(nullopt_t) noexcept {
wrapped_opt_array_ref = c10::nullopt;
return *this;
}
OptionalArrayRef& operator=(const OptionalArrayRef& other) = default;
OptionalArrayRef& operator=(OptionalArrayRef&& other) = default;
constexpr OptionalArrayRef& operator=(
const optional<ArrayRef<T>>& other) noexcept {
wrapped_opt_array_ref = other;
return *this;
}
constexpr OptionalArrayRef& operator=(
optional<ArrayRef<T>>&& other) noexcept {
wrapped_opt_array_ref = other;
return *this;
}
template <typename U = ArrayRef<T>>
constexpr std::enable_if_t<
!std::is_same<std::decay_t<U>, OptionalArrayRef>::value &&
std::is_constructible<ArrayRef<T>, U&&>::value &&
std::is_assignable<ArrayRef<T>&, U&&>::value,
OptionalArrayRef&>
operator=(U&& value) noexcept(
std::is_nothrow_constructible<ArrayRef<T>, U&&>::value&&
std::is_nothrow_assignable<ArrayRef<T>&, U&&>::value) {
wrapped_opt_array_ref = value;
return *this;
}
// Observers
constexpr ArrayRef<T>* operator->() noexcept {
return &wrapped_opt_array_ref.value();
}
constexpr const ArrayRef<T>* operator->() const noexcept {
return &wrapped_opt_array_ref.value();
}
constexpr ArrayRef<T>& operator*() & noexcept {
return wrapped_opt_array_ref.value();
}
constexpr const ArrayRef<T>& operator*() const& noexcept {
return wrapped_opt_array_ref.value();
}
constexpr ArrayRef<T>&& operator*() && noexcept {
return std::move(wrapped_opt_array_ref.value());
}
constexpr const ArrayRef<T>&& operator*() const&& noexcept {
return std::move(wrapped_opt_array_ref.value());
}
constexpr explicit operator bool() const noexcept {
return wrapped_opt_array_ref.has_value();
}
constexpr bool has_value() const noexcept {
return wrapped_opt_array_ref.has_value();
}
constexpr ArrayRef<T>& value() & {
return wrapped_opt_array_ref.value();
}
constexpr const ArrayRef<T>& value() const& {
return wrapped_opt_array_ref.value();
}
constexpr ArrayRef<T>&& value() && {
return std::move(wrapped_opt_array_ref.value());
}
constexpr const ArrayRef<T>&& value() const&& {
return std::move(wrapped_opt_array_ref.value());
}
template <typename U>
constexpr std::
enable_if_t<std::is_convertible<U&&, ArrayRef<T>>::value, ArrayRef<T>>
value_or(U&& default_value) const& {
return wrapped_opt_array_ref.value_or(default_value);
}
template <typename U>
constexpr std::
enable_if_t<std::is_convertible<U&&, ArrayRef<T>>::value, ArrayRef<T>>
value_or(U&& default_value) && {
return wrapped_opt_array_ref.value_or(default_value);
}
// Modifiers
constexpr void swap(OptionalArrayRef& other) noexcept {
std::swap(wrapped_opt_array_ref, other.wrapped_opt_array_ref);
}
constexpr void reset() noexcept {
wrapped_opt_array_ref.reset();
}
template <typename... Args>
constexpr std::enable_if_t<
std::is_constructible<ArrayRef<T>, Args&&...>::value,
ArrayRef<T>&>
emplace(Args&&... args) noexcept(
std::is_nothrow_constructible<ArrayRef<T>, Args&&...>::value) {
return wrapped_opt_array_ref.emplace(args...);
}
template <typename U, typename... Args>
constexpr ArrayRef<T>& emplace(
std::initializer_list<U> il,
Args&&... args) noexcept {
return wrapped_opt_array_ref.emplace(il, args...);
}
private:
optional<ArrayRef<T>> wrapped_opt_array_ref;
};
using OptionalIntArrayRef = OptionalArrayRef<int64_t>;
inline bool operator==(
const OptionalIntArrayRef& a1,
const IntArrayRef& other) {
if (!a1.has_value()) {
return false;
}
return a1.value() == other;
}
inline bool operator==(
const c10::IntArrayRef& a1,
const c10::OptionalIntArrayRef& a2) {
return a2 == a1;
}
} // namespace c10
| 6,834
| 28.461207
| 77
|
h
|
null |
pytorch-main/c10/util/Registry.h
|
#ifndef C10_UTIL_REGISTRY_H_
#define C10_UTIL_REGISTRY_H_
/**
* Simple registry implementation that uses static variables to
* register object creators during program initialization time.
*/
// NB: This Registry works poorly when you have other namespaces.
// Make all macro invocations from inside the at namespace.
#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <functional>
#include <memory>
#include <mutex>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <vector>
#include <c10/macros/Macros.h>
#include <c10/util/Type.h>
namespace c10 {
template <typename KeyType>
inline std::string KeyStrRepr(const KeyType& /*key*/) {
return "[key type printing not supported]";
}
template <>
inline std::string KeyStrRepr(const std::string& key) {
return key;
}
enum RegistryPriority {
REGISTRY_FALLBACK = 1,
REGISTRY_DEFAULT = 2,
REGISTRY_PREFERRED = 3,
};
/**
* @brief A template class that allows one to register classes by keys.
*
* The keys are usually a std::string specifying the name, but can be anything
* that can be used in a std::map.
*
* You should most likely not use the Registry class explicitly, but use the
* helper macros below to declare specific registries as well as registering
* objects.
*/
template <class SrcType, class ObjectPtrType, class... Args>
class Registry {
public:
typedef std::function<ObjectPtrType(Args...)> Creator;
Registry(bool warning = true)
: registry_(), priority_(), terminate_(true), warning_(warning) {}
void Register(
const SrcType& key,
Creator creator,
const RegistryPriority priority = REGISTRY_DEFAULT) {
std::lock_guard<std::mutex> lock(register_mutex_);
// The if statement below is essentially the same as the following line:
// TORCH_CHECK_EQ(registry_.count(key), 0) << "Key " << key
// << " registered twice.";
// However, TORCH_CHECK_EQ depends on google logging, and since registration
// is carried out at static initialization time, we do not want to have an
// explicit dependency on glog's initialization function.
if (registry_.count(key) != 0) {
auto cur_priority = priority_[key];
if (priority > cur_priority) {
#ifdef DEBUG
std::string warn_msg =
"Overwriting already registered item for key " + KeyStrRepr(key);
fprintf(stderr, "%s\n", warn_msg.c_str());
#endif
registry_[key] = creator;
priority_[key] = priority;
} else if (priority == cur_priority) {
std::string err_msg =
"Key already registered with the same priority: " + KeyStrRepr(key);
fprintf(stderr, "%s\n", err_msg.c_str());
if (terminate_) {
std::exit(1);
} else {
throw std::runtime_error(err_msg);
}
} else if (warning_) {
std::string warn_msg =
"Higher priority item already registered, skipping registration of " +
KeyStrRepr(key);
fprintf(stderr, "%s\n", warn_msg.c_str());
}
} else {
registry_[key] = creator;
priority_[key] = priority;
}
}
void Register(
const SrcType& key,
Creator creator,
const std::string& help_msg,
const RegistryPriority priority = REGISTRY_DEFAULT) {
Register(key, creator, priority);
help_message_[key] = help_msg;
}
inline bool Has(const SrcType& key) {
return (registry_.count(key) != 0);
}
ObjectPtrType Create(const SrcType& key, Args... args) {
auto it = registry_.find(key);
if (it == registry_.end()) {
// Returns nullptr if the key is not registered.
return nullptr;
}
return it->second(args...);
}
/**
* Returns the keys currently registered as a std::vector.
*/
std::vector<SrcType> Keys() const {
std::vector<SrcType> keys;
keys.reserve(registry_.size());
for (const auto& it : registry_) {
keys.push_back(it.first);
}
return keys;
}
inline const std::unordered_map<SrcType, std::string>& HelpMessage() const {
return help_message_;
}
const char* HelpMessage(const SrcType& key) const {
auto it = help_message_.find(key);
if (it == help_message_.end()) {
return nullptr;
}
return it->second.c_str();
}
// Used for testing, if terminate is unset, Registry throws instead of
// calling std::exit
void SetTerminate(bool terminate) {
terminate_ = terminate;
}
private:
std::unordered_map<SrcType, Creator> registry_;
std::unordered_map<SrcType, RegistryPriority> priority_;
bool terminate_;
const bool warning_;
std::unordered_map<SrcType, std::string> help_message_;
std::mutex register_mutex_;
C10_DISABLE_COPY_AND_ASSIGN(Registry);
};
template <class SrcType, class ObjectPtrType, class... Args>
class Registerer {
public:
explicit Registerer(
const SrcType& key,
Registry<SrcType, ObjectPtrType, Args...>* registry,
typename Registry<SrcType, ObjectPtrType, Args...>::Creator creator,
const std::string& help_msg = "") {
registry->Register(key, creator, help_msg);
}
explicit Registerer(
const SrcType& key,
const RegistryPriority priority,
Registry<SrcType, ObjectPtrType, Args...>* registry,
typename Registry<SrcType, ObjectPtrType, Args...>::Creator creator,
const std::string& help_msg = "") {
registry->Register(key, creator, help_msg, priority);
}
template <class DerivedType>
static ObjectPtrType DefaultCreator(Args... args) {
return ObjectPtrType(new DerivedType(args...));
}
};
/**
* C10_DECLARE_TYPED_REGISTRY is a macro that expands to a function
* declaration, as well as creating a convenient typename for its corresponding
* registerer.
*/
// Note on C10_IMPORT and C10_EXPORT below: we need to explicitly mark DECLARE
// as import and DEFINE as export, because these registry macros will be used
// in downstream shared libraries as well, and one cannot use *_API - the API
// macro will be defined on a per-shared-library basis. Semantically, when one
// declares a typed registry it is always going to be IMPORT, and when one
// defines a registry (which should happen ONLY ONCE and ONLY IN SOURCE FILE),
// the instantiation unit is always going to be exported.
//
// The only unique condition is when in the same file one does DECLARE and
// DEFINE - in Windows compilers, this generates a warning that dllimport and
// dllexport are mixed, but the warning is fine and linker will be properly
// exporting the symbol. Same thing happens in the gflags flag declaration and
// definition case.
#define C10_DECLARE_TYPED_REGISTRY( \
RegistryName, SrcType, ObjectType, PtrType, ...) \
C10_API ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>* \
RegistryName(); \
typedef ::c10::Registerer<SrcType, PtrType<ObjectType>, ##__VA_ARGS__> \
Registerer##RegistryName
#define TORCH_DECLARE_TYPED_REGISTRY( \
RegistryName, SrcType, ObjectType, PtrType, ...) \
TORCH_API ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>* \
RegistryName(); \
typedef ::c10::Registerer<SrcType, PtrType<ObjectType>, ##__VA_ARGS__> \
Registerer##RegistryName
#define C10_DEFINE_TYPED_REGISTRY( \
RegistryName, SrcType, ObjectType, PtrType, ...) \
C10_EXPORT ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>* \
RegistryName() { \
static ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>* \
registry = new ::c10:: \
Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>(); \
return registry; \
}
#define C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING( \
RegistryName, SrcType, ObjectType, PtrType, ...) \
C10_EXPORT ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>* \
RegistryName() { \
static ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>* \
registry = \
new ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>( \
false); \
return registry; \
}
// Note(Yangqing): The __VA_ARGS__ below allows one to specify a templated
// creator with comma in its templated arguments.
#define C10_REGISTER_TYPED_CREATOR(RegistryName, key, ...) \
static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \
key, RegistryName(), ##__VA_ARGS__);
#define C10_REGISTER_TYPED_CREATOR_WITH_PRIORITY( \
RegistryName, key, priority, ...) \
static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \
key, priority, RegistryName(), ##__VA_ARGS__);
#define C10_REGISTER_TYPED_CLASS(RegistryName, key, ...) \
static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \
key, \
RegistryName(), \
Registerer##RegistryName::DefaultCreator<__VA_ARGS__>, \
::c10::demangle_type<__VA_ARGS__>());
#define C10_REGISTER_TYPED_CLASS_WITH_PRIORITY( \
RegistryName, key, priority, ...) \
static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \
key, \
priority, \
RegistryName(), \
Registerer##RegistryName::DefaultCreator<__VA_ARGS__>, \
::c10::demangle_type<__VA_ARGS__>());
// C10_DECLARE_REGISTRY and C10_DEFINE_REGISTRY are hard-wired to use
// std::string as the key type, because that is the most commonly used cases.
#define C10_DECLARE_REGISTRY(RegistryName, ObjectType, ...) \
C10_DECLARE_TYPED_REGISTRY( \
RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__)
#define TORCH_DECLARE_REGISTRY(RegistryName, ObjectType, ...) \
TORCH_DECLARE_TYPED_REGISTRY( \
RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__)
#define C10_DEFINE_REGISTRY(RegistryName, ObjectType, ...) \
C10_DEFINE_TYPED_REGISTRY( \
RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__)
#define C10_DEFINE_REGISTRY_WITHOUT_WARNING(RegistryName, ObjectType, ...) \
C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING( \
RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__)
#define C10_DECLARE_SHARED_REGISTRY(RegistryName, ObjectType, ...) \
C10_DECLARE_TYPED_REGISTRY( \
RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__)
#define TORCH_DECLARE_SHARED_REGISTRY(RegistryName, ObjectType, ...) \
TORCH_DECLARE_TYPED_REGISTRY( \
RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__)
#define C10_DEFINE_SHARED_REGISTRY(RegistryName, ObjectType, ...) \
C10_DEFINE_TYPED_REGISTRY( \
RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__)
#define C10_DEFINE_SHARED_REGISTRY_WITHOUT_WARNING( \
RegistryName, ObjectType, ...) \
C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING( \
RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__)
// C10_REGISTER_CREATOR and C10_REGISTER_CLASS are hard-wired to use std::string
// as the key
// type, because that is the most commonly used cases.
#define C10_REGISTER_CREATOR(RegistryName, key, ...) \
C10_REGISTER_TYPED_CREATOR(RegistryName, #key, __VA_ARGS__)
#define C10_REGISTER_CREATOR_WITH_PRIORITY(RegistryName, key, priority, ...) \
C10_REGISTER_TYPED_CREATOR_WITH_PRIORITY( \
RegistryName, #key, priority, __VA_ARGS__)
#define C10_REGISTER_CLASS(RegistryName, key, ...) \
C10_REGISTER_TYPED_CLASS(RegistryName, #key, __VA_ARGS__)
#define C10_REGISTER_CLASS_WITH_PRIORITY(RegistryName, key, priority, ...) \
C10_REGISTER_TYPED_CLASS_WITH_PRIORITY( \
RegistryName, #key, priority, __VA_ARGS__)
} // namespace c10
#endif // C10_UTIL_REGISTRY_H_
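
// ---------------------------------------------------------------------------
// Usage sketch (illustrative; not part of the header above). The registry
// name AnimalRegistry and the Animal/Dog types are hypothetical.
#include <c10/util/Registry.h>

#include <memory>
#include <string>

struct Animal {
  virtual ~Animal() = default;
  virtual std::string speak() const = 0;
};

// The DECLARE normally lives in a header, the DEFINE in exactly one .cpp file.
C10_DECLARE_REGISTRY(AnimalRegistry, Animal);
C10_DEFINE_REGISTRY(AnimalRegistry, Animal);

struct Dog : Animal {
  std::string speak() const override {
    return "woof";
  }
};
// Registers key "Dog" with a default creator that news up a Dog.
C10_REGISTER_CLASS(AnimalRegistry, Dog, Dog);

std::unique_ptr<Animal> make_animal(const std::string& name) {
  // Create() returns nullptr if the key was never registered.
  return AnimalRegistry()->Create(name);
}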
pytorch-main/c10/util/ScopeExit.h
#pragma once
#include <type_traits>
#include <utility>
namespace c10 {
/**
* Mostly copied from https://llvm.org/doxygen/ScopeExit_8h_source.html
*/
template <typename Callable>
class scope_exit {
Callable ExitFunction;
bool Engaged = true; // False once moved-from or release()d.
public:
template <typename Fp>
// constructor accepting a forwarding reference can hide the
// move constructor
// @lint-ignore CLANGTIDY
explicit scope_exit(Fp&& F) : ExitFunction(std::forward<Fp>(F)) {}
scope_exit(scope_exit&& Rhs) noexcept
: ExitFunction(std::move(Rhs.ExitFunction)), Engaged(Rhs.Engaged) {
Rhs.release();
}
scope_exit(const scope_exit&) = delete;
scope_exit& operator=(scope_exit&&) = delete;
scope_exit& operator=(const scope_exit&) = delete;
void release() {
Engaged = false;
}
~scope_exit() {
if (Engaged) {
ExitFunction();
}
}
};
// Keeps the callable object that is passed in, and execute it at the
// destruction of the returned object (usually at the scope exit where the
// returned object is kept).
//
// Interface is specified by p0052r2.
template <typename Callable>
scope_exit<typename std::decay<Callable>::type> make_scope_exit(Callable&& F) {
return scope_exit<typename std::decay<Callable>::type>(
std::forward<Callable>(F));
}
} // namespace c10
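
// ---------------------------------------------------------------------------
// Usage sketch (illustrative; not part of the header above).
#include <c10/util/ScopeExit.h>

#include <cstdio>

void append_line(const char* path, const char* line) {
  std::FILE* f = std::fopen(path, "a");
  if (f == nullptr) {
    return;
  }
  // The lambda runs when `close_guard` is destroyed, i.e. on every exit path
  // from this function, including early returns and exceptions.
  auto close_guard = c10::make_scope_exit([&] { std::fclose(f); });
  std::fputs(line, f);
  std::fputs("\n", f);
}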
pytorch-main/c10/util/StringUtil.h
#ifndef C10_UTIL_STRINGUTIL_H_
#define C10_UTIL_STRINGUTIL_H_
#include <c10/macros/Macros.h>
#include <c10/util/string_utils.h>
#include <c10/util/string_view.h>
#include <cstddef>
#include <ostream>
#include <sstream>
#include <string>
#include <vector>
C10_CLANG_DIAGNOSTIC_PUSH()
#if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32")
C10_CLANG_DIAGNOSTIC_IGNORE("-Wshorten-64-to-32")
#endif
namespace c10 {
namespace detail {
// Obtains the base name from a full path.
C10_API std::string StripBasename(const std::string& full_path);
C10_API std::string ExcludeFileExtension(const std::string& full_path);
struct CompileTimeEmptyString {
operator const std::string&() const {
static const std::string empty_string_literal;
return empty_string_literal;
}
operator const char*() const {
return "";
}
};
template <typename T>
struct CanonicalizeStrTypes {
using type = const T&;
};
template <size_t N>
struct CanonicalizeStrTypes<char[N]> {
using type = const char*;
};
inline std::ostream& _str(std::ostream& ss) {
return ss;
}
template <typename T>
inline std::ostream& _str(std::ostream& ss, const T& t) {
// NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
ss << t;
return ss;
}
template <>
inline std::ostream& _str<CompileTimeEmptyString>(
std::ostream& ss,
const CompileTimeEmptyString&) {
return ss;
}
template <typename T, typename... Args>
inline std::ostream& _str(std::ostream& ss, const T& t, const Args&... args) {
return _str(_str(ss, t), args...);
}
template <typename... Args>
struct _str_wrapper final {
static std::string call(const Args&... args) {
std::ostringstream ss;
_str(ss, args...);
return ss.str();
}
};
// Specializations for already-a-string types.
template <>
struct _str_wrapper<std::string> final {
// return by reference to avoid the binary size of a string copy
static const std::string& call(const std::string& str) {
return str;
}
};
template <>
struct _str_wrapper<const char*> final {
static const char* call(const char* str) {
return str;
}
};
// For c10::str() with an empty argument list (which is common in our assert
// macros), we don't want to pay the binary size for constructing and
// destructing a stringstream or even constructing a string.
template <>
struct _str_wrapper<> final {
static CompileTimeEmptyString call() {
return CompileTimeEmptyString();
}
};
} // namespace detail
// Convert a list of string-like arguments into a single string.
template <typename... Args>
inline decltype(auto) str(const Args&... args) {
return detail::_str_wrapper<
typename detail::CanonicalizeStrTypes<Args>::type...>::call(args...);
}
template <class Container>
inline std::string Join(const std::string& delimiter, const Container& v) {
std::stringstream s;
int cnt = static_cast<int64_t>(v.size()) - 1;
for (auto i = v.begin(); i != v.end(); ++i, --cnt) {
s << (*i) << (cnt ? delimiter : "");
}
return s.str();
}
// Replace all occurrences of "from" substring to "to" string.
// Returns number of replacements
size_t C10_API
ReplaceAll(std::string& s, c10::string_view from, c10::string_view to);
/// Represents a location in source code (for debugging).
struct C10_API SourceLocation {
const char* function;
const char* file;
uint32_t line;
};
std::ostream& operator<<(std::ostream& out, const SourceLocation& loc);
// unix isprint but insensitive to locale
inline static bool isPrint(char s) {
return s > 0x1f && s < 0x7f;
}
inline void printQuotedString(std::ostream& stmt, const string_view str) {
stmt << "\"";
for (auto s : str) {
switch (s) {
case '\\':
stmt << "\\\\";
break;
case '\'':
stmt << "\\'";
break;
case '\"':
stmt << "\\\"";
break;
case '\a':
stmt << "\\a";
break;
case '\b':
stmt << "\\b";
break;
case '\f':
stmt << "\\f";
break;
case '\n':
stmt << "\\n";
break;
case '\r':
stmt << "\\r";
break;
case '\t':
stmt << "\\t";
break;
case '\v':
stmt << "\\v";
break;
default:
if (isPrint(s)) {
stmt << s;
} else {
// C++ io has stateful formatting settings. Messing with
// them is probably worse than doing this manually.
char buf[4] = "000";
buf[2] += s % 8;
s /= 8;
buf[1] += s % 8;
s /= 8;
buf[0] += s;
stmt << "\\" << buf;
}
break;
}
}
stmt << "\"";
}
} // namespace c10
C10_CLANG_DIAGNOSTIC_POP()
#endif // C10_UTIL_STRINGUTIL_H_
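
// ---------------------------------------------------------------------------
// Usage sketch (illustrative; not part of the header above).
#include <c10/util/StringUtil.h>

#include <cstdint>
#include <string>
#include <vector>

std::string describe_shape(const std::vector<int64_t>& sizes) {
  // c10::str() concatenates heterogeneous arguments through a stringstream;
  // c10::Join() inserts a delimiter between container elements.
  std::string msg = c10::str("shape = [", c10::Join(", ", sizes), "]");
  // ReplaceAll mutates in place and returns the number of replacements.
  c10::ReplaceAll(msg, ", ", " x ");
  return msg;
}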
pytorch-main/c10/util/Synchronized.h
#pragma once
#include <mutex>
#include <c10/util/C++17.h>
namespace c10 {
/**
* A very simple Synchronization class for error-free use of data
* in a multi-threaded context. See folly/docs/Synchronized.md for
* the inspiration of this class.
*
* Full URL:
* https://github.com/facebook/folly/blob/main/folly/docs/Synchronized.md
*
* This class implements a small subset of the generic functionality
* implemented by folly:Synchronized<T>. Specifically, only withLock<T>
* is implemented here since it's the smallest possible API that is
* able to cover a large surface area of functionality offered by
* folly::Synchronized<T>.
*/
template <typename T>
class Synchronized final {
mutable std::mutex mutex_;
T data_;
public:
Synchronized() = default;
Synchronized(T const& data) : data_(data) {}
  Synchronized(T&& data) : data_(std::move(data)) {}
// Don't permit copy construction, move, assignment, or
// move assignment, since the underlying std::mutex
// isn't necessarily copyable/moveable.
Synchronized(Synchronized const&) = delete;
Synchronized(Synchronized&&) = delete;
Synchronized operator=(Synchronized const&) = delete;
Synchronized operator=(Synchronized&&) = delete;
/**
* To use, call withLock<T> with a callback that accepts T either
* by copy or by reference. Use the protected variable in the
* provided callback safely.
*/
template <typename CB>
typename c10::invoke_result_t<CB, T&> withLock(CB cb) {
std::lock_guard<std::mutex> guard(this->mutex_);
return cb(this->data_);
}
/**
* To use, call withLock<T> with a callback that accepts T either
* by copy or by const reference. Use the protected variable in
* the provided callback safely.
*/
template <typename CB>
typename c10::invoke_result_t<CB, T const&> withLock(CB cb) const {
std::lock_guard<std::mutex> guard(this->mutex_);
return cb(this->data_);
}
};
} // end namespace c10
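
// ---------------------------------------------------------------------------
// Usage sketch (illustrative; not part of the header above).
#include <c10/util/Synchronized.h>

#include <cstddef>
#include <string>
#include <vector>

c10::Synchronized<std::vector<std::string>> g_messages;

void record(std::string msg) {
  // Every access to the protected vector goes through withLock(), so it is
  // always guarded by the internal mutex.
  g_messages.withLock([&](std::vector<std::string>& messages) {
    messages.push_back(std::move(msg));
  });
}

std::size_t message_count() {
  return g_messages.withLock(
      [](std::vector<std::string>& messages) { return messages.size(); });
}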
pytorch-main/c10/util/ThreadLocal.h
#pragma once
#include <c10/macros/Macros.h>
/**
* Android versions with libgnustl incorrectly handle thread_local C++
* qualifier with composite types. NDK up to r17 version is affected.
*
* (A fix landed on Jun 4 2018:
* https://android-review.googlesource.com/c/toolchain/gcc/+/683601)
*
* In such cases, use c10::ThreadLocal<T> wrapper
* which is `pthread_*` based with smart pointer semantics.
*
* In addition, convenient macro C10_DEFINE_TLS_static is available.
* To define static TLS variable of type std::string, do the following
* ```
* C10_DEFINE_TLS_static(std::string, str_tls_);
* ///////
* {
* *str_tls_ = "abc";
* assert(str_tls_->length(), 3);
* }
* ```
*
* (see c10/test/util/ThreadLocal_test.cpp for more examples)
*/
#if !defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)
#if defined(C10_ANDROID) && defined(__GLIBCXX__) && __GLIBCXX__ < 20180604
#define C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE
#endif // defined(C10_ANDROID) && defined(__GLIBCXX__) && __GLIBCXX__ < 20180604
#endif // !defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)
#if defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)
#include <c10/util/Exception.h>
#include <errno.h>
#include <pthread.h>
#include <memory>
namespace c10 {
/**
* @brief Temporary thread_local C++ qualifier replacement for Android
* based on `pthread_*`.
* To be used with composite types that provide default ctor.
*/
template <typename Type>
class ThreadLocal {
public:
ThreadLocal() {
pthread_key_create(
&key_, [](void* buf) { delete static_cast<Type*>(buf); });
}
~ThreadLocal() {
if (void* current = pthread_getspecific(key_)) {
delete static_cast<Type*>(current);
}
pthread_key_delete(key_);
}
ThreadLocal(const ThreadLocal&) = delete;
ThreadLocal& operator=(const ThreadLocal&) = delete;
Type& get() {
if (void* current = pthread_getspecific(key_)) {
return *static_cast<Type*>(current);
}
std::unique_ptr<Type> ptr = std::make_unique<Type>();
if (0 == pthread_setspecific(key_, ptr.get())) {
return *ptr.release();
}
int err = errno;
TORCH_INTERNAL_ASSERT(false, "pthread_setspecific() failed, errno = ", err);
}
Type& operator*() {
return get();
}
Type* operator->() {
return &get();
}
private:
pthread_key_t key_;
};
} // namespace c10
#define C10_DEFINE_TLS_static(Type, Name) static ::c10::ThreadLocal<Type> Name
#define C10_DECLARE_TLS_class_static(Class, Type, Name) \
static ::c10::ThreadLocal<Type> Name
#define C10_DEFINE_TLS_class_static(Class, Type, Name) \
::c10::ThreadLocal<Type> Class::Name
#else // defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)
namespace c10 {
/**
* @brief Default thread_local implementation for non-Android cases.
* To be used with composite types that provide default ctor.
*/
template <typename Type>
class ThreadLocal {
public:
using Accessor = Type* (*)();
explicit ThreadLocal(Accessor accessor) : accessor_(accessor) {}
ThreadLocal(const ThreadLocal&) = delete;
ThreadLocal& operator=(const ThreadLocal&) = delete;
Type& get() {
return *accessor_();
}
Type& operator*() {
return get();
}
Type* operator->() {
return &get();
}
private:
Accessor accessor_;
};
} // namespace c10
#define C10_DEFINE_TLS_static(Type, Name) \
static ::c10::ThreadLocal<Type> Name([]() { \
static thread_local Type var; \
return &var; \
})
#define C10_DECLARE_TLS_class_static(Class, Type, Name) \
static ::c10::ThreadLocal<Type> Name
#define C10_DEFINE_TLS_class_static(Class, Type, Name) \
::c10::ThreadLocal<Type> Class::Name([]() { \
static thread_local Type var; \
return &var; \
})
#endif // defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)
pytorch-main/c10/util/ThreadLocalDebugInfo.h
#pragma once
#include <c10/macros/Export.h>
#include <memory>
#include <string>
namespace c10 {
enum class C10_API_ENUM DebugInfoKind : uint8_t {
PRODUCER_INFO = 0,
MOBILE_RUNTIME_INFO,
PROFILER_STATE,
INFERENCE_CONTEXT, // for inference usage
PARAM_COMMS_INFO,
TEST_INFO, // used only in tests
TEST_INFO_2, // used only in tests
};
class C10_API DebugInfoBase {
public:
DebugInfoBase() = default;
virtual ~DebugInfoBase() = default;
};
// Thread local debug information is propagated across the forward
// (including async fork tasks) and backward passes and is supposed
// to be utilized by the user's code to pass extra information from
// the higher layers (e.g. model id) down to the lower levels
// (e.g. to the operator observers used for debugging, logging,
// profiling, etc)
class C10_API ThreadLocalDebugInfo {
public:
static DebugInfoBase* get(DebugInfoKind kind);
// Get current ThreadLocalDebugInfo
static std::shared_ptr<ThreadLocalDebugInfo> current();
// Internal, use DebugInfoGuard/ThreadLocalStateGuard
static void _forceCurrentDebugInfo(
std::shared_ptr<ThreadLocalDebugInfo> info);
// Push debug info struct of a given kind
static void _push(DebugInfoKind kind, std::shared_ptr<DebugInfoBase> info);
// Pop debug info, throws in case the last pushed
// debug info is not of a given kind
static std::shared_ptr<DebugInfoBase> _pop(DebugInfoKind kind);
// Peek debug info, throws in case the last pushed debug info is not of the
// given kind
static std::shared_ptr<DebugInfoBase> _peek(DebugInfoKind kind);
private:
std::shared_ptr<DebugInfoBase> info_;
DebugInfoKind kind_;
std::shared_ptr<ThreadLocalDebugInfo> parent_info_;
friend class DebugInfoGuard;
};
// DebugInfoGuard is used to set debug information,
// ThreadLocalDebugInfo is semantically immutable, the values are set
// through the scope-based guard object.
// Nested DebugInfoGuard adds/overrides existing values in the scope,
// restoring the original values after exiting the scope.
// Users can access the values through the ThreadLocalDebugInfo::get() call;
class C10_API DebugInfoGuard {
public:
DebugInfoGuard(DebugInfoKind kind, std::shared_ptr<DebugInfoBase> info);
explicit DebugInfoGuard(std::shared_ptr<ThreadLocalDebugInfo> info);
~DebugInfoGuard();
DebugInfoGuard(const DebugInfoGuard&) = delete;
DebugInfoGuard(DebugInfoGuard&&) = delete;
private:
bool active_ = false;
std::shared_ptr<ThreadLocalDebugInfo> prev_info_ = nullptr;
};
} // namespace c10
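
// ---------------------------------------------------------------------------
// Usage sketch (illustrative; not part of the header above). ModelInfo is a
// hypothetical subclass, and TEST_INFO is used here only for illustration.
#include <c10/util/ThreadLocalDebugInfo.h>

#include <memory>
#include <string>

struct ModelInfo : public c10::DebugInfoBase {
  std::string model_id;
};

void run_with_model_info(const std::string& model_id) {
  auto info = std::make_shared<ModelInfo>();
  info->model_id = model_id;
  // The guard installs the info for the current thread and restores the
  // previous state when it goes out of scope.
  c10::DebugInfoGuard guard(c10::DebugInfoKind::TEST_INFO, info);
  // Deeper in the call stack (e.g. inside an operator observer):
  if (auto* raw = c10::ThreadLocalDebugInfo::get(c10::DebugInfoKind::TEST_INFO)) {
    auto* model_info = static_cast<ModelInfo*>(raw);
    (void)model_info->model_id;
  }
}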
pytorch-main/c10/util/TypeCast.h
#pragma once
#include <c10/macros/Macros.h>
#include <c10/util/BFloat16.h>
#include <c10/util/Half.h>
#include <type_traits>
C10_CLANG_DIAGNOSTIC_PUSH()
#if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion")
C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-float-conversion")
#endif
#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
#endif
namespace c10 {
template <typename dest_t, typename src_t>
struct needs_real {
constexpr static bool value =
(is_complex<src_t>::value && !is_complex<dest_t>::value);
};
template <bool, typename src_t>
struct maybe_real {
C10_HOST_DEVICE static inline src_t apply(src_t src) {
return src;
}
};
template <typename src_t>
struct maybe_real<true, src_t> {
C10_HOST_DEVICE static inline decltype(auto) apply(src_t src) {
return src.real();
}
};
// Note: deliberately ignores undefined behavior, consistent with NumPy.
// PyTorch's type conversions can cause a variety of undefined behavior,
// including float to integral overflow and signed to unsigned integer overflow.
// Some of this undefined behavior is addressed below.
template <typename dest_t, typename src_t>
struct static_cast_with_inter_type {
C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline dest_t apply(
src_t src) {
constexpr bool real = needs_real<dest_t, src_t>::value;
auto r = maybe_real<real, src_t>::apply(src);
return static_cast<dest_t>(r);
}
};
// Partial template instantiation for casting to uint8.
// Note: Converting from negative float values to unsigned integer types is
// undefined behavior in C++, and current CPU and GPU compilers exhibit
// divergent behavior. Casting from negative float values to signed
// integer types and then to unsigned integer types is not undefined,
// however, so this cast improves the consistency of type conversions
// to uint8 across compilers.
// Further note: Type conversions across compilers still have other undefined
// and divergent behavior.
template <typename src_t>
struct static_cast_with_inter_type<uint8_t, src_t> {
C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline uint8_t apply(
src_t src) {
constexpr bool real = needs_real<uint8_t, src_t>::value;
return static_cast<uint8_t>(
static_cast<int64_t>(maybe_real<real, src_t>::apply(src)));
}
};
template <>
struct static_cast_with_inter_type<c10::complex<c10::Half>, c10::BFloat16> {
C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex<
c10::Half>
apply(c10::BFloat16 src) {
return static_cast<c10::complex<c10::Half>>(c10::complex<float>{src});
}
};
template <>
struct static_cast_with_inter_type<c10::complex<c10::Half>, c10::Half> {
C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex<
c10::Half>
apply(c10::Half src) {
return static_cast<c10::complex<c10::Half>>(c10::complex<float>{src});
}
};
template <>
struct static_cast_with_inter_type<
c10::complex<c10::Half>,
c10::complex<double>> {
C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex<
c10::Half>
apply(c10::complex<double> src) {
return static_cast<c10::complex<c10::Half>>(
static_cast<c10::complex<float>>(src));
}
};
template <typename To, typename From>
C10_HOST_DEVICE To convert(From f) {
return static_cast_with_inter_type<To, From>::apply(f);
}
// Define separately to avoid being inlined and prevent code-size bloat
C10_API void report_overflow(const char* name);
template <typename To, typename From>
To checked_convert(From f, const char* name) {
// Converting to bool can't overflow so we exclude this case from checking.
if (!std::is_same<To, bool>::value && overflows<To, From>(f)) {
report_overflow(name);
}
return convert<To, From>(f);
}
} // namespace c10
C10_CLANG_DIAGNOSTIC_POP()
// Trigger tests for D25440771. TODO: Remove this line any time you want.
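
// ---------------------------------------------------------------------------
// Usage sketch (illustrative; not part of the header above).
#include <c10/util/TypeCast.h>

#include <cstdint>

void cast_examples() {
  // convert() applies the rules documented above; e.g. float -> uint8_t is
  // routed through int64_t so that negative inputs behave consistently
  // across compilers.
  uint8_t u = c10::convert<uint8_t>(-1.0f);
  (void)u;
  // checked_convert() additionally calls report_overflow() when the value is
  // out of range for the destination type; the name is used in the message.
  int8_t i = c10::checked_convert<int8_t>(42.0, "int8_t");
  (void)i;
}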
pytorch-main/c10/util/TypeIndex.h
#pragma once
#include <c10/util/C++17.h>
#include <c10/util/ConstexprCrc.h>
#include <c10/util/IdWrapper.h>
#include <c10/util/string_view.h>
#include <cinttypes>
#include <functional>
namespace c10 {
namespace util {
// TODO Make it work for more compilers
// Intel compiler works
#if defined(__INTEL_COMPILER)
#define C10_TYPENAME_SUPPORTS_CONSTEXPR 0
#define C10_TYPENAME_CONSTEXPR
// Clang works
#elif defined(__clang__)
// except for NVCC
#if defined(__CUDACC__)
#define C10_TYPENAME_SUPPORTS_CONSTEXPR 0
#define C10_TYPENAME_CONSTEXPR
#else
#define C10_TYPENAME_SUPPORTS_CONSTEXPR 1
#define C10_TYPENAME_CONSTEXPR constexpr
#endif
// Windows works
#elif defined(_MSC_VER)
// except for NVCC
#if defined(__CUDACC__)
#define C10_TYPENAME_SUPPORTS_CONSTEXPR 0
#define C10_TYPENAME_CONSTEXPR
#else
#define C10_TYPENAME_SUPPORTS_CONSTEXPR 1
#define C10_TYPENAME_CONSTEXPR constexpr
#endif
// GCC works
#elif defined(__GNUC__)
// except when gcc < 9
#if (__GNUC__ < 9) || defined(__CUDACC__)
#define C10_TYPENAME_SUPPORTS_CONSTEXPR 0
#define C10_TYPENAME_CONSTEXPR
#else
#define C10_TYPENAME_SUPPORTS_CONSTEXPR 1
#define C10_TYPENAME_CONSTEXPR constexpr
#endif
// some other compiler we don't know about
#else
#define C10_TYPENAME_SUPPORTS_CONSTEXPR 1
#define C10_TYPENAME_CONSTEXPR constexpr
#endif
struct type_index final : IdWrapper<type_index, uint64_t> {
constexpr explicit type_index(uint64_t checksum) : IdWrapper(checksum) {}
// Allow usage in std::map / std::set
// TODO Disallow this and rather use std::unordered_map/set everywhere
friend constexpr bool operator<(type_index lhs, type_index rhs) noexcept {
return lhs.underlyingId() < rhs.underlyingId();
}
friend std::ostream& operator<<(std::ostream& stream, type_index typeId) {
return stream << typeId.underlyingId();
}
};
namespace detail {
#if !defined(__clang__) && !defined(_MSC_VER) && defined(__GNUC__) && \
__GNUC__ < 5
// Getting __PRETTY_FUNCTION__ at compile time only works with GCC >= 5
#error "You're running a too old version of GCC. We need GCC 5 or later."
#endif
#if defined(__clang__) && __clang_major__ < 4
// Getting __PRETTY_FUNCTION__ at compile time only works with Clang >= 4
#error "You're running a too old version of Clang. We need Clang 4 or later."
#endif
inline constexpr string_view extract(
string_view prefix,
string_view suffix,
string_view str) {
#if !defined(__CUDA_ARCH__) // CUDA doesn't like std::logic_error in device code
return (!str.starts_with(prefix) || !str.ends_with(suffix))
? (throw std::logic_error("Invalid pattern"), string_view())
: str.substr(prefix.size(), str.size() - prefix.size() - suffix.size());
#else
return str.substr(prefix.size(), str.size() - prefix.size() - suffix.size());
#endif
}
template <typename T>
inline C10_TYPENAME_CONSTEXPR c10::string_view fully_qualified_type_name_impl() {
#if defined(_MSC_VER) && !defined(__clang__)
#if defined(__NVCC__)
return extract(
"c10::basic_string_view<char> c10::util::detail::fully_qualified_type_name_impl<",
">()",
__FUNCSIG__);
#else
return extract(
"class c10::basic_string_view<char> __cdecl c10::util::detail::fully_qualified_type_name_impl<",
">(void)",
__FUNCSIG__);
#endif
#elif defined(__clang__)
return extract(
"c10::string_view c10::util::detail::fully_qualified_type_name_impl() [T = ",
"]",
__PRETTY_FUNCTION__);
#elif defined(__GNUC__)
return extract(
#if C10_TYPENAME_SUPPORTS_CONSTEXPR
"constexpr c10::string_view c10::util::detail::fully_qualified_type_name_impl() [with T = ",
#else
"c10::string_view c10::util::detail::fully_qualified_type_name_impl() [with T = ",
#endif
"; c10::string_view = c10::basic_string_view<char>]",
__PRETTY_FUNCTION__);
#endif
}
#if !defined(__CUDA_ARCH__)
template <typename T>
inline constexpr uint64_t type_index_impl() {
// Idea: __PRETTY_FUNCTION__ (or __FUNCSIG__ on msvc) contains a qualified name
// of this function, including its template parameter, i.e. including the
// type we want an id for. We use this name and run crc64 on it to get a type
// id.
#if defined(_MSC_VER) && !defined(__clang__)
return crc64(__FUNCSIG__, sizeof(__FUNCSIG__)).checksum();
#elif defined(__clang__)
return crc64(__PRETTY_FUNCTION__, sizeof(__PRETTY_FUNCTION__)).checksum();
#elif defined(__GNUC__)
return crc64(__PRETTY_FUNCTION__, sizeof(__PRETTY_FUNCTION__)).checksum();
#endif
}
#endif
} // namespace detail
template <typename T>
inline constexpr type_index get_type_index() {
#if !defined(__CUDA_ARCH__)
// To enforce that this is really computed at compile time, we pass the
// type index through std::integral_constant.
return type_index{std::integral_constant<
uint64_t,
detail::type_index_impl<std::decay_t<T>>()>::value};
#else
// There's nothing in theory preventing us from running this on device code
// except for nvcc throwing a compiler error if we enable it.
return (abort(), type_index(0));
#endif
}
#if !defined(TORCH_PEDANTIC)
// Use precomputed hashsum for std::string
// Needed to workaround ambiguity in class name resolution
// into __PRETTY_FUNCION__ when abovementioned class is defined in inlined
// namespace. In multi-ABI C++ library, `std::string` is an alias to
// `std::__cxx11::basic_string<char>` which depending on compiler flags can be
// resolved to `basic_string<char>` either in `std` namespace or in
// `std::__cxx11` one (`__cxx11` is an inline namespace)
template <>
inline constexpr type_index get_type_index<std::string>() {
// hashsum for std::basic_string<char>
return type_index{4193213214807308375ULL};
}
#endif
template <typename T>
inline C10_TYPENAME_CONSTEXPR string_view
get_fully_qualified_type_name() noexcept {
#if C10_TYPENAME_SUPPORTS_CONSTEXPR
constexpr
#else
static
#endif
string_view name = detail::fully_qualified_type_name_impl<T>();
return name;
}
} // namespace util
} // namespace c10
C10_DEFINE_HASH_FOR_IDWRAPPER(c10::util::type_index);
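
// ---------------------------------------------------------------------------
// Usage sketch (illustrative; not part of the header above).
#include <c10/util/TypeIndex.h>

namespace {
// Outside CUDA device code, get_type_index() is evaluated at compile time,
// so it can serve as a cheap, stable identifier for a type.
constexpr c10::util::type_index kIntIndex = c10::util::get_type_index<int>();
constexpr c10::util::type_index kFloatIndex = c10::util::get_type_index<float>();
static_assert(
    kIntIndex < kFloatIndex || kFloatIndex < kIntIndex,
    "distinct types are expected to get distinct ids");
} // namespace

inline c10::string_view int_type_name() {
  // The exact spelling is compiler-dependent (e.g. "int").
  return c10::util::get_fully_qualified_type_name<int>();
}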
pytorch-main/c10/util/TypeSafeSignMath.h
#pragma once
#include <c10/macros/Macros.h>
#include <limits>
#include <type_traits>
C10_CLANG_DIAGNOSTIC_PUSH()
#if C10_CLANG_HAS_WARNING("-Wstring-conversion")
C10_CLANG_DIAGNOSTIC_IGNORE("-Wstring-conversion")
#endif
#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
#endif
namespace c10 {
/// Returns false since we cannot have x < 0 if x is unsigned.
template <typename T>
static inline constexpr bool is_negative(
const T& /*x*/,
std::true_type /*is_unsigned*/) {
return false;
}
/// Returns true if a signed variable x < 0
template <typename T>
static inline constexpr bool is_negative(
const T& x,
std::false_type /*is_unsigned*/) {
return x < T(0);
}
/// Returns true if x < 0
/// NOTE: Will fail on an unsigned custom type
/// For the most part it's possible to fix this if
/// the custom type has a constexpr constructor.
/// However, notably, c10::Half does not :-(
template <typename T>
inline constexpr bool is_negative(const T& x) {
return is_negative(x, std::is_unsigned<T>());
}
/// Returns the sign of an unsigned variable x as 0, 1
template <typename T>
static inline constexpr int signum(const T& x, std::true_type /*is_unsigned*/) {
return T(0) < x;
}
/// Returns the sign of a signed variable x as -1, 0, 1
template <typename T>
static inline constexpr int signum(
const T& x,
std::false_type /*is_unsigned*/) {
return (T(0) < x) - (x < T(0));
}
/// Returns the sign of x as -1, 0, 1
/// NOTE: Will fail on an unsigned custom type
/// For the most part it's possible to fix this if
/// the custom type has a constexpr constructor.
/// However, notably, c10::Half does not :-(
template <typename T>
inline constexpr int signum(const T& x) {
return signum(x, std::is_unsigned<T>());
}
/// Returns true if a and b are not both negative
template <typename T, typename U>
inline constexpr bool signs_differ(const T& a, const U& b) {
return is_negative(a) != is_negative(b);
}
// Suppress sign compare warning when compiling with GCC
// as later does not account for short-circuit rule before
// raising the warning, see https://godbolt.org/z/Tr3Msnz99
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-compare"
#endif
/// Returns true if x is greater than the greatest value of the type Limit
template <typename Limit, typename T>
inline constexpr bool greater_than_max(const T& x) {
constexpr bool can_overflow =
std::numeric_limits<T>::digits > std::numeric_limits<Limit>::digits;
return can_overflow && x > std::numeric_limits<Limit>::max();
}
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
/// Returns true if x < lowest(Limit). Standard comparison
template <typename Limit, typename T>
static inline constexpr bool less_than_lowest(
const T& x,
std::false_type /*limit_is_unsigned*/,
std::false_type /*x_is_unsigned*/) {
return x < std::numeric_limits<Limit>::lowest();
}
/// Returns false since the limit is signed and therefore includes
/// negative values, but x cannot be negative because it is unsigned
template <typename Limit, typename T>
static inline constexpr bool less_than_lowest(
const T& /*x*/,
std::false_type /*limit_is_unsigned*/,
std::true_type /*x_is_unsigned*/) {
return false;
}
/// Returns true if x < 0, where 0 is constructed from T.
/// Limit is not signed, so its lower value is zero
template <typename Limit, typename T>
static inline constexpr bool less_than_lowest(
const T& x,
std::true_type /*limit_is_unsigned*/,
std::false_type /*x_is_unsigned*/) {
return x < T(0);
}
/// Returns false since both types are unsigned
template <typename Limit, typename T>
static inline constexpr bool less_than_lowest(
const T& /*x*/,
std::true_type /*limit_is_unsigned*/,
std::true_type /*x_is_unsigned*/) {
return false;
}
/// Returns true if x is less than the lowest value of type T
/// NOTE: Will fail on an unsigned custom type
/// For the most part it's possible to fix this if
/// the custom type has a constexpr constructor.
/// However, notably, c10::Half does not :-(
template <typename Limit, typename T>
inline constexpr bool less_than_lowest(const T& x) {
return less_than_lowest<Limit>(
x, std::is_unsigned<Limit>(), std::is_unsigned<T>());
}
} // namespace c10
C10_CLANG_DIAGNOSTIC_POP()
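
// ---------------------------------------------------------------------------
// Usage sketch (illustrative; not part of the header above).
#include <c10/util/TypeSafeSignMath.h>

#include <cstdint>

static_assert(c10::is_negative(-1), "signed negative value");
static_assert(!c10::is_negative(1u), "unsigned values are never negative");
static_assert(c10::signum(-5) == -1, "sign of a negative value");
// greater_than_max / less_than_lowest compare a value against the range of a
// target type without running into sign-compare pitfalls.
static_assert(c10::greater_than_max<int8_t>(1000), "1000 does not fit int8_t");
static_assert(!c10::less_than_lowest<uint8_t>(5), "5 is not below 0");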
pytorch-main/c10/util/TypeTraits.h
#pragma once
#include <c10/util/C++17.h>
namespace c10 {
namespace guts {
/**
* is_equality_comparable<T> is true_type iff the equality operator is defined
* for T.
*/
template <class T, class Enable = void>
struct is_equality_comparable : std::false_type {};
template <class T>
struct is_equality_comparable<
T,
void_t<decltype(std::declval<T&>() == std::declval<T&>())>>
: std::true_type {};
template <class T>
using is_equality_comparable_t = typename is_equality_comparable<T>::type;
/**
* is_hashable<T> is true_type iff std::hash is defined for T
*/
template <class T, class Enable = void>
struct is_hashable : std::false_type {};
template <class T>
struct is_hashable<T, void_t<decltype(std::hash<T>()(std::declval<T&>()))>>
: std::true_type {};
template <class T>
using is_hashable_t = typename is_hashable<T>::type;
/**
* is_function_type<T> is true_type iff T is a plain function type (i.e.
* "Result(Args...)")
*/
template <class T>
struct is_function_type : std::false_type {};
template <class Result, class... Args>
struct is_function_type<Result(Args...)> : std::true_type {};
template <class T>
using is_function_type_t = typename is_function_type<T>::type;
/**
* is_instantiation_of<T, I> is true_type iff I is a template instantiation of T
* (e.g. vector<int> is an instantiation of vector) Example:
* is_instantiation_of_t<vector, vector<int>> // true
* is_instantiation_of_t<pair, pair<int, string>> // true
* is_instantiation_of_t<vector, pair<int, string>> // false
*/
template <template <class...> class Template, class T>
struct is_instantiation_of : std::false_type {};
template <template <class...> class Template, class... Args>
struct is_instantiation_of<Template, Template<Args...>> : std::true_type {};
template <template <class...> class Template, class T>
using is_instantiation_of_t = typename is_instantiation_of<Template, T>::type;
namespace detail {
/**
* strip_class: helper to remove the class type from pointers to `operator()`.
*/
template <typename T>
struct strip_class {};
template <typename Class, typename Result, typename... Args>
struct strip_class<Result (Class::*)(Args...)> {
using type = Result(Args...);
};
template <typename Class, typename Result, typename... Args>
struct strip_class<Result (Class::*)(Args...) const> {
using type = Result(Args...);
};
template <typename T>
using strip_class_t = typename strip_class<T>::type;
} // namespace detail
/**
* Evaluates to true_type, iff the given class is a Functor
* (i.e. has a call operator with some set of arguments)
*/
template <class Functor, class Enable = void>
struct is_functor : std::false_type {};
template <class Functor>
struct is_functor<
Functor,
std::enable_if_t<is_function_type<
detail::strip_class_t<decltype(&Functor::operator())>>::value>>
: std::true_type {};
/**
* lambda_is_stateless<T> is true iff the lambda type T is stateless
* (i.e. does not have a closure).
* Example:
* auto stateless_lambda = [] (int a) {return a;};
* lambda_is_stateless<decltype(stateless_lambda)> // true
* auto stateful_lambda = [&] (int a) {return a;};
* lambda_is_stateless<decltype(stateful_lambda)> // false
*/
namespace detail {
template <class LambdaType, class FuncType>
struct is_stateless_lambda__ final {
static_assert(
!std::is_same<LambdaType, LambdaType>::value,
"Base case shouldn't be hit");
};
// implementation idea: According to the C++ standard, stateless lambdas are
// convertible to function pointers
template <class LambdaType, class C, class Result, class... Args>
struct is_stateless_lambda__<LambdaType, Result (C::*)(Args...) const>
: std::is_convertible<LambdaType, Result (*)(Args...)> {};
template <class LambdaType, class C, class Result, class... Args>
struct is_stateless_lambda__<LambdaType, Result (C::*)(Args...)>
: std::is_convertible<LambdaType, Result (*)(Args...)> {};
// case where LambdaType is not even a functor
template <class LambdaType, class Enable = void>
struct is_stateless_lambda_ final : std::false_type {};
// case where LambdaType is a functor
template <class LambdaType>
struct is_stateless_lambda_<
LambdaType,
std::enable_if_t<is_functor<LambdaType>::value>>
: is_stateless_lambda__<LambdaType, decltype(&LambdaType::operator())> {};
} // namespace detail
template <class T>
using is_stateless_lambda = detail::is_stateless_lambda_<std::decay_t<T>>;
/**
* is_type_condition<C> is true_type iff C<...> is a type trait representing a
* condition (i.e. has a constexpr static bool ::value member) Example:
* is_type_condition<std::is_reference> // true
*/
template <template <class> class C, class Enable = void>
struct is_type_condition : std::false_type {};
template <template <class> class C>
struct is_type_condition<
C,
std::enable_if_t<
std::is_same<bool, std::remove_cv_t<decltype(C<int>::value)>>::value>>
: std::true_type {};
/**
* is_fundamental<T> is true_type iff the lambda type T is a fundamental type
* (that is, arithmetic type, void, or nullptr_t). Example: is_fundamental<int>
* // true We define it here to resolve a MSVC bug. See
* https://github.com/pytorch/pytorch/issues/30932 for details.
*/
template <class T>
struct is_fundamental : std::is_fundamental<T> {};
} // namespace guts
} // namespace c10
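
// ---------------------------------------------------------------------------
// Usage sketch (illustrative; not part of the header above).
#include <c10/util/TypeTraits.h>

#include <string>
#include <vector>

static_assert(
    c10::guts::is_equality_comparable<std::string>::value,
    "std::string defines operator==");
static_assert(
    c10::guts::is_function_type<int(double)>::value,
    "plain function types are detected");
static_assert(
    c10::guts::is_instantiation_of<std::vector, std::vector<int>>::value,
    "std::vector<int> is an instantiation of std::vector");

inline void lambda_example() {
  // Stateless lambdas are convertible to function pointers, which is exactly
  // what lambda_is_stateless checks for.
  auto stateless = [](int a) { return a; };
  static_assert(
      c10::guts::is_stateless_lambda<decltype(stateless)>::value,
      "no captures, so the lambda is stateless");
}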
pytorch-main/c10/util/UniqueVoidPtr.h
#pragma once
#include <memory>
#include <c10/macros/Macros.h>
namespace c10 {
using DeleterFnPtr = void (*)(void*);
namespace detail {
// Does not delete anything
C10_API void deleteNothing(void*);
// A detail::UniqueVoidPtr is an owning smart pointer like unique_ptr, but
// with three major differences:
//
// 1) It is specialized to void
//
// 2) It is specialized for a function pointer deleter
// void(void* ctx); i.e., the deleter doesn't take a
// reference to the data, just to a context pointer
// (erased as void*). In fact, internally, this pointer
// is implemented as having an owning reference to
// context, and a non-owning reference to data; this is why
// you release_context(), not release() (the conventional
// API for release() wouldn't give you enough information
// to properly dispose of the object later.)
//
// 3) The deleter is guaranteed to be called when the unique
// pointer is destructed and the context is non-null; this is different
// from std::unique_ptr where the deleter is not called if the
// data pointer is null.
//
// Some of the methods have slightly different types than std::unique_ptr
// to reflect this.
//
class UniqueVoidPtr {
private:
// Lifetime tied to ctx_
void* data_;
std::unique_ptr<void, DeleterFnPtr> ctx_;
public:
UniqueVoidPtr() : data_(nullptr), ctx_(nullptr, &deleteNothing) {}
explicit UniqueVoidPtr(void* data)
: data_(data), ctx_(nullptr, &deleteNothing) {}
UniqueVoidPtr(void* data, void* ctx, DeleterFnPtr ctx_deleter)
: data_(data), ctx_(ctx, ctx_deleter ? ctx_deleter : &deleteNothing) {}
void* operator->() const {
return data_;
}
void clear() {
ctx_ = nullptr;
data_ = nullptr;
}
void* get() const {
return data_;
}
void* get_context() const {
return ctx_.get();
}
void* release_context() {
return ctx_.release();
}
std::unique_ptr<void, DeleterFnPtr>&& move_context() {
return std::move(ctx_);
}
C10_NODISCARD bool compare_exchange_deleter(
DeleterFnPtr expected_deleter,
DeleterFnPtr new_deleter) {
if (get_deleter() != expected_deleter)
return false;
ctx_ = std::unique_ptr<void, DeleterFnPtr>(ctx_.release(), new_deleter);
return true;
}
template <typename T>
T* cast_context(DeleterFnPtr expected_deleter) const {
if (get_deleter() != expected_deleter)
return nullptr;
return static_cast<T*>(get_context());
}
operator bool() const {
return data_ || ctx_;
}
DeleterFnPtr get_deleter() const {
return ctx_.get_deleter();
}
};
// Note [How UniqueVoidPtr is implemented]
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// UniqueVoidPtr solves a common problem for allocators of tensor data, which
// is that the data pointer (e.g., float*) which you are interested in, is not
// the same as the context pointer (e.g., DLManagedTensor) which you need
// to actually deallocate the data. Under a conventional deleter design, you
// have to store extra context in the deleter itself so that you can actually
// delete the right thing. Implementing this with standard C++ is somewhat
// error-prone: if you use a std::unique_ptr to manage tensors, the deleter will
// not be called if the data pointer is nullptr, which can cause a leak if the
// context pointer is non-null (and the deleter is responsible for freeing both
// the data pointer and the context pointer).
//
// So, in our reimplementation of unique_ptr, we just store the context
// directly in the unique pointer, and attach the deleter to the context
// pointer itself. In simple cases, the context pointer is just the pointer
// itself.
inline bool operator==(const UniqueVoidPtr& sp, std::nullptr_t) noexcept {
return !sp;
}
inline bool operator==(std::nullptr_t, const UniqueVoidPtr& sp) noexcept {
return !sp;
}
inline bool operator!=(const UniqueVoidPtr& sp, std::nullptr_t) noexcept {
return sp;
}
inline bool operator!=(std::nullptr_t, const UniqueVoidPtr& sp) noexcept {
return sp;
}
} // namespace detail
} // namespace c10
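
// ---------------------------------------------------------------------------
// Usage sketch (illustrative; not part of the header above). FloatBlock and
// delete_float_block are hypothetical stand-ins for an allocator's context.
#include <c10/util/UniqueVoidPtr.h>

#include <cstddef>

namespace {
struct FloatBlock {
  float* data;
};

void delete_float_block(void* ctx) {
  auto* block = static_cast<FloatBlock*>(ctx);
  delete[] block->data;
  delete block;
}
} // namespace

inline c10::detail::UniqueVoidPtr allocate_block(std::size_t n) {
  auto* block = new FloatBlock{new float[n]};
  // The data pointer and the context pointer differ; only the context is
  // handed to the deleter, which frees both the data and the context.
  return c10::detail::UniqueVoidPtr(block->data, block, &delete_float_block);
}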
pytorch-main/c10/util/Unroll.h
#pragma once
#include <c10/macros/Macros.h>
// Utility to guarantee complete unrolling of a loop where the bounds are known
// at compile time. Various pragmas achieve similar effects, but are not as
// portable across compilers.
// Example: c10::ForcedUnroll<4>{}(f); is equivalent to f(0); f(1); f(2); f(3);
namespace c10 {
template <int n>
struct ForcedUnroll {
template <typename Func>
C10_ALWAYS_INLINE void operator()(const Func& f) const {
ForcedUnroll<n - 1>{}(f);
f(n - 1);
}
};
template <>
struct ForcedUnroll<1> {
template <typename Func>
C10_ALWAYS_INLINE void operator()(const Func& f) const {
f(0);
}
};
} // namespace c10
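
// ---------------------------------------------------------------------------
// Usage sketch (illustrative; not part of the header above).
#include <c10/util/Unroll.h>

inline float dot4(const float* a, const float* b) {
  float acc = 0.0f;
  // Fully unrolled at compile time: the lambda is invoked with i = 0, 1, 2, 3.
  c10::ForcedUnroll<4>{}([&](int i) { acc += a[i] * b[i]; });
  return acc;
}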
pytorch-main/c10/util/accumulate.h
// Copyright 2004-present Facebook. All Rights Reserved.
#pragma once
#include <c10/util/ArrayRef.h>
#include <iterator>
#include <numeric>
#include <type_traits>
namespace c10 {
/// Sum of a list of integers; accumulates into the int64_t datatype
template <
typename C,
typename std::enable_if<
std::is_integral<typename C::value_type>::value,
int>::type = 0>
inline int64_t sum_integers(const C& container) {
// std::accumulate infers return type from `init` type, so if the `init` type
// is not large enough to hold the result, computation can overflow. We use
// `int64_t` here to avoid this.
return std::accumulate(
container.begin(), container.end(), static_cast<int64_t>(0));
}
/// Sum of integer elements referred to by iterators; accumulates into the
/// int64_t datatype
template <
typename Iter,
typename std::enable_if<
std::is_integral<
typename std::iterator_traits<Iter>::value_type>::value,
int>::type = 0>
inline int64_t sum_integers(Iter begin, Iter end) {
// std::accumulate infers return type from `init` type, so if the `init` type
// is not large enough to hold the result, computation can overflow. We use
// `int64_t` here to avoid this.
return std::accumulate(begin, end, static_cast<int64_t>(0));
}
/// Product of a list of integers; accumulates into the int64_t datatype
template <
typename C,
typename std::enable_if<
std::is_integral<typename C::value_type>::value,
int>::type = 0>
inline int64_t multiply_integers(const C& container) {
// std::accumulate infers return type from `init` type, so if the `init` type
// is not large enough to hold the result, computation can overflow. We use
// `int64_t` here to avoid this.
return std::accumulate(
container.begin(),
container.end(),
static_cast<int64_t>(1),
std::multiplies<>());
}
/// Product of integer elements referred to by iterators; accumulates into the
/// int64_t datatype
template <
typename Iter,
typename std::enable_if<
std::is_integral<
typename std::iterator_traits<Iter>::value_type>::value,
int>::type = 0>
inline int64_t multiply_integers(Iter begin, Iter end) {
// std::accumulate infers return type from `init` type, so if the `init` type
// is not large enough to hold the result, computation can overflow. We use
// `int64_t` here to avoid this.
return std::accumulate(
begin, end, static_cast<int64_t>(1), std::multiplies<>());
}
/// Return product of all dimensions starting from k
/// Returns 1 if k>=dims.size()
template <
typename C,
typename std::enable_if<
std::is_integral<typename C::value_type>::value,
int>::type = 0>
inline int64_t numelements_from_dim(const int k, const C& dims) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(k >= 0);
if (k > static_cast<int>(dims.size())) {
return 1;
} else {
auto cbegin = dims.cbegin();
std::advance(cbegin, k);
return multiply_integers(cbegin, dims.cend());
}
}
/// Product of all dims up to k (not including dims[k])
/// Throws an error if k>dims.size()
template <
typename C,
typename std::enable_if<
std::is_integral<typename C::value_type>::value,
int>::type = 0>
inline int64_t numelements_to_dim(const int k, const C& dims) {
TORCH_INTERNAL_ASSERT(0 <= k);
TORCH_INTERNAL_ASSERT((unsigned)k <= dims.size());
auto cend = dims.cbegin();
std::advance(cend, k);
return multiply_integers(dims.cbegin(), cend);
}
/// Product of all dims between k and l (including dims[k] and excluding
/// dims[l]) k and l may be supplied in either order
template <
typename C,
typename std::enable_if<
std::is_integral<typename C::value_type>::value,
int>::type = 0>
inline int64_t numelements_between_dim(int k, int l, const C& dims) {
TORCH_INTERNAL_ASSERT(0 <= k);
TORCH_INTERNAL_ASSERT(0 <= l);
if (k > l) {
std::swap(k, l);
}
TORCH_INTERNAL_ASSERT((unsigned)l < dims.size());
auto cbegin = dims.cbegin();
auto cend = dims.cbegin();
std::advance(cbegin, k);
std::advance(cend, l);
return multiply_integers(cbegin, cend);
}
} // namespace c10
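
// ---------------------------------------------------------------------------
// Usage sketch (illustrative; not part of the header above).
#include <c10/util/accumulate.h>

#include <cstdint>
#include <vector>

inline int64_t accumulate_examples() {
  const std::vector<int32_t> sizes = {2, 3, 4};
  // Both helpers accumulate into int64_t regardless of the element type, so
  // products of int32_t dims do not silently overflow.
  const int64_t numel = c10::multiply_integers(sizes); // 2 * 3 * 4 = 24
  const int64_t tail = c10::numelements_from_dim(1, sizes); // 3 * 4 = 12
  return numel + tail;
}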
pytorch-main/c10/util/bit_cast.h
#pragma once
#include <cstring>
#include <type_traits>
namespace c10 {
// Implementations of std::bit_cast() from C++ 20.
//
// This is a less sketchy version of reinterpret_cast.
//
// See https://en.cppreference.com/w/cpp/numeric/bit_cast for more
// information as well as the source of our implementations.
template <class To, class From>
std::enable_if_t<
sizeof(To) == sizeof(From) && std::is_trivially_copyable<From>::value &&
std::is_trivially_copyable<To>::value,
To>
// constexpr support needs compiler magic
bit_cast(const From& src) noexcept {
static_assert(
std::is_trivially_constructible<To>::value,
"This implementation additionally requires "
"destination type to be trivially constructible");
To dst;
std::memcpy(&dst, &src, sizeof(To));
return dst;
}
} // namespace c10
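
// ---------------------------------------------------------------------------
// Usage sketch (illustrative; not part of the header above).
#include <c10/util/bit_cast.h>

#include <cstdint>

inline std::uint32_t float_bits(float value) {
  // Reinterprets the object representation without the undefined behavior of
  // a reinterpret_cast; both types are trivially copyable and the same size.
  return c10::bit_cast<std::uint32_t>(value);
}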
pytorch-main/c10/util/bits.h
#pragma once
#include <cstdint>
#include <c10/macros/Macros.h>
namespace c10 {
/**
* bits1x8 is an uninterpreted dtype of a tensor with 1 bit (packed to byte
* boundary), without any semantics defined.
*/
struct alignas(1) bits1x8 {
using underlying = uint8_t;
uint8_t val_;
bits1x8() = default;
C10_HOST_DEVICE explicit bits1x8(uint8_t val) : val_(val) {}
};
/**
* bits2x4 is an uninterpreted dtype of a tensor with 2 bits (packed to byte
* boundary), without any semantics defined.
*/
struct alignas(1) bits2x4 {
using underlying = uint8_t;
uint8_t val_;
bits2x4() = default;
C10_HOST_DEVICE explicit bits2x4(uint8_t val) : val_(val) {}
};
/**
* bits4x2 is an uninterpreted dtype of a tensor with 4 bits (packed to byte
* boundary), without any semantics defined.
*/
struct alignas(1) bits4x2 {
using underlying = uint8_t;
uint8_t val_;
bits4x2() = default;
C10_HOST_DEVICE explicit bits4x2(uint8_t val) : val_(val) {}
};
/**
* bits8 is an uninterpreted dtype of a tensor with 8 bits, without any
* semantics defined.
*/
struct alignas(1) bits8 {
uint8_t val_;
bits8() = default;
C10_HOST_DEVICE explicit bits8(uint8_t val) : val_(val) {}
};
/**
* bits16 is an uninterpreted dtype of a tensor with 16 bits, without any
* semantics defined.
*/
struct alignas(2) bits16 {
uint16_t val_;
bits16() = default;
C10_HOST_DEVICE explicit bits16(uint16_t val) : val_(val) {}
};
} // namespace c10
pytorch-main/c10/util/complex.h
#pragma once
#include <complex>
#include <c10/macros/Macros.h>
#if defined(__CUDACC__) || defined(__HIPCC__)
#include <thrust/complex.h>
#endif
C10_CLANG_DIAGNOSTIC_PUSH()
#if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion")
C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-float-conversion")
#endif
#if C10_CLANG_HAS_WARNING("-Wfloat-conversion")
C10_CLANG_DIAGNOSTIC_IGNORE("-Wfloat-conversion")
#endif
namespace c10 {
// c10::complex is an implementation of complex numbers that aims
// to work on all devices supported by PyTorch
//
// Most of the APIs duplicates std::complex
// Reference: https://en.cppreference.com/w/cpp/numeric/complex
//
// [NOTE: Complex Operator Unification]
// Operators currently use a mix of std::complex, thrust::complex, and
// c10::complex internally. The end state is that all operators will use
// c10::complex internally. Until then, there may be some hacks to support all
// variants.
//
//
// [Note on Constructors]
//
// The APIs of constructors are mostly copied from C++ standard:
// https://en.cppreference.com/w/cpp/numeric/complex/complex
//
// Since C++14, all constructors are constexpr in std::complex
//
// There are three types of constructors:
// - initializing from real and imag:
// `constexpr complex( const T& re = T(), const T& im = T() );`
// - implicitly-declared copy constructor
// - converting constructors
//
// Converting constructors:
// - std::complex defines converting constructor between float/double/long
// double,
// while we define converting constructor between float/double.
// - For these converting constructors, upcasting is implicit, downcasting is
// explicit.
// - We also define explicit casting from std::complex/thrust::complex
// - Note that the conversion from thrust is not constexpr, because
// thrust does not define them as constexpr ????
//
//
// [Operator =]
//
// The APIs of operator = are mostly copied from C++ standard:
// https://en.cppreference.com/w/cpp/numeric/complex/operator%3D
//
// Since C++20, all operator= are constexpr. Although we are not building with
// C++20, we also obey this behavior.
//
// There are three types of assign operator:
// - Assign a real value from the same scalar type
// - In std, this is templated as complex& operator=(const T& x)
// with specialization `complex& operator=(T x)` for float/double/long
//       double. Since we only support float and double, we will use `complex&
// operator=(T x)`
// - Copy assignment operator and converting assignment operator
//   - There is no specialization of converting assignment operators; which
//     type is convertible depends solely on whether the scalar type is
//     convertible
//
// In addition to the standard assignment, we also provide assignment operators
// with std and thrust
//
//
// [Casting operators]
//
// std::complex does not have casting operators. We define casting operators
// casting to std::complex and thrust::complex
//
//
// [Operator ""]
//
// std::complex has custom literals `i`, `if` and `il` defined in namespace
// `std::literals::complex_literals`. We define our own custom literals in the
// namespace `c10::complex_literals`. Our custom literals do not follow the
// same behavior as in std::complex; instead, we define _if, _id to construct
// float/double complex literals.
//
//
// [real() and imag()]
//
// In C++20, there are two overloads of these functions: one returns the
// real/imag part, the other sets it; both are constexpr. We follow
// this design.
//
//
// [Operator +=,-=,*=,/=]
//
// Since C++20, these operators become constexpr. In our implementation, they
// are also constexpr.
//
// There are two types of such operators: operating with a real number, or
// operating with another complex number. For those operating with a real number,
// the generic template form has argument type `const T &`, while the overload
// for float/double/long double has `T`. We will follow the same type as
// float/double/long double in std.
//
// [Unary operator +-]
//
// Since C++20, they are constexpr. We also make them constexpr
//
// [Binary operators +-*/]
//
// Each operator has three versions (taking + as example):
// - complex + complex
// - complex + real
// - real + complex
//
// [Operator ==, !=]
//
// Each operator has three versions (taking == as example):
// - complex == complex
// - complex == real
// - real == complex
//
// Some of them were removed in C++20, but we decided to keep them
//
// [Operator <<, >>]
//
// These are implemented by casting to std::complex
//
//
//
// TODO(@zasdfgbnm): c10::complex<c10::Half> is not currently supported,
// because:
// - lots of members and functions of c10::Half are not constexpr
//  - thrust::complex only supports float and double
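//
// A minimal usage sketch (illustrative only; the variable names below are
// hypothetical and not part of this header):
//
//   c10::complex<float> z(1.0f, 2.0f);             // 1 + 2i
//   z += 0.5f;                                     // compound assignment with a real
//   c10::complex<double> zd = z;                   // implicit float -> double upcast
//   auto sz = static_cast<std::complex<float>>(z); // explicit cast to std::complex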
template <typename T>
struct alignas(sizeof(T) * 2) complex {
using value_type = T;
T real_ = T(0);
T imag_ = T(0);
constexpr complex() = default;
C10_HOST_DEVICE constexpr complex(const T& re, const T& im = T())
: real_(re), imag_(im) {}
template <typename U>
explicit constexpr complex(const std::complex<U>& other)
: complex(other.real(), other.imag()) {}
#if defined(__CUDACC__) || defined(__HIPCC__)
template <typename U>
explicit C10_HOST_DEVICE complex(const thrust::complex<U>& other)
: real_(other.real()), imag_(other.imag()) {}
// NOTE: cannot be implemented as follows due to a ROCm bug:
// explicit C10_HOST_DEVICE complex(const thrust::complex<U> &other):
// complex(other.real(), other.imag()) {}
#endif
// Use SFINAE to specialize casting constructor for c10::complex<float> and
// c10::complex<double>
template <typename U = T>
C10_HOST_DEVICE explicit constexpr complex(
const std::enable_if_t<std::is_same<U, float>::value, complex<double>>&
other)
: real_(other.real_), imag_(other.imag_) {}
template <typename U = T>
C10_HOST_DEVICE constexpr complex(
const std::enable_if_t<std::is_same<U, double>::value, complex<float>>&
other)
: real_(other.real_), imag_(other.imag_) {}
constexpr complex<T>& operator=(T re) {
real_ = re;
imag_ = 0;
return *this;
}
constexpr complex<T>& operator+=(T re) {
real_ += re;
return *this;
}
constexpr complex<T>& operator-=(T re) {
real_ -= re;
return *this;
}
constexpr complex<T>& operator*=(T re) {
real_ *= re;
imag_ *= re;
return *this;
}
constexpr complex<T>& operator/=(T re) {
real_ /= re;
imag_ /= re;
return *this;
}
template <typename U>
constexpr complex<T>& operator=(const complex<U>& rhs) {
real_ = rhs.real();
imag_ = rhs.imag();
return *this;
}
template <typename U>
constexpr complex<T>& operator+=(const complex<U>& rhs) {
real_ += rhs.real();
imag_ += rhs.imag();
return *this;
}
template <typename U>
constexpr complex<T>& operator-=(const complex<U>& rhs) {
real_ -= rhs.real();
imag_ -= rhs.imag();
return *this;
}
template <typename U>
constexpr complex<T>& operator*=(const complex<U>& rhs) {
// (a + bi) * (c + di) = (a*c - b*d) + (a * d + b * c) i
T a = real_;
T b = imag_;
U c = rhs.real();
U d = rhs.imag();
real_ = a * c - b * d;
imag_ = a * d + b * c;
return *this;
}
#ifdef __APPLE__
#define FORCE_INLINE_APPLE __attribute__((always_inline))
#else
#define FORCE_INLINE_APPLE
#endif
template <typename U>
constexpr FORCE_INLINE_APPLE complex<T>& operator/=(const complex<U>& rhs)
__ubsan_ignore_float_divide_by_zero__ {
// (a + bi) / (c + di) = (ac + bd)/(c^2 + d^2) + (bc - ad)/(c^2 + d^2) i
// the calculation below follows numpy's complex division
T a = real_;
T b = imag_;
U c = rhs.real();
U d = rhs.imag();
#if defined(__GNUC__) && !defined(__clang__)
// std::abs is already constexpr by gcc
auto abs_c = std::abs(c);
auto abs_d = std::abs(d);
#else
auto abs_c = c < 0 ? -c : c;
auto abs_d = d < 0 ? -d : d;
#endif
if (abs_c >= abs_d) {
if (abs_c == 0 && abs_d == 0) {
/* divide by zeros should yield a complex inf or nan */
real_ = a / abs_c;
imag_ = b / abs_d;
} else {
auto rat = d / c;
auto scl = 1.0 / (c + d * rat);
real_ = (a + b * rat) * scl;
imag_ = (b - a * rat) * scl;
}
} else {
auto rat = c / d;
auto scl = 1.0 / (d + c * rat);
real_ = (a * rat + b) * scl;
imag_ = (b * rat - a) * scl;
}
return *this;
}
#undef FORCE_INLINE_APPLE
template <typename U>
constexpr complex<T>& operator=(const std::complex<U>& rhs) {
real_ = rhs.real();
imag_ = rhs.imag();
return *this;
}
#if defined(__CUDACC__) || defined(__HIPCC__)
template <typename U>
C10_HOST_DEVICE complex<T>& operator=(const thrust::complex<U>& rhs) {
real_ = rhs.real();
imag_ = rhs.imag();
return *this;
}
#endif
template <typename U>
explicit constexpr operator std::complex<U>() const {
return std::complex<U>(std::complex<T>(real(), imag()));
}
#if defined(__CUDACC__) || defined(__HIPCC__)
template <typename U>
C10_HOST_DEVICE explicit operator thrust::complex<U>() const {
return static_cast<thrust::complex<U>>(thrust::complex<T>(real(), imag()));
}
#endif
// consistent with NumPy behavior
explicit constexpr operator bool() const {
return real() || imag();
}
C10_HOST_DEVICE constexpr T real() const {
return real_;
}
constexpr void real(T value) {
real_ = value;
}
constexpr T imag() const {
return imag_;
}
constexpr void imag(T value) {
imag_ = value;
}
};
namespace complex_literals {
constexpr complex<float> operator"" _if(long double imag) {
return complex<float>(0.0f, static_cast<float>(imag));
}
constexpr complex<double> operator"" _id(long double imag) {
return complex<double>(0.0, static_cast<double>(imag));
}
constexpr complex<float> operator"" _if(unsigned long long imag) {
return complex<float>(0.0f, static_cast<float>(imag));
}
constexpr complex<double> operator"" _id(unsigned long long imag) {
return complex<double>(0.0, static_cast<double>(imag));
}
} // namespace complex_literals
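// A small sketch of the literals defined above (variable names are
// illustrative):
//
//   using namespace c10::complex_literals;
//   constexpr auto a = 2.0_if; // c10::complex<float>(0.0f, 2.0f)
//   constexpr auto b = 3.0_id; // c10::complex<double>(0.0, 3.0)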
template <typename T>
constexpr complex<T> operator+(const complex<T>& val) {
return val;
}
template <typename T>
constexpr complex<T> operator-(const complex<T>& val) {
return complex<T>(-val.real(), -val.imag());
}
template <typename T>
constexpr complex<T> operator+(const complex<T>& lhs, const complex<T>& rhs) {
complex<T> result = lhs;
return result += rhs;
}
template <typename T>
constexpr complex<T> operator+(const complex<T>& lhs, const T& rhs) {
complex<T> result = lhs;
return result += rhs;
}
template <typename T>
constexpr complex<T> operator+(const T& lhs, const complex<T>& rhs) {
return complex<T>(lhs + rhs.real(), rhs.imag());
}
template <typename T>
constexpr complex<T> operator-(const complex<T>& lhs, const complex<T>& rhs) {
complex<T> result = lhs;
return result -= rhs;
}
template <typename T>
constexpr complex<T> operator-(const complex<T>& lhs, const T& rhs) {
complex<T> result = lhs;
return result -= rhs;
}
template <typename T>
constexpr complex<T> operator-(const T& lhs, const complex<T>& rhs) {
complex<T> result = -rhs;
return result += lhs;
}
template <typename T>
constexpr complex<T> operator*(const complex<T>& lhs, const complex<T>& rhs) {
complex<T> result = lhs;
return result *= rhs;
}
template <typename T>
constexpr complex<T> operator*(const complex<T>& lhs, const T& rhs) {
complex<T> result = lhs;
return result *= rhs;
}
template <typename T>
constexpr complex<T> operator*(const T& lhs, const complex<T>& rhs) {
complex<T> result = rhs;
return result *= lhs;
}
template <typename T>
constexpr complex<T> operator/(const complex<T>& lhs, const complex<T>& rhs) {
complex<T> result = lhs;
return result /= rhs;
}
template <typename T>
constexpr complex<T> operator/(const complex<T>& lhs, const T& rhs) {
complex<T> result = lhs;
return result /= rhs;
}
template <typename T>
constexpr complex<T> operator/(const T& lhs, const complex<T>& rhs) {
complex<T> result(lhs, T());
return result /= rhs;
}
// Define operators between integral scalars and c10::complex. std::complex does
// not support this when T is a floating-point number. This is useful because it
// saves a lot of "static_cast" when operating on a complex and an integer. This
// makes the code both less verbose and potentially more efficient.
#define COMPLEX_INTEGER_OP_TEMPLATE_CONDITION \
typename std::enable_if_t< \
std::is_floating_point<fT>::value && std::is_integral<iT>::value, \
int> = 0
template <typename fT, typename iT, COMPLEX_INTEGER_OP_TEMPLATE_CONDITION>
constexpr c10::complex<fT> operator+(const c10::complex<fT>& a, const iT& b) {
return a + static_cast<fT>(b);
}
template <typename fT, typename iT, COMPLEX_INTEGER_OP_TEMPLATE_CONDITION>
constexpr c10::complex<fT> operator+(const iT& a, const c10::complex<fT>& b) {
return static_cast<fT>(a) + b;
}
template <typename fT, typename iT, COMPLEX_INTEGER_OP_TEMPLATE_CONDITION>
constexpr c10::complex<fT> operator-(const c10::complex<fT>& a, const iT& b) {
return a - static_cast<fT>(b);
}
template <typename fT, typename iT, COMPLEX_INTEGER_OP_TEMPLATE_CONDITION>
constexpr c10::complex<fT> operator-(const iT& a, const c10::complex<fT>& b) {
return static_cast<fT>(a) - b;
}
template <typename fT, typename iT, COMPLEX_INTEGER_OP_TEMPLATE_CONDITION>
constexpr c10::complex<fT> operator*(const c10::complex<fT>& a, const iT& b) {
return a * static_cast<fT>(b);
}
template <typename fT, typename iT, COMPLEX_INTEGER_OP_TEMPLATE_CONDITION>
constexpr c10::complex<fT> operator*(const iT& a, const c10::complex<fT>& b) {
return static_cast<fT>(a) * b;
}
template <typename fT, typename iT, COMPLEX_INTEGER_OP_TEMPLATE_CONDITION>
constexpr c10::complex<fT> operator/(const c10::complex<fT>& a, const iT& b) {
return a / static_cast<fT>(b);
}
template <typename fT, typename iT, COMPLEX_INTEGER_OP_TEMPLATE_CONDITION>
constexpr c10::complex<fT> operator/(const iT& a, const c10::complex<fT>& b) {
return static_cast<fT>(a) / b;
}
#undef COMPLEX_INTEGER_OP_TEMPLATE_CONDITION
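// Illustrative sketch of the integer/complex operators above (variable names
// are hypothetical):
//
//   c10::complex<float> z(1.0f, 2.0f);
//   auto w = z * 3; // no static_cast<float>(3) needed
//   auto v = 1 + z; // an integer on the left-hand side also works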
template <typename T>
constexpr bool operator==(const complex<T>& lhs, const complex<T>& rhs) {
return (lhs.real() == rhs.real()) && (lhs.imag() == rhs.imag());
}
template <typename T>
constexpr bool operator==(const complex<T>& lhs, const T& rhs) {
return (lhs.real() == rhs) && (lhs.imag() == T());
}
template <typename T>
constexpr bool operator==(const T& lhs, const complex<T>& rhs) {
return (lhs == rhs.real()) && (T() == rhs.imag());
}
template <typename T>
constexpr bool operator!=(const complex<T>& lhs, const complex<T>& rhs) {
return !(lhs == rhs);
}
template <typename T>
constexpr bool operator!=(const complex<T>& lhs, const T& rhs) {
return !(lhs == rhs);
}
template <typename T>
constexpr bool operator!=(const T& lhs, const complex<T>& rhs) {
return !(lhs == rhs);
}
template <typename T, typename CharT, typename Traits>
std::basic_ostream<CharT, Traits>& operator<<(
std::basic_ostream<CharT, Traits>& os,
const complex<T>& x) {
return (os << static_cast<std::complex<T>>(x));
}
template <typename T, typename CharT, typename Traits>
std::basic_istream<CharT, Traits>& operator>>(
std::basic_istream<CharT, Traits>& is,
complex<T>& x) {
std::complex<T> tmp;
is >> tmp;
x = tmp;
return is;
}
} // namespace c10
// std functions
//
// The implementation of these functions also follow the design of C++20
namespace std {
template <typename T>
constexpr T real(const c10::complex<T>& z) {
return z.real();
}
template <typename T>
constexpr T imag(const c10::complex<T>& z) {
return z.imag();
}
template <typename T>
C10_HOST_DEVICE T abs(const c10::complex<T>& z) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return thrust::abs(static_cast<thrust::complex<T>>(z));
#else
return std::abs(static_cast<std::complex<T>>(z));
#endif
}
#if defined(USE_ROCM)
#define ROCm_Bug(x)
#else
#define ROCm_Bug(x) x
#endif
template <typename T>
C10_HOST_DEVICE T arg(const c10::complex<T>& z) {
return ROCm_Bug(std)::atan2(std::imag(z), std::real(z));
}
#undef ROCm_Bug
template <typename T>
constexpr T norm(const c10::complex<T>& z) {
return z.real() * z.real() + z.imag() * z.imag();
}
// For std::conj, there are other versions of it:
// constexpr std::complex<float> conj( float z );
// template< class DoubleOrInteger >
// constexpr std::complex<double> conj( DoubleOrInteger z );
// constexpr std::complex<long double> conj( long double z );
// These are not implemented
// TODO(@zasdfgbnm): implement them as c10::conj
template <typename T>
constexpr c10::complex<T> conj(const c10::complex<T>& z) {
return c10::complex<T>(z.real(), -z.imag());
}
// Thrust does not have complex --> complex version of thrust::proj,
// so this function is not implemented in c10 right now.
// TODO(@zasdfgbnm): implement it ourselves
// There is no c10 version of std::polar, because std::polar always
// returns std::complex. Use c10::polar instead.
} // namespace std
namespace c10 {
template <typename T>
C10_HOST_DEVICE complex<T> polar(const T& r, const T& theta = T()) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return static_cast<complex<T>>(thrust::polar(r, theta));
#else
// std::polar() requires r >= 0, so spell out the explicit implementation to
// avoid a branch.
return complex<T>(r * std::cos(theta), r * std::sin(theta));
#endif
}
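// A short usage sketch (assumes <cmath> provides M_PI; the variable name is
// illustrative):
//   auto z = c10::polar(2.0, M_PI / 2); // approximately 0 + 2i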
} // namespace c10
C10_CLANG_DIAGNOSTIC_POP()
#define C10_INTERNAL_INCLUDE_COMPLEX_REMAINING_H
// math functions are included in a separate file
#include <c10/util/complex_math.h> // IWYU pragma: keep
// utilities for complex types
#include <c10/util/complex_utils.h> // IWYU pragma: keep
#undef C10_INTERNAL_INCLUDE_COMPLEX_REMAINING_H
| 18,019
| 28.017713
| 80
|
h
|
null |
pytorch-main/c10/util/complex_math.h
|
#if !defined(C10_INTERNAL_INCLUDE_COMPLEX_REMAINING_H)
#error \
"c10/util/complex_math.h is not meant to be individually included. Include c10/util/complex.h instead."
#endif
namespace c10_complex_math {
// Exponential functions
template <typename T>
C10_HOST_DEVICE inline c10::complex<T> exp(const c10::complex<T>& x) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return static_cast<c10::complex<T>>(
thrust::exp(static_cast<thrust::complex<T>>(x)));
#else
return static_cast<c10::complex<T>>(
std::exp(static_cast<std::complex<T>>(x)));
#endif
}
template <typename T>
C10_HOST_DEVICE inline c10::complex<T> log(const c10::complex<T>& x) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return static_cast<c10::complex<T>>(
thrust::log(static_cast<thrust::complex<T>>(x)));
#else
return static_cast<c10::complex<T>>(
std::log(static_cast<std::complex<T>>(x)));
#endif
}
template <typename T>
C10_HOST_DEVICE inline c10::complex<T> log10(const c10::complex<T>& x) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return static_cast<c10::complex<T>>(
thrust::log10(static_cast<thrust::complex<T>>(x)));
#else
return static_cast<c10::complex<T>>(
std::log10(static_cast<std::complex<T>>(x)));
#endif
}
template <typename T>
C10_HOST_DEVICE inline c10::complex<T> log2(const c10::complex<T>& x) {
const c10::complex<T> log2 = c10::complex<T>(::log(2.0), 0.0);
return c10_complex_math::log(x) / log2;
}
// Power functions
//
#if defined(_LIBCPP_VERSION) || \
(defined(__GLIBCXX__) && !defined(_GLIBCXX11_USE_C99_COMPLEX))
namespace _detail {
C10_API c10::complex<float> sqrt(const c10::complex<float>& in);
C10_API c10::complex<double> sqrt(const c10::complex<double>& in);
C10_API c10::complex<float> acos(const c10::complex<float>& in);
C10_API c10::complex<double> acos(const c10::complex<double>& in);
}; // namespace _detail
#endif
template <typename T>
C10_HOST_DEVICE inline c10::complex<T> sqrt(const c10::complex<T>& x) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return static_cast<c10::complex<T>>(
thrust::sqrt(static_cast<thrust::complex<T>>(x)));
#elif !( \
defined(_LIBCPP_VERSION) || \
(defined(__GLIBCXX__) && !defined(_GLIBCXX11_USE_C99_COMPLEX)))
return static_cast<c10::complex<T>>(
std::sqrt(static_cast<std::complex<T>>(x)));
#else
return _detail::sqrt(x);
#endif
}
template <typename T>
C10_HOST_DEVICE inline c10::complex<T> pow(
const c10::complex<T>& x,
const c10::complex<T>& y) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return static_cast<c10::complex<T>>(thrust::pow(
static_cast<thrust::complex<T>>(x), static_cast<thrust::complex<T>>(y)));
#else
return static_cast<c10::complex<T>>(std::pow(
static_cast<std::complex<T>>(x), static_cast<std::complex<T>>(y)));
#endif
}
template <typename T>
C10_HOST_DEVICE inline c10::complex<T> pow(
const c10::complex<T>& x,
const T& y) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return static_cast<c10::complex<T>>(
thrust::pow(static_cast<thrust::complex<T>>(x), y));
#else
return static_cast<c10::complex<T>>(
std::pow(static_cast<std::complex<T>>(x), y));
#endif
}
template <typename T>
C10_HOST_DEVICE inline c10::complex<T> pow(
const T& x,
const c10::complex<T>& y) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return static_cast<c10::complex<T>>(
thrust::pow(x, static_cast<thrust::complex<T>>(y)));
#else
return static_cast<c10::complex<T>>(
std::pow(x, static_cast<std::complex<T>>(y)));
#endif
}
template <typename T, typename U>
C10_HOST_DEVICE inline c10::complex<decltype(T() * U())> pow(
const c10::complex<T>& x,
const c10::complex<U>& y) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return static_cast<c10::complex<T>>(thrust::pow(
static_cast<thrust::complex<T>>(x), static_cast<thrust::complex<T>>(y)));
#else
return static_cast<c10::complex<T>>(std::pow(
static_cast<std::complex<T>>(x), static_cast<std::complex<T>>(y)));
#endif
}
template <typename T, typename U>
C10_HOST_DEVICE inline c10::complex<decltype(T() * U())> pow(
const c10::complex<T>& x,
const U& y) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return static_cast<c10::complex<T>>(
thrust::pow(static_cast<thrust::complex<T>>(x), y));
#else
return static_cast<c10::complex<T>>(
std::pow(static_cast<std::complex<T>>(x), y));
#endif
}
template <typename T, typename U>
C10_HOST_DEVICE inline c10::complex<decltype(T() * U())> pow(
const T& x,
const c10::complex<U>& y) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return static_cast<c10::complex<T>>(
thrust::pow(x, static_cast<thrust::complex<T>>(y)));
#else
return static_cast<c10::complex<T>>(
std::pow(x, static_cast<std::complex<T>>(y)));
#endif
}
// Trigonometric functions
template <typename T>
C10_HOST_DEVICE inline c10::complex<T> sin(const c10::complex<T>& x) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return static_cast<c10::complex<T>>(
thrust::sin(static_cast<thrust::complex<T>>(x)));
#else
return static_cast<c10::complex<T>>(
std::sin(static_cast<std::complex<T>>(x)));
#endif
}
template <typename T>
C10_HOST_DEVICE inline c10::complex<T> cos(const c10::complex<T>& x) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return static_cast<c10::complex<T>>(
thrust::cos(static_cast<thrust::complex<T>>(x)));
#else
return static_cast<c10::complex<T>>(
std::cos(static_cast<std::complex<T>>(x)));
#endif
}
template <typename T>
C10_HOST_DEVICE inline c10::complex<T> tan(const c10::complex<T>& x) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return static_cast<c10::complex<T>>(
thrust::tan(static_cast<thrust::complex<T>>(x)));
#else
return static_cast<c10::complex<T>>(
std::tan(static_cast<std::complex<T>>(x)));
#endif
}
template <typename T>
C10_HOST_DEVICE inline c10::complex<T> asin(const c10::complex<T>& x) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return static_cast<c10::complex<T>>(
thrust::asin(static_cast<thrust::complex<T>>(x)));
#else
return static_cast<c10::complex<T>>(
std::asin(static_cast<std::complex<T>>(x)));
#endif
}
template <typename T>
C10_HOST_DEVICE inline c10::complex<T> acos(const c10::complex<T>& x) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return static_cast<c10::complex<T>>(
thrust::acos(static_cast<thrust::complex<T>>(x)));
#elif !defined(_LIBCPP_VERSION)
return static_cast<c10::complex<T>>(
std::acos(static_cast<std::complex<T>>(x)));
#else
return _detail::acos(x);
#endif
}
template <typename T>
C10_HOST_DEVICE inline c10::complex<T> atan(const c10::complex<T>& x) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return static_cast<c10::complex<T>>(
thrust::atan(static_cast<thrust::complex<T>>(x)));
#else
return static_cast<c10::complex<T>>(
std::atan(static_cast<std::complex<T>>(x)));
#endif
}
// Hyperbolic functions
template <typename T>
C10_HOST_DEVICE inline c10::complex<T> sinh(const c10::complex<T>& x) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return static_cast<c10::complex<T>>(
thrust::sinh(static_cast<thrust::complex<T>>(x)));
#else
return static_cast<c10::complex<T>>(
std::sinh(static_cast<std::complex<T>>(x)));
#endif
}
template <typename T>
C10_HOST_DEVICE inline c10::complex<T> cosh(const c10::complex<T>& x) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return static_cast<c10::complex<T>>(
thrust::cosh(static_cast<thrust::complex<T>>(x)));
#else
return static_cast<c10::complex<T>>(
std::cosh(static_cast<std::complex<T>>(x)));
#endif
}
template <typename T>
C10_HOST_DEVICE inline c10::complex<T> tanh(const c10::complex<T>& x) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return static_cast<c10::complex<T>>(
thrust::tanh(static_cast<thrust::complex<T>>(x)));
#else
return static_cast<c10::complex<T>>(
std::tanh(static_cast<std::complex<T>>(x)));
#endif
}
template <typename T>
C10_HOST_DEVICE inline c10::complex<T> asinh(const c10::complex<T>& x) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return static_cast<c10::complex<T>>(
thrust::asinh(static_cast<thrust::complex<T>>(x)));
#else
return static_cast<c10::complex<T>>(
std::asinh(static_cast<std::complex<T>>(x)));
#endif
}
template <typename T>
C10_HOST_DEVICE inline c10::complex<T> acosh(const c10::complex<T>& x) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return static_cast<c10::complex<T>>(
thrust::acosh(static_cast<thrust::complex<T>>(x)));
#else
return static_cast<c10::complex<T>>(
std::acosh(static_cast<std::complex<T>>(x)));
#endif
}
template <typename T>
C10_HOST_DEVICE inline c10::complex<T> atanh(const c10::complex<T>& x) {
#if defined(__CUDACC__) || defined(__HIPCC__)
return static_cast<c10::complex<T>>(
thrust::atanh(static_cast<thrust::complex<T>>(x)));
#else
return static_cast<c10::complex<T>>(
std::atanh(static_cast<std::complex<T>>(x)));
#endif
}
template <typename T>
C10_HOST_DEVICE inline c10::complex<T> log1p(const c10::complex<T>& z) {
// log1p(z) = log(1 + z)
// Let's define 1 + z = r * e ^ (i * a), then we have
// log(r * e ^ (i * a)) = log(r) + i * a
// With z = x + iy, the term r can be written as
// r = ((1 + x) ^ 2 + y ^ 2) ^ 0.5
// = (1 + x ^ 2 + 2 * x + y ^ 2) ^ 0.5
// So, log(r) is
// log(r) = 0.5 * log(1 + x ^ 2 + 2 * x + y ^ 2)
// = 0.5 * log1p(x * (x + 2) + y ^ 2)
//  we need to use the expression only under certain conditions to avoid overflow
// and underflow from `(x * (x + 2) + y ^ 2)`
T x = z.real();
T y = z.imag();
T zabs = std::abs(z);
T theta = std::atan2(y, x + T(1));
if (zabs < 0.5) {
T r = x * (T(2) + x) + y * y;
if (r == 0) { // handle underflow
return {x, theta};
}
return {T(0.5) * std::log1p(r), theta};
} else {
T z0 = std::hypot(x + 1, y);
return {std::log(z0), theta};
}
}
template <typename T>
C10_HOST_DEVICE inline c10::complex<T> expm1(const c10::complex<T>& z) {
// expm1(z) = exp(z) - 1
// Define z = x + i * y
// f = e ^ (x + i * y) - 1
// = e ^ x * e ^ (i * y) - 1
// = (e ^ x * cos(y) - 1) + i * (e ^ x * sin(y))
// = (e ^ x - 1) * cos(y) - (1 - cos(y)) + i * e ^ x * sin(y)
// = expm1(x) * cos(y) - 2 * sin(y / 2) ^ 2 + i * e ^ x * sin(y)
T x = z.real();
T y = z.imag();
T a = std::sin(y / 2);
T er = std::expm1(x) * std::cos(y) - T(2) * a * a;
T ei = std::exp(x) * std::sin(y);
return {er, ei};
}
} // namespace c10_complex_math
using c10_complex_math::acos;
using c10_complex_math::acosh;
using c10_complex_math::asin;
using c10_complex_math::asinh;
using c10_complex_math::atan;
using c10_complex_math::atanh;
using c10_complex_math::cos;
using c10_complex_math::cosh;
using c10_complex_math::exp;
using c10_complex_math::expm1;
using c10_complex_math::log;
using c10_complex_math::log10;
using c10_complex_math::log1p;
using c10_complex_math::log2;
using c10_complex_math::pow;
using c10_complex_math::sin;
using c10_complex_math::sinh;
using c10_complex_math::sqrt;
using c10_complex_math::tan;
using c10_complex_math::tanh;
namespace std {
using c10_complex_math::acos;
using c10_complex_math::acosh;
using c10_complex_math::asin;
using c10_complex_math::asinh;
using c10_complex_math::atan;
using c10_complex_math::atanh;
using c10_complex_math::cos;
using c10_complex_math::cosh;
using c10_complex_math::exp;
using c10_complex_math::expm1;
using c10_complex_math::log;
using c10_complex_math::log10;
using c10_complex_math::log1p;
using c10_complex_math::log2;
using c10_complex_math::pow;
using c10_complex_math::sin;
using c10_complex_math::sinh;
using c10_complex_math::sqrt;
using c10_complex_math::tan;
using c10_complex_math::tanh;
} // namespace std
| 11,849
| 29.779221
| 107
|
h
|
null |
pytorch-main/c10/util/complex_utils.h
|
#if !defined(C10_INTERNAL_INCLUDE_COMPLEX_REMAINING_H)
#error \
"c10/util/complex_utils.h is not meant to be individually included. Include c10/util/complex.h instead."
#endif
#include <limits>
namespace c10 {
template <typename T>
struct is_complex : public std::false_type {};
template <typename T>
struct is_complex<std::complex<T>> : public std::true_type {};
template <typename T>
struct is_complex<c10::complex<T>> : public std::true_type {};
// Extract double from std::complex<double>; is identity otherwise
// TODO: Write in more idiomatic C++17
template <typename T>
struct scalar_value_type {
using type = T;
};
template <typename T>
struct scalar_value_type<std::complex<T>> {
using type = T;
};
template <typename T>
struct scalar_value_type<c10::complex<T>> {
using type = T;
};
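// Illustrative examples of the traits above:
//   c10::is_complex<c10::complex<float>>::value        // true
//   c10::scalar_value_type<c10::complex<double>>::type // double
//   c10::scalar_value_type<float>::type                // float (identity)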
} // namespace c10
namespace std {
template <typename T>
class numeric_limits<c10::complex<T>> : public numeric_limits<T> {};
template <typename T>
bool isnan(const c10::complex<T>& v) {
return std::isnan(v.real()) || std::isnan(v.imag());
}
} // namespace std
| 1,077
| 21.93617
| 108
|
h
|
null |
pytorch-main/c10/util/copysign.h
|
#pragma once
#include <c10/util/BFloat16.h>
#include <c10/util/Half.h>
#include <c10/util/math_compat.h>
namespace c10 {
// Note: Explicit implementation of copysign for Half and BFloat16
// is needed to workaround g++-7/8 crash on aarch64, but also makes
// copysign faster for the half-precision types
template <typename T, typename U>
inline auto copysign(const T& a, const U& b) {
return std::copysign(a, b);
}
// Implement copysign for half precision floats using bit ops
// Sign is the most significant bit for both half and bfloat16 types
inline c10::Half copysign(c10::Half a, c10::Half b) {
return c10::Half((a.x & 0x7fff) | (b.x & 0x8000), c10::Half::from_bits());
}
inline c10::BFloat16 copysign(c10::BFloat16 a, c10::BFloat16 b) {
return c10::BFloat16(
(a.x & 0x7fff) | (b.x & 0x8000), c10::BFloat16::from_bits());
}
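// A brief sketch of the overloads above (values are illustrative):
//   c10::copysign(c10::Half(1.5f), c10::Half(-2.0f)); // -1.5 as c10::Half
//   c10::copysign(3.0, -1.0); // falls through to std::copysign -> -3.0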
} // namespace c10
| 866
| 28.896552
| 76
|
h
|
null |
pytorch-main/c10/util/either.h
|
// Originally taken from
// https://github.com/cryfs/cryfs/blob/14ad22570ddacef22d5ff139cdff68a54fc8234d/src/cpp-utils/either.h
#pragma once
#include <c10/macros/Macros.h>
#include <c10/util/C++17.h>
#include <c10/util/Optional.h>
namespace c10 {
/**
* either<A, B> is a tagged union that holds either an object of type A
* or an object of type B.
*/
template <class Left, class Right>
class either final {
public:
template <
class Head,
class... Tail,
std::enable_if_t<
std::is_constructible<Left, Head, Tail...>::value &&
!std::is_constructible<Right, Head, Tail...>::value>* = nullptr>
either(Head&& construct_left_head_arg, Tail&&... construct_left_tail_args)
: _side(Side::left) {
_construct_left(
std::forward<Head>(construct_left_head_arg),
std::forward<Tail>(construct_left_tail_args)...);
}
template <
class Head,
class... Tail,
std::enable_if_t<
!std::is_constructible<Left, Head, Tail...>::value &&
std::is_constructible<Right, Head, Tail...>::value>* = nullptr>
either(Head&& construct_right_head_arg, Tail&&... construct_right_tail_args)
: _side(Side::right) {
_construct_right(
std::forward<Head>(construct_right_head_arg),
std::forward<Tail>(construct_right_tail_args)...);
}
either(const either<Left, Right>& rhs) : _side(rhs._side) {
if (_side == Side::left) {
_construct_left(
rhs._left); // NOLINT(cppcoreguidelines-pro-type-union-access)
} else {
_construct_right(
rhs._right); // NOLINT(cppcoreguidelines-pro-type-union-access)
}
}
either(either<Left, Right>&& rhs) noexcept : _side(rhs._side) {
if (_side == Side::left) {
_construct_left(std::move(
rhs._left)); // NOLINT(cppcoreguidelines-pro-type-union-access)
} else {
_construct_right(std::move(
rhs._right)); // NOLINT(cppcoreguidelines-pro-type-union-access)
}
}
~either() {
_destruct();
}
either<Left, Right>& operator=(const either<Left, Right>& rhs) {
_destruct();
_side = rhs._side;
if (_side == Side::left) {
_construct_left(
rhs._left); // NOLINT(cppcoreguidelines-pro-type-union-access)
} else {
_construct_right(
rhs._right); // NOLINT(cppcoreguidelines-pro-type-union-access)
}
return *this;
}
either<Left, Right>& operator=(either<Left, Right>&& rhs) {
_destruct();
_side = rhs._side;
if (_side == Side::left) {
_construct_left(std::move(
rhs._left)); // NOLINT(cppcoreguidelines-pro-type-union-access)
} else {
_construct_right(std::move(
rhs._right)); // NOLINT(cppcoreguidelines-pro-type-union-access)
}
return *this;
}
bool is_left() const noexcept {
return _side == Side::left;
}
bool is_right() const noexcept {
return _side == Side::right;
}
const Left& left() const& {
if (C10_UNLIKELY(!is_left())) {
throw std::logic_error(
"Tried to get left side of an either which is right.");
}
return _left; // NOLINT(cppcoreguidelines-pro-type-union-access)
}
Left& left() & {
return const_cast<Left&>(
const_cast<const either<Left, Right>*>(this)->left());
}
Left&& left() && {
return std::move(left());
}
const Right& right() const& {
if (C10_UNLIKELY(!is_right())) {
throw std::logic_error(
"Tried to get right side of an either which is left.");
}
return _right; // NOLINT(cppcoreguidelines-pro-type-union-access)
}
Right& right() & {
return const_cast<Right&>(
const_cast<const either<Left, Right>*>(this)->right());
}
Right&& right() && {
return std::move(right());
}
template <class Result, class LeftFoldFunc, class RightFoldFunc>
Result fold(LeftFoldFunc&& leftFoldFunc, RightFoldFunc&& rightFoldFunc)
const {
if (Side::left == _side) {
return std::forward<LeftFoldFunc>(leftFoldFunc)(_left);
} else {
return std::forward<RightFoldFunc>(rightFoldFunc)(_right);
}
}
private:
union {
Left _left;
Right _right;
};
enum class Side : uint8_t { left, right } _side;
explicit either(Side side) noexcept : _side(side) {}
template <typename... Args>
void _construct_left(Args&&... args) {
new (&_left) Left(std::forward<Args>(
args)...); // NOLINT(cppcoreguidelines-pro-type-union-access)
}
template <typename... Args>
void _construct_right(Args&&... args) {
new (&_right) Right(std::forward<Args>(
args)...); // NOLINT(cppcoreguidelines-pro-type-union-access)
}
void _destruct() noexcept {
if (_side == Side::left) {
_left.~Left(); // NOLINT(cppcoreguidelines-pro-type-union-access)
} else {
_right.~Right(); // NOLINT(cppcoreguidelines-pro-type-union-access)
}
}
template <typename Left_, typename Right_, typename... Args>
friend either<Left_, Right_> make_left(Args&&... args);
template <typename Left_, typename Right_, typename... Args>
friend either<Left_, Right_> make_right(Args&&... args);
};
template <class Left, class Right>
inline bool operator==(
const either<Left, Right>& lhs,
const either<Left, Right>& rhs) {
if (lhs.is_left() != rhs.is_left()) {
return false;
}
if (lhs.is_left()) {
return lhs.left() == rhs.left();
} else {
return lhs.right() == rhs.right();
}
}
template <class Left, class Right>
inline bool operator!=(
const either<Left, Right>& lhs,
const either<Left, Right>& rhs) {
return !operator==(lhs, rhs);
}
template <class Left, class Right>
inline std::ostream& operator<<(
std::ostream& stream,
const either<Left, Right>& value) {
if (value.is_left()) {
stream << "Left(" << value.left() << ")";
} else {
stream << "Right(" << value.right() << ")";
}
return stream;
}
template <typename Left, typename Right, typename... Args>
inline either<Left, Right> make_left(Args&&... args) {
either<Left, Right> result(either<Left, Right>::Side::left);
result._construct_left(std::forward<Args>(args)...);
return result;
}
template <typename Left, typename Right, typename... Args>
inline either<Left, Right> make_right(Args&&... args) {
either<Left, Right> result(either<Left, Right>::Side::right);
result._construct_right(std::forward<Args>(args)...);
return result;
}
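// A small usage sketch of either/make_left/make_right/fold (variable names
// are illustrative):
//
//   auto ok = c10::make_right<std::string, int>(42);
//   auto err = c10::make_left<std::string, int>("parse error");
//   int value = ok.fold<int>(
//       [](const std::string&) { return -1; }, // left (error) branch
//       [](int v) { return v; });              // right (value) branch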
} // namespace c10
| 6,423
| 27.807175
| 102
|
h
|
null |
pytorch-main/c10/util/env.h
|
#pragma once
#include <c10/util/Exception.h>
#include <c10/util/Optional.h>
#include <cstring>
namespace c10 {
namespace utils {
// Reads an environment variable and returns
// - optional<true>, if set equal to "1"
// - optional<false>, if set equal to "0"
// - nullopt, otherwise
//
// NB:
// Issues a warning if the value of the environment variable is not 0 or 1.
inline optional<bool> check_env(const char* name) {
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4996)
#endif
auto envar = std::getenv(name);
#ifdef _MSC_VER
#pragma warning(pop)
#endif
if (envar) {
if (strcmp(envar, "0") == 0) {
return false;
}
if (strcmp(envar, "1") == 0) {
return true;
}
TORCH_WARN(
"Ignoring invalid value for boolean flag ",
name,
": ",
envar,
"valid values are 0 or 1.");
}
return c10::nullopt;
}
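// Example (illustrative; "MY_FLAG" is a hypothetical variable name):
//   const auto flag = c10::utils::check_env("MY_FLAG");
//   if (flag.has_value() && *flag) {
//     // enabled via MY_FLAG=1
//   }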
} // namespace utils
} // namespace c10
| 956
| 21.255814
| 75
|
h
|
null |
pytorch-main/c10/util/hash.h
|
#pragma once
#include <functional>
#include <iomanip>
#include <sstream>
#include <vector>
#include <c10/util/ArrayRef.h>
#include <c10/util/complex.h>
namespace c10 {
// NOTE: hash_combine and SHA1 hashing is based on implementation from Boost
//
// Boost Software License - Version 1.0 - August 17th, 2003
//
// Permission is hereby granted, free of charge, to any person or organization
// obtaining a copy of the software and accompanying documentation covered by
// this license (the "Software") to use, reproduce, display, distribute,
// execute, and transmit the Software, and to prepare derivative works of the
// Software, and to permit third-parties to whom the Software is furnished to
// do so, all subject to the following:
//
// The copyright notices in the Software and this entire statement, including
// the above license grant, this restriction and the following disclaimer,
// must be included in all copies of the Software, in whole or in part, and
// all derivative works of the Software, unless such copies or derivative
// works are solely in the form of machine-executable object code generated by
// a source language processor.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
inline size_t hash_combine(size_t seed, size_t value) {
return seed ^ (value + 0x9e3779b9 + (seed << 6u) + (seed >> 2u));
}
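// Example (illustrative): fold several hashes into one seed.
//   size_t seed = 0;
//   seed = c10::hash_combine(seed, std::hash<int>()(42));
//   seed = c10::hash_combine(seed, std::hash<std::string>()("foo"));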
// Creates the SHA1 hash of a string. A 160-bit hash.
// Based on the implementation in Boost (see notice above).
// Note that SHA1 hashes are no longer considered cryptographically
// secure, but are the standard hash for generating unique ids.
// Usage:
// // Let 'code' be a std::string
// c10::sha1 sha1_hash{code};
// const auto hash_code = sha1_hash.str();
// TODO: Compare vs OpenSSL and/or CryptoPP implementations
struct sha1 {
typedef unsigned int(digest_type)[5];
sha1(const std::string& s = "") {
if (!s.empty()) {
reset();
process_bytes(s.c_str(), s.size());
}
}
void reset() {
h_[0] = 0x67452301;
h_[1] = 0xEFCDAB89;
h_[2] = 0x98BADCFE;
h_[3] = 0x10325476;
h_[4] = 0xC3D2E1F0;
block_byte_index_ = 0;
bit_count_low = 0;
bit_count_high = 0;
}
std::string str() {
unsigned int digest[5];
get_digest(digest);
std::ostringstream buf;
for (unsigned int i : digest) {
buf << std::hex << std::setfill('0') << std::setw(8) << i;
}
return buf.str();
}
private:
unsigned int left_rotate(unsigned int x, std::size_t n) {
return (x << n) ^ (x >> (32 - n));
}
void process_block_impl() {
unsigned int w[80];
for (std::size_t i = 0; i < 16; ++i) {
w[i] = (block_[i * 4 + 0] << 24);
w[i] |= (block_[i * 4 + 1] << 16);
w[i] |= (block_[i * 4 + 2] << 8);
w[i] |= (block_[i * 4 + 3]);
}
for (std::size_t i = 16; i < 80; ++i) {
w[i] = left_rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1);
}
unsigned int a = h_[0];
unsigned int b = h_[1];
unsigned int c = h_[2];
unsigned int d = h_[3];
unsigned int e = h_[4];
for (std::size_t i = 0; i < 80; ++i) {
unsigned int f;
unsigned int k;
if (i < 20) {
f = (b & c) | (~b & d);
k = 0x5A827999;
} else if (i < 40) {
f = b ^ c ^ d;
k = 0x6ED9EBA1;
} else if (i < 60) {
f = (b & c) | (b & d) | (c & d);
k = 0x8F1BBCDC;
} else {
f = b ^ c ^ d;
k = 0xCA62C1D6;
}
unsigned temp = left_rotate(a, 5) + f + e + k + w[i];
e = d;
d = c;
c = left_rotate(b, 30);
b = a;
a = temp;
}
h_[0] += a;
h_[1] += b;
h_[2] += c;
h_[3] += d;
h_[4] += e;
}
void process_byte_impl(unsigned char byte) {
block_[block_byte_index_++] = byte;
if (block_byte_index_ == 64) {
block_byte_index_ = 0;
process_block_impl();
}
}
void process_byte(unsigned char byte) {
process_byte_impl(byte);
// size_t max value = 0xFFFFFFFF
// if (bit_count_low + 8 >= 0x100000000) { // would overflow
// if (bit_count_low >= 0x100000000-8) {
if (bit_count_low < 0xFFFFFFF8) {
bit_count_low += 8;
} else {
bit_count_low = 0;
if (bit_count_high <= 0xFFFFFFFE) {
++bit_count_high;
} else {
TORCH_CHECK(false, "sha1 too many bytes");
}
}
}
void process_block(void const* bytes_begin, void const* bytes_end) {
unsigned char const* begin = static_cast<unsigned char const*>(bytes_begin);
unsigned char const* end = static_cast<unsigned char const*>(bytes_end);
for (; begin != end; ++begin) {
process_byte(*begin);
}
}
void process_bytes(void const* buffer, std::size_t byte_count) {
unsigned char const* b = static_cast<unsigned char const*>(buffer);
process_block(b, b + byte_count);
}
void get_digest(digest_type& digest) {
// append the bit '1' to the message
process_byte_impl(0x80);
// append k bits '0', where k is the minimum number >= 0
// such that the resulting message length is congruent to 56 (mod 64)
// check if there is enough space for padding and bit_count
if (block_byte_index_ > 56) {
// finish this block
while (block_byte_index_ != 0) {
process_byte_impl(0);
}
// one more block
while (block_byte_index_ < 56) {
process_byte_impl(0);
}
} else {
while (block_byte_index_ < 56) {
process_byte_impl(0);
}
}
// append length of message (before pre-processing)
// as a 64-bit big-endian integer
process_byte_impl(
static_cast<unsigned char>((bit_count_high >> 24) & 0xFF));
process_byte_impl(
static_cast<unsigned char>((bit_count_high >> 16) & 0xFF));
process_byte_impl(static_cast<unsigned char>((bit_count_high >> 8) & 0xFF));
process_byte_impl(static_cast<unsigned char>((bit_count_high)&0xFF));
process_byte_impl(static_cast<unsigned char>((bit_count_low >> 24) & 0xFF));
process_byte_impl(static_cast<unsigned char>((bit_count_low >> 16) & 0xFF));
process_byte_impl(static_cast<unsigned char>((bit_count_low >> 8) & 0xFF));
process_byte_impl(static_cast<unsigned char>((bit_count_low)&0xFF));
// get final digest
digest[0] = h_[0];
digest[1] = h_[1];
digest[2] = h_[2];
digest[3] = h_[3];
digest[4] = h_[4];
}
unsigned int h_[5];
unsigned char block_[64];
std::size_t block_byte_index_;
std::size_t bit_count_low;
std::size_t bit_count_high;
};
////////////////////////////////////////////////////////////////////////////////
// c10::hash implementation
////////////////////////////////////////////////////////////////////////////////
namespace _hash_detail {
// Use template argument deduction to shorten calls to c10::hash
template <typename T>
size_t simple_get_hash(const T& o);
template <typename T, typename V>
using type_if_not_enum =
typename std::enable_if<!std::is_enum<T>::value, V>::type;
// Use SFINAE to dispatch to std::hash if possible, cast enum types to int
// automatically, and fall back to T::hash otherwise. NOTE: C++14 added support
// for hashing enum types to the standard, and some compilers implement it even
// when C++14 flags aren't specified. This is why we have to disable this
// overload if T is an enum type (and use the one below in this case).
template <typename T>
auto dispatch_hash(const T& o)
-> decltype(std::hash<T>()(o), type_if_not_enum<T, size_t>()) {
return std::hash<T>()(o);
}
template <typename T>
typename std::enable_if<std::is_enum<T>::value, size_t>::type dispatch_hash(
const T& o) {
using R = typename std::underlying_type<T>::type;
return std::hash<R>()(static_cast<R>(o));
}
template <typename T>
auto dispatch_hash(const T& o) -> decltype(T::hash(o), size_t()) {
return T::hash(o);
}
} // namespace _hash_detail
// Hasher struct
template <typename T>
struct hash {
size_t operator()(const T& o) const {
return _hash_detail::dispatch_hash(o);
};
};
// Specialization for std::tuple
template <typename... Types>
struct hash<std::tuple<Types...>> {
template <size_t idx, typename... Ts>
struct tuple_hash {
size_t operator()(const std::tuple<Ts...>& t) const {
return hash_combine(
_hash_detail::simple_get_hash(std::get<idx>(t)),
tuple_hash<idx - 1, Ts...>()(t));
}
};
template <typename... Ts>
struct tuple_hash<0, Ts...> {
size_t operator()(const std::tuple<Ts...>& t) const {
return _hash_detail::simple_get_hash(std::get<0>(t));
}
};
size_t operator()(const std::tuple<Types...>& t) const {
return tuple_hash<sizeof...(Types) - 1, Types...>()(t);
}
};
template <typename T1, typename T2>
struct hash<std::pair<T1, T2>> {
size_t operator()(const std::pair<T1, T2>& pair) const {
std::tuple<T1, T2> tuple = std::make_tuple(pair.first, pair.second);
return _hash_detail::simple_get_hash(tuple);
}
};
template <typename T>
struct hash<c10::ArrayRef<T>> {
size_t operator()(c10::ArrayRef<T> v) const {
size_t seed = 0;
for (const auto& elem : v) {
seed = hash_combine(seed, _hash_detail::simple_get_hash(elem));
}
return seed;
}
};
// Specialization for std::vector
template <typename T>
struct hash<std::vector<T>> {
size_t operator()(const std::vector<T>& v) const {
return hash<c10::ArrayRef<T>>()(v);
}
};
namespace _hash_detail {
template <typename T>
size_t simple_get_hash(const T& o) {
return c10::hash<T>()(o);
}
} // namespace _hash_detail
// Use this function to actually hash multiple things in one line.
// Dispatches to c10::hash, so it can hash containers.
// Example:
//
// static size_t hash(const MyStruct& s) {
// return get_hash(s.member1, s.member2, s.member3);
// }
template <typename... Types>
size_t get_hash(const Types&... args) {
return c10::hash<decltype(std::tie(args...))>()(std::tie(args...));
}
// Specialization for c10::complex
template <typename T>
struct hash<c10::complex<T>> {
size_t operator()(const c10::complex<T>& c) const {
return get_hash(c.real(), c.imag());
}
};
} // namespace c10
| 10,584
| 28.07967
| 80
|
h
|
null |
pytorch-main/c10/util/int128.h
|
// This file is based on the uint128 implementation of protobuf at
// https://github.com/protocolbuffers/protobuf/blob/1e88936fce10cf773cb72b44c6a7f48b38c7578b/src/google/protobuf/stubs/int128.h
//
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#pragma once
#include <c10/macros/Export.h>
#include <iosfwd>
namespace c10 {
struct uint128_pod;
// TODO(xiaofeng): Define GOOGLE_PROTOBUF_HAS_CONSTEXPR when constexpr is
// available.
#ifdef GOOGLE_PROTOBUF_HAS_CONSTEXPR
#define UINT128_CONSTEXPR constexpr
#else
#define UINT128_CONSTEXPR
#endif
class uint128;
static inline uint128& operator<<=(uint128& self, int amount);
// An unsigned 128-bit integer type. Thread-compatible.
class C10_API uint128 {
public:
 UINT128_CONSTEXPR uint128(); // Sets to 0, but don't rely on this behavior.
UINT128_CONSTEXPR uint128(uint64_t top, uint64_t bottom);
#ifndef SWIG
UINT128_CONSTEXPR uint128(int bottom);
UINT128_CONSTEXPR uint128(uint32_t bottom); // Top 96 bits = 0
#endif
UINT128_CONSTEXPR uint128(uint64_t bottom); // hi_ = 0
UINT128_CONSTEXPR uint128(const uint128_pod& val);
// Trivial copy constructor, assignment operator and destructor.
void Initialize(uint64_t top, uint64_t bottom);
// Arithmetic operators.
uint128& operator+=(const uint128& b);
uint128& operator-=(const uint128& b);
uint128& operator*=(const uint128& b);
// Long division/modulo for uint128.
uint128& operator/=(const uint128& b);
uint128& operator%=(const uint128& b);
uint128 operator++(int);
uint128 operator--(int);
// Make msvc happy with using operator<<= from DivModImpl
// which is a static function, and linker complained about missing
// static version of this overload
friend uint128& operator<<=(uint128&, int);
uint128& operator>>=(int);
uint128& operator&=(const uint128& b);
uint128& operator|=(const uint128& b);
uint128& operator^=(const uint128& b);
uint128& operator++();
uint128& operator--();
friend uint64_t Uint128Low64(const uint128& v);
friend uint64_t Uint128High64(const uint128& v);
// We add "std::" to avoid including all of port.h.
C10_API friend std::ostream& operator<<(std::ostream& o, const uint128& b);
private:
static void DivModImpl(
uint128 dividend,
uint128 divisor,
uint128* quotient_ret,
uint128* remainder_ret);
// Little-endian memory order optimizations can benefit from
// having lo_ first, hi_ last.
// See util/endian/endian.h and Load128/Store128 for storing a uint128.
uint64_t lo_;
uint64_t hi_;
// Not implemented, just declared for catching automatic type conversions.
uint128(uint8_t);
uint128(uint16_t);
uint128(float v);
uint128(double v);
};
// This is a POD form of uint128 which can be used for static variables which
// need to be operated on as uint128.
struct uint128_pod {
// Note: The ordering of fields is different than 'class uint128' but the
// same as its 2-arg constructor. This enables more obvious initialization
// of static instances, which is the primary reason for this struct in the
// first place. This does not seem to defeat any optimizations wrt
// operations involving this struct.
uint64_t hi;
uint64_t lo;
};
C10_API extern const uint128_pod kuint128max;
// allow uint128 to be logged
C10_API extern std::ostream& operator<<(std::ostream& o, const uint128& b);
// Methods to access low and high pieces of 128-bit value.
// Defined externally from uint128 to facilitate conversion
// to native 128-bit types when compilers support them.
inline uint64_t Uint128Low64(const uint128& v) {
return v.lo_;
}
inline uint64_t Uint128High64(const uint128& v) {
return v.hi_;
}
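// A short usage sketch (values are illustrative):
//   c10::uint128 x(/*top=*/1, /*bottom=*/0); // 2^64
//   x += 5;
//   Uint128High64(x); // 1
//   Uint128Low64(x);  // 5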
// TODO: perhaps it would be nice to have int128, a signed 128-bit type?
// --------------------------------------------------------------------------
// Implementation details follow
// --------------------------------------------------------------------------
inline bool operator==(const uint128& lhs, const uint128& rhs) {
return (
Uint128Low64(lhs) == Uint128Low64(rhs) &&
Uint128High64(lhs) == Uint128High64(rhs));
}
inline bool operator!=(const uint128& lhs, const uint128& rhs) {
return !(lhs == rhs);
}
C10_API inline UINT128_CONSTEXPR uint128::uint128() : lo_(0), hi_(0) {}
C10_API inline UINT128_CONSTEXPR uint128::uint128(uint64_t top, uint64_t bottom)
: lo_(bottom), hi_(top) {}
C10_API inline UINT128_CONSTEXPR uint128::uint128(const uint128_pod& v)
: lo_(v.lo), hi_(v.hi) {}
C10_API inline UINT128_CONSTEXPR uint128::uint128(uint64_t bottom)
: lo_(bottom), hi_(0) {}
#ifndef SWIG
C10_API inline UINT128_CONSTEXPR uint128::uint128(uint32_t bottom)
: lo_(bottom), hi_(0) {}
C10_API inline UINT128_CONSTEXPR uint128::uint128(int bottom)
: lo_(bottom), hi_(static_cast<int64_t>((bottom < 0) ? -1 : 0)) {}
#endif
#undef UINT128_CONSTEXPR
C10_API inline void uint128::Initialize(uint64_t top, uint64_t bottom) {
hi_ = top;
lo_ = bottom;
}
// Comparison operators.
#define CMP128(op) \
inline bool operator op(const uint128& lhs, const uint128& rhs) { \
return (Uint128High64(lhs) == Uint128High64(rhs)) \
? (Uint128Low64(lhs) op Uint128Low64(rhs)) \
: (Uint128High64(lhs) op Uint128High64(rhs)); \
}
CMP128(<)
CMP128(>)
CMP128(>=)
CMP128(<=)
#undef CMP128
// Unary operators
inline uint128 operator-(const uint128& val) {
const uint64_t hi_flip = ~Uint128High64(val);
const uint64_t lo_flip = ~Uint128Low64(val);
const uint64_t lo_add = lo_flip + 1;
if (lo_add < lo_flip) {
return uint128(hi_flip + 1, lo_add);
}
return uint128(hi_flip, lo_add);
}
inline bool operator!(const uint128& val) {
return !Uint128High64(val) && !Uint128Low64(val);
}
// Logical operators.
inline uint128 operator~(const uint128& val) {
return uint128(~Uint128High64(val), ~Uint128Low64(val));
}
#define LOGIC128(op) \
inline uint128 operator op(const uint128& lhs, const uint128& rhs) { \
return uint128( \
Uint128High64(lhs) op Uint128High64(rhs), \
Uint128Low64(lhs) op Uint128Low64(rhs)); \
}
LOGIC128(|)
LOGIC128(&)
LOGIC128(^)
#undef LOGIC128
#define LOGICASSIGN128(op) \
C10_API inline uint128& uint128::operator op(const uint128& other) { \
hi_ op other.hi_; \
lo_ op other.lo_; \
return *this; \
}
LOGICASSIGN128(|=)
LOGICASSIGN128(&=)
LOGICASSIGN128(^=)
#undef LOGICASSIGN128
// Shift operators.
inline uint128 operator<<(const uint128& val, int amount) {
// uint64_t shifts of >= 64 are undefined, so we will need some
// special-casing.
if (amount < 64) {
if (amount == 0) {
return val;
}
uint64_t new_hi =
(Uint128High64(val) << amount) | (Uint128Low64(val) >> (64 - amount));
uint64_t new_lo = Uint128Low64(val) << amount;
return uint128(new_hi, new_lo);
} else if (amount < 128) {
return uint128(Uint128Low64(val) << (amount - 64), 0);
} else {
return uint128(0, 0);
}
}
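// Example of the cross-word behavior above (illustrative): shifting by more
// than 64 moves bits entirely into the high word.
//   c10::uint128 one(1);
//   auto shifted = one << 100; // hi == uint64_t(1) << 36, lo == 0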
inline uint128 operator>>(const uint128& val, int amount) {
// uint64_t shifts of >= 64 are undefined, so we will need some
// special-casing.
if (amount < 64) {
if (amount == 0) {
return val;
}
uint64_t new_hi = Uint128High64(val) >> amount;
uint64_t new_lo =
(Uint128Low64(val) >> amount) | (Uint128High64(val) << (64 - amount));
return uint128(new_hi, new_lo);
} else if (amount < 128) {
return uint128(0, Uint128High64(val) >> (amount - 64));
} else {
return uint128(0, 0);
}
}
static inline uint128& operator<<=(uint128& self, int amount) {
// uint64_t shifts of >= 64 are undefined, so we will need some
// special-casing.
if (amount < 64) {
if (amount != 0) {
self.hi_ = (self.hi_ << amount) | (self.lo_ >> (64 - amount));
self.lo_ = self.lo_ << amount;
}
} else if (amount < 128) {
self.hi_ = self.lo_ << (amount - 64);
self.lo_ = 0;
} else {
self.hi_ = 0;
self.lo_ = 0;
}
return self;
}
C10_API inline uint128& uint128::operator>>=(int amount) {
// uint64_t shifts of >= 64 are undefined, so we will need some
// special-casing.
if (amount < 64) {
if (amount != 0) {
lo_ = (lo_ >> amount) | (hi_ << (64 - amount));
hi_ = hi_ >> amount;
}
} else if (amount < 128) {
lo_ = hi_ >> (amount - 64);
hi_ = 0;
} else {
lo_ = 0;
hi_ = 0;
}
return *this;
}
inline uint128 operator+(const uint128& lhs, const uint128& rhs) {
return uint128(lhs) += rhs;
}
inline uint128 operator-(const uint128& lhs, const uint128& rhs) {
return uint128(lhs) -= rhs;
}
inline uint128 operator*(const uint128& lhs, const uint128& rhs) {
return uint128(lhs) *= rhs;
}
inline uint128 operator/(const uint128& lhs, const uint128& rhs) {
return uint128(lhs) /= rhs;
}
inline uint128 operator%(const uint128& lhs, const uint128& rhs) {
return uint128(lhs) %= rhs;
}
C10_API inline uint128& uint128::operator+=(const uint128& b) {
hi_ += b.hi_;
uint64_t lolo = lo_ + b.lo_;
if (lolo < lo_)
++hi_;
lo_ = lolo;
return *this;
}
C10_API inline uint128& uint128::operator-=(const uint128& b) {
hi_ -= b.hi_;
if (b.lo_ > lo_)
--hi_;
lo_ -= b.lo_;
return *this;
}
C10_API inline uint128& uint128::operator*=(const uint128& b) {
uint64_t a96 = hi_ >> 32;
uint64_t a64 = hi_ & 0xffffffffu;
uint64_t a32 = lo_ >> 32;
uint64_t a00 = lo_ & 0xffffffffu;
uint64_t b96 = b.hi_ >> 32;
uint64_t b64 = b.hi_ & 0xffffffffu;
uint64_t b32 = b.lo_ >> 32;
uint64_t b00 = b.lo_ & 0xffffffffu;
// multiply [a96 .. a00] x [b96 .. b00]
// terms higher than c96 disappear off the high side
// terms c96 and c64 are safe to ignore carry bit
uint64_t c96 = a96 * b00 + a64 * b32 + a32 * b64 + a00 * b96;
uint64_t c64 = a64 * b00 + a32 * b32 + a00 * b64;
this->hi_ = (c96 << 32) + c64;
this->lo_ = 0;
// add terms after this one at a time to capture carry
*this += uint128(a32 * b00) << 32;
*this += uint128(a00 * b32) << 32;
*this += a00 * b00;
return *this;
}
C10_API inline uint128 uint128::operator++(int) {
uint128 tmp(*this);
*this += 1;
return tmp;
}
C10_API inline uint128 uint128::operator--(int) {
uint128 tmp(*this);
*this -= 1;
return tmp;
}
C10_API inline uint128& uint128::operator++() {
*this += 1;
return *this;
}
C10_API inline uint128& uint128::operator--() {
*this -= 1;
return *this;
}
} // namespace c10
| 12,442
| 30.263819
| 127
|
h
|
null |
pytorch-main/c10/util/irange.h
|
// Copyright 2004-present Facebook. All Rights Reserved.
#pragma once
#include <c10/util/Exception.h>
#include <c10/util/TypeSafeSignMath.h>
#include <algorithm>
#include <iterator>
#include <limits>
#include <type_traits>
namespace c10 {
namespace detail {
template <
typename I,
bool one_sided = false,
typename std::enable_if<std::is_integral<I>::value, int>::type = 0>
struct integer_iterator {
using iterator_category = std::input_iterator_tag;
using value_type = I;
using difference_type = std::ptrdiff_t;
using pointer = I*;
using reference = I&;
explicit integer_iterator(I value) : value(value) {}
I operator*() const {
return value;
}
I const* operator->() const {
return &value;
}
integer_iterator& operator++() {
++value;
return *this;
}
integer_iterator operator++(int) {
const auto copy = *this;
++*this;
return copy;
}
bool operator==(const integer_iterator& other) const {
if constexpr (one_sided) {
// Range-for loops' end test is `begin != end`, not `begin <
// end`. To handle `c10::irange(n)` where n < 0 (which should be
// empty), we just make `begin != end` fail whenever `end` is
// negative.
return is_negative(other.value) || value == other.value;
} else {
return value == other.value;
}
// Suppress "warning: missing return statement at end of non-void function"
// which Nvidia's Robert Crovella confirms is an NVCC compiler error
// here https://stackoverflow.com/a/64561686/752843 on 2020-10-27
// `__builtin_unreachable();` would be best here, but it's not
// available with all compilers. So we instead return an arbitrary
// value trusting that this line will, in fact, never be reached.
return false; // Horrible hack
}
bool operator!=(const integer_iterator& other) const {
return !(*this == other);
}
protected:
I value;
};
} // namespace detail
template <
typename I,
bool one_sided = false,
typename std::enable_if<std::is_integral<I>::value, bool>::type = true>
struct integer_range {
public:
integer_range(I begin, I end) : begin_(begin), end_(end) {}
using iterator = detail::integer_iterator<I, one_sided>;
iterator begin() const {
return begin_;
}
iterator end() const {
return end_;
}
private:
iterator begin_;
iterator end_;
};
/// Creates an integer range for the half-open interval [begin, end)
/// If end<=begin, then the range is empty.
/// The range has the type of the `end` integer; `begin` integer is
/// cast to this type.
template <
typename Integer1,
typename Integer2,
typename std::enable_if<std::is_integral<Integer1>::value, bool>::type =
true,
typename std::enable_if<std::is_integral<Integer2>::value, bool>::type =
true>
integer_range<Integer2> irange(Integer1 begin, Integer2 end) {
// If end<=begin then the range is empty; we can achieve this effect by
// choosing the larger of {begin, end} as the loop terminator
return {
static_cast<Integer2>(begin),
std::max(static_cast<Integer2>(begin), end)};
}
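// Example (illustrative):
//   for (const auto i : c10::irange(2, 5)) {
//     // visits i = 2, 3, 4; c10::irange(5, 2) would be an empty range
//   }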
/// Creates an integer range for the half-open interval [0, end)
/// If end<=begin, then the range is empty
template <
typename Integer,
typename std::enable_if<std::is_integral<Integer>::value, bool>::type =
true>
integer_range<Integer, true> irange(Integer end) {
return {Integer(), end};
}
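// Example (illustrative), using a hypothetical integral variable `n`:
//   for (const auto i : c10::irange(n)) {
//     // visits i = 0, 1, ..., n - 1; empty when n <= 0
//   }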
} // namespace c10
| 3,465
| 26.078125
| 79
|
h
|
null |
pytorch-main/c10/util/logging_is_google_glog.h
|
#ifndef C10_UTIL_LOGGING_IS_GOOGLE_GLOG_H_
#define C10_UTIL_LOGGING_IS_GOOGLE_GLOG_H_
#include <map>
#include <set>
#include <vector>
#include <iomanip> // because some of the caffe2 code uses e.g. std::setw
// Using google glog. For glog 0.3.2 versions, stl_logging.h needs to be before
// logging.h to actually use stl_logging. Because template magic.
// In addition, we do not do stl logging in .cu files because nvcc does not like
// it. Some mobile platforms do not like stl_logging, so we add an
// overload in that case as well.
#ifdef __CUDACC__
#include <cuda.h>
#endif
#if !defined(__CUDACC__) && !defined(C10_USE_MINIMAL_GLOG)
#include <glog/stl_logging.h>
// Old versions of glog don't declare this using declaration, so help
// them out. Fortunately, C++ won't complain if you declare the same
// using declaration multiple times.
namespace std {
using ::operator<<;
}
#else // !defined(__CUDACC__) && !defined(C10_USE_MINIMAL_GLOG)
// In the cudacc compiler scenario, we will simply ignore the container
// printout feature. Basically we need to register a fake overload for
// vector/string - here, we just ignore the entries in the logs.
namespace std {
#define INSTANTIATE_FOR_CONTAINER(container) \
template <class... Types> \
ostream& operator<<(ostream& out, const container<Types...>&) { \
return out; \
}
INSTANTIATE_FOR_CONTAINER(vector)
INSTANTIATE_FOR_CONTAINER(map)
INSTANTIATE_FOR_CONTAINER(set)
#undef INSTANTIATE_FOR_CONTAINER
} // namespace std
#endif
#include <glog/logging.h>
// Additional macros on top of glog
#define TORCH_CHECK_EQ(val1, val2) CHECK_EQ(val1, val2)
#define TORCH_CHECK_NE(val1, val2) CHECK_NE(val1, val2)
#define TORCH_CHECK_LE(val1, val2) CHECK_LE(val1, val2)
#define TORCH_CHECK_LT(val1, val2) CHECK_LT(val1, val2)
#define TORCH_CHECK_GE(val1, val2) CHECK_GE(val1, val2)
#define TORCH_CHECK_GT(val1, val2) CHECK_GT(val1, val2)
#ifndef NDEBUG
#define TORCH_DCHECK_EQ(val1, val2) DCHECK_EQ(val1, val2)
#define TORCH_DCHECK_NE(val1, val2) DCHECK_NE(val1, val2)
#define TORCH_DCHECK_LE(val1, val2) DCHECK_LE(val1, val2)
#define TORCH_DCHECK_LT(val1, val2) DCHECK_LT(val1, val2)
#define TORCH_DCHECK_GE(val1, val2) DCHECK_GE(val1, val2)
#define TORCH_DCHECK_GT(val1, val2) DCHECK_GT(val1, val2)
#else // !NDEBUG
// These versions generate no code in optimized mode.
#define TORCH_DCHECK_EQ(val1, val2) \
while (false) \
DCHECK_EQ(val1, val2)
#define TORCH_DCHECK_NE(val1, val2) \
while (false) \
DCHECK_NE(val1, val2)
#define TORCH_DCHECK_LE(val1, val2) \
while (false) \
DCHECK_LE(val1, val2)
#define TORCH_DCHECK_LT(val1, val2) \
while (false) \
DCHECK_LT(val1, val2)
#define TORCH_DCHECK_GE(val1, val2) \
while (false) \
DCHECK_GE(val1, val2)
#define TORCH_DCHECK_GT(val1, val2) \
while (false) \
DCHECK_GT(val1, val2)
#endif // NDEBUG
// Check that a pointer is not null.
#define TORCH_CHECK_NOTNULL(val) CHECK_NOTNULL(val)
#ifndef NDEBUG
// Debug only version of TORCH_CHECK_NOTNULL
#define TORCH_DCHECK_NOTNULL(val) DCHECK_NOTNULL(val)
#else // !NDEBUG
// Optimized version - generates no code.
#define TORCH_DCHECK_NOTNULL(val) \
while (false) \
DCHECK_NOTNULL(val)
#endif // NDEBUG
// Log with source location information override (to be used in generic
// warning/error handlers implemented as functions, not macros)
//
// Note, we don't respect GOOGLE_STRIP_LOG here for simplicity
#define LOG_AT_FILE_LINE(n, file, line) \
::google::LogMessage(file, line, ::google::GLOG_##n).stream()
#endif // C10_UTIL_LOGGING_IS_GOOGLE_GLOG_H_
| 3,794
| 33.5
| 80
|
h
|
null |
pytorch-main/c10/util/logging_is_not_google_glog.h
|
#ifndef C10_UTIL_LOGGING_IS_NOT_GOOGLE_GLOG_H_
#define C10_UTIL_LOGGING_IS_NOT_GOOGLE_GLOG_H_
#include <chrono>
#include <climits>
#include <ctime>
#include <fstream>
#include <iomanip>
#include <map>
#include <set>
#include <sstream>
#include <string>
#include <vector>
#include <c10/util/Flags.h>
const char CAFFE2_SEVERITY_PREFIX[] = "FEWIV";
namespace c10 {
// Log severity level constants.
const int GLOG_FATAL = 3;
const int GLOG_ERROR = 2;
const int GLOG_WARNING = 1;
const int GLOG_INFO = 0;
class C10_API MessageLogger {
public:
MessageLogger(const char* file, int line, int severity);
~MessageLogger();
// Return the stream associated with the logger object.
std::stringstream& stream() {
return stream_;
}
private:
// When there is a fatal log, we simply abort.
void DealWithFatal() {
abort();
}
const char* tag_;
std::stringstream stream_;
int severity_;
};
// This class is used to explicitly ignore values in the conditional
// logging macros. This avoids compiler warnings like "value computed
// is not used" and "statement has no effect".
class C10_API LoggerVoidify {
public:
LoggerVoidify() = default;
// This has to be an operator with a precedence lower than << but
// higher than ?:
void operator&(const std::ostream& s) {}
};
// Log a message and terminate.
template <class T>
void LogMessageFatal(const char* file, int line, const T& message) {
MessageLogger(file, line, GLOG_FATAL).stream() << message;
}
// Helpers for TORCH_CHECK_NOTNULL(). Two are necessary to support both raw
// pointers and smart pointers.
template <typename T>
T& CheckNotNullCommon(const char* file, int line, const char* names, T& t) {
if (t == nullptr) {
LogMessageFatal(file, line, std::string(names));
}
return t;
}
template <typename T>
T* CheckNotNull(const char* file, int line, const char* names, T* t) {
return CheckNotNullCommon(file, line, names, t);
}
template <typename T>
T& CheckNotNull(const char* file, int line, const char* names, T& t) {
return CheckNotNullCommon(file, line, names, t);
}
} // namespace c10
// ---------------------- Logging Macro definitions --------------------------
static_assert(
CAFFE2_LOG_THRESHOLD <= ::c10::GLOG_FATAL,
"CAFFE2_LOG_THRESHOLD should at most be GLOG_FATAL.");
// If n is under the compile-time caffe log threshold, the LOG(n) macro
// should not generate anything in optimized code.
#define LOG(n) \
if (::c10::GLOG_##n >= CAFFE2_LOG_THRESHOLD) \
::c10::MessageLogger(__FILE__, __LINE__, ::c10::GLOG_##n).stream()
#define VLOG(n) \
if (-n >= CAFFE2_LOG_THRESHOLD) \
::c10::MessageLogger(__FILE__, __LINE__, -n).stream()
#define LOG_IF(n, condition) \
if (::c10::GLOG_##n >= CAFFE2_LOG_THRESHOLD && (condition)) \
::c10::MessageLogger(__FILE__, __LINE__, ::c10::GLOG_##n).stream()
#define VLOG_IF(n, condition) \
if (-n >= CAFFE2_LOG_THRESHOLD && (condition)) \
::c10::MessageLogger(__FILE__, __LINE__, -n).stream()
#define VLOG_IS_ON(verboselevel) (CAFFE2_LOG_THRESHOLD <= -(verboselevel))
// Log with source location information override (to be used in generic
// warning/error handlers implemented as functions, not macros)
#define LOG_AT_FILE_LINE(n, file, line) \
if (::c10::GLOG_##n >= CAFFE2_LOG_THRESHOLD) \
::c10::MessageLogger(file, line, ::c10::GLOG_##n).stream()
// Log only if condition is met. Otherwise evaluates to void.
#define FATAL_IF(condition) \
condition ? (void)0 \
: ::c10::LoggerVoidify() & \
::c10::MessageLogger(__FILE__, __LINE__, ::c10::GLOG_FATAL).stream()
// Check for a given boolean condition.
#define CHECK(condition) FATAL_IF(condition) << "Check failed: " #condition " "
#ifndef NDEBUG
// Debug only version of CHECK
#define DCHECK(condition) FATAL_IF(condition) << "Check failed: " #condition " "
#define DLOG(severity) LOG(severity)
#else // NDEBUG
// Optimized version - generates no code.
#define DCHECK(condition) \
while (false) \
CHECK(condition)
#define DLOG(n) \
true ? (void)0 \
: ::c10::LoggerVoidify() & \
::c10::MessageLogger(__FILE__, __LINE__, ::c10::GLOG_##n).stream()
#endif // NDEBUG
#define TORCH_CHECK_OP(val1, val2, op) \
FATAL_IF(((val1)op(val2))) << "Check failed: " #val1 " " #op " " #val2 " (" \
<< (val1) << " vs. " << (val2) << ") "
// TORCH_CHECK_OP macro definitions
#define TORCH_CHECK_EQ(val1, val2) TORCH_CHECK_OP(val1, val2, ==)
#define TORCH_CHECK_NE(val1, val2) TORCH_CHECK_OP(val1, val2, !=)
#define TORCH_CHECK_LE(val1, val2) TORCH_CHECK_OP(val1, val2, <=)
#define TORCH_CHECK_LT(val1, val2) TORCH_CHECK_OP(val1, val2, <)
#define TORCH_CHECK_GE(val1, val2) TORCH_CHECK_OP(val1, val2, >=)
#define TORCH_CHECK_GT(val1, val2) TORCH_CHECK_OP(val1, val2, >)
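// Example (illustrative): additional context can be streamed after the macro,
// using a hypothetical variable `ndim`:
//   TORCH_CHECK_EQ(ndim, 4) << "expected NCHW input";
// On failure this logs something like
//   "Check failed: ndim == 4 (3 vs. 4) expected NCHW input"
// and aborts via the GLOG_FATAL severity.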
#ifndef NDEBUG
// Debug only versions of TORCH_CHECK_OP macros.
#define TORCH_DCHECK_EQ(val1, val2) TORCH_CHECK_OP(val1, val2, ==)
#define TORCH_DCHECK_NE(val1, val2) TORCH_CHECK_OP(val1, val2, !=)
#define TORCH_DCHECK_LE(val1, val2) TORCH_CHECK_OP(val1, val2, <=)
#define TORCH_DCHECK_LT(val1, val2) TORCH_CHECK_OP(val1, val2, <)
#define TORCH_DCHECK_GE(val1, val2) TORCH_CHECK_OP(val1, val2, >=)
#define TORCH_DCHECK_GT(val1, val2) TORCH_CHECK_OP(val1, val2, >)
#else // !NDEBUG
// These versions generate no code in optimized mode.
#define TORCH_DCHECK_EQ(val1, val2) \
while (false) \
TORCH_CHECK_OP(val1, val2, ==)
#define TORCH_DCHECK_NE(val1, val2) \
while (false) \
TORCH_CHECK_OP(val1, val2, !=)
#define TORCH_DCHECK_LE(val1, val2) \
while (false) \
TORCH_CHECK_OP(val1, val2, <=)
#define TORCH_DCHECK_LT(val1, val2) \
while (false) \
TORCH_CHECK_OP(val1, val2, <)
#define TORCH_DCHECK_GE(val1, val2) \
while (false) \
TORCH_CHECK_OP(val1, val2, >=)
#define TORCH_DCHECK_GT(val1, val2) \
while (false) \
TORCH_CHECK_OP(val1, val2, >)
#endif // NDEBUG
// Check that a pointer is not null.
#define TORCH_CHECK_NOTNULL(val) \
::c10::CheckNotNull( \
__FILE__, __LINE__, "Check failed: '" #val "' Must be non NULL", (val))
#ifndef NDEBUG
// Debug only version of TORCH_CHECK_NOTNULL
#define TORCH_DCHECK_NOTNULL(val) \
::c10::CheckNotNull( \
__FILE__, __LINE__, "Check failed: '" #val "' Must be non NULL", (val))
#else // !NDEBUG
// Optimized version - generates no code.
#define TORCH_DCHECK_NOTNULL(val) \
while (false) \
TORCH_CHECK_NOTNULL(val)
#endif // NDEBUG
// ---------------------- Support for std objects --------------------------
// These are adapted from glog to support a limited set of logging capability
// for STL objects.
namespace std {
// Forward declare these two, and define them after all the container streams
// operators so that we can recurse from pair -> container -> container -> pair
// properly.
template <class First, class Second>
std::ostream& operator<<(std::ostream& out, const std::pair<First, Second>& p);
} // namespace std
namespace c10 {
template <class Iter>
void PrintSequence(std::ostream& ss, Iter begin, Iter end);
} // namespace c10
namespace std {
#define INSTANTIATE_FOR_CONTAINER(container) \
template <class... Types> \
std::ostream& operator<<( \
std::ostream& out, const container<Types...>& seq) { \
c10::PrintSequence(out, seq.begin(), seq.end()); \
return out; \
}
INSTANTIATE_FOR_CONTAINER(std::vector)
INSTANTIATE_FOR_CONTAINER(std::map)
INSTANTIATE_FOR_CONTAINER(std::set)
#undef INSTANTIATE_FOR_CONTAINER
template <class First, class Second>
inline std::ostream& operator<<(
std::ostream& out,
const std::pair<First, Second>& p) {
out << '(' << p.first << ", " << p.second << ')';
return out;
}
inline std::ostream& operator<<(std::ostream& out, const std::nullptr_t&) {
out << "(null)";
return out;
}
} // namespace std
namespace c10 {
template <class Iter>
inline void PrintSequence(std::ostream& out, Iter begin, Iter end) {
// Output at most 100 elements -- appropriate if used for logging.
for (int i = 0; begin != end && i < 100; ++i, ++begin) {
if (i > 0)
out << ' ';
out << *begin;
}
if (begin != end) {
out << " ...";
}
}
} // namespace c10
#endif // C10_UTIL_LOGGING_IS_NOT_GOOGLE_GLOG_H_
| 8,664
| 32.455598
| 80
|
h
|
null |
pytorch-main/c10/util/math_compat.h
|
#pragma once
#include <cmath>
// Android NDK platform < 21 with libstdc++ has spotty C++11 support.
// Various hacks in this header allow the rest of the codebase to use
// standard APIs.
#if (defined(__ANDROID__) && __ANDROID_API__ < 21 && defined(__GLIBCXX__)) || \
defined(__NEWLIB__)
#include <stdexcept>
namespace std {
// Import double versions of these functions from the global namespace.
using ::acosh;
using ::asinh;
using ::atanh;
using ::erf;
using ::erfc;
using ::expm1;
using ::lgamma;
using ::log1p;
using ::nearbyint;
using ::round;
using ::tgamma;
using ::trunc;
using ::truncf;
// Define float versions the same way as more recent libstdc++
inline float acosh(float x) {
return __builtin_acoshf(x);
}
inline float asinh(float x) {
return __builtin_asinhf(x);
}
inline float atanh(float x) {
return __builtin_atanhf(x);
}
inline float copysign(float x, float y) {
return __builtin_copysignf(x, y);
}
inline float erf(float x) {
return __builtin_erff(x);
}
inline float erfc(float x) {
return __builtin_erfcf(x);
}
inline float expm1(float x) {
return __builtin_expm1f(x);
}
inline float fmax(float x, float y) {
return __builtin_fmaxf(x, y);
}
inline float fmin(float x, float y) {
return __builtin_fminf(x, y);
}
inline float lgamma(float x) {
return __builtin_lgammaf(x);
}
inline float log1p(float x) {
return __builtin_log1pf(x);
}
inline float nearbyint(float x) {
return __builtin_nearbyintf(x);
}
inline float remainder(float x, float y) {
return __builtin_remainderf(x, y);
}
inline float round(float x) {
return __builtin_roundf(x);
}
inline float tgamma(float x) {
return __builtin_tgammaf(x);
}
inline float trunc(float x) {
return __builtin_truncf(x);
}
// __builtin_nexttoward doesn't work. It appears to try to
// link against the global nexttoward function, which is not present
// prior to API 18. Just bail for now.
inline float nexttoward(float x, long double y) {
throw std::runtime_error("std::nexttoward is not present on older Android");
}
inline double nexttoward(double x, long double y) {
throw std::runtime_error("std::nexttoward is not present on older Android");
}
#if !defined(__NEWLIB__)
// TODO: this function needs to be implemented and tested. Currently it just
// throws an error.
inline float hypot(float x, float y) {
throw std::runtime_error("std::hypot is not implemented on older Android");
}
inline double hypot(double x, double y) {
throw std::runtime_error("std::hypot is not implemented on older Android");
}
#else
inline float hypot(float x, float y) {
return hypot((double)x, (double)y);
}
#endif
// TODO: this function needs to be implemented and tested. Currently it just
// throws an error.
inline float igamma(float x, float y) {
throw std::runtime_error("igamma is not implemented on older Android");
}
inline double igamma(double x, double y) {
throw std::runtime_error("igamma is not implemented on older Android");
}
inline float igammac(float x, float y) {
throw std::runtime_error("igammac is not implemented on older Android");
}
inline double igammac(double x, double y) {
throw std::runtime_error("igammac is not implemented on older Android");
}
// Note: std::signbit returns true for negative zero (-0), but this
// implementation returns false.
inline bool signbit(float x) {
return x < 0;
}
inline bool signbit(double x) {
return x < 0;
}
inline bool signbit(long double x) {
return x < 0;
}
#if !defined(__NEWLIB__)
// TODO: this function needs to be implemented and tested. Currently it just
// throws an error.
inline float nextafter(float x, float y) {
throw std::runtime_error(
"std::nextafter is not implemented on older Android");
}
inline double nextafter(double x, double y) {
throw std::runtime_error(
"std::nextafter is not implemented on older Android");
}
#else
inline float nextafter(float x, float y) {
return nextafter((double)x, (double)y);
}
#endif
#if !defined(__NEWLIB__)
// TODO: this function needs to be implemented and tested. Currently it just
// throws an error.
inline float exp2(float x) {
throw std::runtime_error("std::exp2 is not implemented on older Android");
}
inline double exp2(double x) {
throw std::runtime_error("std::exp2 is not implemented on older Android");
}
#else
inline float exp2(float x) {
return exp2((double)x);
}
#endif
// Define integral versions the same way as more recent libstdc++
template <typename T>
typename std::enable_if<std::is_integral<T>::value, double>::type acosh(T x) {
return __builtin_acosh(x);
}
template <typename T>
typename std::enable_if<std::is_integral<T>::value, double>::type asinh(T x) {
return __builtin_asinh(x);
}
template <typename T>
typename std::enable_if<std::is_integral<T>::value, double>::type atanh(T x) {
return __builtin_atanh(x);
}
template <typename T>
typename std::enable_if<std::is_integral<T>::value, double>::type erf(T x) {
return __builtin_erf(x);
}
template <typename T>
typename std::enable_if<std::is_integral<T>::value, double>::type erfc(T x) {
return __builtin_erfc(x);
}
template <typename T>
typename std::enable_if<std::is_integral<T>::value, double>::type expm1(T x) {
return __builtin_expm1(x);
}
template <typename T>
typename std::enable_if<std::is_integral<T>::value, double>::type lgamma(T x) {
return __builtin_lgamma(x);
}
template <typename T>
typename std::enable_if<std::is_integral<T>::value, double>::type log1p(T x) {
return __builtin_log1p(x);
}
template <typename T>
typename std::enable_if<std::is_integral<T>::value, double>::type nearbyint(
T x) {
return __builtin_nearbyint(x);
}
template <typename T>
typename std::enable_if<std::is_integral<T>::value, double>::type round(T x) {
return __builtin_round(x);
}
template <typename T>
typename std::enable_if<std::is_integral<T>::value, double>::type tgamma(T x) {
return __builtin_tgamma(x);
}
template <typename T>
typename std::enable_if<std::is_integral<T>::value, double>::type trunc(T x) {
return __builtin_trunc(x);
}
// Convoluted definition of these binary functions for overloads other than
// (float,float) and (double,double). Using a template from __gnu_cxx
// is dirty, but this code is only enabled on a dead platform, so there
// shouldn't be any risk of it breaking due to updates.
template <typename T, typename U>
typename __gnu_cxx::__promote_2<T, U>::__type fmax(T x, U y) {
typedef typename __gnu_cxx::__promote_2<T, U>::__type type;
return fmax(type(x), type(y));
}
template <typename T, typename U>
typename __gnu_cxx::__promote_2<T, U>::__type fmin(T x, U y) {
typedef typename __gnu_cxx::__promote_2<T, U>::__type type;
return fmin(type(x), type(y));
}
template <typename T, typename U>
typename __gnu_cxx::__promote_2<T, U>::__type copysign(T x, U y) {
typedef typename __gnu_cxx::__promote_2<T, U>::__type type;
return copysign(type(x), type(y));
}
template <typename T, typename U>
typename __gnu_cxx::__promote_2<T, U>::__type remainder(T x, U y) {
typedef typename __gnu_cxx::__promote_2<T, U>::__type type;
return remainder(type(x), type(y));
}
// log2 is a macro on Android API < 21, so we need to define it ourselves.
inline float log2(float arg) {
return ::log(arg) / ::log(2.0);
}
#if !defined(__NEWLIB__)
inline double log2(double arg) {
return ::log(arg) / ::log(2.0);
}
#endif
inline long double log2(long double arg) {
return ::log(arg) / ::log(2.0);
}
template <typename T>
typename std::enable_if<std::is_integral<T>::value, double>::type log2(T x) {
return ::log(x) / ::log(2.0);
}
} // namespace std
#endif
| 7,552
| 28.389105
| 79
|
h
|
null |
pytorch-main/c10/util/overloaded.h
|
#pragma once
namespace c10 {
namespace detail {
template <class... Ts>
struct overloaded_t {};
template <class T0>
struct overloaded_t<T0> : T0 {
using T0::operator();
overloaded_t(T0 t0) : T0(std::move(t0)) {}
};
template <class T0, class... Ts>
struct overloaded_t<T0, Ts...> : T0, overloaded_t<Ts...> {
using T0::operator();
using overloaded_t<Ts...>::operator();
overloaded_t(T0 t0, Ts... ts)
: T0(std::move(t0)), overloaded_t<Ts...>(std::move(ts)...) {}
};
} // namespace detail
// Construct an overloaded callable combining multiple callables, e.g. lambdas
template <class... Ts>
detail::overloaded_t<Ts...> overloaded(Ts... ts) {
return {std::move(ts)...};
}
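// Example (illustrative, assuming <string> is included by the caller):
//   auto visitor = c10::overloaded(
//       [](int i) { return std::to_string(i); },
//       [](const std::string& s) { return s; });
//   visitor(42);                // selects the int overload
//   visitor(std::string("hi")); // selects the std::string overload
// This is typically combined with std::visit over a variant.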
} // namespace c10
| 709
| 21.903226
| 78
|
h
|
null |
pytorch-main/c10/util/reverse_iterator.h
|
#pragma once
/**
* A constexpr std::reverse_iterator for C++11.
* Implementation taken from libstdc++,
* https://raw.githubusercontent.com/gcc-mirror/gcc/gcc-9_2_0-release/libstdc%2B%2B-v3/include/bits/stl_iterator.h
* adapted to our code base and constexpr'ified.
*/
// Copyright (C) 2001-2019 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/*
*
* Copyright (c) 1994
* Hewlett-Packard Company
*
* Permission to use, copy, modify, distribute and sell this software
* and its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appear in all copies and
* that both that copyright notice and this permission notice appear
* in supporting documentation. Hewlett-Packard Company makes no
* representations about the suitability of this software for any
* purpose. It is provided "as is" without express or implied warranty.
*
*
* Copyright (c) 1996-1998
* Silicon Graphics Computer Systems, Inc.
*
* Permission to use, copy, modify, distribute and sell this software
* and its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appear in all copies and
* that both that copyright notice and this permission notice appear
* in supporting documentation. Silicon Graphics makes no
* representations about the suitability of this software for any
* purpose. It is provided "as is" without express or implied warranty.
*/
#include <c10/util/C++17.h>
#include <iterator>
namespace c10 {
template <typename _Iterator>
class reverse_iterator {
protected:
_Iterator current;
using __traits_type = std::iterator_traits<_Iterator>;
public:
using iterator_type = _Iterator;
using value_type = typename __traits_type::value_type;
using difference_type = typename __traits_type::difference_type;
using pointer = typename __traits_type::pointer;
using reference = typename __traits_type::reference;
using iterator_category = typename __traits_type::iterator_category;
constexpr reverse_iterator() : current() {}
explicit constexpr reverse_iterator(iterator_type __x) : current(__x) {}
constexpr reverse_iterator(const reverse_iterator& __x)
: current(__x.current) {}
constexpr reverse_iterator& operator=(const reverse_iterator& rhs) noexcept {
current = rhs.current;
return *this;
}
template <typename _Iter>
constexpr reverse_iterator(const reverse_iterator<_Iter>& __x)
: current(__x.base()) {}
constexpr iterator_type base() const {
return current;
}
constexpr reference operator*() const {
#if defined(__cpp_constexpr) && __cpp_constexpr >= 201304
_Iterator iter = current;
return *--iter;
#else
// Only works for random access iterators if we're not C++14 :(
return *(current - 1);
#endif
}
constexpr pointer operator->() const {
#if defined(__cpp_constexpr) && __cpp_constexpr >= 201304
_Iterator iter = current;
return _S_to_pointer(--iter);
#else
// Only works for random access iterators if we're not C++14 :(
return _S_to_pointer(current - 1);
#endif
}
constexpr reverse_iterator& operator++() {
--current;
return *this;
}
constexpr reverse_iterator operator++(int) {
reverse_iterator __tmp = *this;
--current;
return __tmp;
}
constexpr reverse_iterator& operator--() {
++current;
return *this;
}
constexpr reverse_iterator operator--(int) {
reverse_iterator __tmp = *this;
++current;
return __tmp;
}
constexpr reverse_iterator operator+(difference_type __n) const {
return reverse_iterator(current - __n);
}
constexpr reverse_iterator& operator+=(difference_type __n) {
current -= __n;
return *this;
}
constexpr reverse_iterator operator-(difference_type __n) const {
return reverse_iterator(current + __n);
}
constexpr reverse_iterator& operator-=(difference_type __n) {
current += __n;
return *this;
}
constexpr reference operator[](difference_type __n) const {
return *(*this + __n);
}
private:
template <typename _Tp>
static constexpr _Tp* _S_to_pointer(_Tp* __p) {
return __p;
}
template <typename _Tp>
static constexpr pointer _S_to_pointer(_Tp __t) {
return __t.operator->();
}
};
template <typename _Iterator>
inline constexpr bool operator==(
const reverse_iterator<_Iterator>& __x,
const reverse_iterator<_Iterator>& __y) {
return __x.base() == __y.base();
}
template <typename _Iterator>
inline constexpr bool operator<(
const reverse_iterator<_Iterator>& __x,
const reverse_iterator<_Iterator>& __y) {
return __y.base() < __x.base();
}
template <typename _Iterator>
inline constexpr bool operator!=(
const reverse_iterator<_Iterator>& __x,
const reverse_iterator<_Iterator>& __y) {
return !(__x == __y);
}
template <typename _Iterator>
inline constexpr bool operator>(
const reverse_iterator<_Iterator>& __x,
const reverse_iterator<_Iterator>& __y) {
return __y < __x;
}
template <typename _Iterator>
inline constexpr bool operator<=(
const reverse_iterator<_Iterator>& __x,
const reverse_iterator<_Iterator>& __y) {
return !(__y < __x);
}
template <typename _Iterator>
inline constexpr bool operator>=(
const reverse_iterator<_Iterator>& __x,
const reverse_iterator<_Iterator>& __y) {
return !(__x < __y);
}
template <typename _IteratorL, typename _IteratorR>
inline constexpr bool operator==(
const reverse_iterator<_IteratorL>& __x,
const reverse_iterator<_IteratorR>& __y) {
return __x.base() == __y.base();
}
template <typename _IteratorL, typename _IteratorR>
inline constexpr bool operator<(
const reverse_iterator<_IteratorL>& __x,
const reverse_iterator<_IteratorR>& __y) {
return __y.base() < __x.base();
}
template <typename _IteratorL, typename _IteratorR>
inline constexpr bool operator!=(
const reverse_iterator<_IteratorL>& __x,
const reverse_iterator<_IteratorR>& __y) {
return !(__x == __y);
}
template <typename _IteratorL, typename _IteratorR>
inline constexpr bool operator>(
const reverse_iterator<_IteratorL>& __x,
const reverse_iterator<_IteratorR>& __y) {
return __y < __x;
}
template <typename _IteratorL, typename _IteratorR>
inline constexpr bool operator<=(
const reverse_iterator<_IteratorL>& __x,
const reverse_iterator<_IteratorR>& __y) {
return !(__y < __x);
}
template <typename _IteratorL, typename _IteratorR>
inline constexpr bool operator>=(
const reverse_iterator<_IteratorL>& __x,
const reverse_iterator<_IteratorR>& __y) {
return !(__x < __y);
}
template <typename _IteratorL, typename _IteratorR>
inline constexpr decltype(auto) operator-(
const reverse_iterator<_IteratorL>& __x,
const reverse_iterator<_IteratorR>& __y) {
return __y.base() - __x.base();
}
template <typename _Iterator>
inline constexpr reverse_iterator<_Iterator> operator+(
typename reverse_iterator<_Iterator>::difference_type __n,
const reverse_iterator<_Iterator>& __x) {
return reverse_iterator<_Iterator>(__x.base() - __n);
}
template <typename _Iterator>
inline constexpr reverse_iterator<_Iterator> __make_reverse_iterator(
_Iterator __i) {
return reverse_iterator<_Iterator>(__i);
}
template <typename _Iterator>
inline constexpr reverse_iterator<_Iterator> make_reverse_iterator(
_Iterator __i) {
return reverse_iterator<_Iterator>(__i);
}
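// Example (illustrative): walk a container backwards with the reverse_iterator
// defined above:
//   std::vector<int> v = {1, 2, 3};
//   for (auto it = c10::make_reverse_iterator(v.end()),
//             last = c10::make_reverse_iterator(v.begin());
//        it != last;
//        ++it) {
//     // visits 3, 2, 1
//   }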
template <typename _Iterator>
decltype(auto) __niter_base(reverse_iterator<_Iterator> __it) {
return __make_reverse_iterator(__niter_base(__it.base()));
}
} // namespace c10
| 8,566
| 28.541379
| 114
|
h
|
null |
pytorch-main/c10/util/safe_numerics.h
|
#pragma once
#include <c10/macros/Macros.h>
#include <c10/util/ArrayRef.h>
#include <iterator>
#include <numeric>
#include <type_traits>
// GCC has __builtin_mul_overflow from before it supported __has_builtin
#ifdef _MSC_VER
#define C10_HAS_BUILTIN_OVERFLOW() (0)
#include <c10/util/llvmMathExtras.h>
#include <intrin.h>
#else
#define C10_HAS_BUILTIN_OVERFLOW() (1)
#endif
namespace c10 {
C10_ALWAYS_INLINE bool add_overflows(uint64_t a, uint64_t b, uint64_t* out) {
#if C10_HAS_BUILTIN_OVERFLOW()
return __builtin_add_overflow(a, b, out);
#else
unsigned long long tmp;
#if defined(_M_IX86) || defined(_M_X64)
auto carry = _addcarry_u64(0, a, b, &tmp);
#else
tmp = a + b;
unsigned long long vector = (a & b) ^ ((a ^ b) & ~tmp);
auto carry = vector >> 63;
#endif
*out = tmp;
return carry;
#endif
}
C10_ALWAYS_INLINE bool mul_overflows(uint64_t a, uint64_t b, uint64_t* out) {
#if C10_HAS_BUILTIN_OVERFLOW()
return __builtin_mul_overflow(a, b, out);
#else
*out = a * b;
  // This test isn't exact, but avoids doing integer division
return (
(c10::llvm::countLeadingZeros(a) + c10::llvm::countLeadingZeros(b)) < 64);
#endif
}
C10_ALWAYS_INLINE bool mul_overflows(int64_t a, int64_t b, int64_t* out) {
#if C10_HAS_BUILTIN_OVERFLOW()
return __builtin_mul_overflow(a, b, out);
#else
volatile int64_t tmp = a * b;
*out = tmp;
if (a == 0 || b == 0) {
return false;
}
return !(a == tmp / b);
#endif
}
template <typename It>
bool safe_multiplies_u64(It first, It last, uint64_t* out) {
#if C10_HAS_BUILTIN_OVERFLOW()
uint64_t prod = 1;
bool overflow = false;
for (; first != last; ++first) {
overflow |= c10::mul_overflows(prod, *first, &prod);
}
*out = prod;
return overflow;
#else
uint64_t prod = 1;
uint64_t prod_log2 = 0;
bool is_zero = false;
for (; first != last; ++first) {
auto x = static_cast<uint64_t>(*first);
prod *= x;
// log2(0) isn't valid, so need to track it specially
is_zero |= (x == 0);
prod_log2 += c10::llvm::Log2_64_Ceil(x);
}
*out = prod;
  // This test isn't exact, but avoids doing integer division
return !is_zero && (prod_log2 >= 64);
#endif
}
template <typename Container>
bool safe_multiplies_u64(const Container& c, uint64_t* out) {
return safe_multiplies_u64(c.begin(), c.end(), out);
}
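// Example (illustrative): guard an element-count computation against overflow
// (safe_multiplies_u64 returns true when the product does not fit in 64 bits):
//   std::vector<uint64_t> sizes = {uint64_t(1) << 32, uint64_t(1) << 32, 2};
//   uint64_t numel = 0;
//   if (c10::safe_multiplies_u64(sizes, &numel)) {
//     // overflow: report an error instead of trusting `numel`
//   }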
} // namespace c10
| 2,332
| 23.819149
| 80
|
h
|
null |
pytorch-main/c10/util/signal_handler.h
|
#pragma once
#include <atomic>
#include <csignal>
#include <mutex>
#include <c10/macros/Export.h>
#if defined(__APPLE__)
#define C10_SUPPORTS_SIGNAL_HANDLER
#elif defined(__linux__) && !defined(C10_DISABLE_SIGNAL_HANDLERS)
#define C10_SUPPORTS_FATAL_SIGNAL_HANDLERS
#define C10_SUPPORTS_SIGNAL_HANDLER
#endif
#if defined(C10_SUPPORTS_FATAL_SIGNAL_HANDLERS)
#include <pthread.h>
#endif
namespace c10 {
class C10_API SignalHandler {
public:
enum class Action { NONE, STOP };
// Constructor. Specify what action to take when a signal is received.
SignalHandler(Action SIGINT_action, Action SIGHUP_action);
~SignalHandler();
Action CheckForSignals();
bool GotSIGINT();
bool GotSIGHUP();
Action SIGINT_action_;
Action SIGHUP_action_;
unsigned long my_sigint_count_;
unsigned long my_sighup_count_;
};
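// Example (illustrative): poll for Ctrl-C in a long-running loop:
//   c10::SignalHandler handler(
//       c10::SignalHandler::Action::STOP, c10::SignalHandler::Action::NONE);
//   while (handler.CheckForSignals() != c10::SignalHandler::Action::STOP) {
//     // ... do one unit of work ...
//   }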
#if defined(C10_SUPPORTS_FATAL_SIGNAL_HANDLERS)
class C10_API FatalSignalHandler {
// This works by setting up certain fatal signal handlers. Previous fatal
// signal handlers will still be called when the signal is raised. Defaults
// to being off.
public:
C10_API void setPrintStackTracesOnFatalSignal(bool print);
C10_API bool printStackTracesOnFatalSignal();
static FatalSignalHandler& getInstance();
virtual ~FatalSignalHandler();
protected:
explicit FatalSignalHandler();
private:
void installFatalSignalHandlers();
void uninstallFatalSignalHandlers();
static void fatalSignalHandlerStatic(int signum);
void fatalSignalHandler(int signum);
virtual void fatalSignalHandlerPostProcess();
struct sigaction* getPreviousSigaction(int signum);
const char* getSignalName(int signum);
void callPreviousSignalHandler(
struct sigaction* action,
int signum,
siginfo_t* info,
void* ctx);
void stacktraceSignalHandler(bool needsLock);
static void stacktraceSignalHandlerStatic(
int signum,
siginfo_t* info,
void* ctx);
void stacktraceSignalHandler(int signum, siginfo_t* info, void* ctx);
// The mutex protects the bool.
std::mutex fatalSignalHandlersInstallationMutex;
bool fatalSignalHandlersInstalled;
// We need to hold a reference to call the previous SIGUSR2 handler in case
// we didn't signal it
struct sigaction previousSigusr2 {};
// Flag dictating whether the SIGUSR2 handler falls back to previous handlers
// or is intercepted in order to print a stack trace.
std::atomic<bool> fatalSignalReceived;
// Global state set when a fatal signal is received so that backtracing
// threads know why they're printing a stacktrace.
const char* fatalSignalName;
int fatalSignum = -1;
// This wait condition is used to wait for other threads to finish writing
// their stack trace when in fatal sig handler (we can't use pthread_join
// because there's no way to convert from a tid to a pthread_t).
pthread_cond_t writingCond;
pthread_mutex_t writingMutex;
struct signal_handler {
const char* name;
int signum;
struct sigaction previous;
};
static signal_handler kSignalHandlers[];
};
#endif // defined(C10_SUPPORTS_FATAL_SIGNAL_HANDLERS)
} // namespace c10
| 3,119
| 28.433962
| 79
|
h
|
null |
pytorch-main/c10/util/ssize.h
|
#pragma once
#include <c10/util/Exception.h>
#include <c10/util/TypeSafeSignMath.h>
#include <cstddef>
#include <type_traits>
namespace c10 {
// Implementations of std::ssize() from C++ 20.
//
// This is useful in particular for avoiding -Werror=sign-compare
// issues.
//
// Use this with argument-dependent lookup, e.g.:
//   using c10::ssize;
// auto size = ssize(container);
//
// As with the standard library version, containers are permitted to
// specialize this with a free function defined in the same namespace.
//
// See https://en.cppreference.com/w/cpp/iterator/size for more
// information as well as the source of our implementations.
//
// We augment the implementation by adding an assert() if an overflow
// would occur.
template <typename C>
constexpr auto ssize(const C& c) -> std::
common_type_t<std::ptrdiff_t, std::make_signed_t<decltype(c.size())>> {
using R = std::
common_type_t<std::ptrdiff_t, std::make_signed_t<decltype(c.size())>>;
// We expect this to be exceedingly rare to fire and don't wish to
// pay a performance hit in release mode.
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!greater_than_max<R>(c.size()));
return static_cast<R>(c.size());
}
template <typename T, std::ptrdiff_t N>
constexpr auto ssize(const T (&array)[N]) noexcept -> std::ptrdiff_t {
return N;
}
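// Example (illustrative): keeps the loop below free of -Wsign-compare warnings
// because both operands of `<` are signed:
//   std::vector<float> v(10);
//   using c10::ssize;
//   for (std::ptrdiff_t i = 0; i < ssize(v); ++i) {
//     // ...
//   }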
} // namespace c10
| 1,339
| 28.130435
| 76
|
h
|
null |
pytorch-main/c10/util/strides.h
|
#pragma once
#include <c10/util/ArrayRef.h>
#include <c10/util/DimVector.h>
namespace c10 {
// Computes the contiguous strides of a tensor, given its sizes.
static inline DimVector contiguous_strides(const IntArrayRef sizes) {
using Int = IntArrayRef::value_type;
const Int dims = static_cast<Int>(sizes.size());
// With this initialisation we get the case dim == 0 or 1 right
DimVector strides(dims, 1);
for (auto i = dims - 2; i >= 0; --i) {
// Strides can't be 0 even if sizes are 0.
strides[i] = strides[i + 1] * std::max(sizes[i + 1], Int{1});
}
return strides;
}
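// Example (illustrative): sizes {2, 3, 4} give strides {12, 4, 1}; a zero-sized
// dimension still gets a nonzero stride, e.g. sizes {2, 0, 4} give strides {4, 4, 1}.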
} // namespace c10
| 616
| 24.708333
| 69
|
h
|
null |
pytorch-main/c10/util/string_utils.h
|
#pragma once
#include <sstream>
#include <stdexcept>
#include <string>
namespace c10 {
// to_string, stoi and stod implementation for Android related stuff.
// Note(jiayq): Do not use the CAFFE2_TESTONLY_FORCE_STD_STRING_TEST macro
// outside testing code that lives under common_test.cc
#if defined(__ANDROID__) || defined(CAFFE2_TESTONLY_FORCE_STD_STRING_TEST)
#define CAFFE2_TESTONLY_WE_ARE_USING_CUSTOM_STRING_FUNCTIONS 1
template <typename T>
std::string to_string(T value) {
std::ostringstream os;
os << value;
return os.str();
}
inline int stoi(const std::string& str, std::size_t* pos = 0) {
std::stringstream ss;
int n = 0;
ss << str;
ss >> n;
if (ss.fail()) {
// To mimic `std::stoi` and to avoid including `Exception.h`, throw
// `std::invalid_argument`.
// We can't easily detect out-of-range, so we don't use `std::out_of_range`.
throw std::invalid_argument("Not an integer");
}
if (pos) {
if (ss.tellg() == std::streampos(-1)) {
*pos = str.size();
} else {
*pos = ss.tellg();
}
}
return n;
}
inline uint64_t stoull(const std::string& str) {
std::stringstream ss;
uint64_t n = 0;
ss << str;
ss >> n;
if (ss.fail()) {
// To mimic `std::stoull` and to avoid including `Exception.h`, throw
// `std::invalid_argument`.
// We can't easily detect out-of-range, so we don't use `std::out_of_range`.
throw std::invalid_argument("Not an unsigned 64-bit integer");
}
return n;
}
inline double stod(const std::string& str, std::size_t* pos = 0) {
std::stringstream ss;
ss << str;
double val = 0;
ss >> val;
if (ss.fail()) {
// To mimic `std::stod` and to avoid including `Exception.h`, throw
// `std::invalid_argument`.
// We can't easily detect out-of-range, so we don't use `std::out_of_range`.
throw std::invalid_argument("Not a double-precision floating point number");
}
if (pos) {
if (ss.tellg() == std::streampos(-1)) {
*pos = str.size();
} else {
*pos = ss.tellg();
}
}
return val;
}
inline long long stoll(const std::string& str, std::size_t* pos = 0) {
// std::stoll doesn't exist in our Android environment, we need to implement
// it ourselves.
std::stringstream ss;
ss << str;
long long result = 0;
ss >> result;
if (ss.fail()) {
// To mimic `std::stoll` and to avoid including `Exception.h`, throw
// `std::invalid_argument`.
// We can't easily detect out-of-range, so we don't use `std::out_of_range`.
throw std::invalid_argument("Not a long long integer");
}
if (pos) {
if (ss.tellg() == std::streampos(-1)) {
*pos = str.size();
} else {
*pos = ss.tellg();
}
}
return result;
}
inline long long stoll(const std::string& str, size_t pos, int base) {
// std::stoll doesn't exist in our Android environment, we need to implement
// it ourselves.
std::stringstream ss;
if (str.size() > 0 && str.at(0) == '0') {
if (str.size() > 1 && (str.at(1) == 'x' || str.at(1) == 'X')) {
ss << std::hex << str;
} else {
ss << std::oct << str;
}
} else {
ss << str;
}
long long result = 0;
ss >> result;
if (ss.fail()) {
// To mimic `std::stoll` and to avoid including `Exception.h`, throw
// `std::invalid_argument`.
// We can't easily detect out-of-range, so we don't use `std::out_of_range`.
throw std::invalid_argument("Not a long long integer");
}
return result;
}
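// Example (illustrative): the base is inferred from the string prefix rather
// than from the `base` argument, e.g. stoll("0x1a", 0, 16) == 26 and
// stoll("017", 0, 8) == 15 (parsed as octal because of the leading '0').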
#else
#define CAFFE2_TESTONLY_WE_ARE_USING_CUSTOM_STRING_FUNCTIONS 0
using std::stod;
using std::stoi;
using std::stoll;
using std::stoull;
using std::to_string;
#endif // defined(__ANDROID__) || defined(CAFFE2_TESTONLY_FORCE_STD_STRING_TEST)
} // namespace c10
#if defined(__ANDROID__) && __ANDROID_API__ < 21 && defined(__GLIBCXX__)
#include <cstdlib>
// std::strtoll isn't available on Android NDK platform < 21 when building
// with libstdc++, so bring the global version into std.
namespace std {
using ::strtoll;
}
#endif
| 3,989
| 26.902098
| 80
|
h
|
null |
pytorch-main/c10/util/string_view.h
|
#pragma once
#include <c10/macros/Macros.h>
#include <c10/util/C++17.h>
#include <c10/util/reverse_iterator.h>
#include <algorithm>
#include <cstring>
#include <limits>
#include <stdexcept>
#include <string>
#if __cpp_lib_string_view
#include <string_view>
#define C10_HAS_STD_STRING_VIEW() 1
#define C10_HAS_STD_EXPERIMENTAL_STRING_VIEW() 0
#elif defined(__has_include)
#if __has_include(<experimental/string_view>)
// libc++ 7.0 has experimental/string_view but it's just a #error
#if !defined(_LIBCPP_VERSION) || (_LIBCPP_VERSION < 7000)
#include <experimental/string_view>
#endif
#if __cpp_lib_experimental_string_view
#define C10_HAS_STD_STRING_VIEW() 0
#define C10_HAS_STD_EXPERIMENTAL_STRING_VIEW() 1
#endif
#endif
#endif
#ifndef C10_HAS_STD_STRING_VIEW
#define C10_HAS_STD_STRING_VIEW() 0
#endif
#ifndef C10_HAS_STD_EXPERIMENTAL_STRING_VIEW
#define C10_HAS_STD_EXPERIMENTAL_STRING_VIEW() 0
#endif
C10_CLANG_DIAGNOSTIC_PUSH()
#if C10_CLANG_HAS_WARNING("-Wdeprecated")
C10_CLANG_DIAGNOSTIC_IGNORE("-Wdeprecated")
#endif
namespace c10 {
/**
* Reimplementation of std::string_view for C++11.
* Implemented following the interface definition in
* https://en.cppreference.com/w/cpp/string/basic_string_view
* See there for the API documentation.
*
* Difference: We don't have a Traits template parameter because
* std::char_traits isn't constexpr and we'd have to reimplement
* std::char_traits if we wanted to use it with our constexpr basic_string_view.
*/
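// A minimal constexpr usage sketch:
//   constexpr c10::string_view path("c10/util/string_view.h");
//   static_assert(path.ends_with(".h"), "suffix checks are constexpr");
//   static_assert(path.find('/') == 3, "find is constexpr");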
template <class CharT>
class basic_string_view final {
public:
using value_type = CharT;
using pointer = CharT*;
using const_pointer = const CharT*;
using reference = CharT&;
using const_reference = const CharT&;
using const_iterator = const CharT*;
using iterator = const_iterator;
using const_reverse_iterator = c10::reverse_iterator<const_iterator>;
using reverse_iterator = const_reverse_iterator;
using size_type = std::size_t;
using difference_type = std::ptrdiff_t;
static constexpr size_type npos = size_type(-1);
constexpr basic_string_view() noexcept : begin_(nullptr), size_(0) {}
explicit constexpr basic_string_view(const_pointer str, size_type count)
: begin_(str), size_(count) {}
/* implicit */ constexpr basic_string_view(const_pointer str)
: basic_string_view(str, strlen_(str)) {}
/* implicit */ basic_string_view(const ::std::basic_string<CharT>& str)
: basic_string_view(str.data(), str.size()) {}
constexpr basic_string_view(const basic_string_view&) noexcept = default;
constexpr basic_string_view& operator=(
const basic_string_view& rhs) noexcept {
begin_ = rhs.begin_;
size_ = rhs.size_;
return *this;
}
explicit operator ::std::basic_string<CharT>() const {
return ::std::basic_string<CharT>(data(), size());
}
constexpr const_iterator begin() const noexcept {
return cbegin();
}
constexpr const_iterator cbegin() const noexcept {
return begin_;
}
constexpr const_iterator end() const noexcept {
return cend();
}
constexpr const_iterator cend() const noexcept {
return begin_ + size_;
}
constexpr const_reverse_iterator rbegin() const noexcept {
return crbegin();
}
constexpr const_reverse_iterator crbegin() const noexcept {
return const_reverse_iterator(this->end());
}
constexpr const_reverse_iterator rend() const noexcept {
return crend();
}
constexpr const_reverse_iterator crend() const noexcept {
return const_reverse_iterator(this->begin());
}
friend constexpr const_iterator begin(basic_string_view sv) noexcept {
return sv.begin();
}
friend constexpr const_iterator end(basic_string_view sv) noexcept {
return sv.end();
}
constexpr const_reference operator[](size_type pos) const {
// TODO: split out
return at_(pos);
}
constexpr const_reference at(size_type pos) const {
#if !defined( \
__CUDA_ARCH__) // CUDA doesn't like std::out_of_range in device code
return C10_UNLIKELY(pos >= size_)
? (throw std::out_of_range(
"string_view::operator[] or string_view::at() out of range. Index: " +
c10::guts::to_string(pos) +
", size: " + c10::guts::to_string(size())),
at_(0))
: at_(pos);
#else
return at_(pos);
#endif
}
constexpr const_reference front() const {
return *begin_;
}
constexpr const_reference back() const {
return *(begin_ + size_ - 1);
}
constexpr const_pointer data() const noexcept {
return begin_;
}
constexpr size_type size() const noexcept {
return size_;
}
constexpr size_type length() const noexcept {
return size();
}
constexpr size_type max_size() const noexcept {
return std::numeric_limits<difference_type>::max();
}
C10_NODISCARD constexpr bool empty() const noexcept {
return size() == 0;
}
constexpr void remove_prefix(size_type n) {
if (n > size()) {
throw std::out_of_range(
"basic_string_view::remove_prefix: out of range. PrefixLength: " +
c10::guts::to_string(n) + ", size: " + c10::guts::to_string(size()));
}
begin_ += n;
size_ -= n;
}
constexpr void remove_suffix(size_type n) {
if (n > size()) {
throw std::out_of_range(
"basic_string_view::remove_suffix: out of range. SuffixLength: " +
c10::guts::to_string(n) + ", size: " + c10::guts::to_string(size()));
}
size_ -= n;
}
constexpr void swap(basic_string_view& sv) noexcept {
auto tmp = *this;
*this = sv;
sv = tmp;
}
size_type copy(pointer dest, size_type count, size_type pos = 0) const {
if (pos > size_) {
throw std::out_of_range(
"basic_string_view::copy: out of range. Index: " +
c10::guts::to_string(pos) +
", size: " + c10::guts::to_string(size()));
}
size_type copy_length = std::min(count, size_ - pos);
for (auto iter = begin() + pos, end = iter + copy_length; iter != end;) {
*(dest++) = *(iter++);
}
return copy_length;
}
constexpr basic_string_view substr(size_type pos = 0, size_type count = npos)
const {
#if !defined( \
__CUDA_ARCH__) // CUDA doesn't like std::out_of_range in device code
return (pos > size_)
? (throw std::out_of_range(
"basic_string_view::substr parameter out of bounds. Index: " +
c10::guts::to_string(pos) +
", size: " + c10::guts::to_string(size())),
substr_())
: substr_(pos, count);
#else
return substr_(pos, count);
#endif
}
constexpr int compare(basic_string_view rhs) const noexcept {
// Write it iteratively. This is faster.
for (size_t i = 0, end = std::min(size(), rhs.size()); i < end; ++i) {
if (at_(i) < rhs.at_(i)) {
return -1;
} else if (at_(i) > rhs.at_(i)) {
return 1;
}
}
if (size() < rhs.size()) {
return -1;
} else if (size() > rhs.size()) {
return 1;
}
return 0;
}
constexpr int compare(size_type pos1, size_type count1, basic_string_view v)
const {
return substr(pos1, count1).compare(v);
}
constexpr int compare(
size_type pos1,
size_type count1,
basic_string_view v,
size_type pos2,
size_type count2) const {
return substr(pos1, count1).compare(v.substr(pos2, count2));
}
constexpr int compare(const_pointer s) const {
return compare(basic_string_view(s));
}
constexpr int compare(size_type pos1, size_type count1, const_pointer s)
const {
return substr(pos1, count1).compare(basic_string_view(s));
}
constexpr int compare(
size_type pos1,
size_type count1,
const_pointer s,
size_type count2) const {
return substr(pos1, count1).compare(basic_string_view(s, count2));
}
friend constexpr bool operator==(
basic_string_view lhs,
basic_string_view rhs) noexcept {
return lhs.equals_(rhs);
}
friend constexpr bool operator!=(
basic_string_view lhs,
basic_string_view rhs) noexcept {
return !(lhs == rhs);
}
friend constexpr bool operator<(
basic_string_view lhs,
basic_string_view rhs) noexcept {
return lhs.compare(rhs) < 0;
}
friend constexpr bool operator>=(
basic_string_view lhs,
basic_string_view rhs) noexcept {
return !(lhs < rhs);
}
friend constexpr bool operator>(
basic_string_view lhs,
basic_string_view rhs) noexcept {
return rhs < lhs;
}
friend constexpr bool operator<=(
basic_string_view lhs,
basic_string_view rhs) noexcept {
return !(lhs > rhs);
}
constexpr bool starts_with(basic_string_view prefix) const noexcept {
return (prefix.size() > size()) ? false
: prefix.equals_(substr_(0, prefix.size()));
}
constexpr bool starts_with(CharT prefix) const noexcept {
return !empty() && prefix == front();
}
constexpr bool starts_with(const_pointer prefix) const {
return starts_with(basic_string_view(prefix));
}
constexpr bool ends_with(basic_string_view suffix) const noexcept {
return (suffix.size() > size())
? false
: suffix.equals_(substr_(size() - suffix.size(), suffix.size()));
}
constexpr bool ends_with(CharT suffix) const noexcept {
return !empty() && suffix == back();
}
constexpr bool ends_with(const_pointer suffix) const {
return ends_with(basic_string_view(suffix));
}
constexpr size_type find(basic_string_view v, size_type pos = 0)
const noexcept {
#if __cpp_constexpr >= 201304
// if we are in C++14, write it iteratively. This is faster.
if (v.size() == 0) {
return pos <= size() ? pos : npos;
}
if (pos + v.size() <= size()) {
for (size_type cur = pos, end = size() - v.size(); cur <= end; ++cur) {
if (v.at_(0) == at_(cur) &&
v.substr_(1).equals_(substr_(cur + 1, v.size() - 1))) {
return cur;
}
}
}
return npos;
#else
// if we are in C++11, we need to do it recursively because of constexpr
// restrictions.
return (v.size() == 0) ? (pos <= size() ? pos : npos)
: (pos + v.size() > size()) ? npos
: (v.at_(0) == at_(pos) &&
v.substr_(1).equals_(substr_(pos + 1, v.size() - 1)))
? pos
: find(v, pos + 1);
#endif
}
constexpr size_type find(CharT ch, size_type pos = 0) const noexcept {
return find_first_if_(pos, charIsEqual_{ch});
}
constexpr size_type find(const_pointer s, size_type pos, size_type count)
const {
return find(basic_string_view(s, count), pos);
}
constexpr size_type find(const_pointer s, size_type pos = 0) const {
return find(basic_string_view(s), pos);
}
constexpr size_type rfind(basic_string_view v, size_type pos = npos)
const noexcept {
// Write it iteratively. This is faster.
if (v.size() == 0) {
return pos <= size() ? pos : size();
}
if (v.size() <= size()) {
pos = std::min(size() - v.size(), pos);
do {
if (v.at_(0) == at_(pos) &&
v.substr_(1).equals_(substr_(pos + 1, v.size() - 1))) {
return pos;
}
} while (pos-- > 0);
}
return npos;
}
constexpr size_type rfind(CharT ch, size_type pos = npos) const noexcept {
return find_last_if_(pos, charIsEqual_{ch});
}
constexpr size_type rfind(const_pointer s, size_type pos, size_type count)
const {
return rfind(basic_string_view(s, count), pos);
}
constexpr size_type rfind(const_pointer s, size_type pos = npos) const {
return rfind(basic_string_view(s), pos);
}
constexpr size_type find_first_of(basic_string_view v, size_type pos = 0)
const noexcept {
return find_first_if_(pos, stringViewContainsChar_{v});
}
constexpr size_type find_first_of(CharT ch, size_type pos = 0)
const noexcept {
return find_first_if_(pos, charIsEqual_{ch});
}
constexpr size_type find_first_of(
const_pointer s,
size_type pos,
size_type count) const {
return find_first_of(basic_string_view(s, count), pos);
}
constexpr size_type find_first_of(const_pointer s, size_type pos = 0) const {
return find_first_of(basic_string_view(s), pos);
}
constexpr size_type find_last_of(basic_string_view v, size_type pos = npos)
const noexcept {
return find_last_if_(pos, stringViewContainsChar_{v});
}
constexpr size_type find_last_of(CharT ch, size_type pos = npos)
const noexcept {
return find_last_if_(pos, charIsEqual_{ch});
}
constexpr size_type find_last_of(
const_pointer s,
size_type pos,
size_type count) const {
return find_last_of(basic_string_view(s, count), pos);
}
constexpr size_type find_last_of(const_pointer s, size_type pos = npos)
const {
return find_last_of(basic_string_view(s), pos);
}
constexpr size_type find_first_not_of(basic_string_view v, size_type pos = 0)
const noexcept {
return find_first_if_(pos, stringViewDoesNotContainChar_{v});
}
constexpr size_type find_first_not_of(CharT ch, size_type pos = 0)
const noexcept {
return find_first_if_(pos, charIsNotEqual_{ch});
}
constexpr size_type find_first_not_of(
const_pointer s,
size_type pos,
size_type count) const {
return find_first_not_of(basic_string_view(s, count), pos);
}
constexpr size_type find_first_not_of(const_pointer s, size_type pos = 0)
const {
return find_first_not_of(basic_string_view(s), pos);
}
constexpr size_type find_last_not_of(
basic_string_view v,
size_type pos = npos) const noexcept {
return find_last_if_(pos, stringViewDoesNotContainChar_{v});
}
constexpr size_type find_last_not_of(CharT ch, size_type pos = npos)
const noexcept {
return find_last_if_(pos, charIsNotEqual_{ch});
}
constexpr size_type find_last_not_of(
const_pointer s,
size_type pos,
size_type count) const {
return find_last_not_of(basic_string_view(s, count), pos);
}
constexpr size_type find_last_not_of(const_pointer s, size_type pos = npos)
const {
return find_last_not_of(basic_string_view(s), pos);
}
private:
static constexpr size_type strlen_(const_pointer str) noexcept {
#if __cpp_constexpr >= 201304
// if we are in C++14, write it iteratively. This is faster.
const_pointer current = str;
while (*current != '\0') {
++current;
}
return current - str;
#else
// if we are in C++11, we need to do it recursively because of constexpr
// restrictions.
return (*str == '\0') ? 0 : 1 + strlen_(str + 1);
#endif
}
constexpr const_reference at_(size_type pos) const noexcept {
return *(begin_ + pos);
}
constexpr basic_string_view substr_(size_type pos = 0, size_type count = npos)
const {
return basic_string_view{begin_ + pos, std::min(count, size() - pos)};
}
template <class Condition>
constexpr size_type find_first_if_(size_type pos, Condition&& condition)
const noexcept {
#if __cpp_constexpr >= 201304
// if we are in C++14, write it iteratively. This is faster.
if (pos + 1 <= size()) {
for (size_type cur = pos; cur < size(); ++cur) {
if (condition(at_(cur))) {
return cur;
}
}
}
return npos;
#else
// if we are in C++11, we need to do it recursively because of constexpr
// restrictions.
return (pos + 1 > size()) ? npos
: condition(at_(pos))
? pos
: find_first_if_(pos + 1, std::forward<Condition>(condition));
#endif
}
template <class Condition>
constexpr size_type find_last_if_(size_type pos, Condition&& condition)
const noexcept {
// Write it iteratively. This is faster.
if (size() > 0) {
pos = std::min(size() - 1, pos);
do {
if (condition(at_(pos))) {
return pos;
}
} while (pos-- > 0);
}
return npos;
}
constexpr bool equals_(basic_string_view rhs) const {
// We don't use string_view::compare() here but implement it manually
// because only looking at equality allows for more optimized code.
#if defined(__GNUC__) && !defined(__CUDACC__)
return size() == rhs.size() &&
0 == __builtin_memcmp(data(), rhs.data(), size());
#elif __cpp_constexpr >= 201304
// if we are in C++14, write it iteratively. This is faster than the
// recursive C++11 implementation below.
if (size() != rhs.size()) {
return false;
}
    // Yes, memcmp would be faster than this loop, but memcmp isn't constexpr
// and I didn't feel like implementing a constexpr memcmp variant.
// TODO At some point this should probably be done, including tricks
// like comparing one machine word instead of a byte per iteration.
for (typename basic_string_view<CharT>::size_type pos = 0; pos < size();
++pos) {
if (at_(pos) != rhs.at_(pos)) {
return false;
}
}
return true;
#else
// if we are in C++11, we need to do it recursively because of constexpr
// restrictions.
return (size() != rhs.size()) ? false
: (size() == 0) ? true
: (front() != rhs.front()) ? false
: (substr_(1).equals_(rhs.substr_(1)));
#endif
}
struct charIsEqual_ final {
CharT expected;
constexpr bool operator()(CharT actual) const noexcept {
return expected == actual;
}
};
struct charIsNotEqual_ final {
CharT expected;
constexpr bool operator()(CharT actual) const noexcept {
return expected != actual;
}
};
struct stringViewContainsChar_ final {
basic_string_view expected;
constexpr bool operator()(CharT ch) const noexcept {
return npos != expected.find(ch);
}
};
struct stringViewDoesNotContainChar_ final {
basic_string_view expected;
constexpr bool operator()(CharT ch) const noexcept {
return npos == expected.find(ch);
}
};
const_pointer begin_;
size_type size_{};
};
template <class CharT>
const typename basic_string_view<CharT>::size_type
basic_string_view<CharT>::npos;
template <class CharT>
inline std::basic_ostream<CharT>& operator<<(
std::basic_ostream<CharT>& stream,
basic_string_view<CharT> sv) {
  // The rules for operator<< are quite complex, so let's defer to the
// STL implementation. The std::string fallback might be a bit
// slower, but is better than getting it wrong.
#if C10_HAS_STD_STRING_VIEW()
using std_string_type = ::std::basic_string_view<CharT>;
#elif C10_HAS_STD_EXPERIMENTAL_STRING_VIEW()
using std_string_type = ::std::experimental::basic_string_view<CharT>;
#else
using std_string_type = ::std::basic_string<CharT>;
#endif
return stream << std_string_type(sv.data(), sv.size());
}
template <class CharT>
constexpr inline void swap(
basic_string_view<CharT>& lhs,
basic_string_view<CharT>& rhs) {
lhs.swap(rhs);
}
using string_view = basic_string_view<char>;
} // namespace c10
namespace std {
template <class CharT>
struct hash<::c10::basic_string_view<CharT>> {
size_t operator()(::c10::basic_string_view<CharT> x) const {
    // The standard says that std::string_view hashing must do the same as
    // std::string hashing but leaves the details of std::string hashing
    // up to the implementer. So, to be conformant, we need to re-use an
    // existing STL type's hash function. The std::string fallback is probably
// slow but the only way to be conformant.
#if C10_HAS_STD_STRING_VIEW()
using std_string_type = ::std::basic_string_view<CharT>;
#elif C10_HAS_STD_EXPERIMENTAL_STRING_VIEW()
using std_string_type = ::std::experimental::basic_string_view<CharT>;
#else
using std_string_type = ::std::basic_string<CharT>;
#endif
return ::std::hash<std_string_type>{}(std_string_type(x.data(), x.size()));
}
};
} // namespace std
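// Illustrative usage sketch (added for clarity; not part of the original
// header). Because hashing and streaming defer to the standard string types
// above, a c10::string_view should hash to the same value as a std::string
// with the same contents:
//   c10::string_view sv = "hello";
//   std::cout << sv << '\n';  // streams like a string
//   bool same_hash = std::hash<c10::string_view>{}(sv) ==
//       std::hash<std::string>{}(std::string("hello"));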
C10_CLANG_DIAGNOSTIC_POP()
| 20,018
| 27.845821
| 85
|
h
|
null |
pytorch-main/c10/util/tempfile.h
|
#pragma once
#include <c10/util/Exception.h>
#include <c10/util/Optional.h>
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>
#include <utility>
#include <vector>
#if !defined(_WIN32)
#include <unistd.h>
#else // defined(_WIN32)
#include <Windows.h>
#include <fileapi.h>
#endif // defined(_WIN32)
namespace c10 {
namespace detail {
// Creates the filename pattern passed to and completed by `mkstemp`.
// Returns std::vector<char> because `mkstemp` needs a (non-const) `char*` and
// `std::string` only provides `const char*` before C++17.
#if !defined(_WIN32)
inline std::vector<char> make_filename(std::string name_prefix) {
// The filename argument to `mkstemp` needs "XXXXXX" at the end according to
// http://pubs.opengroup.org/onlinepubs/009695399/functions/mkstemp.html
static const std::string kRandomPattern = "XXXXXX";
// We see if any of these environment variables is set and use their value, or
// else default the temporary directory to `/tmp`.
static const char* env_variables[] = {"TMPDIR", "TMP", "TEMP", "TEMPDIR"};
std::string tmp_directory = "/tmp";
for (const char* variable : env_variables) {
if (const char* path = getenv(variable)) {
tmp_directory = path;
break;
}
}
std::vector<char> filename;
filename.reserve(
tmp_directory.size() + name_prefix.size() + kRandomPattern.size() + 2);
filename.insert(filename.end(), tmp_directory.begin(), tmp_directory.end());
filename.push_back('/');
filename.insert(filename.end(), name_prefix.begin(), name_prefix.end());
filename.insert(filename.end(), kRandomPattern.begin(), kRandomPattern.end());
filename.push_back('\0');
return filename;
}
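// For example (illustrative): with name_prefix == "torch-file-" and none of
// the TMPDIR/TMP/TEMP/TEMPDIR variables set, the buffer above holds
// "/tmp/torch-file-XXXXXX" plus a terminating '\0'; mkstemp() later replaces
// the trailing "XXXXXX" in place with a unique suffix.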
#endif // !defined(_WIN32)
} // namespace detail
struct TempFile {
#if !defined(_WIN32)
TempFile() : fd(-1) {}
TempFile(std::string name, int fd) : fd(fd), name(std::move(name)) {}
TempFile(const TempFile&) = delete;
TempFile(TempFile&& other) noexcept
: fd(other.fd), name(std::move(other.name)) {
other.fd = -1;
other.name.clear();
}
TempFile& operator=(const TempFile&) = delete;
TempFile& operator=(TempFile&& other) noexcept {
fd = other.fd;
name = std::move(other.name);
other.fd = -1;
other.name.clear();
return *this;
}
~TempFile() {
if (fd >= 0) {
unlink(name.c_str());
close(fd);
}
}
int fd;
#endif // !defined(_WIN32)
std::string name;
};
struct TempDir {
TempDir() = default;
explicit TempDir(std::string name) : name(std::move(name)) {}
TempDir(const TempDir&) = delete;
TempDir(TempDir&& other) noexcept : name(std::move(other.name)) {
other.name.clear();
}
TempDir& operator=(const TempDir&) = delete;
TempDir& operator=(TempDir&& other) noexcept {
name = std::move(other.name);
other.name.clear();
return *this;
}
~TempDir() {
if (!name.empty()) {
#if !defined(_WIN32)
rmdir(name.c_str());
#else // defined(_WIN32)
RemoveDirectoryA(name.c_str());
#endif // defined(_WIN32)
}
}
std::string name;
};
/// Attempts to return a temporary file or returns `nullopt` if an error
/// occurred.
///
/// The file returned follows the pattern
/// `<tmp-dir>/<name-prefix><random-pattern>`, where `<tmp-dir>` is the value of
/// the `"TMPDIR"`, `"TMP"`, `"TEMP"` or
/// `"TEMPDIR"` environment variable if any is set, or otherwise `/tmp`;
/// `<name-prefix>` is the value supplied to this function, and
/// `<random-pattern>` is a unique random character sequence.
/// On Windows, `name_prefix` is ignored and `tmpnam` is used.
inline c10::optional<TempFile> try_make_tempfile(
std::string name_prefix = "torch-file-") {
#if defined(_WIN32)
return TempFile{std::tmpnam(nullptr)};
#else
std::vector<char> filename = detail::make_filename(std::move(name_prefix));
const int fd = mkstemp(filename.data());
if (fd == -1) {
return c10::nullopt;
}
  // Don't construct the string as string(filename.begin(), filename.end()), or
  // there will be a trailing '\0' at the end.
return TempFile(filename.data(), fd);
#endif // defined(_WIN32)
}
/// Like `try_make_tempfile`, but throws an exception if a temporary file could
/// not be returned.
inline TempFile make_tempfile(std::string name_prefix = "torch-file-") {
if (auto tempfile = try_make_tempfile(std::move(name_prefix))) {
return std::move(*tempfile);
}
TORCH_CHECK(false, "Error generating temporary file: ", std::strerror(errno));
}
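// Illustrative usage sketch (not part of the original header; the example
// path is hypothetical):
//   if (c10::optional<c10::TempFile> tmp = c10::try_make_tempfile()) {
//     // tmp->name is something like "/tmp/torch-file-Ab12Cd"; on POSIX
//     // builds tmp->fd is an open file descriptor for it.
//   }
//   c10::TempFile tmp2 = c10::make_tempfile("my-prefix-");  // throws on error
//   // On POSIX builds the file is unlink()ed and the descriptor close()d
//   // when the TempFile object is destroyed.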
/// Attempts to return a temporary directory or returns `nullopt` if an error
/// occurred.
///
/// The directory returned follows the pattern
/// `<tmp-dir>/<name-prefix><random-pattern>/`, where `<tmp-dir>` is the value
/// of the `"TMPDIR"`, `"TMP"`, `"TEMP"` or
/// `"TEMPDIR"` environment variable if any is set, or otherwise `/tmp`;
/// `<name-prefix>` is the value supplied to this function, and
/// `<random-pattern>` is a unique random character sequence.
/// On Windows, `name_prefix` is ignored and `tmpnam` is used.
inline c10::optional<TempDir> try_make_tempdir(
std::string name_prefix = "torch-dir-") {
#if defined(_WIN32)
while (true) {
const char* dirname = std::tmpnam(nullptr);
if (!dirname) {
return c10::nullopt;
}
if (CreateDirectoryA(dirname, NULL)) {
return TempDir(dirname);
}
if (GetLastError() != ERROR_ALREADY_EXISTS) {
return c10::nullopt;
}
}
return c10::nullopt;
#else
std::vector<char> filename = detail::make_filename(std::move(name_prefix));
const char* dirname = mkdtemp(filename.data());
if (!dirname) {
return c10::nullopt;
}
return TempDir(dirname);
#endif // defined(_WIN32)
}
/// Like `try_make_tempdir`, but throws an exception if a temporary directory
/// could not be returned.
inline TempDir make_tempdir(std::string name_prefix = "torch-dir-") {
if (auto tempdir = try_make_tempdir(std::move(name_prefix))) {
return std::move(*tempdir);
}
TORCH_CHECK(
false, "Error generating temporary directory: ", std::strerror(errno));
}
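// Illustrative usage sketch (not part of the original header; the file name
// below is hypothetical):
//   c10::TempDir dir = c10::make_tempdir();           // throws on error
//   std::string path = dir.name + "/checkpoint.bin";  // place files inside it
//   // The directory is removed (rmdir / RemoveDirectoryA) when `dir` goes out
//   // of scope; note that rmdir only succeeds if the directory is empty again.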
} // namespace c10
| 6,047
| 29.089552
| 80
|
h
|
null |
pytorch-main/c10/util/typeid.h
|
#pragma once
#include <atomic>
#include <cstdlib>
#include <memory>
#include <mutex>
#include <type_traits>
#include <vector>
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#include <c10/util/IdWrapper.h>
#include <c10/util/TypeIndex.h>
#include <c10/util/TypeTraits.h>
#include <c10/core/ScalarType.h>
#include <c10/util/irange.h>
/*
* TypeIdentifier is a small type containing an id.
* Types must be registered using CAFFE_DECLARE_KNOWN_TYPE() (in their header)
* and CAFFE_DEFINE_KNOWN_TYPE() (in their .cpp file) for them to have a type
* id. If a type is registered, you can also create an object containing meta
* data like constructor, destructor, stringified name, ... about the type by
* calling TypeMeta::Make<T>. This returns a TypeMeta() object, which is
* basically just a pointer to the type information, so it's cheap to pass
* around.
*/
// TODO: This file is still in the caffe2 namespace, despite living
// in the ATen directory. This is because the macro
// CAFFE_KNOWN_TYPE (and CAFFE_DECLARE_KNOWN_TYPE) defines a template
// specialization, which relies
// on the namespace of TypeMeta matching the namespace where the macro is
// called. This requires us to fix all of the call-sites, which I want to do
// later. So the namespace is not fixed at the moment.
// Make at::Half a fundamental type.
namespace c10 {
namespace guts {
template <>
struct is_fundamental<at::Half> : std::true_type {};
} // namespace guts
} // namespace c10
namespace caffe2 {
/**
* A type id is a unique id for a given C++ type.
* You need to register your types using CAFFE_KNOWN_TYPE(MyType) to be able to
* use TypeIdentifier with custom types. This is for example used to store the
* dtype of tensors.
*/
class C10_API TypeIdentifier final
: public at::IdWrapper<TypeIdentifier, c10::util::type_index> {
public:
friend std::ostream& operator<<(std::ostream& stream, TypeIdentifier typeId);
friend constexpr bool operator<(TypeIdentifier lhs, TypeIdentifier rhs);
/**
* Returns the unique id for the given type T. The id is unique for the type T
* in the sense that for any two different types, their ids are different; for
* the same type T, the id remains the same over different calls of the
* function. However, this is not guaranteed over different runs, as the id
* is generated during run-time. Do NOT serialize the id for storage.
*/
template <typename T>
static C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA TypeIdentifier Get() noexcept {
return TypeIdentifier(c10::util::get_type_index<T>());
}
static constexpr TypeIdentifier uninitialized() {
return TypeIdentifier(c10::util::type_index{0});
}
private:
constexpr explicit TypeIdentifier(c10::util::type_index id) : IdWrapper(id) {}
};
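// Illustrative usage sketch (not part of the original header):
//   auto id_float_a = caffe2::TypeIdentifier::Get<float>();
//   auto id_float_b = caffe2::TypeIdentifier::Get<float>();
//   auto id_int = caffe2::TypeIdentifier::Get<int>();
//   // Same type -> same id within a single run; different types -> different
//   // ids. Ids are generated at run-time, so never serialize them.
//   assert(id_float_a == id_float_b);
//   assert(id_float_a != id_int);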
// Allow usage in std::map / std::set
// TODO Disallow this and rather use std::unordered_map/set everywhere
inline constexpr bool operator<(TypeIdentifier lhs, TypeIdentifier rhs) {
return lhs.underlyingId() < rhs.underlyingId();
}
inline std::ostream& operator<<(
std::ostream& stream,
caffe2::TypeIdentifier typeId) {
return stream << typeId.underlyingId();
}
} // namespace caffe2
namespace at {
using DataType = caffe2::TypeIdentifier;
}
C10_DEFINE_HASH_FOR_IDWRAPPER(caffe2::TypeIdentifier)
namespace caffe2 {
namespace detail {
// This struct holds the actual type information. There will be
// one allocated per type. TypeMeta objects will then point to the struct
// instance for the type they're configured for.
struct TypeMetaData final {
using New = void*();
using PlacementNew = void(void*, size_t);
using Copy = void(const void*, void*, size_t);
using PlacementDelete = void(void*, size_t);
using Delete = void(void*);
constexpr TypeMetaData() noexcept
: itemsize_(0),
new_(nullptr),
placementNew_(nullptr),
copy_(nullptr),
placementDelete_(nullptr),
delete_(nullptr),
id_(TypeIdentifier::uninitialized()),
name_("nullptr (uninitialized)") {}
constexpr TypeMetaData(
size_t itemsize,
New* newFn,
PlacementNew* placementNew,
Copy* copy,
PlacementDelete* placementDelete,
Delete* deleteFn,
TypeIdentifier id,
c10::string_view name) noexcept
: itemsize_(itemsize),
new_(newFn),
placementNew_(placementNew),
copy_(copy),
placementDelete_(placementDelete),
delete_(deleteFn),
id_(id),
name_(name) {}
size_t itemsize_;
New* new_;
PlacementNew* placementNew_;
Copy* copy_;
PlacementDelete* placementDelete_;
Delete* delete_;
TypeIdentifier id_;
c10::string_view name_;
};
// Mechanism for throwing errors which can't be prevented at compile time
// due to type erasure. E.g. somebody calling TypeMeta::copy() for
// non-copyable type. Right now just throws exception but is implemented
// in .cpp to manage dependencies
[[noreturn]] C10_API void _ThrowRuntimeTypeLogicError(const std::string& msg);
/**
* Placement new function for the type.
*/
template <typename T>
inline void _PlacementNew(void* ptr, size_t n) {
T* typed_ptr = static_cast<T*>(ptr);
for (const auto i : c10::irange(n)) {
new (typed_ptr + i) T;
}
}
template <typename T>
inline void _PlacementNewNotDefault(void* /*ptr*/, size_t /*n*/) {
_ThrowRuntimeTypeLogicError(
"Type " + std::string(c10::util::get_fully_qualified_type_name<T>()) +
" is not default-constructible.");
}
template <
typename T,
std::enable_if_t<std::is_default_constructible<T>::value>* = nullptr>
inline constexpr TypeMetaData::PlacementNew* _PickPlacementNew() {
return (c10::guts::is_fundamental<T>::value || std::is_pointer<T>::value)
? nullptr
: &_PlacementNew<T>;
}
template <
typename T,
std::enable_if_t<!std::is_default_constructible<T>::value>* = nullptr>
inline constexpr TypeMetaData::PlacementNew* _PickPlacementNew() {
static_assert(
!c10::guts::is_fundamental<T>::value && !std::is_pointer<T>::value,
"this should have picked the other SFINAE case");
return &_PlacementNewNotDefault<T>;
}
template <typename T>
inline void* _New() {
return new T;
}
template <typename T>
inline void* _NewNotDefault() {
_ThrowRuntimeTypeLogicError(
"Type " + std::string(c10::util::get_fully_qualified_type_name<T>()) +
" is not default-constructible.");
}
template <
typename T,
std::enable_if_t<std::is_default_constructible<T>::value>* = nullptr>
inline constexpr TypeMetaData::New* _PickNew() {
return &_New<T>;
}
template <
typename T,
std::enable_if_t<!std::is_default_constructible<T>::value>* = nullptr>
inline constexpr TypeMetaData::New* _PickNew() {
return &_NewNotDefault<T>;
}
/**
* Typed copy function for classes.
*/
template <typename T>
inline void _Copy(const void* src, void* dst, size_t n) {
const T* typed_src = static_cast<const T*>(src);
T* typed_dst = static_cast<T*>(dst);
for (const auto i : c10::irange(n)) {
typed_dst[i] = typed_src[i];
}
}
/**
* A placeholder function for types that do not allow assignment.
*/
template <typename T>
inline void _CopyNotAllowed(const void* /*src*/, void* /*dst*/, size_t /*n*/) {
_ThrowRuntimeTypeLogicError(
"Type " + std::string(c10::util::get_fully_qualified_type_name<T>()) +
" does not allow assignment.");
}
template <
typename T,
std::enable_if_t<std::is_copy_assignable<T>::value>* = nullptr>
inline constexpr TypeMetaData::Copy* _PickCopy() {
return (c10::guts::is_fundamental<T>::value || std::is_pointer<T>::value)
? nullptr
: &_Copy<T>;
}
template <
typename T,
std::enable_if_t<!std::is_copy_assignable<T>::value>* = nullptr>
inline constexpr TypeMetaData::Copy* _PickCopy() {
static_assert(
!c10::guts::is_fundamental<T>::value && !std::is_pointer<T>::value,
"this should have picked the other SFINAE case");
return &_CopyNotAllowed<T>;
}
/**
* Destructor for non-fundamental types.
*/
template <typename T>
inline void _PlacementDelete(void* ptr, size_t n) {
T* typed_ptr = static_cast<T*>(ptr);
for (const auto i : c10::irange(n)) {
typed_ptr[i].~T();
}
}
template <typename T>
inline constexpr TypeMetaData::PlacementDelete* _PickPlacementDelete() {
return (c10::guts::is_fundamental<T>::value || std::is_pointer<T>::value)
? nullptr
: &_PlacementDelete<T>;
}
template <typename T>
inline void _Delete(void* ptr) {
T* typed_ptr = static_cast<T*>(ptr);
delete typed_ptr;
}
template <class T>
inline constexpr TypeMetaData::Delete* _PickDelete() noexcept {
return &_Delete<T>;
}
class _Uninitialized final {};
} // namespace detail
//
// note: this is outside TypeMeta because gcc seems to have trouble
// with scalarTypeItemSizes as a constexpr static member used by
// a public inline instance method
//
// item sizes for TypeMeta::itemsize() fast path
static constexpr uint8_t scalarTypeItemSizes[NumScalarTypes] = {
#define SCALAR_TYPE_SIZE(T, name) sizeof(T),
AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(SCALAR_TYPE_SIZE)
#undef SCALAR_TYPE_SIZE
0, // Undefined
};
/**
* TypeMeta is a thin class that allows us to store the type of a container such
* as a blob, or the data type of a tensor, with a unique run-time id. It also
* stores some additional data such as the item size and the name of the type
* for run-time inspection.
*/
class C10_API TypeMeta final {
public:
using New = detail::TypeMetaData::New;
using PlacementNew = detail::TypeMetaData::PlacementNew;
using Copy = detail::TypeMetaData::Copy;
using PlacementDelete = detail::TypeMetaData::PlacementDelete;
using Delete = detail::TypeMetaData::Delete;
/** Create a dummy TypeMeta object. To create a TypeMeta object for a specific
* type, use TypeMeta::Make<T>().
*/
TypeMeta() noexcept;
/**
* Copy constructor.
*/
TypeMeta(const TypeMeta& src) noexcept = default;
/**
* Assignment operators.
*/
TypeMeta& operator=(const TypeMeta& src) noexcept = default;
TypeMeta(TypeMeta&& rhs) noexcept = default;
inline TypeMeta& operator=(ScalarType scalar_type) noexcept {
index_ = static_cast<uint16_t>(scalar_type);
return *this;
}
private:
// TypeMeta can only be created by Make, making sure that we do not
// create incorrectly mixed up TypeMeta objects.
explicit TypeMeta(const uint16_t index) noexcept : index_(index) {}
public:
/**
* Returns the type id.
*/
TypeIdentifier id() const noexcept {
return data().id_;
}
/**
* true if we represent some ScalarType type
*/
inline bool isScalarType() const noexcept {
return index_ < NumScalarTypes;
}
/**
* true if we represent ScalarType scalar_type
*/
inline bool isScalarType(ScalarType scalar_type) const noexcept {
return index_ == static_cast<uint16_t>(scalar_type);
}
/**
* Returns the size of the item.
*/
inline size_t itemsize() const noexcept {
if (C10_LIKELY(isScalarType())) {
return scalarTypeItemSizes[index_];
}
return data().itemsize_;
}
/**
* Returns the new function pointer for individual items.
*/
New* newFn() const noexcept {
return data().new_;
}
/**
* Returns the placement new function pointer for individual items.
*/
PlacementNew* placementNew() const noexcept {
return data().placementNew_;
}
/**
   * Returns the typed copy function pointer for individual items.
*/
Copy* copy() const noexcept {
return data().copy_;
}
/**
* Returns the destructor function pointer for individual items.
*/
PlacementDelete* placementDelete() const noexcept {
return data().placementDelete_;
}
Delete* deleteFn() const noexcept {
return data().delete_;
}
/**
* Returns a printable name for the type.
*/
c10::string_view name() const noexcept {
return data().name_;
}
friend bool operator==(const TypeMeta& lhs, const TypeMeta& rhs) noexcept;
template <typename T>
bool Match() const noexcept {
return (*this == Make<T>());
}
// Below are static functions that can be called by passing a specific type.
template <class T>
static C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA TypeIdentifier Id() noexcept {
return TypeIdentifier::Get<T>();
}
template <class T>
static c10::string_view TypeName() noexcept {
return c10::util::get_fully_qualified_type_name<T>();
}
template <class T>
static constexpr size_t ItemSize() noexcept {
return sizeof(T);
}
/**
* Returns a TypeMeta object that corresponds to the typename T.
*/
template <typename T>
static TypeMeta Make() {
// The instance pointed to is declared here, but defined in a .cpp file.
// We need to silence the compiler warning about using an undefined
    // variable template. '-Wpragmas' and '-Wunknown-warning-option' have to be
// disabled for compilers that don't know '-Wundefined-var-template' and
// would error at our attempt to disable it.
#ifndef _MSC_VER
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic ignored "-Wunknown-warning-option"
#pragma GCC diagnostic ignored "-Wundefined-var-template"
#endif
return TypeMeta(_typeMetaData<T>());
#ifndef _MSC_VER
#pragma GCC diagnostic pop
#endif
}
/**
* convert ScalarType enum values to TypeMeta handles
*/
static inline caffe2::TypeMeta fromScalarType(ScalarType scalar_type) {
const auto index = static_cast<uint16_t>(scalar_type);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
index < NumScalarTypes,
"Unrecognized Scalartype ",
scalar_type,
" (please report this error)");
return TypeMeta(index);
}
/**
* convert TypeMeta handles to ScalarType enum values
*/
inline ScalarType toScalarType() {
if (C10_LIKELY(isScalarType())) {
return static_cast<ScalarType>(index_);
}
error_unsupported_typemeta(*this);
}
private:
[[noreturn]] static void error_unsupported_typemeta(caffe2::TypeMeta dtype);
// hard limit number of registered types
// note: constexpr provokes Windows compilation error "member may not be
// initialized" static constexpr size_t MaxTypeIndex = 32;
//
#if defined C10_MOBILE
  // The reason this is not UINT8_MAX is that the array initialization takes
  // space proportional to the size of the array.
// The compiler seems to add code (or data padding) to initialize the array with
// empty elements. Please see
// https://github.com/pytorch/pytorch/pull/51881 for details.
//
#define MaxTypeIndex \
(NumScalarTypes + 15 /* number of CAFFE_DEFINE_KNOWN_TYPE in typeid.cpp */ + \
1 /* 1 more for caffe2 tensor */)
#else
#define MaxTypeIndex UINT8_MAX
#endif
// Protects type metadata allocation.
// NOLINTNEXTLINE(facebook-hte-NonPodStaticDeclaration)
static std::mutex& getTypeMetaDatasLock();
static uint16_t nextTypeIndex;
static detail::TypeMetaData* typeMetaDatas();
static uint16_t existingMetaDataIndexForType(TypeIdentifier identifier);
public:
#ifdef __CUDACC__
// NOTE [ TypeIdentifier::Get nvcc/clang discrepancy]
// nvcc and clang do not produce identical results for
// TypeIdentifier::Get, because TypeIdentifier::Get relies on
// __PRETTY_FUNCTION__ and they don't agree on the canonical names
// of types (e.g., nvcc normalizes to `short unsigned int`, but clang
// calls it `unsigned short`). Hide the implementation of this function
// from nvcc so that we always use clang (or whatever host C++ compiler)
// for TypeIdentifier::Get.
template <class T>
C10_EXPORT static uint16_t addTypeMetaData();
#else
template <class T>
C10_EXPORT static uint16_t addTypeMetaData() {
const auto identifier = TypeIdentifier::Get<T>();
// Need to hold this for the rest of the function, protecting:
// 1) existingMetaDataIndexForType()
// 2) nextTypeIndex++
// 3) the write into typeMetaDatas()
std::lock_guard<std::mutex> lock(getTypeMetaDatasLock());
// It may exist already if added in a different dynamic shared library.
const uint16_t existing_index = existingMetaDataIndexForType(identifier);
if (existing_index != MaxTypeIndex) {
return existing_index;
}
const uint16_t index = nextTypeIndex++;
TORCH_CHECK(
index <= MaxTypeIndex,
"Maximum number of CAFFE_KNOWN_TYPE declarations has been exceeded. ",
"Please report this issue.");
typeMetaDatas()[index] = detail::TypeMetaData{
sizeof(T),
detail::_PickNew<T>(),
detail::_PickPlacementNew<T>(),
detail::_PickCopy<T>(),
detail::_PickPlacementDelete<T>(),
detail::_PickDelete<T>(),
identifier,
c10::util::get_fully_qualified_type_name<T>()};
return index;
}
#endif
private:
// specializations return indexes into typeMetaDataInstances()
template <class T>
C10_API static uint16_t _typeMetaData() noexcept;
//
// TypeMeta just wraps this index
//
uint16_t index_;
inline const detail::TypeMetaData& data() const {
return typeMetaDatas()[index_];
}
};
// specializations of TypeMeta::_typeMetaData for ScalarType types
#define DEFINE_SCALAR_METADATA_INSTANCE(T, name) \
template <> \
constexpr uint16_t TypeMeta::_typeMetaData<T>() noexcept { \
return static_cast<uint16_t>(ScalarType::name); \
}
AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_SCALAR_METADATA_INSTANCE)
#undef DEFINE_SCALAR_METADATA_INSTANCE
template <>
C10_EXPORT constexpr uint16_t TypeMeta::_typeMetaData<
detail::_Uninitialized>() noexcept {
return static_cast<uint16_t>(ScalarType::Undefined);
}
inline TypeMeta::TypeMeta() noexcept
: index_(_typeMetaData<detail::_Uninitialized>()) {}
inline bool operator==(const TypeMeta& lhs, const TypeMeta& rhs) noexcept {
return (lhs.index_ == rhs.index_);
}
inline bool operator!=(const TypeMeta& lhs, const TypeMeta& rhs) noexcept {
return !operator==(lhs, rhs);
}
inline std::ostream& operator<<(
std::ostream& stream,
caffe2::TypeMeta typeMeta) {
return stream << typeMeta.name();
}
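// Illustrative usage sketch (not part of the original header):
//   caffe2::TypeMeta meta = caffe2::TypeMeta::Make<float>();
//   meta.itemsize();      // sizeof(float)
//   meta.name();          // printable name, e.g. "float"
//   meta.Match<float>();  // true
//   auto from_st = caffe2::TypeMeta::fromScalarType(c10::ScalarType::Float);
//   from_st.toScalarType();         // c10::ScalarType::Float
//   bool same = (from_st == meta);  // both are backed by the Float slot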
/**
* Register unique id for a type so it can be used in TypeMeta context, e.g. be
* used as a type for Blob or for Tensor elements.
*
* CAFFE_KNOWN_TYPE is deprecated; prefer CAFFE_DECLARE_KNOWN_TYPE and
* CAFFE_DEFINE_KNOWN_TYPE.
*
* CAFFE_KNOWN_TYPE does explicit instantiation of TypeIdentifier::Get<T>
* template function and thus needs to be put in a single translation unit (.cpp
* file) for a given type T. Other translation units that use type T as a type
* of the caffe2::Blob or element type of caffe2::Tensor need to depend on the
* translation unit that contains CAFFE_KNOWN_TYPE declaration via regular
* linkage dependencies.
*
* NOTE: the macro needs to be invoked in ::caffe2 namespace
*/
// Implementation note: in MSVC, we will need to prepend the C10_API
// keyword in order to get things compiled properly. In Linux, gcc seems to
// create an "attribute ignored" error for explicit template instantiations, see
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2017/p0537r0.html
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=51930
// and as a result, we define these two macros slightly differently.
#if defined(_MSC_VER) || defined(__clang__)
#define EXPORT_IF_NOT_GCC C10_EXPORT
#else
#define EXPORT_IF_NOT_GCC
#endif
// CAFFE_KNOWN_TYPE is deprecated! Use CAFFE_DECLARE_KNOWN_TYPE and
// CAFFE_DEFINE_KNOWN_TYPE instead.
#define CAFFE_KNOWN_TYPE(T) \
template uint16_t TypeMeta::addTypeMetaData<T>(); \
template <> \
EXPORT_IF_NOT_GCC uint16_t TypeMeta::_typeMetaData<T>() noexcept { \
static const uint16_t index = addTypeMetaData<T>(); \
return index; \
}
#define CAFFE_DEFINE_KNOWN_TYPE(T, ident) \
template uint16_t TypeMeta::addTypeMetaData<T>(); \
namespace detail { \
EXPORT_IF_NOT_GCC const uint16_t ident##_metadata_index = \
TypeMeta::addTypeMetaData<T>(); \
} // namespace detail
// Unlike CAFFE_KNOWN_TYPE, CAFFE_DECLARE_KNOWN_TYPE avoids a function
// call to access _typeMetaData in the common case.
#define CAFFE_DECLARE_KNOWN_TYPE(T, ident) \
extern template uint16_t TypeMeta::addTypeMetaData<T>(); \
namespace detail { \
extern C10_API const uint16_t ident##_metadata_index; \
} /* namespace detail */ \
template <> \
EXPORT_IF_NOT_GCC C10_ALWAYS_INLINE uint16_t \
TypeMeta::_typeMetaData<T>() noexcept { \
return detail::ident##_metadata_index; \
}
#define CAFFE_KNOWN_TYPE_NOEXPORT(T) \
template <> \
uint16_t TypeMeta::_typeMetaData<T>() noexcept { \
static const uint16_t index = addTypeMetaData<T>(); \
return index; \
}
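// Illustrative registration sketch (MyType and my_type are hypothetical names;
// not part of the original header):
//   // In a header, inside namespace caffe2:
//   //   CAFFE_DECLARE_KNOWN_TYPE(MyType, my_type)
//   // In exactly one .cpp file, inside namespace caffe2:
//   //   CAFFE_DEFINE_KNOWN_TYPE(MyType, my_type)
//   // Afterwards TypeMeta::Make<MyType>() is usable, e.g. for Blob contents
//   // or caffe2::Tensor element types.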
CAFFE_DECLARE_KNOWN_TYPE(std::string, std_string)
CAFFE_DECLARE_KNOWN_TYPE(uint16_t, uint16_t)
CAFFE_DECLARE_KNOWN_TYPE(char, char)
CAFFE_DECLARE_KNOWN_TYPE(std::unique_ptr<std::mutex>, std_unique_ptr_std_mutex)
CAFFE_DECLARE_KNOWN_TYPE(
std::unique_ptr<std::atomic<bool>>,
std_unique_ptr_std_atomic_bool)
CAFFE_DECLARE_KNOWN_TYPE(std::vector<int32_t>, std_vector_int32_t)
CAFFE_DECLARE_KNOWN_TYPE(std::vector<int64_t>, std_vector_int64_t)
CAFFE_DECLARE_KNOWN_TYPE(std::vector<unsigned long>, std_vector_unsigned_long)
CAFFE_DECLARE_KNOWN_TYPE(bool*, bool_ptr)
CAFFE_DECLARE_KNOWN_TYPE(char*, char_ptr)
CAFFE_DECLARE_KNOWN_TYPE(int*, int_ptr)
// For some compilers, long is defined as a separate type from int32_t and
// int64_t. As a result we need to register long separately.
// It is recommended that one does NOT use long - use int32_t and int64_t
// explicitly. Explicit long type annotation may go away in the future.
// details: This hack works by defining a _guard_long_unique type, which is
// long iff the compiler has a separate long type and is a dummy type otherwise.
// we then allocate a type id to that _guard_long_unique. If the compiler has a
// separate long type, this allocates a type id for long. Otherwise, it
// allocates a type id for the dummy type, which doesn't matter.
namespace detail {
template <class T>
class _guard_long_unique_dummy final {};
template <class T>
using _guard_long_unique = std::conditional_t<
std::is_same<long, int32_t>::value || std::is_same<long, int64_t>::value,
_guard_long_unique_dummy<T>,
T>;
} // namespace detail
CAFFE_DECLARE_KNOWN_TYPE(
detail::_guard_long_unique<long>,
detail_guard_long_unique_long);
CAFFE_DECLARE_KNOWN_TYPE(
detail::_guard_long_unique<std::vector<long>>,
detail_guard_long_unique_std_vector_long)
CAFFE_DECLARE_KNOWN_TYPE(float*, float_ptr)
CAFFE_DECLARE_KNOWN_TYPE(at::Half*, at_Half)
} // namespace caffe2
| 23,293
| 31.716292
| 80
|
h
|
null |
pytorch-main/caffe2/contrib/aten/aten_op_template.h
|
#pragma once
#include <unordered_map>
#include <string>
#include <ATen/Functions.h>
#include <c10/macros/Macros.h>
#include <c10/util/irange.h>
#include <caffe2/core/context.h>
#include <caffe2/core/operator.h>
#include <caffe2/utils/math.h>
#include <iostream>
// a map from descriptor strings (see [DESCRIPTORS])
// to the key in the switch statement that implements them
static std::unordered_map<std::string, int> op_to_key = {
${mappings}
};
namespace caffe2 {
using at::Half; // for AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, ...)
namespace internal {
TORCH_API at::Tensor index_with_uint8_handling(
const at::Tensor& self,
const torch::List<c10::optional<at::Tensor>>& indices);
}
template <class Context>
class ATenOp : public Operator<Context> {
public:
ATenOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws) {
VLOG(2) << "ATen OpDef: " << ProtoDebugString(operator_def) << "\n";
switch(findImplementation(operator_def)) {
${cases}
default:
CAFFE_THROW("Unexpected key value for aten operator");
}
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
return run_op();
}
private:
// actual operator implementation is initialized in ctor.
std::function<bool()> run_op;
at::Backend backend() const;
TypeMeta typeMetaFor(const at::Tensor & t) {
return typeMetaFor(t.scalar_type());
}
TypeMeta typeMetaFor(at::ScalarType st) {
#define DEFINE_CASE(ctype,aten_name) \
case at::k##aten_name: \
return TypeMeta::Make<ctype>();
switch(st) {
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, DEFINE_CASE)
default:
CAFFE_THROW("Unknown ATen Type");
}
#undef DEFINE_CASE
}
at::TensorOptions optionsFor(const Tensor& ten) {
at::Device device = ten.GetDevice();
#if defined(USE_ROCM)
if (backend() == at::Backend::HIP) {
device = at::Device(kCUDA, device.index());
}
#endif
return at::TensorOptions(device).dtype(ten.dtype());
}
at::Tensor tensorWrapping(const Tensor& ten_) {
auto& ten = const_cast<Tensor&>(ten_);
return at::from_blob(
ten.raw_mutable_data(),
ten.sizes(),
optionsFor(ten));
}
at::Tensor peek(size_t i, size_t N) {
auto real_idx = InputSize() - N + i;
return tensorWrapping(Input(real_idx));
}
std::vector<at::Tensor> peekSlice(size_t i, size_t len, size_t N) {
std::vector<at::Tensor> results;
results.reserve(len);
for (size_t ii = i; ii < i + len; ++ii) {
results.push_back(peek(ii, N));
}
return results;
}
torch::List<c10::optional<at::Tensor>> peekSliceOptionals(size_t i, size_t len, size_t N) {
torch::List<c10::optional<at::Tensor>> results;
results.reserve(len);
for (size_t ii = i; ii < i + len; ++ii) {
results.push_back(peek(ii, N));
}
return results;
}
void assignTo(Tensor* dst, const at::Tensor& src_) {
at::Tensor src = src_.contiguous();
auto at_sizes = src.sizes();
caffe2::TypeMeta type_meta = typeMetaFor(src);
at::Device device = src.device();
#if defined(USE_ROCM)
if (device.is_cuda()) {
device = at::Device(at::DeviceType::HIP, device.index());
}
#endif
at::TensorImpl* src_impl = src.unsafeReleaseTensorImpl();
std::vector<int64_t> dims(at_sizes.begin(), at_sizes.end());
dst->Resize(dims);
dst->ShareExternalPointer(
at::DataPtr(
src_impl->mutable_data(),
static_cast<void*>(src_impl),
[](void* t_ptr) -> void {
at::TensorImpl* local_impl = static_cast<at::TensorImpl*>(t_ptr);
c10::raw::intrusive_ptr::decref(local_impl);
},
device),
type_meta,
0);
}
void assignListStartingAt(
size_t offset,
const std::vector<at::Tensor>& tensors) {
for (const auto i : c10::irange(tensors.size())) {
assignTo(Output(offset + i), tensors[i]);
}
}
template<typename T,
typename std::enable_if<std::numeric_limits<T>::is_integer, bool>::type* =
nullptr>
int64_t extract(const at::Scalar &s) {
return s.toLong();
}
template<typename T,
typename std::enable_if<!std::numeric_limits<T>::is_integer, bool>::type* =
nullptr>
int64_t extract(const at::Scalar &s) {
return s.toDouble();
}
void assignTo(Tensor* dst, at::ScalarType scalar_type, const at::Scalar& scalar) {
switch(scalar_type) {
#define DEFINE_CASE(ctype,aten_name) \
case at::k##aten_name: { \
auto value = extract<ctype>(scalar); \
assignToValue<ctype>(dst, at::convert<ctype,decltype(value)>(value)); \
} break;
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, DEFINE_CASE)
#undef DEFINE_CASE
default:
CAFFE_THROW("Unknown ATen Type");
}
}
template <typename T>
void assignToValue(Tensor* dst, T v) {
dst->Resize(std::vector<int64_t>());
math::Set(1, v, dst->template mutable_data<T>(), &context_);
}
int findImplementation(const OperatorDef& operator_def) {
CAFFE_ENFORCE(HasArgument("operator"));
std::string op = OperatorBase::GetSingleArgument<std::string>("operator", "");
// construct descriptor string ([DESCRIPTORS]) given the attributes
// and inputs of this operator_def, and look up the implementation key
// for this variant
std::stringstream descriptor;
descriptor << op;
std::vector<std::string> attrs;
for (const auto i : c10::irange(operator_def.arg_size())) {
auto & attr = operator_def.arg(i);
if (attr.name() == "operator" || attr.name() == "type" || attr.name() == "overload_name") {
continue;
}
attrs.push_back(attr.name());
}
std::sort(attrs.begin(), attrs.end());
for(auto & a : attrs)
descriptor << "-" << a;
std::string descriptor_sized =
descriptor.str() + "-" + c10::to_string(InputSize());
std::string descriptor_var_args = descriptor.str() + "-*";
if (op_to_key.count(descriptor_sized) > 0) {
return op_to_key[descriptor_sized];
}
if (op_to_key.count(descriptor_var_args) > 0) {
return op_to_key[descriptor_var_args];
}
std::stringstream ss;
ss << "Attempting to run unknown ATen operator configuration: "
<< descriptor_sized;
CAFFE_THROW(ss.str());
}
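  // Illustrative example of the descriptor lookup above (the operator name and
  // attribute are hypothetical): for operator "add" with a single extra
  // argument "alpha" and two inputs, descriptor_sized is "add-alpha-2" and
  // descriptor_var_args is "add-alpha-*"; whichever is found first in
  // op_to_key selects the generated case in the constructor's switch.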
at::Scalar readScalarAttribute(const std::string & name) {
if(OperatorBase::HasSingleArgumentOfType<int64_t>(name)) {
return OperatorBase::GetSingleArgument<int64_t>(name, 0);
} else {
CAFFE_ENFORCE(OperatorBase::HasSingleArgumentOfType<float>(name));
return OperatorBase::GetSingleArgument<float>(name, 0);
}
}
template<typename T>
T readAttribute(const std::string & name) {
CAFFE_ENFORCE(OperatorBase::HasSingleArgumentOfType<T>(name));
return OperatorBase::GetSingleArgument<T>(name, 0);
}
std::vector<int64_t> readIntArrayRef(const std::string & name) {
CAFFE_ENFORCE(OperatorBase::HasArgument(name));
return OperatorBase::GetRepeatedArgument<int64_t>(name, {});
}
template <int N>
std::array<bool, N> readBoolMask(const std::string& name) {
CAFFE_ENFORCE(OperatorBase::HasArgument(name));
std::vector<int64_t> ints =
OperatorBase::GetRepeatedArgument<int64_t>(name, {});
std::array<bool, N> result;
for (const auto i : c10::irange(N)) {
result[i] = ints.at(i);
}
return result;
}
${implementations}
};
}
| 7,515
| 30.579832
| 97
|
h
|
null |
pytorch-main/caffe2/contrib/fakelowp/batch_matmul_fp16_fake_op.h
|
#ifndef CAFFE2_OPERATORS_BATCH_MATMUL_OP_H_
#define CAFFE2_OPERATORS_BATCH_MATMUL_OP_H_
#include <ATen/Utils.h>
#include <c10/util/accumulate.h>
#include <fbgemm/FbgemmConvert.h>
#include "caffe2/contrib/fakelowp/fp16_gemm_utils.h"
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include <algorithm>
#include <functional>
#include <numeric>
#include <string>
#include <vector>
C10_DECLARE_bool(caffe2_fbgemm_fake_fp16_clamp);
namespace caffe2 {
template <
class Context,
class Engine = DefaultEngine,
bool USE_ACC_FP16 = false,
bool USE_TMP_ACCUMULATOR = false,
bool USE_CUSTOM_ACC32 =
false> /* if USE_ACC_FP16=false, set to true to use custom gemm kernel
in fp16_gemm_utils.cc instead of math.h gemm functions */
class BatchMatMulFP16FakeOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit BatchMatMulFP16FakeOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(bool, "trans_a", trans_a_, false),
OP_SINGLE_ARG(bool, "trans_b", trans_b_, false),
OP_SINGLE_ARG(bool, "broadcast", broadcast_, false) {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<float>>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
const auto& A = Input(0);
const auto& B = Input(1);
const int A_ndim = A.dim();
const int B_ndim = B.dim();
const std::vector<std::int64_t> A_dims = A.sizes().vec();
const std::vector<std::int64_t> B_dims = B.sizes().vec();
const T* A_data = A.template data<T>();
const T* B_data = B.template data<T>();
// Fake fp16 rounding of input
std::vector<float> A_rounded(A.numel());
std::vector<float> B_rounded(B.numel());
fbgemm::RoundToFloat16(
A_data,
A_rounded.data(),
A.numel(),
FLAGS_caffe2_fbgemm_fake_fp16_clamp,
USE_ACC_FP16);
fbgemm::RoundToFloat16(
B_data,
B_rounded.data(),
B.numel(),
FLAGS_caffe2_fbgemm_fake_fp16_clamp,
USE_ACC_FP16);
A_data = A_rounded.data();
B_data = B_rounded.data();
if (A_ndim == 1 && B_ndim == 1) {
CAFFE_ENFORCE_EQ(A.numel(), B.numel());
auto* Y = Output(0, {1}, at::dtype<T>());
T* Y_data = Y->template mutable_data<T>();
math::Dot<T, Context>(A.numel(), A_data, B_data, Y_data, &context_);
fbgemm::RoundToFloat16(
reinterpret_cast<const float*>(Y_data),
Y_data,
Y->numel(),
FLAGS_caffe2_fbgemm_fake_fp16_clamp,
USE_ACC_FP16);
return true;
}
if (A_ndim == 1) {
const int N = A.numel();
if (trans_b_) {
CAFFE_ENFORCE_EQ(B_dims[B_ndim - 1], N);
} else {
CAFFE_ENFORCE_EQ(B_dims[B_ndim - 2], N);
}
std::vector<std::int64_t> Y_dims(B_ndim - 1);
if (trans_b_) {
std::copy_n(B_dims.cbegin(), B_ndim - 1, Y_dims.begin());
} else {
std::copy_n(B_dims.cbegin(), B_ndim - 2, Y_dims.begin());
Y_dims.back() = B_dims.back();
}
auto* Y = Output(0, Y_dims, at::dtype<T>());
T* Y_data = Y->template mutable_data<T>();
if (trans_b_) {
const int M = B.numel() / N;
caffe2::custom_fp16_gemv(
USE_ACC_FP16,
USE_CUSTOM_ACC32,
USE_TMP_ACCUMULATOR,
CblasNoTrans,
M,
N,
1.0f,
B_data,
A_data,
0.0f,
Y_data,
&context_);
} else {
const int M = B_dims[B_ndim - 1];
const int batch_size = B.numel() / (M * N);
if (batch_size == 1) {
caffe2::custom_fp16_gemv(
USE_ACC_FP16,
USE_CUSTOM_ACC32,
USE_TMP_ACCUMULATOR,
CblasTrans,
N,
M,
1.0f,
B_data,
A_data,
0.0f,
Y_data,
&context_);
} else {
caffe2::custom_fp16_gemm_strided_batched(
USE_ACC_FP16,
USE_CUSTOM_ACC32,
USE_TMP_ACCUMULATOR,
CblasTrans,
CblasNoTrans,
batch_size,
M,
1,
N,
1.0f,
B_data,
M * N,
A_data,
0,
0.0f,
Y_data,
M,
&context_);
}
}
fbgemm::RoundToFloat16(
reinterpret_cast<const float*>(Y_data),
Y_data,
Y->numel(),
FLAGS_caffe2_fbgemm_fake_fp16_clamp,
USE_ACC_FP16);
return true;
}
if (B_ndim == 1) {
const int N = B.numel();
if (trans_a_) {
CAFFE_ENFORCE_EQ(A_dims[A_ndim - 2], N);
} else {
CAFFE_ENFORCE_EQ(A_dims[A_ndim - 1], N);
}
const std::vector<std::int64_t> Y_dims(
A_dims.cbegin(), A_dims.cbegin() + A_ndim - 1);
auto* Y = Output(0, Y_dims, at::dtype<T>());
T* Y_data = Y->template mutable_data<T>();
if (trans_a_) {
const int M = A_dims[A_ndim - 1];
const int batch_size = A.numel() / (M * N);
if (batch_size == 1) {
caffe2::custom_fp16_gemv(
USE_ACC_FP16,
USE_CUSTOM_ACC32,
USE_TMP_ACCUMULATOR,
CblasTrans,
N,
M,
1.0f,
A_data,
B_data,
0.0f,
Y_data,
&context_);
} else {
caffe2::custom_fp16_gemm_strided_batched(
USE_ACC_FP16,
USE_CUSTOM_ACC32,
USE_TMP_ACCUMULATOR,
CblasTrans,
CblasNoTrans,
batch_size,
M,
1,
N,
1.0f,
A_data,
M * N,
B_data,
0,
0.0f,
Y_data,
M,
&context_);
}
} else {
const int M = A.numel() / N;
caffe2::custom_fp16_gemv(
USE_ACC_FP16,
USE_CUSTOM_ACC32,
USE_TMP_ACCUMULATOR,
CblasNoTrans,
M,
N,
1.0f,
A_data,
B_data,
0.0f,
Y_data,
&context_);
}
fbgemm::RoundToFloat16(
reinterpret_cast<const float*>(Y_data),
Y_data,
Y->numel(),
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
return true;
}
const int M = trans_a_ ? A_dims[A_ndim - 1] : A_dims[A_ndim - 2];
const int K = trans_a_ ? A_dims[A_ndim - 2] : A_dims[A_ndim - 1];
if (trans_b_) {
CAFFE_ENFORCE_EQ(B_dims[B_ndim - 1], K);
} else {
CAFFE_ENFORCE_EQ(B_dims[B_ndim - 2], K);
}
const int N = trans_b_ ? B_dims[B_ndim - 2] : B_dims[B_ndim - 1];
const int ndim = std::max(A_ndim, B_ndim);
std::vector<std::int64_t> A_broadcast_dims(ndim);
std::vector<std::int64_t> B_broadcast_dims(ndim);
std::vector<std::int64_t> Y_broadcast_dims(ndim);
math::utils::ComputeBroadcastBinaryOpDims(
A_ndim - 2,
A_dims.data(),
B_ndim - 2,
B_dims.data(),
A_broadcast_dims.data(),
B_broadcast_dims.data(),
Y_broadcast_dims.data());
Y_broadcast_dims[ndim - 2] = M;
Y_broadcast_dims[ndim - 1] = N;
auto* Y = Output(0, Y_broadcast_dims, at::dtype<T>());
T* Y_data = Y->template mutable_data<T>();
const int batch_dim = ndim - 2;
const bool is_broadcast_dims = !std::equal(
A_broadcast_dims.cbegin(),
A_broadcast_dims.cbegin() + batch_dim,
B_broadcast_dims.cbegin());
if (is_broadcast_dims) {
CAFFE_ENFORCE(broadcast_);
}
const std::int64_t A_batch_size = c10::multiply_integers(
A_broadcast_dims.cbegin(),
A_broadcast_dims.cbegin() + batch_dim);
const std::int64_t B_batch_size = c10::multiply_integers(
B_broadcast_dims.cbegin(),
B_broadcast_dims.cbegin() + batch_dim);
const std::int64_t Y_batch_size = c10::multiply_integers(
Y_broadcast_dims.cbegin(),
Y_broadcast_dims.cbegin() + batch_dim);
if (Y_batch_size == 0) {
fbgemm::RoundToFloat16(
reinterpret_cast<const float*>(Y_data),
Y_data,
Y->numel(),
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
return true;
}
if (A_batch_size == 1 && B_batch_size == 1) {
if (USE_ACC_FP16) {
caffe2::custom_fp16_gemm_with_trans(
trans_a_ ? CblasTrans : CblasNoTrans,
trans_b_ ? CblasTrans : CblasNoTrans,
M,
K,
N,
A_data,
B_data,
0.0f,
Y_data,
true, /* use acc16*/
USE_TMP_ACCUMULATOR);
} else if (USE_CUSTOM_ACC32) {
caffe2::custom_fp16_gemm_with_trans(
trans_a_ ? CblasTrans : CblasNoTrans,
trans_b_ ? CblasTrans : CblasNoTrans,
M,
K,
N,
A_data,
B_data,
0.0f,
Y_data,
false, /* use acc32*/
USE_TMP_ACCUMULATOR);
} else {
math::Gemm<T, Context, Engine>(
trans_a_ ? CblasTrans : CblasNoTrans,
trans_b_ ? CblasTrans : CblasNoTrans,
M,
N,
K,
1.0f,
A_data,
B_data,
0.0f,
Y_data,
&context_);
}
} else if (A_batch_size == 1) {
caffe2::custom_fp16_gemm_strided_batched(
USE_ACC_FP16,
USE_CUSTOM_ACC32,
USE_TMP_ACCUMULATOR,
trans_a_ ? CblasTrans : CblasNoTrans,
trans_b_ ? CblasTrans : CblasNoTrans,
Y_batch_size,
M,
N,
K,
1.0f,
A_data,
0,
B_data,
K * N,
0.0f,
Y_data,
M * N,
&context_);
} else if (B_batch_size == 1) {
caffe2::custom_fp16_gemm_strided_batched(
USE_ACC_FP16,
USE_CUSTOM_ACC32,
USE_TMP_ACCUMULATOR,
trans_a_ ? CblasTrans : CblasNoTrans,
trans_b_ ? CblasTrans : CblasNoTrans,
Y_batch_size,
M,
N,
K,
1.0f,
A_data,
M * K,
B_data,
0,
0.0f,
Y_data,
M * N,
&context_);
} else if (!is_broadcast_dims) {
caffe2::custom_fp16_gemm_strided_batched(
USE_ACC_FP16,
USE_CUSTOM_ACC32,
USE_TMP_ACCUMULATOR,
trans_a_ ? CblasTrans : CblasNoTrans,
trans_b_ ? CblasTrans : CblasNoTrans,
Y_batch_size,
M,
N,
K,
1.0f,
A_data,
M * K,
B_data,
K * N,
0.0f,
Y_data,
M * N,
&context_);
} else {
std::vector<const T*> A_ptr(Y_batch_size);
std::vector<const T*> B_ptr(Y_batch_size);
std::vector<T*> Y_ptr(Y_batch_size);
std::vector<std::int64_t> index(batch_dim);
for (std::int64_t i = 0; i < Y_batch_size; ++i) {
const std::int64_t A_index = math::utils::GetIndexFromDims(
batch_dim, A_broadcast_dims.data(), index.data());
const std::int64_t B_index = math::utils::GetIndexFromDims(
batch_dim, B_broadcast_dims.data(), index.data());
A_ptr[i] = A_data + A_index * M * K;
B_ptr[i] = B_data + B_index * K * N;
Y_ptr[i] = Y_data + i * M * N;
math::utils::IncreaseIndexInDims(
batch_dim, Y_broadcast_dims.data(), index.data());
}
caffe2::custom_fp16_gemm_batched(
USE_ACC_FP16,
USE_CUSTOM_ACC32,
USE_TMP_ACCUMULATOR,
trans_a_ ? CblasTrans : CblasNoTrans,
trans_b_ ? CblasTrans : CblasNoTrans,
Y_batch_size,
M,
N,
K,
1.0f,
A_ptr.data(),
B_ptr.data(),
0.0f,
Y_ptr.data(),
&context_);
}
fbgemm::RoundToFloat16(
reinterpret_cast<const float*>(Y_data),
Y_data,
Y->numel(),
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
return true;
}
private:
const bool trans_a_;
const bool trans_b_;
const bool broadcast_;
};
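// Illustrative instantiation sketch (alias names are hypothetical; the real
// operator registrations live in the corresponding .cc file):
//   using BatchMatMulFP16AccSketch =
//       BatchMatMulFP16FakeOp<CPUContext, DefaultEngine,
//                             /*USE_ACC_FP16=*/true>;
//   using BatchMatMulCustomAcc32Sketch =
//       BatchMatMulFP16FakeOp<CPUContext, DefaultEngine,
//                             /*USE_ACC_FP16=*/false,
//                             /*USE_TMP_ACCUMULATOR=*/false,
//                             /*USE_CUSTOM_ACC32=*/true>;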
} // namespace caffe2
#endif // CAFFE2_OPERATORS_BATCH_MATMUL_OP_H_
| 12,655
| 27.698413
| 79
|
h
|
null |
pytorch-main/caffe2/contrib/fakelowp/fp16_fc_acc_op.h
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <fbgemm/FbgemmConvert.h>
#include <fbgemm/FbgemmFP16.h>
#include <immintrin.h>
#include "caffe2/contrib/fakelowp/fp16_gemm_utils.h"
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/math.h"
C10_DECLARE_bool(caffe2_fbgemm_fake_fp16_clamp);
namespace caffe2 {
using namespace std;
// C2 wrapper for fp16 gemm with fp16 accumulation
template <
class Context,
class Engine = DefaultEngine,
bool USE_ACC_FP16 = false, // Whether use fp16 accumulation
bool USE_TMP_ACCUMULATOR = false,
bool ADD_BIAS_FIRST = false>
class Fp16FCAccOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
Fp16FCAccOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
axis_(OperatorBase::GetSingleArgument<int32_t>("axis", 1)),
axis_w_(OperatorBase::GetSingleArgument<int32_t>("axis_w", 1)) {}
~Fp16FCAccOp() noexcept override {
if (X_fp16_ != nullptr) {
delete[] X_fp16_;
}
if (W_fp16_ != nullptr) {
delete[] W_fp16_;
}
if (b_fp16_ != nullptr) {
delete[] b_fp16_;
}
if (bias_multiplier_fp16_ != nullptr) {
delete[] bias_multiplier_fp16_;
}
}
// template on X, B, W and Y.
template <typename T_X, typename T_B, typename T_W, typename T_Y>
bool DoRunWithType() {
const auto& X = Input(0);
const auto& W_blob = OperatorBase::InputBlob(1);
const auto& b = Input(2);
auto* Y = Output(0);
CAFFE_ENFORCE(b.ndim() == 1, b.ndim());
// batch size
const auto canonical_axis = X.canonical_axis_index(axis_);
const int M = X.size_to_dim(canonical_axis);
const int N = b.size();
const int K = X.size_from_dim(canonical_axis);
Y_shape_cache_ = X.sizes().vec();
// This is an invariant of canonical_axis, so we can DCHECK.
TORCH_DCHECK_LE(canonical_axis + 1, Y_shape_cache_.size());
Y_shape_cache_.resize(canonical_axis + 1);
Y_shape_cache_[canonical_axis] = N;
Y->Resize(Y_shape_cache_);
if (X.size() == 0) {
// skip the rest of the computation if X is empty
Y->template mutable_data<T_Y>();
return true;
}
// Convert X and W to fp16
int X_size = M * K;
int W_size = N * K;
if (X_fp16_ == nullptr) {
X_fp16_ = new float[X_size];
X_size_cached_ = X_size;
} else if (X_size > X_size_cached_) {
delete[] X_fp16_;
X_fp16_ = new float[X_size];
X_size_cached_ = X_size;
}
fbgemm::RoundToFloat16(
X.template data<T_X>(),
X_fp16_,
X_size,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
if (W_fp16_ == nullptr) {
W_fp16_ = new float[W_size];
const T_W* W_data = nullptr;
if (W_blob.template IsType<
caffe2::unique_ptr<fbgemm::PackedGemmMatrixFP16>>()) {
auto* W_fbgemm =
OperatorBase::Input<
caffe2::unique_ptr<fbgemm::PackedGemmMatrixFP16>>(1)
.get();
if (!W_fbgemm->packed()) {
float* W_fp16_trans = new float[W_size];
fbgemm::Float16ToFloat_avx2(W_fbgemm->pmat(), W_fp16_trans, W_size);
for (const auto i : c10::irange(N)) {
for (const auto j : c10::irange(K)) {
W_fp16_[j * N + i] = W_fp16_trans[i * K + j];
}
}
delete[] W_fp16_trans;
} else {
vector<fbgemm::float16> unpacked_mat;
unpacked_mat.resize(W_size);
W_fbgemm->unpack(
unpacked_mat.data(), fbgemm::matrix_op_t::NoTranspose);
fbgemm::Float16ToFloat_avx2(unpacked_mat.data(), W_fp16_, W_size);
}
} else {
const auto& W = Input(1);
W_data = W.template data<T_W>();
// Transpose W
for (const auto i : c10::irange(N)) {
for (const auto j : c10::irange(K)) {
W_fp16_[j * N + i] = W_data[i * K + j];
}
}
}
fbgemm::RoundToFloat16(
W_fp16_, W_fp16_, W_size, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
}
auto Y_data = Y->template mutable_data<T_Y>();
int Y_size = M * N;
// Initialize Y
memset(Y_data, 0.0, sizeof(float) * Y_size);
// Add bias term, accumulation is in fp16.
if (bias_multiplier_.size() != M) {
      // If the helper bias multiplier is not of length M, reshape and fill it
      // with ones.
bias_multiplier_.Resize(M);
math::Set<T_B, Context>(
M,
convert::To<float, T_B>(1),
bias_multiplier_.template mutable_data<T_B>(),
&context_);
}
if (bias_multiplier_fp16_ == nullptr) {
bias_multiplier_fp16_ = new float[M];
M_cached_ = M;
} else if (M > M_cached_) {
delete[] bias_multiplier_fp16_;
bias_multiplier_fp16_ = new float[M];
M_cached_ = M;
}
fbgemm::RoundToFloat16(
bias_multiplier_.template data<T_B>(),
bias_multiplier_fp16_,
M,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
if (b_fp16_ == nullptr) {
b_fp16_ = new float[N];
}
fbgemm::RoundToFloat16(
b.template data<T_B>(),
b_fp16_,
N,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
if (ADD_BIAS_FIRST) {
custom_fp16_gemm(
M,
1,
N,
bias_multiplier_fp16_,
b_fp16_,
0.f,
Y->template mutable_data<T_Y>(),
USE_ACC_FP16,
USE_TMP_ACCUMULATOR);
#ifdef LOG_LEVEL_FOR_FBFCPACKEDACC16_ACCURACY_LOG
float* Y_ref = new float[M * N]();
TensorProto::DataType math_type = TensorProto_DataType_FLOAT;
math::Gemm<T_B, Context, Engine>(
CblasNoTrans,
CblasNoTrans,
M,
N,
1,
1,
bias_multiplier_.template data<T_B>(),
b.template data<T_B>(),
0.f,
Y_ref,
&context_,
math_type);
relative_error =
compute_relative_error(Y->template mutable_data<T_Y>(), Y_ref, M * N);
total_error_with_bias += relative_error;
VLOG(LOG_LEVEL_FOR_FBFCPACKEDACC16_ACCURACY_LOG)
<< "Relative error for Y = bias_multiplier_ * b' = " << relative_error
<< ", average error with bias after " << runs
<< " runs = " << total_error_with_bias / runs << endl;
#endif
custom_fp16_gemm(
M,
K,
N,
X_fp16_,
W_fp16_,
1.f,
Y->template mutable_data<T_Y>(),
USE_ACC_FP16,
USE_TMP_ACCUMULATOR);
#ifdef LOG_LEVEL_FOR_FBFCPACKEDACC16_ACCURACY_LOG
if (!W_blob.IsType<caffe2::unique_ptr<fbgemm::PackedGemmMatrixFP16>>()) {
const auto& W = Input(1);
math::Gemm<float, Context, Engine>(
CblasNoTrans,
CblasTrans,
M,
N,
K,
1,
X.template data<T_X>(),
W.template data<T_W>(),
1.f,
Y_ref,
&context_,
math_type);
runs++;
float relative_error = compute_relative_error(
Y->template mutable_data<T_Y>(), Y_ref, M * N);
total_error += relative_error;
VLOG(LOG_LEVEL_FOR_FBFCPACKEDACC16_ACCURACY_LOG)
<< "Relative error for Y = bias_multiplier_ * b' + X * W' = "
<< relative_error << ", average error after " << runs
<< " runs = " << total_error / runs << endl;
if (Y_ref != nullptr) {
delete[] Y_ref;
}
}
#endif
} else {
custom_fp16_gemm(
M,
K,
N,
X_fp16_,
W_fp16_,
0.f,
Y->template mutable_data<T_Y>(),
USE_ACC_FP16,
USE_TMP_ACCUMULATOR);
#ifdef LOG_LEVEL_FOR_FBFCPACKEDACC16_ACCURACY_LOG
if (!W_blob.IsType<caffe2::unique_ptr<fbgemm::PackedGemmMatrixFP16>>()) {
const auto& W = Input(1);
float* Y_ref = new float[M * N]();
TensorProto::DataType math_type = TensorProto_DataType_FLOAT;
math::Gemm<float, Context, Engine>(
CblasNoTrans,
CblasTrans,
M,
N,
K,
1,
X.template data<T_X>(),
W.template data<T_W>(),
0.f,
Y_ref,
&context_,
math_type);
runs++;
float relative_error = compute_relative_error(
Y->template mutable_data<T_Y>(), Y_ref, M * N);
total_error += relative_error;
VLOG(LOG_LEVEL_FOR_FBFCPACKEDACC16_ACCURACY_LOG)
<< "Relative error for Y = X * W' = " << relative_error
<< ", average error after " << runs
<< " runs = " << total_error / runs << endl;
}
#endif
custom_fp16_gemm(
M,
1,
N,
bias_multiplier_fp16_,
b_fp16_,
1.f,
Y->template mutable_data<T_Y>(),
USE_ACC_FP16,
USE_TMP_ACCUMULATOR);
#ifdef LOG_LEVEL_FOR_FBFCPACKEDACC16_ACCURACY_LOG
math::Gemm<T_B, Context, Engine>(
CblasNoTrans,
CblasNoTrans,
M,
N,
1,
1,
bias_multiplier_.template data<T_B>(),
b.template data<T_B>(),
1,
Y_ref,
&context_,
math_type);
relative_error =
compute_relative_error(Y->template mutable_data<T_Y>(), Y_ref, M * N);
total_error_with_bias += relative_error;
VLOG(LOG_LEVEL_FOR_FBFCPACKEDACC16_ACCURACY_LOG)
<< "Relative error for Y = X * W' + bias_multiplier_ * b' = "
<< relative_error << ", average error with bias after " << runs
<< " runs = " << total_error_with_bias / runs << endl;
if (Y_ref != nullptr) {
delete[] Y_ref;
}
#endif
}
return true;
}
#ifdef LOG_LEVEL_FOR_FBFCPACKEDACC16_ACCURACY_LOG
float compute_L2_norm(float* A, int size) {
float square_sum = 0.0;
for (const auto i : c10::irange(size)) {
square_sum += A[i] * A[i];
}
return std::sqrt(square_sum);
}
float compute_relative_error(float* A, float* A_ref, int size) {
float error = 0.0;
for (const auto i : c10::irange(size)) {
error += (A[i] - A_ref[i]) * (A[i] - A_ref[i]);
}
error = std::sqrt(error);
float L2_norm = compute_L2_norm(A, size);
return error / L2_norm;
}
#endif
bool RunOnDevice() override {
return DoRunWithType<
float, // X
float, // B
float, // W
float>(); // Y
}
protected:
size_t axis_{1};
size_t axis_w_{1};
size_t X_size_cached_{0};
size_t M_cached_{0};
static int runs;
static float total_error;
static float total_error_with_bias;
float* X_fp16_ = nullptr;
float* W_fp16_ = nullptr;
float* b_fp16_ = nullptr;
float* bias_multiplier_fp16_ = nullptr;
// A local vector to cache the output shape so we don't need to recreate
// a vector object every time we run Run().
vector<int64_t> Y_shape_cache_;
Tensor bias_multiplier_{Context::GetDeviceType()};
};
} // namespace caffe2
| 11,795
| 28.56391
| 80
|
h
|
null |
pytorch-main/caffe2/contrib/fakelowp/fp16_gemm_utils.h
|
// Copyright 2004-present Facebook. All Rights Reserved.
#pragma once
#include "caffe2/core/context.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
void custom_fp16_gemm(
const int m,
const int k,
const int n,
const float* A_fp16,
const float* B_fp16,
const float beta,
float* C,
const bool use_acc_fp16,
const bool use_temp_accumulator);
void custom_fp16_gemm_with_trans(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int m,
const int k,
const int n,
const float* A_fp16,
const float* B_fp16,
const float beta,
float* C,
const bool use_acc_fp16,
const bool use_temp_accumulator);
void transpose(const float* A, float* A_trans, int M, int N);
void custom_fp16_gemv(
const bool use_acc_fp16,
const bool use_custom_acc32,
const bool use_temp_accumulator,
const CBLAS_TRANSPOSE trans_A,
const int M,
const int N,
const float alpha,
const float* A,
const float* x,
const float beta,
float* y,
CPUContext* context);
void custom_fp16_gemm_batched(
const bool use_acc_fp16,
const bool use_custom_acc32,
const bool use_temp_accumulator,
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float** A,
const float** B,
const float beta,
float** C,
CPUContext* context);
void custom_fp16_gemm_strided_batched(
const bool use_acc_fp16,
const bool use_custom_acc32,
const bool use_temp_accumulator,
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha /* unused */,
const float* A,
const int A_stride,
const float* B,
const int B_stride,
const float beta,
float* C,
const int C_stride,
CPUContext* context);
} // namespace caffe2
| 1,992
| 23.304878
| 61
|
h
|
null |
pytorch-main/caffe2/contrib/fakelowp/int8_dequantize_op_nnpi.h
|
#ifndef CAFFE2_OPERATORS_INT8_DEQUANTIZE_OP_H_
#define CAFFE2_OPERATORS_INT8_DEQUANTIZE_OP_H_
#include <fbgemm/FbgemmConvert.h>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor_int8.h"
#include "caffe2/operators/quantized/int8_utils.h"
C10_DECLARE_bool(caffe2_fbgemm_fake_fp16_clamp);
namespace caffe2 {
namespace int8 {
namespace {
void Int8DequantizeNNPI(
const uint8_t* in,
float* out,
const int64_t N,
const float X_scale,
const int32_t X_offset) {
float X_scale_fp32 = 1.0f / X_scale;
for (const auto i : c10::irange(N)) {
out[i] = (float)(static_cast<int32_t>(in[i]) - X_offset) / X_scale_fp32;
}
}
} // namespace
class Int8DequantizeNNPIOp final : public Operator<CPUContext> {
public:
using Operator<CPUContext>::Operator;
bool RunOnDevice() override {
const auto& X = Inputs()[0]->template Get<Int8TensorCPU>();
auto* Y = Output(0, X.t.sizes(), at::dtype<float>());
int32_t X_offset = X.zero_point;
auto X_scale = X.scale;
Int8DequantizeNNPI(
X.t.data<uint8_t>(),
Y->mutable_data<float>(),
X.t.numel(),
X_scale,
X_offset);
// UsingOneOverScale_);
return true;
}
};
} // namespace int8
} // namespace caffe2
#endif // CAFFE2_OPERATORS_INT8_DEQUANTIZE_OP_H_
| 1,343
| 22.172414
| 76
|
h
|
null |
pytorch-main/caffe2/contrib/fakelowp/int8_quantize_op_nnpi.h
|
#ifndef CAFFE2_OPERATORS_INT8_QUANTIZE_OP_H_
#define CAFFE2_OPERATORS_INT8_QUANTIZE_OP_H_
#include <fbgemm/FbgemmConvert.h>
#include <cmath>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor_int8.h"
#include "caffe2/operators/quantized/int8_utils.h"
#include "fp16_fma.h"
C10_DECLARE_bool(caffe2_fbgemm_fake_fp16_clamp);
namespace caffe2 {
namespace int8 {
namespace {
static float ClampScale(float s)
{
const float MinScale(1e-10f);
if (std::fabs(s) < MinScale) {
LOG_EVERY_N(WARNING, 1000) << "Too small scale detected: "
<< s << " clamping to +/-" << MinScale;
return std::signbit(s) ? -MinScale : MinScale;
} else {
return s;
}
}
void Int8QuantizeNNPI(
const float* in,
uint8_t* out,
const int64_t N,
const float Y_scale,
const int32_t Y_offset) {
const int32_t qmin = std::numeric_limits<uint8_t>::min();
const int32_t qmax = std::numeric_limits<uint8_t>::max();
float inv_scale = ClampScale(1 / Y_scale);
float inv_scale_fp16 = 0;
fbgemm::RoundToFloat16(
&inv_scale, &inv_scale_fp16, 1, false /* no clamping */);
float offset_tmp = -Y_offset;
fbgemm::RoundToFloat16(
&offset_tmp, &offset_tmp, 1, false /* no clamping */);
std::vector<float> in_fp16(N);
fbgemm::RoundToFloat16(
in, in_fp16.data(), N, false /* no clamping */);
std::vector<float> inv_scalev(N, inv_scale_fp16);
std::vector<float> offsetv(N, -offset_tmp);
fake_fp16::fma_fp16(N, in_fp16.data(), inv_scalev.data(), offsetv.data());
for (const auto i : c10::irange(N)) {
offsetv[i] = round(offsetv[i]);
}
fbgemm::RoundToFloat16(
offsetv.data(), offsetv.data(), N, false /* no clamping */);
for (const auto i : c10::irange(N)) {
float halfRes = offsetv[i];
if (std::isinf(halfRes)) {
if (halfRes > 0) {
halfRes = qmax;
} else {
halfRes = qmin;
}
}
if (halfRes > qmax) {
halfRes = qmax;
}
if (halfRes < qmin) {
halfRes = qmin;
}
out[i] = static_cast<uint8_t>(halfRes);
}
}
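// Worked example (illustrative): with Y_scale = 0.1f (so inv_scale ~= 10)
// and Y_offset = 5, an input of 0.73f maps through the fp16 fma to
// 0.73 * 10 + 5 = 12.3, which rounds to 12 and already lies inside
// [qmin, qmax] = [0, 255], so out[i] = 12.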
} // namespace
class Int8QuantizeNNPIOp final : public Operator<CPUContext> {
public:
using Operator<CPUContext>::Operator;
bool RunOnDevice() override {
const auto& X = Input(0);
auto* Y = Outputs()[0]->template GetMutable<Int8TensorCPU>();
Y->t.ResizeLike(X);
int32_t Y_offset =
this->template GetSingleArgument<int>("Y_zero_point", 0);
auto Y_scale = this->template GetSingleArgument<float>("Y_scale", 1);
Y->scale = Y_scale;
Y->zero_point = Y_offset;
Int8QuantizeNNPI(
X.data<float>(),
Y->t.mutable_data<uint8_t>(),
X.numel(),
Y_scale,
Y_offset);
return true;
}
};
} // namespace int8
} // namespace caffe2
#endif // CAFFE2_OPERATORS_INT8_QUANTIZE_OP_H_
| 2,859
| 25.238532
| 76
|
h
|
null |
pytorch-main/caffe2/contrib/fakelowp/int8_swish_op_nnpi.h
|
#ifndef CAFFE2_OPERATORS_INT8_SWISH_OP_H_
#define CAFFE2_OPERATORS_INT8_SWISH_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor_int8.h"
#include "caffe2/operators/quantized/int8_utils.h"
namespace caffe2 {
namespace int8 {
namespace {
using namespace std;
void SwishFakeInt8NNPI(
const uint8_t* in,
uint8_t* out,
const int64_t N,
const float X_scale,
const int32_t X_offset,
const float Y_scale,
const int32_t Y_offset) {
const uint8_t max_val = std::numeric_limits<uint8_t>::max();
const uint8_t min_val = std::numeric_limits<uint8_t>::min();
float X_scale_fp32 = 1.0f / X_scale;
float deq_val = 0.0f;
float deq_swish = 0.0f;
int32_t quant_val = 0;
uint8_t result = 0;
for (const auto i : c10::irange(N)) {
deq_val = (static_cast<uint8_t>(in[i]) - X_offset) / X_scale_fp32;
deq_swish = deq_val / (1 + exp(-deq_val));
quant_val = round(deq_swish / Y_scale + Y_offset);
result = quant_val;
if (quant_val > max_val) {
result = max_val;
}
if (quant_val < min_val) {
result = min_val;
}
out[i] = static_cast<uint8_t>(result);
}
}
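// Worked example (illustrative): with X_scale = 0.1f, X_offset = 0,
// Y_scale = 0.01f and Y_offset = 0, an input byte of 20 dequantizes to
// 20 * 0.1 = 2.0, swish(2.0) = 2.0 / (1 + e^-2) ~= 1.762, and requantizes
// to round(1.762 / 0.01) = 176.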
} // namespace
class SwishInt8NNPIOp final : public Operator<CPUContext> {
public:
using Operator<CPUContext>::Operator;
template <class... Args>
explicit SwishInt8NNPIOp(Args&&... args)
: Operator<CPUContext>(std::forward<Args>(args)...) {}
bool RunOnDevice() override {
const auto& X = Inputs()[0]->template Get<Int8TensorCPU>();
auto* Y = Outputs()[0]->template GetMutable<Int8TensorCPU>();
Y->t.ResizeLike(X.t);
int32_t Y_offset_ =
this->template GetSingleArgument<int>("Y_zero_point", 0);
auto Y_scale_ = this->template GetSingleArgument<float>("Y_scale", 1);
Y->scale = Y_scale_;
Y->zero_point = Y_offset_;
SwishFakeInt8NNPI(
X.t.data<uint8_t>(),
Y->t.mutable_data<uint8_t>(),
X.t.numel(),
X.scale,
X.zero_point,
Y_scale_,
Y_offset_);
return true;
}
};
} // namespace int8
} // namespace caffe2
#endif // CAFFE2_OPERATORS_INT8_SWISH_OP_H_
| 2,150
| 23.443182
| 74
|
h
|
null |
pytorch-main/caffe2/contrib/fakelowp/layernorm_fp16_fake_op.h
|
#pragma once
#include <algorithm>
#include <array>
#include <functional>
#include <string>
#include <vector>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include <fbgemm/FbgemmConvert.h>
#include "caffe2/utils/eigen_utils.h"
#include "caffe2/utils/math.h"
#include "fp16_fma.h"
C10_DECLARE_bool(caffe2_fbgemm_fake_fp16_clamp);
namespace caffe2 {
class LayerNormUtils {
public:
static void calcY(
const int M,
const int N,
const float* X,
const float* mean,
const float* std,
const float* gamma,
const float* beta,
float* Y);
static void calcMeanStd(
const int M,
const int N,
const float eps,
const float* X,
float* mean,
float* std);
static float ReducedAdd(const std::vector<float>& vec);
};
template <bool quantizeOutput=false>
class LayerNormFakeFp16Op final : public Operator<CPUContext> {
public:
template <class... Args>
explicit LayerNormFakeFp16Op(Args&&... args)
: Operator<CPUContext>(std::forward<Args>(args)...),
OP_SINGLE_ARG(int, "axis", axis_, 1),
OP_SINGLE_ARG(float, "epsilon", epsilon_, 1e-5f),
OP_SINGLE_ARG(bool, "elementwise_affine", elementwise_affine_, false) {}
~LayerNormFakeFp16Op() noexcept override {}
bool RunOnDevice() override {
return DoRunWithType();
}
bool DoRunWithType() {
const auto& X = Input(INPUT);
vector<float> Y_fp16;
Tensor* Y;
if (!quantizeOutput) {
Y = Output(OUTPUT, X.sizes(), at::dtype<float>());
} else {
Y_fp16.resize(X.numel());
}
CAFFE_ENFORCE_GE(X.dim(), 2, "LayerNorm requires input dim >=2.");
const int canonical_axis = X.canonical_axis_index(axis_);
std::vector<int64_t> moments_dims(
X.sizes().cbegin(), X.sizes().cbegin() + canonical_axis);
moments_dims.push_back(1);
auto* mean = Output(MEAN, moments_dims, at::dtype<float>());
auto* sigma = Output(STD, moments_dims, at::dtype<float>());
const int M = X.size_to_dim(canonical_axis);
const int N = X.size_from_dim(canonical_axis);
if (!quantizeOutput) {
Y->ResizeLike(X);
}
const float* X_data = X.template data<float>();
float *Y_data;
if (!quantizeOutput) {
Y_data = Y->template mutable_data<float>();
} else {
Y_data = Y_fp16.data();
}
float* mean_data = mean->template mutable_data<float>();
float* sigma_data = sigma->template mutable_data<float>();
std::vector<float> X_rounded(X.numel());
fbgemm::RoundToFloat16(
X_data,
X_rounded.data(),
X.numel(),
FLAGS_caffe2_fbgemm_fake_fp16_clamp,
false /*USE_ACC_FP16*/);
X_data = X_rounded.data();
// Mean and Standard Deviation computation for the input data
LayerNormUtils::calcMeanStd(M, N, epsilon_, X_data, mean_data, sigma_data);
const float* gamma_data = nullptr;
const float* beta_data = nullptr;
// Layer Normalized Output computation
LayerNormUtils::calcY(
M, N, X_data, mean_data, sigma_data, gamma_data, beta_data, Y_data);
if (InputSize() == 3) {
// handle scale and bias via fp16_fma
std::vector<float> scale_data(N);
std::vector<float> bias_data(N);
fbgemm::RoundToFloat16(
Input(1).template data<float>(),
scale_data.data(),
N,
FLAGS_caffe2_fbgemm_fake_fp16_clamp,
false /*USE_ACC_FP16*/);
fbgemm::RoundToFloat16(
Input(2).template data<float>(),
bias_data.data(),
N,
FLAGS_caffe2_fbgemm_fake_fp16_clamp,
false /*USE_ACC_FP16*/);
for (const auto i : c10::irange(M)) {
// fma_fp16(A, B, Out) -> Out = A * B + Out
std::vector<float> out(N);
std::memcpy(out.data(), bias_data.data(), sizeof(float) * N);
fake_fp16::fma_fp16(N, Y_data + i * N, scale_data.data(), out.data());
std::memcpy(Y_data + i * N, out.data(), sizeof(float) * N);
}
}
// Quantize
// We should be using the same quantization function from int8quantize,
// but we need to adjust for int8 vs uint8 bias. A simple shift of the output is not enough
// because this causes problems when rounding inside the fma.
// TODO: figure out how to commonize this with int8 quantize
if (quantizeOutput) {
auto* Y_int8 = Outputs()[0]->template GetMutable<int8::Int8TensorCPU>();
Y_int8->t.ResizeLike(X);
int32_t Y_offset =
this->template GetSingleArgument<int>("Y_zero_point", 0);
auto Y_scale = this->template GetSingleArgument<float>("Y_scale", 1);
float inv_scale = 1.0f / Y_scale;
fbgemm::RoundToFloat16(
&inv_scale, &inv_scale, 1, false /* no clamping */);
Y_int8->scale = Y_scale;
Y_int8->zero_point = Y_offset;
int Nout = X.numel();
std::vector<float> inv_scalev(Nout, inv_scale);
std::vector<float> offsetv(Nout, Y_offset);
uint8_t* Y_uint8_data = Y_int8->t.template mutable_data<uint8_t>();
fake_fp16::fma_fp16(Nout, Y_fp16.data(), inv_scalev.data(), offsetv.data());
const int32_t qmin = std::numeric_limits<uint8_t>::min();
const int32_t qmax = std::numeric_limits<uint8_t>::max();
for (const auto i : c10::irange(Nout)) {
float halfRes = offsetv[i];
halfRes = round(halfRes);
if (std::isinf(halfRes)) {
if (halfRes > 0) {
halfRes = qmax;
} else {
halfRes = qmin;
}
}
if (halfRes > qmax) {
halfRes = qmax;
}
if (halfRes < qmin) {
halfRes = qmin;
}
Y_uint8_data[i] = static_cast<uint8_t>(halfRes);
}
}
return true;
}
private:
const int axis_;
const float epsilon_;
// The LayerNorm FP16 FakeLowP op applies the scales and biases (gamma and
// beta) whenever those inputs are provided; otherwise it omits them.
// elementwise_affine is kept only for consistency with the LayerNorm FP32 op.
const bool elementwise_affine_;
INPUT_TAGS(INPUT);
OUTPUT_TAGS(OUTPUT, MEAN, STD);
};
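// Reference math being emulated (approximately, with fp16 rounding applied
// at each intermediate step), treating the input as M rows of length N:
//   mean_i  ~= (1/N) * sum_j X[i][j]
//   std_i   ~= sqrt((1/N) * sum_j (X[i][j] - mean_i)^2 + epsilon)
//   Y[i][j] ~= (X[i][j] - mean_i) / std_i
// followed, when the scale/bias inputs are present, by
//   Y[i][j] ~= Y[i][j] * scale[j] + bias[j] via the fp16 fma above.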
} // namespace caffe2
| 6,155
| 28.596154
| 95
|
h
|
null |
pytorch-main/caffe2/contrib/fakelowp/lengths_reducer_fused_4bit_rowwise_fp16_fake_op.h
|
#pragma once
#include <immintrin.h>
#include "caffe2/perfkernels/fused_8bit_rowwise_embedding_lookup.h"
#include "fp16_fma.h"
#include "lengths_reducer_ops.h"
C10_DECLARE_bool(caffe2_fbgemm_fake_fp16_clamp);
C10_DECLARE_bool(caffe2_fbgemm_fake_fp16_clamp_denorms);
namespace caffe2 {
template <
class Context,
bool with_weights = 0,
bool use_fp16_for_embedding_only = 0>
class SparseLengthsFused4BitRowwiseFakeFP16Op final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
explicit SparseLengthsFused4BitRowwiseFakeFP16Op(
const OperatorDef& operator_def,
Workspace* ws)
: Operator<Context>(operator_def, ws) {}
~SparseLengthsFused4BitRowwiseFakeFP16Op() noexcept override {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename IndexType>
bool DoRunWithType() {
const auto& data = Input(DATA);
const auto& indices = Input(INDICES);
const auto& lengths = Input(LENGTHS);
CAFFE_ENFORCE_EQ(indices.dim(), 1, "INDICES must be a vector");
CAFFE_ENFORCE_EQ(lengths.dim(), 1, "LENGTHS must be a vector");
const float* weights = nullptr;
if (with_weights) {
const auto& weights_input = Input(WEIGHTS);
CAFFE_ENFORCE_EQ(weights_input.dim(), 1, "WEIGHTS must be a vector");
CAFFE_ENFORCE_EQ(
weights_input.numel(),
indices.numel(),
"WEIGHTS should have the same length as INDICES.");
weights = weights_input.template data<float>();
}
CAFFE_ENFORCE_GT(
data.size(1),
sizeof(at::Half) * 2,
"DATA must have more than 4 columns");
constexpr int NUM_ELEM_PER_BYTE = 2;
// Subtract 4 from the #columns of data for the 2 bytes for scale and 2
// bytes for bias (both stored as fp16) that we use in the fused
// representation (per row).
const std::vector<int64_t> shape = {
lengths.size(0),
static_cast<int64_t>(data.size(1) - 2 * sizeof(at::Half)) *
NUM_ELEM_PER_BYTE};
auto* output = Output(0, shape, at::dtype<float>());
// Copied from Fused8BitRowwiseEmbeddingLookupGenericSlow in
// fused_8bit_rowwise_embedding_lookup.cc
int64_t output_block_size = output->size(1);
CAFFE_ENFORCE_EQ(
output_block_size % NUM_ELEM_PER_BYTE,
0,
"block size must be divisible by 2");
int64_t input_block_size = output_block_size / NUM_ELEM_PER_BYTE;
int64_t output_size = output->size(0);
int64_t index_size = indices.numel();
int64_t data_size = data.size(0);
const uint8_t* input = data.template data<uint8_t>();
const IndexType* indices_data = indices.template data<IndexType>();
const int* lengths_data = lengths.template data<int>();
float* out = output->template mutable_data<float>();
std::vector<float> rowTempSums[2];
rowTempSums[0].resize(output_block_size);
rowTempSums[1].resize(output_block_size);
const auto scale_bias_offset = 2 * sizeof(at::Half);
const int64_t input_fused_block_size = input_block_size + scale_bias_offset;
int64_t current = 0;
for (const auto m : c10::irange(output_size)) {
if (!use_fp16_for_embedding_only) {
memset(rowTempSums[0].data(), 0, sizeof(float) * output_block_size);
memset(rowTempSums[1].data(), 0, sizeof(float) * output_block_size);
}
memset(out, 0, sizeof(float) * output_block_size);
if (current + lengths_data[m] > index_size) {
return false;
}
for (int i = 0; i < lengths_data[m]; ++i) {
int64_t idx = indices_data[current];
int accIdx = 0;
if (output_block_size % 2 == 0 && output_block_size <= 96 &&
data.size(1) % 2 == 0) {
accIdx = i % 2;
}
if (idx < 0 || idx >= data_size) {
return false;
}
const at::Half* scale_bias = reinterpret_cast<const at::Half*>(
input + input_fused_block_size * indices_data[current] +
input_block_size);
float weight = 1.0f;
if (weights) {
weight = weights[current];
if (!use_fp16_for_embedding_only) {
// Fake fp16 rounding of weight
fbgemm::RoundToFloat16(
&weight, &weight, 1, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
}
}
float scale = scale_bias[0];
float bias = scale_bias[1];
if (!use_fp16_for_embedding_only) {
scale *= weight;
fbgemm::RoundToFloat16(
&scale, &scale, 1, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
}
// Unpack int4 elements
std::vector<float> input_rounded(output_block_size);
int k = 0;
for (const auto j : c10::irange(input_block_size)) {
input_rounded[k++] =
input[input_fused_block_size * indices_data[current] + j] & 0x0f;
input_rounded[k++] =
input[input_fused_block_size * indices_data[current] + j] >> 4;
}
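// Illustrative: each byte packs two 4-bit values, low nibble first, so a
// packed byte of 0xB3 expands to 3.0f followed by 11.0f in input_rounded.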
if (use_fp16_for_embedding_only) {
std::vector<float> product_rounded(output_block_size);
TypedAxpy<float, float>(
output_block_size,
scale,
input_rounded.data(),
product_rounded.data());
for (const auto j : c10::irange(output_block_size)) {
product_rounded[j] += bias;
}
// Fake fp16 rounding of scale x input + bias
fbgemm::RoundToFloat16(
reinterpret_cast<const float*>(product_rounded.data()),
product_rounded.data(),
output_block_size,
FLAGS_caffe2_fbgemm_fake_fp16_clamp,
FLAGS_caffe2_fbgemm_fake_fp16_clamp_denorms);
// Accumulate w x (scale x input + bias) to output
TypedAxpy<float, float>(
output_block_size,
weight,
reinterpret_cast<const float*>(product_rounded.data()),
out);
} else {
std::vector<float> product(output_block_size);
std::vector<float> scalev(output_block_size, scale);
std::vector<float> mBias(output_block_size, bias);
std::vector<float> mWeight(output_block_size, weight);
fake_fp16::fma_fp16(
output_block_size,
mBias.data(),
mWeight.data(),
rowTempSums[accIdx].data());
fake_fp16::fma_fp16(
output_block_size,
scalev.data(),
input_rounded.data(),
rowTempSums[accIdx].data());
}
++current;
}
if (!use_fp16_for_embedding_only) {
for (const auto j : c10::irange(output_block_size)) {
out[j] = rowTempSums[0][j] + rowTempSums[1][j];
}
fbgemm::RoundToFloat16(
reinterpret_cast<const float*>(out),
out,
output_block_size,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
}
out += output_block_size;
}
return current == index_size;
}
enum {
DATA = 0,
WEIGHTS = 1,
INDICES = 1 + with_weights,
LENGTHS = 2 + with_weights,
};
};
} // namespace caffe2
| 7,210
| 32.230415
| 80
|
h
|
null |
pytorch-main/caffe2/contrib/fakelowp/lengths_reducer_fused_8bit_rowwise_fp16_fake_op.h
|
#pragma once
#include "caffe2/perfkernels/fused_8bit_rowwise_embedding_lookup.h"
#include "fp16_fma.h"
#include "lengths_reducer_ops.h"
C10_DECLARE_bool(caffe2_fbgemm_fake_fp16_clamp);
C10_DECLARE_bool(caffe2_fbgemm_fake_fp16_clamp_denorms);
namespace caffe2 {
template <
class Context,
bool with_weights = 0,
bool is_mean = 0,
bool use_acc_fp16 = 0,
bool use_inv_scale = 0,
bool use_nnpi_fma = 0,
bool use_fp16_for_embedding_only = 0,
bool use_acc_fp32 = 0>
class SparseLengthsFused8BitRowwiseFakeFP16Op final : public Operator<Context> {
public:
static_assert(
!(with_weights && is_mean),
"Cannot have with_weights and is_mean at the same time");
USE_OPERATOR_CONTEXT_FUNCTIONS;
explicit SparseLengthsFused8BitRowwiseFakeFP16Op(
const OperatorDef& operator_def,
Workspace* ws)
: Operator<Context>(operator_def, ws) {}
~SparseLengthsFused8BitRowwiseFakeFP16Op() noexcept override {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename IndexType>
bool DoRunWithType() {
const auto& data = Input(DATA);
const auto& indices = Input(INDICES);
const auto& lengths = Input(LENGTHS);
CAFFE_ENFORCE_EQ(indices.dim(), 1, "INDICES must be a vector");
CAFFE_ENFORCE_EQ(lengths.dim(), 1, "LENGTHS must be a vector");
const float* weights = nullptr;
if (with_weights) {
const auto& weights_input = Input(WEIGHTS);
CAFFE_ENFORCE_EQ(weights_input.dim(), 1, "WEIGHTS must be a vector");
CAFFE_ENFORCE_EQ(
weights_input.numel(),
indices.numel(),
"WEIGHTS should have the same length as INDICES.");
weights = weights_input.template data<float>();
}
CAFFE_ENFORCE_GT(data.size(1), 8, "DATA must have more than 8 columns");
// Subtract 8 from the #columns of data for the 4 bytes for scale and 4
// bytes for bias that we use in the fused representation (per row).
const std::vector<int64_t> shape = {lengths.size(0), data.size(1) - 8};
auto* output = Output(0, shape, at::dtype<float>());
// Copied from Fused8BitRowwiseEmbeddingLookupGenericSlow in
// fused_8bit_rowwise_embedding_lookup.cc
int64_t block_size = output->size(1);
int64_t output_size = output->size(0);
int64_t index_size = indices.numel();
int64_t data_size = data.size(0);
const uint8_t* input = data.template data<uint8_t>();
const IndexType* indices_data = indices.template data<IndexType>();
const int* lengths_data = lengths.template data<int>();
bool normalize_by_length = is_mean;
float* out = output->template mutable_data<float>();
std::vector<float> rowTempSums[2];
rowTempSums[0].resize(block_size);
rowTempSums[1].resize(block_size);
// block_size is the number of elements and fused_block_size is the size of
// an entire row, including scale and bias.
const auto scale_bias_offset = 8 / sizeof(uint8_t);
const int64_t fused_block_size = block_size + scale_bias_offset;
int64_t current = 0;
for (const auto m : c10::irange(output_size)) {
memset(out, 0, sizeof(float) * block_size);
memset(rowTempSums[0].data(), 0, sizeof(float) * block_size);
memset(rowTempSums[1].data(), 0, sizeof(float) * block_size);
if (current + lengths_data[m] > index_size) {
return false;
}
for (int i = 0; i < lengths_data[m]; ++i) {
int64_t idx = indices_data[current];
int accIdx = 0;
// Only do double buffer accumulation when block size is even
if (use_nnpi_fma && block_size % 2 == 0 && block_size <= 96) {
accIdx = i % 2;
}
if (idx < 0 || idx >= data_size) {
return false;
}
const float* scale_bias = reinterpret_cast<const float*>(
input + fused_block_size * indices_data[current] + block_size);
float weight = 1.0f;
if (weights) {
weight = weights[current];
if (!use_fp16_for_embedding_only && !use_acc_fp32) {
// Fake fp16 rounding of weight
fbgemm::RoundToFloat16(
&weight, &weight, 1, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
}
}
float scale = scale_bias[0];
float bias = scale_bias[1];
// Vendor might store scale as s' = 1 / s which implies b' = b / s
// We do x = x_q * s + b
// Vendor does x = (x_q + b') / s'
// Solving these equations yields to the results above
if (use_inv_scale) {
constexpr float kEpsilon = 1e-8;
if (fabs(scale) < kEpsilon) {
if (scale < 0) {
scale = -kEpsilon;
} else {
scale = kEpsilon;
}
}
scale = 1.0 / (1.0 / scale);
bias = (bias / scale) * scale;
}
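// Derivation of the comment above (for reference): if the vendor stores
// s' = 1 / s and b' = b / s, then their dequantization
//   x = (x_q + b') / s' = (x_q + b / s) * s = x_q * s + b
// matches ours. The round-trips 1.0 / (1.0 / scale) and
// (bias / scale) * scale are identities in exact arithmetic; they appear to
// be kept only to reproduce the floating-point rounding of the vendor path.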
if (!use_fp16_for_embedding_only && !use_acc_fp32) {
// Fake fp16 rounding of scale and bias
fbgemm::RoundToFloat16(
&scale, &scale, 1, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
fbgemm::RoundToFloat16(
&bias, &bias, 1, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
scale *= weight;
// Fake fp16 rounding of scale and bias
fbgemm::RoundToFloat16(
&scale, &scale, 1, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
}
// No fake fp16 rounding of the input is needed here; the stored values are
// already small integers.
std::vector<float> input_rounded(block_size);
for (const auto j : c10::irange(block_size)) {
input_rounded[j] =
input[fused_block_size * indices_data[current] + j];
}
if (use_fp16_for_embedding_only) {
// bias *= weight;
std::vector<float> product_rounded(block_size);
TypedAxpy<float, float>(
block_size, scale, input_rounded.data(), product_rounded.data());
for (const auto j : c10::irange(block_size)) {
product_rounded[j] += bias;
}
// Fake fp16 rounding of scale x input + bias
fbgemm::RoundToFloat16(
reinterpret_cast<const float*>(product_rounded.data()),
product_rounded.data(),
block_size,
FLAGS_caffe2_fbgemm_fake_fp16_clamp,
FLAGS_caffe2_fbgemm_fake_fp16_clamp_denorms);
// Accumulate w x (scale x input + bias) to output
TypedAxpy<float, float>(
block_size,
weight,
reinterpret_cast<const float*>(product_rounded.data()),
out);
} else if (use_nnpi_fma) {
std::vector<float> mScale(block_size, scale);
std::vector<float> mBias(block_size, bias);
std::vector<float> mWeight(block_size, weight);
fake_fp16::fma_fp16(
block_size,
mBias.data(),
mWeight.data(),
rowTempSums[accIdx].data());
fake_fp16::fma_fp16(
block_size,
mScale.data(),
input_rounded.data(),
rowTempSums[accIdx].data());
} else if (use_acc_fp16) {
bias *= weight;
fbgemm::RoundToFloat16(
&bias, &bias, 1, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
std::vector<float> product_rounded(block_size);
TypedAxpy<float, float>(
block_size, scale, input_rounded.data(), product_rounded.data());
// Fake fp16 rounding of w x scale x input
fbgemm::RoundToFloat16(
reinterpret_cast<const float*>(product_rounded.data()),
product_rounded.data(),
block_size,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
for (const auto j : c10::irange(block_size)) {
product_rounded[j] += bias;
}
// Fake fp16 rounding of w x scale x input + w x bias
fbgemm::RoundToFloat16(
reinterpret_cast<const float*>(product_rounded.data()),
product_rounded.data(),
block_size,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
// Accumulate w x scale x input + w x bias to output
TypedAxpy<float, float>(
block_size,
1.0,
reinterpret_cast<const float*>(product_rounded.data()),
out);
// Fake fp16 rounding of out + (w x scale x input + w x bias)
fbgemm::RoundToFloat16(
reinterpret_cast<const float*>(out),
out,
block_size,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
} else if (use_acc_fp32) {
for (const auto j : c10::irange(block_size)) {
float deqVal = fake_fp16::fmafp32_avx_emulation(
scale,
input_rounded[j],
bias);
rowTempSums[accIdx][j] = fake_fp16::fmafp32_avx_emulation(
deqVal,
weight,
rowTempSums[accIdx][j]);
}
} else {
bias *= weight;
fbgemm::RoundToFloat16(
&bias, &bias, 1, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
TypedAxpy<float, float>(block_size, scale, input_rounded.data(), out);
for (const auto j : c10::irange(block_size)) {
out[j] += bias;
}
}
++current;
}
if (use_nnpi_fma || use_acc_fp32) {
for (const auto j : c10::irange(block_size)) {
out[j] = rowTempSums[0][j] + rowTempSums[1][j];
}
}
if (use_nnpi_fma) {
fbgemm::RoundToFloat16(
reinterpret_cast<const float*>(out),
out,
block_size,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
}
if (normalize_by_length && lengths_data[m]) {
float scale = 1.f / lengths_data[m];
if (!use_fp16_for_embedding_only) {
// Fake fp16 rounding of scale and out
fbgemm::RoundToFloat16(
&scale, &scale, 1, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
fbgemm::RoundToFloat16(
reinterpret_cast<const float*>(out),
out,
block_size,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
}
// hack: context is not really used
math::Scale<float, float, CPUContext>(
block_size, scale, out, out, nullptr);
}
out += block_size;
}
return current == index_size;
}
enum {
DATA = 0,
WEIGHTS = 1,
INDICES = 1 + with_weights,
LENGTHS = 2 + with_weights,
};
};
} // namespace caffe2
| 10,661
| 33.063898
| 80
|
h
|
null |
pytorch-main/caffe2/contrib/fakelowp/lengths_reducer_ops.h
|
#pragma once
#include <fbgemm/FbgemmConvert.h>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/perfkernels/typed_axpy.h"
C10_DECLARE_bool(caffe2_fbgemm_fake_fp16_clamp);
C10_DECLARE_bool(caffe2_fbgemm_fake_fp16_clamp_denorms);
namespace caffe2 {
// A templated class that implements SparseLengths[Sum,WeightedSum,Mean].
template <
class InputTypes, // supported input types, such as TensorTypes<float>
bool USE_WEIGHT = 0, // Whether it is SparseLengthsWeightedSum
bool USE_MEAN = 0, // Whether this is SparseLengthsMean
bool USE_POSITIONAL_WEIGHT = 0,
bool USE_ACC_FP16 = 0, // Whether to use fp16 accumulation
bool USE_FP16_FOR_EMBEDDING_ONLY =
0 // Whether to use fp16 for embedding entries only
// USE_WEIGHT = 1 and USE_POSITIONAL_WEIGHT = 1
// -> SparseLengthsPositionalWeightedSum
>
class SparseLengthsReductionFakeFp16Op final : public Operator<CPUContext> {
public:
USE_OPERATOR_FUNCTIONS(CPUContext);
template <class... Args>
explicit SparseLengthsReductionFakeFp16Op(Args&&... args)
: Operator<CPUContext>(std::forward<Args>(args)...) {
static_assert(
!(USE_WEIGHT & USE_MEAN), "Cannot both specify weight and mean.");
}
~SparseLengthsReductionFakeFp16Op() noexcept override {}
// Currently, we support float and at::Half inputs for input data type, and
// int32_t and int64_t for the index type.
bool RunOnDevice() override {
return DispatchHelper<InputTypes>::call(this, Input(DATA));
}
template <typename InputType>
bool DoRunWithType() {
return DispatchHelper<TensorTypes2<int32_t, int64_t>, InputType>::call(
this, Input(INDICES));
}
template <typename InputType, typename IndexType>
bool DoRunWithType2() {
auto& dataInput = Input(DATA);
auto& indicesInput = Input(INDICES);
auto& lengthsInput = Input(LENGTHS);
CAFFE_ENFORCE_EQ(1, indicesInput.dim(), "INDICES must be a vector");
CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector");
const int64_t N = dataInput.size(0);
const int D = dataInput.size_from_dim(1);
const int64_t M = lengthsInput.size(0);
const int64_t indices_size = indicesInput.numel();
auto shape = dataInput.sizes().vec();
shape[0] = M;
auto* output = Output(0, shape, at::dtype<float>());
float* out_data = output->template mutable_data<float>();
const InputType* in_data = dataInput.template data<InputType>();
const IndexType* indices = indicesInput.template data<IndexType>();
const int* lengths = lengthsInput.template data<int>();
const float* in_weight = nullptr;
if (USE_WEIGHT) {
// static if
auto& weightInput = Input(WEIGHT);
CAFFE_ENFORCE_EQ(1, weightInput.dim(), "WEIGHT must be a vector");
if (!USE_POSITIONAL_WEIGHT) {
CAFFE_ENFORCE_EQ(
weightInput.numel(),
indices_size,
"Weight should have the same length as indices.");
}
in_weight = weightInput.template data<float>();
}
// Copied from EmbeddingLookupGenericSlow in perfkernels/embedding_lookup.cc
int64_t block_size = D;
int64_t output_size = M;
int64_t index_size = indices_size;
int64_t data_size = N;
const InputType* input = in_data;
const float* weights = in_weight;
bool normalize_by_lengths = USE_MEAN;
float* out = out_data;
int64_t current = 0;
for (const auto m : c10::irange(output_size)) {
memset(out, 0, sizeof(float) * block_size);
if (current + lengths[m] > index_size) {
return false;
}
for (int i = 0; i < lengths[m]; ++i) {
int64_t idx = indices[current];
if (idx < 0 || idx >= data_size) {
return false;
}
float w = 1.f;
if (weights) {
w = weights[USE_POSITIONAL_WEIGHT ? i : current];
if (!USE_FP16_FOR_EMBEDDING_ONLY) {
// Fake fp16 rounding of w
fbgemm::RoundToFloat16(
&w, &w, 1, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
}
}
if (USE_FP16_FOR_EMBEDDING_ONLY) {
std::vector<float> product_rounded(block_size);
if (std::is_same<InputType, at::Half>::value) {
TypedAxpy<InputType, float>(
block_size,
w,
input + block_size * indices[current],
product_rounded.data());
} else {
bool is_float = std::is_same<InputType, float>::value;
assert(is_float);
// Fake fp16 rounding of input
std::vector<float> input_rounded(block_size);
fbgemm::RoundToFloat16(
reinterpret_cast<const float*>(
input + block_size * indices[current]),
input_rounded.data(),
block_size,
FLAGS_caffe2_fbgemm_fake_fp16_clamp,
FLAGS_caffe2_fbgemm_fake_fp16_clamp_denorms);
TypedAxpy<float, float>(
block_size,
w,
reinterpret_cast<const float*>(input_rounded.data()),
product_rounded.data());
}
// Accumulate w x input to output
TypedAxpy<float, float>(
block_size,
1.0,
reinterpret_cast<const float*>(product_rounded.data()),
out);
} else if (USE_ACC_FP16) {
std::vector<float> product_rounded(block_size);
if (std::is_same<InputType, at::Half>::value) {
TypedAxpy<InputType, float>(
block_size,
w,
input + block_size * indices[current],
product_rounded.data());
} else {
bool is_float = std::is_same<InputType, float>::value;
assert(is_float);
// Fake fp16 rounding of input
std::vector<float> input_rounded(block_size);
fbgemm::RoundToFloat16(
reinterpret_cast<const float*>(
input + block_size * indices[current]),
input_rounded.data(),
block_size,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
TypedAxpy<float, float>(
block_size,
w,
reinterpret_cast<const float*>(input_rounded.data()),
product_rounded.data());
}
// Fake fp16 rounding of w x input
fbgemm::RoundToFloat16(
reinterpret_cast<const float*>(product_rounded.data()),
product_rounded.data(),
block_size,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
// Accumulate w x input to output
TypedAxpy<float, float>(
block_size,
1.0,
reinterpret_cast<const float*>(product_rounded.data()),
out);
// Fake fp16 rounding of out + w x input
fbgemm::RoundToFloat16(
reinterpret_cast<const float*>(out),
out,
block_size,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
} else {
if (std::is_same<InputType, at::Half>::value) {
TypedAxpy<InputType, float>(
block_size, w, input + block_size * indices[current], out);
} else {
bool is_float = std::is_same<InputType, float>::value;
assert(is_float);
// Fake fp16 rounding of input
std::vector<float> input_rounded(block_size);
fbgemm::RoundToFloat16(
reinterpret_cast<const float*>(
input + block_size * indices[current]),
input_rounded.data(),
block_size,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
TypedAxpy<float, float>(
block_size,
w,
reinterpret_cast<const float*>(input_rounded.data()),
out);
}
}
++current;
}
if (normalize_by_lengths && lengths[m]) {
float scale = 1.f / lengths[m];
if (!USE_FP16_FOR_EMBEDDING_ONLY) {
// Fake fp16 rounding of scale and out
fbgemm::RoundToFloat16(
&scale, &scale, 1, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
fbgemm::RoundToFloat16(
reinterpret_cast<const float*>(out),
out,
block_size,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
}
// hack: context is not really used
math::Scale<float, float, CPUContext>(
block_size, scale, out, out, nullptr);
}
if (!USE_FP16_FOR_EMBEDDING_ONLY) {
// Fake fp16 rounding of out
fbgemm::RoundToFloat16(
reinterpret_cast<const float*>(out),
reinterpret_cast<float*>(out),
block_size,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
}
out += block_size;
}
return current == index_size;
}
enum {
DATA = 0, // Data input.
WEIGHT = 1, // Weight input used in SparseLengthsWeightedSum
INDICES = 1 + USE_WEIGHT, // 1 in SparseLengths[Sum,Mean] and
// 2 in SparseLengthsWeightedSum
LENGTHS = 2 + USE_WEIGHT, // 2 in SparseLengths[Sum, Mean],
// 3 in SparseLengthsWeightedSum
};
};
} // namespace caffe2
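// Shape/semantics sketch (illustrative, unweighted SparseLengthsSum): with
//   DATA    = [[1, 1], [2, 2], [3, 3], [4, 4]]   (N = 4, D = 2)
//   INDICES = [0, 3, 2]
//   LENGTHS = [2, 1]
// the output has shape 2 x 2 and, ignoring fp16 rounding, equals
//   [[1 + 4, 1 + 4], [3, 3]] = [[5, 5], [3, 3]].
// SparseLengthsMean additionally divides each output row by its length.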
| 9,405
| 33.966543
| 80
|
h
|
null |
pytorch-main/caffe2/contrib/fakelowp/quant_lut_fp16_fake_op.h
|
#pragma once
#include <array>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor_int8.h"
#include "caffe2/operators/quantized/int8_utils.h"
#include <immintrin.h>
#include <emmintrin.h>
namespace caffe2 {
namespace {
class TanhInt8QuantizeNNPIOp final : public Operator<CPUContext> {
public:
using Operator<CPUContext>::Operator;
bool RunOnDevice() override {
const auto& X = Input(0);
auto* Y = Outputs()[0]->template GetMutable<int8::Int8TensorCPU>();
Y->t.ResizeLike(X);
int32_t Y_offset = this->template GetSingleArgument<int>("Y_zero_point", 0);
auto Y_scale = this->template GetSingleArgument<float>("Y_scale", 1);
Y->scale = Y_scale;
Y->zero_point = Y_offset;
constexpr int tanhLUTMinOffset = 0;
constexpr int tanhLUTMaxOffset = 18000;
constexpr int lutSize = tanhLUTMaxOffset - tanhLUTMinOffset;
std::array<uint8_t, lutSize> tanhLUT;
Y_scale = 1.0f / Y_scale;
// create table once
for (const auto i : c10::irange(lutSize)) {
short input = i + tanhLUTMinOffset;
float x = _cvtsh_ss(input);
float tanh_x = tanh(x);
tanh_x = round(tanh_x * Y_scale + Y_offset);
if (tanh_x < 0 || tanh_x > 255.0) {
tanh_x = 255.0;
}
uint32_t tanh_quant = (uint32_t)(tanh_x);
tanhLUT[i] = (uint8_t)tanh_quant;
}
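// How the lookup works (for reference): the table index is the raw fp16 bit
// pattern of |x| produced by _cvtss_sh, which increases monotonically with
// the value for non-negative halfs, so a direct array access recovers
// tanh(|x|). Bit patterns at or above tanhLUTMaxOffset (roughly |x| > 6.3,
// where tanh has saturated to ~1) are clamped to the last entry, and the
// odd symmetry tanh(-x) = -tanh(x) is restored below by reflecting the
// quantized value around Y_offset.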
const float* X_data = X.template data<float>();
for (const auto i : c10::irange(X.numel())) {
short val = _cvtss_sh(X_data[i], 0);
unsigned short max16BitPositive = 0x7FFF;
unsigned short input16Bit = (*(unsigned short*)& val);
short shortAbsInput = input16Bit & max16BitPositive; // mask out negative bit
short clampShortAbsInput = shortAbsInput;
if (shortAbsInput < (short)tanhLUTMinOffset) {
clampShortAbsInput = (short)tanhLUTMinOffset;
}
if (shortAbsInput > (short)(tanhLUTMaxOffset - 1)) {
clampShortAbsInput = (short)(tanhLUTMaxOffset - 1);
}
short inputInLutRange = clampShortAbsInput - tanhLUTMinOffset;
short temp = tanhLUT[inputInLutRange];
if (input16Bit > max16BitPositive) { // negative value
temp = temp - Y_offset;
temp = temp * (-1);
temp = temp + Y_offset;
}
uint8_t output = (uint8_t)temp;
if (temp < 0) {
output = 0;
}
Y->t.mutable_data<uint8_t>()[i] = output;
}
return true;
}
};
} // namespace
} // namespace caffe2
| 2,539
| 26.608696
| 85
|
h
|
null |
pytorch-main/caffe2/contrib/fakelowp/spatial_batch_norm_fp16_fake_op.h
|
#pragma once
#include <algorithm>
#include <array>
#include <functional>
#include <string>
#include <vector>
#include <fbgemm/FbgemmConvert.h>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/eigen_utils.h"
#include "caffe2/utils/math.h"
#include "fp16_fma.h"
C10_DECLARE_bool(caffe2_fbgemm_fake_fp16_clamp);
namespace caffe2 {
class SpatialBNFakeLoweredFp16Op : public Operator<CPUContext> {
public:
USE_OPERATOR_FUNCTIONS(CPUContext);
template <class... Args>
explicit SpatialBNFakeLoweredFp16Op(Args&&... args)
: Operator<CPUContext>(std::forward<Args>(args)...),
OP_SINGLE_ARG(bool, OpSchema::Arg_IsTest, is_test_, false),
OP_SINGLE_ARG(double, "epsilon", epsilon_, 1e-5),
order_(StringToStorageOrder(
this->template GetSingleArgument<std::string>("order", "NCHW"))),
OP_SINGLE_ARG(int, "num_batches", num_batches_, 1) {
// TODO: only support NCHW for now
CAFFE_ENFORCE_EQ(order_, StorageOrder::NCHW);
CAFFE_ENFORCE(
(is_test_ && OutputSize() == 1) || (!is_test_ && OutputSize() == 5));
CAFFE_ENFORCE_GT(epsilon_, 0);
}
~SpatialBNFakeLoweredFp16Op() override = default;
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<float>>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
const auto& X = Input(INPUT);
const auto& scale = Input(SCALE);
const auto& bias = Input(BIAS);
const int ndim = X.dim();
CAFFE_ENFORCE_GE(ndim, 2);
const int N = X.dim32(0);
const int C =
(order_ == StorageOrder::NCHW ? X.dim32(1) : X.dim32(ndim - 1));
const std::vector<int> X_dims(X.sizes().cbegin(), X.sizes().cend());
const int HxW =
std::accumulate(
X_dims.cbegin() + 1, X_dims.cend(), 1, std::multiplies<int>()) /
C;
CAFFE_ENFORCE_EQ(scale.numel(), C);
CAFFE_ENFORCE_EQ(bias.numel(), C);
auto* Y = Output(OUTPUT, X.sizes(), at::dtype<T>());
T* Y_data = Y->template mutable_data<T>();
ReinitializeTensor(
&alpha_, {C}, at::dtype<T>().device(CPUContext::GetDeviceType()));
T* alpha_data = alpha_.template mutable_data<T>();
// We only support this case at the moment
CAFFE_ENFORCE(is_test_);
std::vector<float> X_fp16(X.numel());
fbgemm::RoundToFloat16(
X.template data<T>(),
X_fp16.data(),
N * C * HxW,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
if (N == 0) {
return true;
}
const auto& mean = Input(EST_MEAN);
const auto& var = Input(EST_VAR);
CAFFE_ENFORCE_EQ(mean.numel(), C);
CAFFE_ENFORCE_EQ(var.numel(), C);
std::vector<float> mean_fp16(C), var_fp16(C);
std::vector<float> scale_fp16(C), bias_fp16(C);
fbgemm::RoundToFloat16(
scale.template data<T>(),
scale_fp16.data(),
C,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
fbgemm::RoundToFloat16(
bias.template data<T>(),
bias_fp16.data(),
C,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
fbgemm::RoundToFloat16(
mean.template data<T>(),
mean_fp16.data(),
C,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
fbgemm::RoundToFloat16(
var.template data<T>(),
var_fp16.data(),
C,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
EigenVectorArrayMap<T> alpha_arr(alpha_data, C);
std::vector<float> tmp(C);
EigenVectorArrayMap<T> tmp_arr(tmp.data(), C);
auto epsilon = static_cast<T>(epsilon_);
fbgemm::RoundToFloat16(
&epsilon, &epsilon, 1, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
tmp_arr = (ConstEigenVectorArrayMap<T>(var_fp16.data(), C) + epsilon);
fbgemm::RoundToFloat16(
tmp.data(), tmp.data(), C, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
tmp_arr = tmp_arr.pow(0.5);
fbgemm::RoundToFloat16(
tmp.data(), tmp.data(), C, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
alpha_arr = ConstEigenVectorArrayMap<T>(scale_fp16.data(), C) / tmp_arr;
fbgemm::RoundToFloat16(
alpha_data, alpha_data, C, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
AffineChannel_NCHW(
N,
C,
HxW,
X_fp16.data(),
alpha_data,
bias_fp16.data(),
mean_fp16.data(),
Y_data);
fbgemm::RoundToFloat16(
Y_data, Y_data, N * HxW * C, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
return true;
}
protected:
void AffineChannel_NCHW(
const int N,
const int C,
const int HxW,
const float* X,
const float* scale,
const float* bias,
const float* mean,
float* Y) {
ConstEigenVectorArrayMap<float> scale_arr(scale, C);
ConstEigenVectorArrayMap<float> bias_arr(bias, C);
ConstEigenVectorArrayMap<float> mean_arr(mean, C);
const int stride = C * HxW;
const float* X_ptr = X;
float* Y_ptr = Y;
for (const auto i : c10::irange(N)) {
EigenArrayMap<float>(Y_ptr, HxW, C) =
ConstEigenArrayMap<float>(X_ptr, HxW, C).rowwise() -
mean_arr.transpose();
fbgemm::RoundToFloat16(
Y_ptr, Y_ptr, HxW * C, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
EigenArrayMap<float>(Y_ptr, HxW, C).rowwise() *= scale_arr.transpose();
fbgemm::RoundToFloat16(
Y_ptr, Y_ptr, HxW * C, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
EigenArrayMap<float>(Y_ptr, HxW, C).rowwise() += bias_arr.transpose();
X_ptr += stride;
Y_ptr += stride;
}
fbgemm::RoundToFloat16(
Y, Y, N * HxW * C, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
}
const bool is_test_;
double epsilon_;
const StorageOrder order_;
const int num_batches_;
Tensor alpha_;
INPUT_TAGS(
INPUT,
SCALE,
BIAS,
EST_MEAN,
EST_VAR,
BATCH_MEAN_SUM,
BATCH_VAR_SUM);
OUTPUT_TAGS(OUTPUT, RUNNING_MEAN, RUNNING_VAR, SAVED_MEAN, SAVED_INV_STD);
};
// Emulation of the NNPI SpatialBN kernel
class SpatialBNFakeFp16Op : public Operator<CPUContext> {
public:
USE_OPERATOR_FUNCTIONS(CPUContext);
template <class... Args>
explicit SpatialBNFakeFp16Op(Args&&... args)
: Operator<CPUContext>(std::forward<Args>(args)...),
OP_SINGLE_ARG(bool, OpSchema::Arg_IsTest, is_test_, false),
OP_SINGLE_ARG(float, "epsilon", epsilon_, 1e-5),
order_(StringToStorageOrder(
this->template GetSingleArgument<std::string>("order", "NCHW"))),
OP_SINGLE_ARG(int, "num_batches", num_batches_, 1) {
// TODO: only support NCHW for now
CAFFE_ENFORCE_EQ(order_, StorageOrder::NCHW);
// We only support this case at the moment
CAFFE_ENFORCE(is_test_);
CAFFE_ENFORCE_GT(epsilon_, 0);
}
~SpatialBNFakeFp16Op() override = default;
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<float>>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
LOG(INFO) << "Running with " << sizeof(T);
const auto& X = Input(INPUT);
const auto& scale = Input(SCALE);
const auto& bias = Input(BIAS);
const int ndim = X.dim();
CAFFE_ENFORCE_GE(ndim, 2);
const int N = X.dim32(0);
const int C =
(order_ == StorageOrder::NCHW ? X.dim32(1) : X.dim32(ndim - 1));
const std::vector<int> X_dims(X.sizes().cbegin(), X.sizes().cend());
const int HxW =
std::accumulate(
X_dims.cbegin() + 1, X_dims.cend(), 1, std::multiplies<int>()) /
C;
CAFFE_ENFORCE_EQ(scale.numel(), C);
CAFFE_ENFORCE_EQ(bias.numel(), C);
auto* Y = Output(OUTPUT, X.sizes(), at::dtype<T>());
T* Y_data = Y->template mutable_data<T>();
ReinitializeTensor(
&alpha_, {C}, at::dtype<T>().device(CPUContext::GetDeviceType()));
ReinitializeTensor(
&beta_, {C}, at::dtype<T>().device(CPUContext::GetDeviceType()));
T* alpha_data = alpha_.template mutable_data<T>();
T* beta_data = beta_.template mutable_data<T>();
std::vector<float> X_fp16(X.numel());
fbgemm::RoundToFloat16(
X.template data<T>(),
X_fp16.data(),
N * C * HxW,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
const auto& mean = Input(EST_MEAN);
const auto& var = Input(EST_VAR);
CAFFE_ENFORCE_EQ(mean.numel(), C);
CAFFE_ENFORCE_EQ(var.numel(), C);
std::vector<float> mean_fp16(C), var_fp16(C);
std::vector<float> scale_fp16(C), bias_fp16(C);
fbgemm::RoundToFloat16(
scale.template data<T>(),
scale_fp16.data(),
C,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
fbgemm::RoundToFloat16(
bias.template data<T>(),
bias_fp16.data(),
C,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
fbgemm::RoundToFloat16(
mean.template data<T>(),
mean_fp16.data(),
C,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
fbgemm::RoundToFloat16(
var.template data<T>(),
var_fp16.data(),
C,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
// This part is run on the CPU/x86 core
ComputeFusedParam<T>(
C,
scale_fp16.data(),
bias_fp16.data(),
mean_fp16.data(),
var_fp16.data(),
alpha_data,
beta_data);
AffineChannel_NCHW(N, C, HxW, X_fp16.data(), alpha_data, beta_data, Y_data);
fbgemm::RoundToFloat16(
Y_data, Y_data, N * HxW * C, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
return true;
}
protected:
template <typename T>
void ComputeFusedParam(
const int C,
const T* scale,
const T* bias,
const T* mean,
const T* var,
T* alpha,
T* beta) {
// alpha = scale / sqrt(var + epsilon)
// beta = bias - alpha * mean
EigenVectorArrayMap<T> alpha_arr(alpha, C);
EigenVectorArrayMap<T> beta_arr(beta, C);
std::vector<T> tmp(C, 0.0);
EigenVectorArrayMap<T> tmp_arr(tmp.data(), C);
tmp_arr = ConstEigenVectorArrayMap<T>(var, C) + static_cast<T>(epsilon_);
// sqrt using intrinsics
int i = 0;
constexpr int blockSize = 8;
for (i = 0; i + blockSize <= C; i += blockSize) {
__m256 t = _mm256_loadu_ps(&tmp[i]);
_mm256_storeu_ps(&tmp[i], _mm256_sqrt_ps(t));
}
for (; i < C; i++) {
tmp[i] = sqrt(tmp[i]);
}
alpha_arr = ConstEigenVectorArrayMap<T>(scale, C) / tmp_arr;
beta_arr = ConstEigenVectorArrayMap<T>(bias, C) -
alpha_arr * ConstEigenVectorArrayMap<T>(mean, C);
fbgemm::RoundToFloat16(
alpha, alpha, C, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
fbgemm::RoundToFloat16(beta, beta, C, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
}
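// Worked example (illustrative): with scale = 1, bias = 0, mean = 0.5,
// var = 0.25 and a negligible epsilon, alpha = 1 / sqrt(0.25) = 2 and
// beta = 0 - 2 * 0.5 = -1, so AffineChannel_NCHW below computes
// Y = 2 * X - 1 for that channel.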
void AffineChannel_NCHW(
const int N,
const int C,
const int HxW,
const float* X,
const float* scale,
const float* bias,
float* Y) {
ConstEigenVectorArrayMap<float> scale_arr(scale, C);
ConstEigenVectorArrayMap<float> bias_arr(bias, C);
const int stride = C * HxW;
const float* X_ptr = X;
float* Y_ptr = Y;
// Do Y = X * scale + bias
for (const auto i : c10::irange(N)) {
for (const auto j : c10::irange(C)) {
for (const auto k : c10::irange(HxW)) {
Y_ptr[HxW * j + k] = bias[j];
}
std::vector<float> s2(HxW, scale[j]);
fake_fp16::fma_fp16(
HxW, X_ptr + j * HxW, s2.data(), Y_ptr + HxW * j);
}
X_ptr += stride;
Y_ptr += stride;
}
fbgemm::RoundToFloat16(
Y, Y, N * HxW * C, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
}
const bool is_test_;
float epsilon_;
const StorageOrder order_;
const int num_batches_;
Tensor alpha_;
Tensor beta_;
INPUT_TAGS(
INPUT,
SCALE,
BIAS,
EST_MEAN,
EST_VAR,
BATCH_MEAN_SUM,
BATCH_VAR_SUM);
OUTPUT_TAGS(OUTPUT, RUNNING_MEAN, RUNNING_VAR, SAVED_MEAN, SAVED_INV_STD);
};
} // namespace caffe2
| 11,829
| 28.873737
| 80
|
h
|
null |
pytorch-main/caffe2/contrib/fakelowp/sum_fp16_fake_op.h
|
#pragma once
#include <caffe2/core/operator.h>
C10_DECLARE_bool(caffe2_fbgemm_fake_fp16_clamp);
namespace caffe2 {
template <class Context>
class SumFP16FP16AccOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(SumFP16FP16AccOp);
bool DoRunWithFloat() {
auto& input0 = Input(0);
size_t N = input0.numel();
auto* output = Output(0, input0.sizes(), at::dtype<float>());
// Dimension checking
for (const auto i : c10::irange(1, InputSize())) {
if (output->sizes() != Input(i).sizes()) {
CAFFE_THROW(
"Check failed: output->sizes() == Input(i).sizes().",
"Description: Input #",
i,
", input dimension:",
Input(i).sizes(),
" should match output dimension: ",
output->sizes());
}
}
float* output_data = output->template mutable_data<float>();
memset(output_data, 0, sizeof(float) * input0.numel());
std::vector<float> t1(N);
std::vector<float> t2(N);
for (const auto i : c10::irange(InputSize())) {
fbgemm::RoundToFloat16(
Input(i).template data<float>(),
t1.data(),
N,
FLAGS_caffe2_fbgemm_fake_fp16_clamp);
fbgemm::RoundToFloat16(
output_data, t2.data(), N, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
math::Add(N, t1.data(), t2.data(), output_data, &context_);
}
fbgemm::RoundToFloat16(
output_data, output_data, N, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
return true;
}
bool RunOnDevice() override {
if (Input(0).template IsType<float>()) {
return DoRunWithFloat();
} else {
CAFFE_THROW(
"Sum operator only supports 32-bit float, but",
" input was of type ",
Input(0).dtype().name());
}
}
};
} // namespace caffe2
| 1,861
| 25.6
| 74
|
h
|
null |
pytorch-main/caffe2/contrib/fakelowp/unary_fp16_fake_op.h
|
#pragma once
#include <vector>
#include <fbgemm/FbgemmConvert.h>
#include "caffe2/operators/elementwise_ops.h"
#include "caffe2/utils/eigen_utils.h"
#include "caffe2/utils/math.h"
C10_DECLARE_bool(caffe2_fbgemm_fake_fp16_clamp);
namespace caffe2 {
using namespace std;
template <class Context>
struct ReluFakeFp16Functor {
template <typename T>
bool operator()(const int N, const T* X, T* Y, Context* /* unused */) const {
std::vector<float> X_fp16(N);
fbgemm::RoundToFloat16(
X, X_fp16.data(), N, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
EigenVectorMap<T>(Y, N) =
ConstEigenVectorMap<float>(X_fp16.data(), N).cwiseMax(T(0));
return true;
}
};
template <class Context>
struct SqrFakeFp16Functor {
template <typename T>
bool operator()(const int N, const T* X, T* Y, Context* context) const {
std::vector<float> X_fp16(N);
fbgemm::RoundToFloat16(
X, X_fp16.data(), N, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
math::Sqr(N, X_fp16.data(), Y, context);
fbgemm::RoundToFloat16(Y, Y, N, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
return true;
}
};
struct SigmoidFakeIdealFp16Functor {
template <typename T>
bool operator()(const int N, const T* X, T* Y, CPUContext* /* unused */)
const {
std::vector<float> X_fp16(N);
fbgemm::RoundToFloat16(X, X_fp16.data(), N);
EigenVectorArrayMap<T>(Y, N) =
T(1) / (T(1) + (-ConstEigenVectorArrayMap<T>(X_fp16.data(), N)).exp());
fbgemm::RoundToFloat16(Y, Y, N, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
return true;
}
};
struct TanhFakeIdealFp16Functor {
template <typename T>
bool operator()(const int N, const T* X, T* Y, CPUContext* context) const {
std::vector<float> X_fp16(N);
fbgemm::RoundToFloat16(
X, X_fp16.data(), N, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
math::Tanh<T, CPUContext>(N, X_fp16.data(), Y, context);
fbgemm::RoundToFloat16(Y, Y, N, FLAGS_caffe2_fbgemm_fake_fp16_clamp);
return true;
}
};
} // namespace caffe2
namespace fake_fp16 {
at::Half CalcSigmoidByLUT(at::Half x);
at::Half CalcSwishByLUT(at::Half x);
at::Half CalcSwishByLUTCubic(at::Half x);
at::Half CalcTanhByLUT(at::Half input);
} // namespace fake_fp16
| 2,211
| 28.493333
| 79
|
h
|
null |
pytorch-main/caffe2/contrib/gloo/allgather_ops.h
|
/**
* Copyright (c) 2017-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm>
#include "caffe2/contrib/gloo/common.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/types.h"
#include <gloo/algorithm.h>
#include <gloo/common/error.h>
#include <gloo/context.h>
namespace caffe2 {
namespace gloo {
template <class Context>
class AllgatherOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
AllgatherOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
ws_(ws),
status_blob_(
OperatorBase::GetSingleArgument<std::string>("status_blob", "")) {
if (status_blob_ != "") {
ws_->CreateBlob(status_blob_);
}
}
~AllgatherOp() override {}
bool RunOnDevice() override {
std::call_once(once_, [&] { initialize(); });
// If any parameter has changed in between runs, the initialized
// algorithm is invalid and cannot be used.
update(current_);
CAFFE_ENFORCE(current_ == init_, "Inputs/outputs have changed");
try {
algorithm_->run();
} catch (::gloo::IoException& ioe) {
LOG(ERROR) << "Caught gloo IO exception: " << ioe.what();
if (status_blob_ != "") {
signalFailure(ws_->GetBlob(status_blob_), ioe);
return false;
} else {
throw;
}
}
return true;
}
protected:
void initialize() {
// Allocate output tensor
CAFFE_ENFORCE_EQ(OutputSize(), 1);
auto comm_size =
OperatorBase::Input<std::shared_ptr<::gloo::Context>>(0)->size;
const auto dims = std::vector<int64_t>(
1, (InputSize() - 1) * Input(1).numel() * comm_size);
Output(0)->Resize(dims);
// Store which inputs/outputs this instance initialized with
update(init_);
CAFFE_ENFORCE_EQ(init_.outputs.size(), 1);
// Verify tensors all have same size
size_t size = Input(1).numel();
for (const auto i : c10::irange(2, InputSize())) {
CAFFE_ENFORCE_EQ(Input(i).numel(), size);
}
// Verify tensors all have same type
TypeMeta meta = Input(1).dtype();
for (const auto i : c10::irange(2, InputSize())) {
CAFFE_ENFORCE(Input(i).dtype() == meta);
}
// Finally initialize the algorithm
initializeAlgorithm();
}
void initializeAlgorithm();
std::once_flag once_;
std::unique_ptr<::gloo::Algorithm> algorithm_;
// Captures the parameters passed to Gloo when first initialized.
// An instance is updated every time this op runs and is compared
// to the reference instance for equality. If any parameter has
// changed from run to run, the initialized algorithm is invalid.
void update(GlooParameters& params) {
params.context = OperatorBase::Input<std::shared_ptr<::gloo::Context>>(0);
params.inputs.resize(InputSize() - 1);
params.size = Input(1).numel();
params.meta = Input(1).dtype();
for (const auto i : c10::irange(params.inputs.size())) {
params.inputs[i] = Input(i + 1).raw_data();
}
params.outputs.resize(OutputSize());
params.outputs[0] = Output(0)->raw_mutable_data(params.meta);
}
GlooParameters init_;
GlooParameters current_;
Workspace* ws_;
std::string status_blob_;
};
} // namespace gloo
} // namespace caffe2
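// Size sketch (illustrative): with a communicator of size 2, two input
// tensors per rank and 8 elements per tensor, initialize() resizes the
// single output to a flat vector of 2 * 8 * 2 = 32 elements; after run()
// every rank holds the concatenation of all ranks' inputs.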
| 3,834
| 28.274809
| 78
|
h
|
null |
pytorch-main/caffe2/contrib/gloo/allreduce_ops.h
|
#pragma once
#include <algorithm>
#include "caffe2/contrib/gloo/common.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include <gloo/algorithm.h>
#include <gloo/common/error.h>
#include <gloo/context.h>
namespace caffe2 {
namespace gloo {
template <class Context>
class AllreduceOp final : public Operator<Context> {
enum Mode { RING_FULL, RING_CHUNKED, HALVING_DOUBLING, BCUBE };
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
AllreduceOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
ws_(ws),
status_blob_(
OperatorBase::GetSingleArgument<std::string>("status_blob", "")),
gpu_direct_(
OperatorBase::GetSingleArgument<bool>("gpu_direct", false)) {
if (status_blob_ != "") {
ws_->CreateBlob(status_blob_);
}
}
~AllreduceOp() override {}
bool RunOnDevice() override {
std::call_once(once_, [&] { initialize(); });
// If any parameter has changed in between runs, the initialized
// algorithm is invalid and cannot be used.
update(current_);
CAFFE_ENFORCE(current_ == init_, "Inputs/outputs have changed");
try {
algorithm_->run();
} catch (::gloo::IoException& ioe) {
LOG(ERROR) << "Caught gloo IO exception: " << ioe.what();
if (status_blob_ != "") {
signalFailure(ws_->GetBlob(status_blob_), ioe);
return false;
} else {
throw;
}
}
return true;
}
protected:
void initialize() {
Mode mode = HALVING_DOUBLING;
// Store which inputs/outputs this instance initialized with
update(init_);
// Verify inputs == outputs
CAFFE_ENFORCE_EQ(init_.inputs.size(), init_.outputs.size());
for (const auto i : c10::irange(0U, init_.inputs.size())) {
CAFFE_ENFORCE_EQ(init_.inputs[i], init_.outputs[i]);
}
// Verify tensors all have same size
auto size = Input(1).numel();
for (const auto i : c10::irange(2, InputSize())) {
CAFFE_ENFORCE_EQ(Input(i).numel(), size);
}
// Verify tensors all have same type
TypeMeta meta = Input(1).dtype();
for (const auto i : c10::irange(2, InputSize())) {
CAFFE_ENFORCE(Input(i).dtype() == meta);
}
switch (mode) {
case RING_FULL:
initializeRingFull();
return;
case RING_CHUNKED:
initializeRingChunked();
return;
case HALVING_DOUBLING:
initializeHalvingDoubling();
return;
case BCUBE:
initializeBcube();
return;
}
CAFFE_ENFORCE(false, "Unreachable code");
}
void initializeBcube();
void initializeHalvingDoubling();
void initializeRingFull();
void initializeRingChunked();
std::once_flag once_;
std::unique_ptr<::gloo::Algorithm> algorithm_;
// Captures the parameters passed to Gloo when first initialized.
// An instance is updated every time this op runs and is compared
// to the reference instance for equality. If any parameter has
// changed from run to run, the initialized algorithm is invalid.
void update(GlooParameters& params) {
params.context = OperatorBase::Input<std::shared_ptr<::gloo::Context>>(0);
params.inputs.resize(InputSize() - 1);
params.outputs.resize(OutputSize());
for (const auto i : c10::irange(0U, params.inputs.size())) {
params.inputs[i] = Input(i + 1).raw_data();
params.outputs[i] = Output(i)->raw_mutable_data();
}
params.size = Output(0)->numel();
params.meta = Output(0)->dtype();
}
GlooParameters init_;
GlooParameters current_;
Workspace* ws_;
std::string status_blob_;
const bool gpu_direct_;
};
} // namespace gloo
} // namespace caffe2
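// Semantics sketch (illustrative): a sum-allreduce combines the in-place
// tensors elementwise across ranks, e.g. with two ranks holding [1, 2] and
// [3, 4] respectively, both end up with [4, 6]. The mode selected above is
// fixed to HALVING_DOUBLING; the variants differ only in communication
// pattern, not in the reduced result.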
| 3,705
| 26.451852
| 78
|
h
|
null |
pytorch-main/caffe2/contrib/gloo/barrier_ops.h
|
#pragma once
#include "caffe2/contrib/gloo/common.h"
#include "caffe2/core/operator.h"
#include <gloo/algorithm.h>
#include <gloo/barrier_all_to_one.h>
#include <gloo/common/error.h>
#include <gloo/context.h>
namespace caffe2 {
namespace gloo {
template <class Context>
class BarrierOp final : public Operator<Context> {
public:
BarrierOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
ws_(ws),
status_blob_(
OperatorBase::GetSingleArgument<std::string>("status_blob", "")) {
if (status_blob_ != "") {
ws_->CreateBlob(status_blob_);
}
}
~BarrierOp() override {}
bool RunOnDevice() override {
auto context = OperatorBase::Input<std::shared_ptr<::gloo::Context>>(0);
std::call_once(once_, [&] {
initContext_ = context;
// Use an all-to-one barrier synchronizing against rank 0
algorithm_.reset(new ::gloo::BarrierAllToOne(initContext_, 0));
});
// If any parameter has changed in between runs, the initialized
// algorithm is invalid and cannot be used.
CAFFE_ENFORCE(context == initContext_, "Context has changed");
try {
algorithm_->run();
} catch (::gloo::IoException& ioe) {
LOG(ERROR) << "Caught gloo IO exception: " << ioe.what();
if (status_blob_ != "") {
signalFailure(ws_->GetBlob(status_blob_), ioe);
return false;
} else {
throw;
}
}
return true;
}
protected:
std::once_flag once_;
std::shared_ptr<::gloo::Context> initContext_;
std::unique_ptr<::gloo::Algorithm> algorithm_;
Workspace* ws_;
std::string status_blob_;
};
} // namespace gloo
} // namespace caffe2
| 1,703
| 25.625
| 78
|
h
|
null |
pytorch-main/caffe2/contrib/gloo/broadcast_ops.h
|
#pragma once
#include <algorithm>
#include "caffe2/contrib/gloo/common.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/types.h"
#include <gloo/algorithm.h>
#include <gloo/common/error.h>
#include <gloo/context.h>
namespace caffe2 {
namespace gloo {
template <class Context>
class BroadcastOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
BroadcastOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
root_(OperatorBase::template GetSingleArgument<int>("root", 0)),
ws_(ws),
status_blob_(
OperatorBase::GetSingleArgument<std::string>("status_blob", "")) {
if (status_blob_ != "") {
ws_->CreateBlob(status_blob_);
}
}
~BroadcastOp() override {}
bool RunOnDevice() override {
std::call_once(once_, [&] { initialize(); });
// If any parameter has changed in between runs, the initialized
// algorithm is invalid and cannot be used.
update(current_);
CAFFE_ENFORCE(current_ == init_, "Inputs/outputs have changed");
try {
algorithm_->run();
} catch (::gloo::IoException& ioe) {
LOG(ERROR) << "Caught gloo IO exception: " << ioe.what();
if (status_blob_ != "") {
signalFailure(ws_->GetBlob(status_blob_), ioe);
return false;
} else {
throw;
}
}
return true;
}
protected:
void initialize() {
// Store which inputs/outputs this instance initialized with
update(init_);
// Verify inputs == outputs
CAFFE_ENFORCE_EQ(init_.inputs.size(), init_.outputs.size());
for (const auto i : c10::irange(init_.inputs.size())) {
CAFFE_ENFORCE_EQ(init_.inputs[i], init_.outputs[i]);
}
// Verify tensors all have same size
size_t size = Input(1).numel();
for (const auto i : c10::irange(2, InputSize())) {
CAFFE_ENFORCE_EQ(Input(i).numel(), size);
}
    // Verify tensors all have same type
TypeMeta meta = Input(1).dtype();
for (const auto i : c10::irange(2, InputSize())) {
CAFFE_ENFORCE(Input(i).dtype() == meta);
}
// Finally initialize the algorithm
initializeAlgorithm();
}
void initializeAlgorithm();
const int root_;
std::once_flag once_;
std::unique_ptr<::gloo::Algorithm> algorithm_;
// Captures the parameters passed to Gloo when first initialized.
// An instance is updated every time this op runs and is compared
// to the reference instance for equality. If any parameter has
// changed from run to run, the initialized algorithm is invalid.
void update(GlooParameters& params) {
params.context = OperatorBase::Input<std::shared_ptr<::gloo::Context>>(0);
params.inputs.resize(InputSize() - 1);
params.outputs.resize(OutputSize());
for (const auto i : c10::irange(params.inputs.size())) {
params.inputs[i] = Input(i + 1).raw_data();
params.outputs[i] = Output(i)->raw_mutable_data();
}
params.size = Output(0)->numel();
params.meta = Output(0)->dtype();
}
GlooParameters init_;
GlooParameters current_;
Workspace* ws_;
std::string status_blob_;
};
} // namespace gloo
} // namespace caffe2
| 3,185
| 27.19469
| 78
|
h
|
null |
pytorch-main/caffe2/contrib/gloo/common.h
|
#pragma once
#include <exception>
#include "caffe2/core/blob.h"
#include <gloo/config.h>
#include <gloo/context.h>
#include <gloo/transport/device.h>
namespace caffe2 {
namespace gloo {
TORCH_API void signalFailure(Blob* status_blob, std::exception& exception);
struct createDeviceAttr {
// "tcp" or "ibverbs"
std::string transport;
// E.g. "eth0" (tcp), or "mlx5_0" (ibverbs).
// This may be empty to make Gloo figure it out.
std::string interface;
};
TORCH_API std::shared_ptr<::gloo::transport::Device> createDevice(
const createDeviceAttr attr);
// Captures the parameters passed to Gloo.
struct GlooParameters {
std::shared_ptr<::gloo::Context> context;
std::vector<const void*> inputs;
std::vector<void*> outputs;
size_t size;
TypeMeta meta;
template <typename T>
std::vector<const T*> getInputs() {
std::vector<const T*> result;
result.reserve(inputs.size());
for (auto& input : inputs) {
result.push_back(reinterpret_cast<const T*>(input));
}
return result;
}
template <typename T>
std::vector<T*> getOutputs() {
std::vector<T*> result;
result.reserve(outputs.size());
for (auto& output : outputs) {
result.push_back(reinterpret_cast<T*>(output));
}
return result;
}
template <typename T>
T* getOutput() {
return reinterpret_cast<T*>(outputs[0]);
}
template <typename T>
bool IsType() const {
return meta.Match<T>();
}
bool operator==(GlooParameters const& other) const {
return context == other.context && inputs == other.inputs &&
outputs == other.outputs && size == other.size;
}
};
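// ---------------------------------------------------------------------------
// Illustrative sketch (added for this document, not upstream code): the ops in
// this directory fill one GlooParameters instance at initialization time and
// another one on every run, compare them with operator==, and use the typed
// accessors when handing buffers to a Gloo algorithm. The helper below only
// exercises the members declared above; its name and signature are assumptions
// made for the example.
inline bool ExampleCheckAndExtractFloatOutputs(
    GlooParameters& init,
    GlooParameters& current,
    std::vector<float*>* outputs) {
  // Re-running an op with different blobs, sizes or context invalidates any
  // cached algorithm; operator== compares context, buffer pointers and size.
  if (!(current == init)) {
    return false;
  }
  // Typed views over the type-erased output pointers.
  if (current.IsType<float>()) {
    *outputs = current.getOutputs<float>();
  }
  return true;
}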
} // namespace gloo
} // namespace caffe2
| 1,685
| 21.783784
| 75
|
h
|
null |
pytorch-main/caffe2/contrib/gloo/common_world_ops.h
|
#pragma once
#include "caffe2/contrib/gloo/common.h"
#include "caffe2/contrib/gloo/store_handler.h"
#include "caffe2/core/operator.h"
#include "caffe2/distributed/store_handler.h"
#include <gloo/common/error.h>
#include <gloo/config.h>
#include <gloo/rendezvous/context.h>
#include <gloo/rendezvous/prefix_store.h>
#if defined(GLOO_USE_MPI) && GLOO_USE_MPI
#include <gloo/mpi/context.h>
#endif
namespace caffe2 {
namespace gloo {
template <class Context>
class CreateCommonWorld final : public Operator<Context> {
public:
using CommonWorld = std::shared_ptr<::gloo::Context>;
USE_OPERATOR_CONTEXT_FUNCTIONS;
CreateCommonWorld(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
size_(OperatorBase::template GetSingleArgument<int>("size", 0)),
rank_(OperatorBase::template GetSingleArgument<int>("rank", 0)),
sync_(OperatorBase::template GetSingleArgument<bool>("sync", false)),
transport_(OperatorBase::template GetSingleArgument<std::string>(
"transport", "tcp")),
interface_(OperatorBase::template GetSingleArgument<std::string>(
"interface", "")),
mpi_rendezvous_(OperatorBase::template GetSingleArgument<bool>(
"mpi_rendezvous", false)),
status_blob_(
OperatorBase::GetSingleArgument<std::string>("status_blob", "")),
timeout_ms_(OperatorBase::GetSingleArgument<int>("timeout_ms", -1)),
ws_(ws) {
CAFFE_ENFORCE(
operator_def.has_name(), "CreateCommonWorld operator requires name");
CAFFE_ENFORCE(rank_ >= 0 && rank_ < size_);
name_ = operator_def.name();
if (status_blob_ != "") {
ws_->CreateBlob(status_blob_);
}
initialize();
}
~CreateCommonWorld() override {
}
CommonWorld rendezvousWithMPI() {
#if defined(GLOO_USE_MPI) && GLOO_USE_MPI
auto context = ::gloo::mpi::Context::createManaged();
if (timeout_ms_ != -1) {
context->setTimeout(std::chrono::milliseconds(timeout_ms_));
}
context->connectFullMesh(device_);
return context;
#else
CAFFE_THROW(
"Gloo was not compiled with MPI support. ",
"Please recompile with -DUSE_MPI=1.");
#endif
}
CommonWorld rendezvousWithStore(
const std::unique_ptr<StoreHandler>& handler) {
// Use PrefixStore to isolate different CreateCommonWorld instances
StoreHandlerWrapper wrapper(*handler);
::gloo::rendezvous::PrefixStore store(name_, wrapper);
auto context = std::make_shared<::gloo::rendezvous::Context>(rank_, size_);
if (timeout_ms_ != -1) {
context->setTimeout(std::chrono::milliseconds(timeout_ms_));
}
context->connectFullMesh(store, device_);
return context;
}
bool RunOnDevice() override {
try {
CommonWorld context;
if (mpi_rendezvous_) {
context = rendezvousWithMPI();
} else {
CAFFE_ENFORCE_EQ(InputSize(), 1, "Expected store handler input");
const auto& handler =
OperatorBase::Input<std::unique_ptr<StoreHandler>>(STORE_HANDLER);
context = rendezvousWithStore(handler);
}
// Switch pairs to synchronous mode if configured to do so
if (sync_) {
for (int i = 0; i < context->size; i++) {
auto& pair = context->getPair(i);
if (pair) {
pair->setSync(true, false);
}
}
}
*OperatorBase::Output<CommonWorld>(COMM) = std::move(context);
} catch (::gloo::IoException& ioe) {
LOG(ERROR) << "Caught gloo IO exception: " << ioe.what();
return handleException(ioe);
} catch (::caffe2::StoreHandlerTimeoutException& te) {
LOG(ERROR) << "Caught store handler timeout exception: " << te.what();
return handleException(te);
}
return true;
}
private:
bool handleException(std::exception& ex) {
if (status_blob_ != "") {
signalFailure(ws_->GetBlob(status_blob_), ex);
return false;
} else {
throw;
}
}
void initialize() {
// Share single device between all common worlds.
static std::once_flag once;
static std::shared_ptr<::gloo::transport::Device> device;
std::call_once(once, [&]() {
createDeviceAttr attr;
attr.transport = transport_;
attr.interface = interface_;
device = createDevice(attr);
});
device_ = device;
// Context specific initialization.
initializeForContext();
}
void initializeForContext();
const int size_;
const int rank_;
const bool sync_;
const std::string transport_;
const std::string interface_;
const bool mpi_rendezvous_;
const std::string status_blob_;
const int timeout_ms_;
Workspace* ws_;
std::string name_;
std::shared_ptr<::gloo::transport::Device> device_;
INPUT_TAGS(STORE_HANDLER);
OUTPUT_TAGS(COMM);
};
template <class Context>
class CloneCommonWorld final : public Operator<Context> {
public:
using CommonWorld = std::shared_ptr<::gloo::Context>;
USE_OPERATOR_CONTEXT_FUNCTIONS;
CloneCommonWorld(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
sync_(OperatorBase::template GetSingleArgument<bool>("sync", false)),
ws_(ws),
status_blob_(
OperatorBase::GetSingleArgument<std::string>("status_blob", "")) {
if (status_blob_ != "") {
ws_->CreateBlob(status_blob_);
}
}
~CloneCommonWorld() override {}
bool RunOnDevice() override {
try {
auto existing = OperatorBase::Input<CommonWorld>(EXISTING_COMM);
::gloo::rendezvous::ContextFactory factory(existing);
auto clone = factory.makeContext(existing->getDevice());
// Switch pairs to synchronous mode if configured to do so
if (sync_) {
for (int i = 0; i < clone->size; i++) {
auto& pair = clone->getPair(i);
if (pair) {
pair->setSync(true, false);
}
}
}
*OperatorBase::Output<CommonWorld>(CLONED_COMM) = std::move(clone);
} catch (::gloo::IoException& ioe) {
LOG(ERROR) << "Caught gloo IO exception: " << ioe.what();
return handleException(ioe);
}
return true;
}
private:
bool handleException(std::exception& ex) {
if (status_blob_ != "") {
signalFailure(ws_->GetBlob(status_blob_), ex);
return false;
} else {
throw;
}
}
const bool sync_;
Workspace* ws_;
std::string status_blob_;
INPUT_TAGS(EXISTING_COMM);
OUTPUT_TAGS(CLONED_COMM);
};
class DestroyCommonWorld final : public Operator<CPUContext> {
public:
DestroyCommonWorld(const OperatorDef& operator_def, Workspace* ws)
: Operator<CPUContext>(operator_def, ws) {
cw_name_ = operator_def.input(0);
}
bool RunOnDevice() override {
if (OperatorBase::InputBlob(0).GetRaw() == nullptr) {
return true;
}
const auto& context =
OperatorBase::Input<std::shared_ptr<::gloo::Context>>(0);
if (context) {
LOG(INFO) << "Closing connections: " << cw_name_;
context->closeConnections();
}
return true;
}
private:
std::string cw_name_;
};
} // namespace gloo
} // namespace caffe2
| 7,206
| 27.828
| 79
|
h
|
null |
pytorch-main/caffe2/contrib/gloo/reduce_scatter_ops.h
|
/**
* Copyright (c) 2018-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm>
#include "caffe2/contrib/gloo/common.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include <gloo/algorithm.h>
#include <gloo/common/error.h>
#include <gloo/context.h>
namespace caffe2 {
namespace gloo {
template <class Context>
class ReduceScatterOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
ReduceScatterOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
ws_(ws),
status_blob_(
OperatorBase::GetSingleArgument<std::string>("status_blob", "")) {
if (status_blob_ != "") {
ws_->CreateBlob(status_blob_);
}
}
~ReduceScatterOp() override {}
bool RunOnDevice() override {
std::call_once(once_, [&] { initialize(); });
// If any parameter has changed in between runs, the initialized
// algorithm is invalid and cannot be used.
update(current_);
CAFFE_ENFORCE(current_ == init_, "Inputs/outputs have changed");
try {
algorithm_->run();
} catch (::gloo::IoException& ioe) {
LOG(ERROR) << "Caught gloo IO exception: " << ioe.what();
if (status_blob_ != "") {
signalFailure(ws_->GetBlob(status_blob_), ioe);
return false;
} else {
throw;
}
}
return true;
}
protected:
void initialize() {
// Store which inputs/outputs this instance initialized with
update(init_);
// Verify inputs == outputs
CAFFE_ENFORCE_EQ(init_.inputs.size(), init_.outputs.size());
for (const auto i : c10::irange(init_.inputs.size())) {
CAFFE_ENFORCE_EQ(init_.inputs[i], init_.outputs[i]);
}
// Verify tensors all have same size
size_t size = Input(1).numel();
for (auto i = 2; i < InputSize() - 1; i++) {
CAFFE_ENFORCE_EQ(Input(i).numel(), size);
}
// Verify tensors all have same type
TypeMeta meta = Input(1).dtype();
for (auto i = 2; i < InputSize() - 1; i++) {
CAFFE_ENFORCE(Input(i).dtype() == meta);
}
initializeHalvingDoubling();
}
void initializeHalvingDoubling();
std::once_flag once_;
std::unique_ptr<::gloo::Algorithm> algorithm_;
// Captures the parameters passed to Gloo when first initialized.
// An instance is updated every time this op runs and is compared
// to the reference instance for equality. If any parameter has
// changed from run to run, the initialized algorithm is invalid.
void update(GlooParameters& params) {
params.context = OperatorBase::Input<std::shared_ptr<::gloo::Context>>(0);
params.inputs.resize(InputSize() - 2);
params.outputs.resize(OutputSize() - 1);
for (const auto i : c10::irange(params.inputs.size())) {
params.inputs[i] = Input(i + 1).raw_data();
params.outputs[i] = Output(i)->raw_mutable_data();
}
params.size = Output(0)->numel();
params.meta = Output(0)->dtype();
// Verify recvCountsSize == comm_size
CAFFE_ENFORCE_EQ(Input(InputSize() - 1).numel(), params.context->size);
int* recvCounts = (int*)Input(InputSize() - 1).raw_data();
recvCounts_.assign(recvCounts, recvCounts + Input(InputSize() - 1).numel());
}
GlooParameters init_;
GlooParameters current_;
Workspace* ws_;
std::string status_blob_;
std::vector<int> recvCounts_;
};
} // namespace gloo
} // namespace caffe2
| 3,977
| 29.136364
| 80
|
h
|
null |
pytorch-main/caffe2/contrib/gloo/store_handler.h
|
#pragma once
#include "caffe2/core/common.h"
#include "caffe2/distributed/store_handler.h"
#include <gloo/rendezvous/store.h>
namespace caffe2 {
namespace gloo {
class TORCH_API StoreHandlerWrapper : public ::gloo::rendezvous::Store {
public:
explicit StoreHandlerWrapper(StoreHandler& handler) : handler_(handler) {}
virtual ~StoreHandlerWrapper() override {}
virtual void set(const std::string& key, const std::vector<char>& data)
override;
std::vector<char> get(const std::string& key) override;
void wait(const std::vector<std::string>& keys) override {
wait(keys, ::gloo::rendezvous::Store::kDefaultTimeout);
}
virtual void wait(
const std::vector<std::string>& keys,
const std::chrono::milliseconds& timeout) override;
protected:
StoreHandler& handler_;
};
} // namespace gloo
} // namespace caffe2
| 858
| 22.861111
| 76
|
h
|
null |
pytorch-main/caffe2/contrib/nccl/cuda_nccl_gpu.h
|
#pragma once
#include <cstddef>
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/logging.h"
#include <nccl.h>
#include <unordered_map>
#define NCCL_VERSION_MIN(major, minor, patch) \
((NCCL_MAJOR > major) || \
((NCCL_MAJOR == major) && \
((NCCL_MINOR > minor) || \
((NCCL_MINOR == minor) && (NCCL_PATCH >= patch)))))
namespace caffe2 {
namespace nccl {
#define CAFFE_NCCL_CHECK(condition) \
do { \
ncclResult_t status = (condition); \
CAFFE_ENFORCE_EQ( \
status, \
ncclSuccess, \
" ", \
"Error at: ", \
__FILE__, \
__LINE__, \
": ", \
ncclGetErrorString(status)); \
} while (0)
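// ---------------------------------------------------------------------------
// Illustrative sketch (added for this document, not upstream code):
// CAFFE_NCCL_CHECK turns any non-success ncclResult_t into a CAFFE_ENFORCE
// failure that reports file, line and ncclGetErrorString(). ncclGetVersion is
// used here only because it is a simple call returning ncclResult_t; it is
// assumed to be available (NCCL 2.3 and newer).
inline int ExampleNcclRuntimeVersion() {
  int version = 0;
  CAFFE_NCCL_CHECK(ncclGetVersion(&version));
  return version;
}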
struct NCCLElement {
const TensorCUDA* src{nullptr};
TensorCUDA* dst{nullptr};
int device{0};
};
struct NCCLExecution {
int stream_gpu_id{0};
cudaStream_t stream{nullptr};
std::vector<NCCLElement> elements;
size_t root{0};
};
// Called when the last NCCL op is destructed and all lazily created
// NCCLContext instances can safely be destroyed.
void destroyContexts();
template <typename T>
class NCCL {
public:
static void AllReduce(const NCCLExecution& ex);
static void Broadcast(const NCCLExecution& ex);
static void Reduce(const NCCLExecution& ex);
static void AllGather(const NCCLExecution& ex);
static void ReduceScatter(const NCCLExecution& ex);
};
} // namespace nccl
} // namespace caffe2
| 1,706
| 25.671875
| 68
|
h
|
null |
pytorch-main/caffe2/contrib/opencl/context.h
|
#ifndef CAFFE2_OPENCL_CONTEXT_H_
#define CAFFE2_OPENCL_CONTEXT_H_
#include "caffe2/core/context.h"
#define CL_HPP_ENABLE_EXCEPTIONS 1
#define CL_HPP_CL_1_2_DEFAULT_BUILD 1
#define CL_HPP_TARGET_OPENCL_VERSION 120
#define CL_HPP_MINIMUM_OPENCL_VERSION 120
//#include "libopencl.h"
#if defined(__APPLE__) || defined(__MACOSX)
#include <OpenCL/cl.hpp>
#else
#include <CL/cl.hpp>
#endif
#define OPENCL_CHECK(expr) (void)expr
namespace caffe2 {
struct OpenCLContextSingleton {
private:
OpenCLContextSingleton();
OpenCLContextSingleton(const OpenCLContextSingleton &) = delete;
OpenCLContextSingleton(OpenCLContextSingleton&&) = delete;
public:
static OpenCLContextSingleton& getInstance();
cl::Platform platform;
cl::Device device;
std::vector<cl::Device> devices;
cl::Context context;
cl::CommandQueue queue;
};
class OpenCLContext final {
public:
explicit OpenCLContext();
explicit OpenCLContext(const DeviceOption& option) {
TORCH_DCHECK_EQ(option.device_type(), PROTO_OPENCL);
OpenCLContext();
}
~OpenCLContext() {}
/*
* Everything below is basically boiler plate for Context classes
*/
static std::pair<void*, MemoryDeleter> New(size_t nbytes);
static void Delete(void* data);
template <class SrcContext, class DstContext>
inline void CopyBytes(size_t nbytes, const void* src, void* dst) {}
template <typename T, class SrcContext, class DstContext>
inline void Copy(int n, const T* src, T* dst) {
CopyBytes<SrcContext, DstContext>(
n * sizeof(T), static_cast<const void*>(src), static_cast<void*>(dst));
}
template <class SrcContext, class DstContext>
inline void
CopyItems(const TypeMeta meta, size_t n, const void* src, void* dst) {
CAFFE_ENFORCE(!meta.copy(), "OpenCLContext requires fundamental types.");
CopyBytes<SrcContext, DstContext>(n * meta.itemsize(), src, dst);
}
void SwitchToDevice(int64_t a, ...) {
auto& ctx = GetSingleton();
CAFFE_ENFORCE(a < ctx.devices.size());
ctx.device = ctx.devices[a];
}
void SwitchToDevice() {
SwitchToDevice(0);
}
inline void WaitEvent(const Event& ev) { /* TODO */
}
void FinishDeviceComputation() {
auto& ctx = GetSingleton();
ctx.queue.finish();
}
inline void Record(Event* ev, const char*&) const { /* TODO */
}
static bool IsStreamFree(const DeviceOption& /* unused */, int /* unused */) {
return true;
}
bool HasAsyncPartDefault() const {
return false;
}
bool SupportsAsyncScheduling() const {
return false;
}
// OpenCL specific helper functions
cl::Kernel BuildKernel(const char* src, std::string additional_options = "", const char* fn_name = "K");
static struct OpenCLContextSingleton& GetSingleton();
static std::string BuildArgumentList(std::vector<std::pair<std::string, std::string>> args);
};
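// ---------------------------------------------------------------------------
// Illustrative sketch (added for this document, not upstream code): compiling
// a trivial kernel through BuildKernel() and launching it on the shared
// command queue. The kernel source, the in-place scaling and the element
// count are assumptions made for the example; BuildKernel() defaults the
// kernel entry point name to "K".
inline void ExampleScaleBufferInPlace(cl::Buffer& data, size_t n) {
  OpenCLContext ctx;
  auto& singleton = OpenCLContext::GetSingleton();
  cl::Kernel kernel = ctx.BuildKernel(
      "__kernel void K(__global float* x) { x[get_global_id(0)] *= 2.0f; }");
  OPENCL_CHECK(kernel.setArg(0, data));
  OPENCL_CHECK(singleton.queue.enqueueNDRangeKernel(
      kernel, cl::NullRange, cl::NDRange(n), cl::NullRange));
  singleton.queue.finish();
}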
} // namespace caffe2
#endif /* CAFFE2_OPENCL_CONTEXT_H_ */
| 2,892
| 26.552381
| 106
|
h
|
null |
pytorch-main/caffe2/contrib/prof/prof_dag_stats_op.h
|
#ifndef CAFFE2_OPERATORS_FULLY_CONNECTED_OP_H_
#define CAFFE2_OPERATORS_FULLY_CONNECTED_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/net_async_base.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
// This operator outputs the prof_dag stats
template <typename T, class Context, class Engine = DefaultEngine>
class GetProfDagStatsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
GetProfDagStatsOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
net_name_(OperatorBase::GetSingleArgument<std::string>("net_name", "")),
partial_net_name_(OperatorBase::GetSingleArgument<std::string>(
"partial_net_name",
"")),
per_op_(OperatorBase::GetSingleArgument<bool>("per_op", false)) {
ws_ = ws;
CAFFE_ENFORCE(
!(net_name_.empty() && partial_net_name_.empty()),
"You need to provide net_name or partial_net_name");
CAFFE_ENFORCE(
net_name_.empty() || partial_net_name_.empty(),
"You can not provide both net_name and partial_net_name");
}
~GetProfDagStatsOp() {}
bool RunOnDevice() override {
    // Find the net by net_name_ or partial_net_name_
NetBase* net = nullptr;
if (!net_name_.empty()) {
net = ws_->GetNet(net_name_);
} else if (!partial_net_name_.empty()) {
for (auto& current_net : ws_->Nets()) {
if (current_net.find(partial_net_name_) != std::string::npos) {
CAFFE_ENFORCE(
net == nullptr,
"There are multiple nets with ",
partial_net_name_,
" as part of their name");
net = ws_->GetNet(current_net);
}
}
CAFFE_ENFORCE(
net,
"Can not find a net with ",
partial_net_name_,
" as part of its name");
}
auto async_net = dynamic_cast_if_rtti<AsyncNetBase*>(net);
CAFFE_ENFORCE(async_net);
auto stats = getProtos(async_net);
// Write protobuf message to the output blob
std::string serialized_data;
CAFFE_ENFORCE(stats.SerializeToString(&serialized_data));
Output(0)->Resize(1);
Output(0)->template mutable_data<std::string>()[0] = serialized_data;
return true;
}
ProfDAGProtos getProtos(AsyncNetBase* net) {
ProfDAGProtos stats;
if (per_op_) {
stats = net->GetPerOperatorCost();
} else {
stats = net->GetOperatorStats();
}
return stats;
}
protected:
std::string net_name_;
std::string partial_net_name_;
bool per_op_;
Workspace* ws_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_FULLY_CONNECTED_OP_H_
| 2,699
| 29.337079
| 80
|
h
|
null |
pytorch-main/caffe2/contrib/shm_mutex/shm_mutex.h
|
/*
* This implements a machine-wide mutex to be used
* to synchronize CUDA calls (memory allocation and frees) and
* NCCL calls. This prevents a potential deadlock that
* can occur.
*
* The implementation has a few caveats:
 * - it assumes that PIDs are not reused
* - there is a possible race between the creation (shm_open followed
* by ftruncate) and the spin on 'isInitialized' (if the memory region is
* not all zeroes).
*
* There are two implementations of the mutex and they vary mostly by how
* they wait:
* - The ShmTicketMutex_t is a simple ticket based lock and processes will
* queue up and only attempt to grab the lock when it is their turn
* - The ShmTTSetMutex_t is a simple test-test-and-set mutex. It is possibly
* faster for low contention.
*
* Use both as you would use any std::mutex. Both mutexes support try_lock as
* well.
*/
#pragma once
#include <fcntl.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <climits>
#include <atomic>
#include <mutex>
#include <string>
#include <unordered_set>
#include "caffe2/core/logging.h"
const int kTicketDelay = 1000;
const int kTimeout = 1000;
class ShmProcessMutexCheck {
public:
static ShmProcessMutexCheck& getInstance();
ShmProcessMutexCheck(const ShmProcessMutexCheck&) = delete;
ShmProcessMutexCheck& operator=(const ShmProcessMutexCheck&) = delete;
bool addLock(const std::string& name);
bool removeLock(const std::string& name);
protected:
ShmProcessMutexCheck() = default;
std::mutex m_;
std::unordered_set<std::string> shmLocks_;
};
template <class Derived>
struct shm_traits;
struct ShmBaseHeader {
std::atomic<bool> isInitialized;
std::atomic<int> countMapped;
std::atomic<pid_t> owner;
};
template <class Impl>
class ShmProcessMutex {
public:
using header_t = typename shm_traits<Impl>::header_t;
explicit ShmProcessMutex(const char* name)
: name_(name), check_(ShmProcessMutexCheck::getInstance()) {
CAFFE_ENFORCE(check_.addLock(name_), "Creating duplicate lock: ", name_);
myPid_ = getpid();
// Try to open and map the shared memory location
int fd = -1;
while (true) {
fd = shm_open(name, O_RDWR, 0);
if (fd == -1) {
CAFFE_ENFORCE(
errno == ENOENT,
"shm_open failed with not ENOENT: ",
strerror(errno));
// Create new object
fd = shm_open(name, O_RDWR | O_CREAT | O_EXCL, 0700);
if (fd == -1 && errno == EEXIST) {
// Some other process created first; loop around to re-open
continue;
}
CAFFE_ENFORCE(
fd != -1, "shm_open failed with create: ", strerror(errno));
// At this point, we are the creator of the shared object.
// Initialize the header_ (it's all 0 right now)
auto rv = ftruncate(fd, sizeof(header_t));
CAFFE_ENFORCE(rv != -1, "ftruncate: ", strerror(errno));
// Map memory and initialize
header_ = (header_t*)mmap(
nullptr,
sizeof(header_t),
PROT_READ | PROT_WRITE,
MAP_SHARED,
fd,
0);
CAFFE_ENFORCE(header_ != MAP_FAILED, "mmap: ", strerror(errno));
header_->countMapped = 1;
header_->owner = 0;
header_->isInitialized.store(true, std::memory_order_release);
close(fd);
break;
} else {
// Object exists, we just map it
header_ = (header_t*)mmap(
nullptr,
sizeof(header_t),
PROT_READ | PROT_WRITE,
MAP_SHARED,
fd,
0);
CAFFE_ENFORCE(header_ != MAP_FAILED, "mmap: ", strerror(errno));
// Wait for memory to be initialized
while (header_->isInitialized.load(std::memory_order_acquire) == 0) {
// Spin; should be done soon
}
// Now check if we can register ourself by incrementing countMapped.
// If we are "locked-out" (shared object being destroyed), retry
if (header_->countMapped.fetch_add(1, std::memory_order_relaxed) < 0) {
header_->countMapped.fetch_sub(1, std::memory_order_relaxed);
int rv = munmap(header_, sizeof(header_t));
CAFFE_ENFORCE(rv == 0, "munmap (to retry) failed: ", strerror(errno));
close(fd);
continue;
}
close(fd);
break;
}
}
}
~ShmProcessMutex() {
if (header_ != nullptr) {
// We are participating in a lock. Destroy
internalDestroy();
}
}
// Copy and assignment operator are implicitly deleted
ShmProcessMutex(ShmProcessMutex&& toMove) noexcept
: header_(toMove.header_),
myPid_(toMove.myPid_),
name_(toMove.name_),
check_(toMove.check_) {
toMove.header_ = nullptr;
toMove.myPid_ = -1;
}
ShmProcessMutex& operator=(ShmProcessMutex&& toMove) {
CAFFE_ENFORCE(toMove.myPid_ == this->myPid_);
if (&toMove != this) {
internalDestroy();
header_ = toMove.header_;
name_ = toMove.name_;
toMove.header_ = nullptr;
toMove.myPid_ = -1;
}
return *this;
}
void lock() {
pid_t expectedPid = 0;
while (not header_->owner.compare_exchange_weak(
expectedPid,
myPid_,
std::memory_order_relaxed,
std::memory_order_relaxed)) {
if (expectedPid == 0) {
continue;
}
// Someone else has the lock. We check if that process is
// still alive
if (kill(expectedPid, 0) < 0 && errno == ESRCH) {
// The process no longer exists. Try to "steal" the lock
continue;
}
while (true) {
if (static_cast<Impl*>(this)->waitForLock()) {
return;
}
expectedPid = header_->owner.load(std::memory_order_relaxed);
if (expectedPid == 0 || (kill(expectedPid, 0) < 0 && errno == ESRCH)) {
break;
}
}
}
}
bool try_lock() {
pid_t expectedPid = 0;
bool firstTry = true;
while (not header_->owner.compare_exchange_weak(
expectedPid,
myPid_,
std::memory_order_relaxed,
std::memory_order_relaxed)) {
if (expectedPid == 0) {
continue;
}
// Someone else has the lock. We check if that process is
// still alive
if (firstTry && kill(expectedPid, 0) < 0 && errno == ESRCH) {
firstTry = false;
// The process no longer exists. Try to "steal" the lock once
continue;
}
return false;
}
return true;
}
void unlock() noexcept {
header_->owner.store(0, std::memory_order_relaxed);
static_cast<Impl*>(this)->subUnlock();
}
protected:
header_t* header_;
pid_t myPid_;
std::string name_;
ShmProcessMutexCheck& check_;
private:
void internalDestroy() {
CAFFE_ENFORCE(header_ != nullptr, "Internal error");
CAFFE_ENFORCE(check_.removeLock(name_), "Double free of lock: ", name_);
// Unmap the memory. If we are the last one, "lock" the
// shared memory and free it if successful
int oldCount = header_->countMapped.fetch_sub(1, std::memory_order_relaxed);
bool doUnlink = false;
if (oldCount == 1) {
// We were the last one. We attempt to lock out
// future processes by exchanging with something very negative
      // This simplifies the subsequent checks for the locked-out state
oldCount = 0;
if (header_->countMapped.compare_exchange_strong(
oldCount,
INT_MIN,
std::memory_order_relaxed,
std::memory_order_relaxed)) {
doUnlink = true;
}
}
int rv = munmap(header_, sizeof(header_t));
CAFFE_ENFORCE(rv == 0, "munmap failed: ", strerror(errno));
if (doUnlink) {
rv = shm_unlink(name_.c_str());
CAFFE_ENFORCE(rv == 0, "shm_unlink failed: ", strerror(errno));
}
}
};
template <class T>
class ShmTTSetMutex : public ShmProcessMutex<ShmTTSetMutex<T>> {
public:
friend class ShmProcessMutex<ShmTTSetMutex<T>>;
explicit ShmTTSetMutex(const char* name, int timeout = kTimeout)
: ShmProcessMutex<ShmTTSetMutex>(name), timeout_(timeout) {}
protected:
bool waitForLock() {
int delay = timeout_;
pid_t expectedPid = 0;
while (--delay > 0 &&
this->header_->owner.load(std::memory_order_relaxed)) {
// Empty loop
__asm__ __volatile__("");
}
return this->header_->owner.compare_exchange_strong(
expectedPid, this->myPid_, std::memory_order_relaxed);
}
void subUnlock() noexcept {}
int timeout_;
};
template <class T>
class ShmTicketMutex : public ShmProcessMutex<ShmTicketMutex<T>> {
public:
friend class ShmProcessMutex<ShmTicketMutex<T>>;
explicit ShmTicketMutex(const char* name, int delay = kTicketDelay)
: ShmProcessMutex<ShmTicketMutex>(name), delay_(delay) {}
protected:
bool waitForLock() {
pid_t expectedPid = 0;
int slot = this->header_->ticket.fetch_add(1, std::memory_order_relaxed);
for (;;) {
int spintime =
(slot - this->header_->now.load(std::memory_order_relaxed)) * delay_;
for (int i = 0; i < spintime; i++) {
// Empty loop
__asm__ __volatile__("");
}
if (this->header_->now.load(std::memory_order_relaxed) == slot) {
break;
}
}
return this->header_->owner.compare_exchange_strong(
expectedPid, this->myPid_, std::memory_order_relaxed);
}
void subUnlock() noexcept {
this->header_->now.fetch_add(1, std::memory_order_relaxed);
}
int delay_;
};
template <class T>
struct shm_traits<ShmTTSetMutex<T>> {
using header_t = T;
};
template <class T>
struct shm_traits<ShmTicketMutex<T>> {
using header_t = T;
};
struct TicketStruct : ShmBaseHeader {
std::atomic<unsigned> ticket;
std::atomic<unsigned> now;
};
template class ShmTicketMutex<TicketStruct>;
template class ShmTTSetMutex<ShmBaseHeader>;
using ShmTicketMutex_t = ShmTicketMutex<TicketStruct>;
using ShmTTSetMutex_t = ShmTTSetMutex<ShmBaseHeader>;
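// ---------------------------------------------------------------------------
// Usage sketch (added for this document, not upstream code): both mutex types
// model the standard Lockable interface, so they compose with std::lock_guard
// or std::unique_lock. The shared-memory name below is an arbitrary example
// value, not one required by this header.
inline void exampleMachineWideCriticalSection() {
  // One ticket-based, machine-wide lock identified by a shm_open() name.
  static ShmTicketMutex_t processWideLock("/caffe2_example_lock");
  std::lock_guard<ShmTicketMutex_t> guard(processWideLock);
  // ... perform the CUDA allocation / NCCL call that must not interleave ...
}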
| 10,098
| 28.357558
| 80
|
h
|
null |
pytorch-main/caffe2/contrib/tensorrt/tensorrt_op_trt.h
|
#pragma once
#include "caffe2/contrib/tensorrt/trt_utils.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/operator.h"
#include <NvInfer.h>
#include <unordered_map>
namespace caffe2 {
class TensorRTOp final : public Operator<CUDAContext> {
public:
USE_OPERATOR_FUNCTIONS(CUDAContext);
TensorRTOp(const OperatorDef& operator_def, Workspace* ws);
bool RunOnDevice() override;
virtual ~TensorRTOp() noexcept {}
private:
void MaybeAdjustOutputShape(int output_idx, std::vector<int64_t>* dims);
tensorrt::TrtLogger logger_;
int max_batch_size_;
std::vector<nvinfer1::Dims> nv_dims_;
std::vector<bool> is_input_;
std::unordered_map<int, std::vector<int64_t>> output_size_hints_;
std::shared_ptr<nvinfer1::ICudaEngine> trt_engine_{nullptr};
std::shared_ptr<nvinfer1::IExecutionContext> trt_executor_{nullptr};
bool batch_warning_issued_{false};
};
} // namespace caffe2
| 911
| 25.823529
| 74
|
h
|
null |
pytorch-main/caffe2/contrib/tensorrt/tensorrt_tranformer.h
|
#pragma once
#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>
#include "caffe2/core/common.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/workspace.h"
#include "caffe2/onnx/onnx_exporter.h"
#include "caffe2/proto/caffe2_pb.h"
#include "onnx/onnx_pb.h"
namespace caffe2 {
TORCH_API void BuildInitializationList(
Workspace* ws,
::ONNX_NAMESPACE::GraphProto* g,
std::unordered_set<std::string>* initialization_list);
class TORCH_API TensorRTTransformer {
public:
TensorRTTransformer(
size_t max_batch_size,
size_t max_workspace_size,
int verbosity,
bool debug_builder,
bool build_serializable_op = false)
: build_serializable_op_(build_serializable_op),
max_batch_size_(max_batch_size),
max_workspace_size_(max_workspace_size),
verbosity_(verbosity),
debug_builder_(debug_builder) {}
OperatorDef BuildTrtOp(
const std::string& onnx_model_str,
const std::unordered_map<std::string, std::vector<int>>&
output_size_hints);
void Transform(
Workspace* ws,
NetDef* pred_net,
const std::unordered_map<std::string, TensorShape>& shape_hints);
private:
caffe2::NetDef SubnetToTrtOp(
const caffe2::NetDef& net,
Workspace* ws,
onnx::OnnxExporter* exporter,
std::unordered_map<std::string, TensorShape>* shape_hints);
void AddTrtOptions(
caffe2::OperatorDef* op,
const std::unordered_map<std::string, std::vector<int>>&
output_size_hints);
  // A lazy version of the TRT op building function: instead of invoking the
  // TRT engine build and serializing the TRT runtime here, we just attach the
  // serialized model string. The runtime will be built when the TRT op is
// constructed, during which the weights will be pulled from the workspace.
// The benefit of doing so is that we can avoid serialize/deserialize the
// weights across OperatorDef.
OperatorDef BuildTrtOpLazy(
const std::string& onnx_model_str,
const std::unordered_map<std::string, std::vector<int>>&
output_size_hints,
const std::unordered_set<std::string>& initialization_list,
const caffe2::NetDef& net);
CaffeMap<std::string, TensorShape> SsaRewriteAndMapNames(
Workspace* ws,
NetDef* pred_net,
const std::unordered_map<std::string, TensorShape>& input_shape_hints);
// Prune the unreferenced weights in original workspace to save memory
void PruneUnusedWeights(Workspace* ws, const NetDef& pred_net);
// Input mapping
std::unordered_map<std::string, std::string> input_mapping_;
// Generate serializable trt op or defer the onnx->trt process to ctor of the
// Trt op
bool build_serializable_op_{true};
// TensorRT params
size_t max_batch_size_{50};
size_t max_workspace_size_{1024 * 1024 * 2};
int verbosity_{2};
bool debug_builder_{false};
};
} // namespace caffe2
| 2,944
| 30.666667
| 79
|
h
|
null |
pytorch-main/caffe2/contrib/tensorrt/trt_utils.h
|
#pragma once
#include <iostream>
#include <NvInfer.h>
#include "caffe2/core/logging.h"
namespace caffe2 { namespace tensorrt {
// Logger for GIE info/warning/errors
class TrtLogger : public nvinfer1::ILogger {
using nvinfer1::ILogger::Severity;
public:
TrtLogger(Severity verbosity = Severity::kWARNING) : _verbosity(verbosity) {}
void log(Severity severity, const char* msg) override {
if (severity <= _verbosity) {
if (severity == Severity::kINTERNAL_ERROR || severity == Severity::kERROR) {
LOG(ERROR) << msg;
} else if (severity == Severity::kWARNING) {
LOG(WARNING) << msg;
} else if (severity == Severity::kINFO) {
LOG(INFO) << msg;
}
}
}
private:
Severity _verbosity;
};
struct TrtDeleter {
template <typename T>
void operator()(T* obj) const {
if (obj) {
obj->destroy();
}
}
};
template <typename T>
inline std::shared_ptr<T> TrtObject(T* obj) {
CAFFE_ENFORCE(obj, "Failed to create TensorRt object");
return std::shared_ptr<T>(obj, TrtDeleter());
}
std::shared_ptr<nvinfer1::ICudaEngine> BuildTrtEngine(
const std::string& onnx_model_str,
TrtLogger* logger,
size_t max_batch_size,
size_t max_workspace_size,
bool debug_builder);
}
}
| 1,267
| 21.642857
| 82
|
h
|
null |
pytorch-main/caffe2/contrib/warpctc/ctc_op.h
|
#pragma once
#include <ctc.h>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include "caffe2/core/common_cudnn.h"
#define CTC_CHECK(condition) \
do { \
ctcStatus_t status = condition; \
CAFFE_ENFORCE_EQ( \
status, \
CTC_STATUS_SUCCESS, \
" Error at: ", \
__FILE__, \
":", \
__LINE__, \
": ", \
::ctcGetStatusString(status)); \
} while (0)
namespace caffe2 {
namespace detail {
template <typename Context>
ctcComputeInfo workspaceInfo(const Context& context);
}
template <typename T, typename Context>
class CTCOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
CTCOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
is_test_(
OperatorBase::GetSingleArgument<int>(OpSchema::Arg_IsTest, 0)) {
CAFFE_ENFORCE(
(is_test_ && OutputSize() == 2) || (!is_test_ && OutputSize() == 3));
}
bool RunOnDevice() override {
// inputs
const auto& inputs = Input(INPUTS);
const auto maxTimeSteps = inputs.size(0);
const auto minibatchSize = inputs.size(1);
const auto alphabetSize = inputs.size(2);
const auto& labels = OperatorBase::template Input<Tensor>(LABELS, CPU);
const auto& labelLengths =
OperatorBase::template Input<Tensor>(LABEL_LENGTHS, CPU);
const int* inputLengthsData = nullptr;
if (InputSize() == 4) {
const auto& inputLengths =
OperatorBase::template Input<Tensor>(INPUT_LENGTHS, CPU);
inputLengthsData = inputLengths.template data<int>();
} else {
// Input lengths not passed in. Default to max timesteps for
// each item in minibatch.
default_input_lengths_.resize(minibatchSize, maxTimeSteps);
inputLengthsData = default_input_lengths_.data();
}
// outputs
Tensor* gradients = nullptr;
TensorCPU* costs;
Tensor* workspace;
if (!is_test_) {
// [grads, costs, workspace] to maintain backward compatibility
gradients = Output(0);
gradients->ResizeLike(inputs);
costs = OperatorBase::template Output<Tensor>(1, CPU);
costs->ResizeLike(labelLengths);
workspace = Output(2);
} else {
// [costs, workspace]
costs = OperatorBase::template Output<Tensor>(0, CPU);
costs->ResizeLike(labelLengths);
workspace = Output(1);
}
size_t workspaceSizeBytes;
CTC_CHECK(get_workspace_size(
labelLengths.template data<int>(),
inputLengthsData,
alphabetSize,
minibatchSize,
detail::workspaceInfo(context_),
&workspaceSizeBytes));
workspace->Resize(workspaceSizeBytes);
auto* workspaceData = workspace->template mutable_data<uint8_t>();
if (is_test_ && labels.size(0) == 0) {
// compute_ctc_loss doesn't handle empty labels well
T* costsData = costs->template mutable_data<T>();
for (int i = 0; i < costs->numel(); ++i) {
costsData[i] = 0;
}
return true;
}
CTC_CHECK(compute_ctc_loss(
inputs.template data<T>(),
gradients ? gradients->template mutable_data<T>() : nullptr,
labels.template data<int>(),
labelLengths.template data<int>(),
inputLengthsData,
alphabetSize,
minibatchSize,
costs->template mutable_data<T>(),
workspaceData,
detail::workspaceInfo(context_)));
return true;
}
private:
bool is_test_;
std::vector<int> default_input_lengths_;
INPUT_TAGS(INPUTS, LABELS, LABEL_LENGTHS, INPUT_LENGTHS);
};
}
#undef CTC_CHECK
| 3,860
| 29.164063
| 77
|
h
|
null |
pytorch-main/caffe2/core/blob.h
|
#ifndef CAFFE2_CORE_BLOB_H_
#define CAFFE2_CORE_BLOB_H_
#include <cstddef>
#include <sstream>
#include <typeinfo>
#include <type_traits>
#include <vector>
#include "caffe2/core/common.h"
#include <ATen/core/blob.h>
#include <c10/util/typeid.h>
#include "caffe2/core/logging.h"
#include "caffe2/core/tensor.h"
#include "caffe2/core/tensor_int8.h"
namespace caffe2 {
inline bool BlobIsInt8TensorCPUType(const Blob& blob) {
return blob.meta().Match<int8::Int8TensorCPU>();
}
inline bool BlobIsTensorType(const Blob& blob, DeviceType device_type) {
bool is_match = blob.meta().Match<Tensor>();
if (!is_match) {
return false;
}
const Tensor* tensor = &blob.Get<Tensor>();
return tensor && *tensor && tensor->GetDeviceType() == device_type;
}
inline Tensor* BlobSetTensor(Blob* blob, Tensor&& tensor) {
return blob->Reset<Tensor>(new Tensor(std::move(tensor)));
}
inline Tensor GetSizedTensorWithOptions(
Tensor&& previous_tensor,
at::IntArrayRef dims,
at::TensorOptions options) {
Tensor tensor = std::move(previous_tensor);
if (!tensor.defined()) {
return caffe2::empty(dims, options);
}
if (tensor.GetDevice() == options.device() ||
(!tensor.GetDevice().has_index() &&
tensor.GetDeviceType() == options.device().type())) {
if (tensor.sizes() != dims) {
// Resize when the dims doesn't match
tensor.Resize(dims);
}
if (tensor.dtype() == options.dtype()) {
tensor.raw_mutable_data();
} else {
// create a new Tensor when the data_type doesn't match
return caffe2::empty(dims, options);
}
return tensor;
}
return caffe2::empty(dims, options);
}
// need to keep both functions that returns Tensor* and the one
// returns Tensor for clangr codemod
inline Tensor*
BlobGetMutableTensor(Blob* blob, at::IntArrayRef dims, at::TensorOptions options) {
if (blob->IsType<Tensor>()) {
Tensor* tensor = blob->GetMutable<Tensor>();
if (*tensor) {
// We only compare device_type if the index is not set since there are Tensors
// TODO: remove the extra check when all the Tensors are properly initialized
const auto tensorDevice = tensor->GetDevice();
if (tensorDevice == options.device() || (!tensorDevice.has_index() && tensor->GetDeviceType() == options.device().type())) {
if (tensor->sizes() != dims) {
// Resize when the dims doesn't match
tensor->Resize(dims);
}
tensor->raw_mutable_data(options.dtype());
return tensor;
}
// create a new Tensor when device doesn't match
}
}
VLOG(1) << "Create new mutable object " << TypeMeta::TypeName<Tensor>()
<< " dims: " << dims;
// << " options: " << options; (operator<< for Options is in at:: now)
return BlobSetTensor(blob, caffe2::empty(dims, options));
}
inline Tensor
XBlobGetMutableTensor(Blob* blob, at::IntArrayRef dims, at::TensorOptions options) {
return BlobGetMutableTensor(blob, dims, options)->UnsafeSharedInstance();
}
inline Tensor* BlobGetMutableTensor(Blob* blob, DeviceType device_type) {
if (blob->IsType<Tensor>()) {
Tensor* tensor = blob->GetMutable<Tensor>();
if (*tensor && tensor->GetDeviceType() == device_type) {
return tensor;
}
}
// if we're here, then either Blob didn't hold a Tensor
// or that Tensor had the wrong DeviceType.
VLOG(1) << "Create new mutable object " << TypeMeta::TypeName<Tensor>()
<< " DeviceType:" << device_type;
return BlobSetTensor(blob, Tensor(device_type));
}
inline const Tensor& BlobGetTensor(const Blob& blob, DeviceType device_type) {
if (blob.IsType<Tensor>()) {
const auto& tensor = blob.Get<Tensor>();
if (tensor.GetDeviceType() == device_type) {
return tensor;
}
}
CAFFE_THROW("Blob didn't contain a Tensor or the device_type doesn't match");
}
inline Tensor BlobGetTensorOrUndefined(const Blob& blob) {
if (blob.IsType<Tensor>()) {
return blob.Get<Tensor>().UnsafeSharedInstance();
} else {
return Tensor();
}
}
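// ---------------------------------------------------------------------------
// Illustrative sketch (added for this document, not upstream code): typical
// use of the helpers above to materialize a float CPU tensor inside a Blob.
// The shape, dtype and fill value are arbitrary example choices.
inline void ExampleFillBlobTensor(Blob* blob) {
  // Allocates a new 2x3 float CPU tensor in the blob, or reuses/resizes an
  // existing one when the stored tensor's device and dtype already match.
  Tensor* tensor =
      BlobGetMutableTensor(blob, {2, 3}, at::dtype<float>().device(CPU));
  float* data = tensor->mutable_data<float>();
  for (int i = 0; i < tensor->numel(); ++i) {
    data[i] = 0.0f;
  }
}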
} // namespace caffe2
#endif // CAFFE2_CORE_BLOB_H_
| 4,090
| 30.229008
| 130
|
h
|
null |
pytorch-main/caffe2/core/blob_serialization.h
|
#ifndef CAFFE2_CORE_BLOB_SERIALIZATION_H_
#define CAFFE2_CORE_BLOB_SERIALIZATION_H_
#include <limits>
#include <future>
#include <google/protobuf/repeated_field.h>
#include "caffe2/core/blob.h"
#include "caffe2/core/blob_serializer_base.h"
#include "caffe2/core/tensor.h"
#include <c10/util/irange.h>
#include <c10/util/typeid.h>
#include "caffe2/core/types.h"
#include "caffe2/utils/simple_queue.h"
C10_DECLARE_int(caffe2_tensor_chunk_size);
C10_DECLARE_int(caffe2_max_tensor_serializer_threads);
C10_DECLARE_bool(caffe2_serialize_fp16_as_bytes);
#ifdef _MSC_VER
// It's MSVC, so we just have to guess ... and allow an override
#ifdef FOLLY_ENDIAN_BE
constexpr auto kIsLittleEndian = false;
#else
constexpr auto kIsLittleEndian = true;
#endif
#else
constexpr auto kIsLittleEndian = __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__;
#endif
namespace caffe2 {
constexpr auto kTensorBlobType = "Tensor";
// String used to separate chunk id from the blob name when storing in DB
constexpr auto kChunkIdSeparator = "#%";
/**
* Serializes the given blob, if possible. Note that this serialization uses
* the registration mechanism and one has to implement specific serialization
* approaches for specific classes. Acceptor should take care of writing data
* to the actual storage.
*/
TORCH_API void SerializeBlob(
const Blob& blob,
const string& name,
BlobSerializerBase::SerializationAcceptor acceptor);
TORCH_API void SerializeBlob(
const Blob& blob,
const string& name,
BlobSerializerBase::SerializationAcceptor acceptor,
const BlobSerializationOptions& options);
TORCH_API size_t EstimateSerializedBlobSize(
const Blob& blob,
c10::string_view name,
const BlobSerializationOptions& options);
/**
* @brief Convenience function to serialize a blob to a string.
*
* This is a convenience function to serialize small Blobs that produce
* manageable serialized strings. To serialize big blobs such as
* large sparse tensors, use the fully-functional interface in
* blob_serializer_base.h.
*
* NOTE: this function doesn't do chunking and might break with big tensors.
*/
TORCH_API string SerializeBlob(const Blob& blob, const string& name);
/**
* Deserializes from a string containing either BlobProto or TensorProto. If
* the deserialization fails, the content in the blob should no longer be
* trusted.
*/
TORCH_API void DeserializeBlob(const string& content, Blob* result);
TORCH_API void DeserializeBlob(const BlobProto& proto, Blob* result);
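// ---------------------------------------------------------------------------
// Illustrative round-trip sketch (added for this document, not upstream code):
// serialize a small CPU tensor blob to a string with the convenience overload
// above and read it back with DeserializeBlob. The blob name and tensor
// contents are arbitrary example values; real callers should prefer the
// chunked acceptor-based API for large tensors.
inline void ExampleBlobRoundTrip() {
  Blob original;
  Tensor* tensor = BlobGetMutableTensor(&original, CPU);
  tensor->Resize(4);
  float* data = tensor->mutable_data<float>();
  for (int i = 0; i < 4; ++i) {
    data[i] = static_cast<float>(i);
  }
  const std::string serialized = SerializeBlob(original, "example_blob");
  Blob restored;
  DeserializeBlob(serialized, &restored);
  CAFFE_ENFORCE(BlobIsTensorType(restored, CPU));
}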
/*
* Get an empty Tensor from the TensorProto given the meta data in proto (data
* type and size of the Tensor) without actually filling in the data.
*
* We need this function because we want to construct a fully initialized Tensor
 * in the beginning instead of keeping a partially initialized Tensor around in
 * the process. Consider the case when we have a Tensor that is split into multiple
* protos during serialization, in deserialization, we have to fill the Tensor
* in multiple calls to Deserialize, therefore we need to create a new Tensor
* with the correct size and data type before the call to Deserialize, because
* otherwise we will have to check whether the function call is the first call
* to initialize the underlying Tensor, which makes the function stateful and
* complicated.
*
 * The legacy code gets away with this problem by passing in a partially
 * initialized Tensor and using Resize and mutable_data to set the correct size,
* data type and allocate memory for the Tensor, so the state is encoded in
* these function calls. e.g. mutable_data will allocate memory on the first
* call and it will return a pointer to the allocated memory on later calls.
*/
TORCH_API Tensor EmptyTensorFromProto(const TensorProto& proto);
/**
* @brief TensorSerializer is the serializer for Tensors.
*
* TensorSerializer takes in a blob that contains a Tensor, and serializes it
* into a TensorProto protocol buffer.
*/
class TORCH_API TensorSerializer : public BlobSerializerBase {
public:
TensorSerializer() {}
~TensorSerializer() override {}
/**
* Serializes a Blob. Note that this blob has to contain Tensor,
* otherwise this function produces a fatal error.
*/
void Serialize(
const void* pointer,
TypeMeta typeMeta,
const string& name,
SerializationAcceptor acceptor) override;
void SerializeWithOptions(
const void* pointer,
TypeMeta typeMeta,
const string& name,
SerializationAcceptor acceptor,
const BlobSerializationOptions& options) override;
void Serialize(
const Tensor& tensor,
const string& name,
TensorProto* proto,
const BlobSerializationOptions& options,
size_t chunkBegin,
int32_t chunkSize);
void Serialize(
const Tensor& tensor,
const string& name,
TensorProto* proto,
size_t chunkBegin,
int32_t chunkSize) {
BlobSerializationOptions options;
Serialize(tensor, name, proto, options, chunkBegin, chunkSize);
}
size_t EstimateSerializedBlobSize(
const void* pointer,
TypeMeta typeMeta,
c10::string_view name,
const BlobSerializationOptions& options) override;
private:
  // A utility function to store the device context details.
void StoreDeviceDetail(const Tensor& input, TensorProto* proto);
unique_ptr<BaseContext> context_;
};
/**
* @brief TensorDeserializer is the deserializer for Tensors.
*
* The device that the deserialized Tensor will live under is determined by the
* device_detail field. If you want to specify the device of the deserialized
* tensor, change the TensorProto's corresponding fields before calling
* Deserialize.
*/
class TORCH_API TensorDeserializer : public BlobDeserializerBase {
public:
void Deserialize(const BlobProto& proto, Blob* blob) override;
/* There are cases when a Tensor is split into multiple protos and
* we have to call Deserialize multiple times to get the complete deserialized
* Tensor, each call will fill part of the Tensor given the segment begin and
* end information in proto, therefore we have to pass in the Tensor pointer
* rather than create a new Tensor every time.
*
* Precondition: Tensor must be initialized
*/
void DeserializeToTensor(const TensorProto& proto, Tensor* tensor);
/* Deserialize the proto and return a new Tensor
* This is a utility function that combines EmptyTensorFromProto and
* Deserialize(const TensorProto&, Tensor*);
*/
Tensor Deserialize(const TensorProto& proto);
};
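// ---------------------------------------------------------------------------
// Illustrative sketch (added for this document, not upstream code) of the
// two-step flow described above for tensors that were serialized in chunks:
// first materialize an empty tensor from one chunk's metadata, then fill it
// chunk by chunk. Collecting the chunk protos into a vector is an assumption
// made for the example.
inline Tensor ExampleDeserializeChunkedTensor(
    const std::vector<TensorProto>& chunks) {
  CAFFE_ENFORCE(!chunks.empty(), "need at least one chunk");
  // Every chunk carries the full dims and data type of the tensor.
  Tensor tensor = EmptyTensorFromProto(chunks[0]);
  TensorDeserializer deserializer;
  for (const auto& chunk : chunks) {
    // Each call fills only the segment described by the chunk proto.
    deserializer.DeserializeToTensor(chunk, &tensor);
  }
  return tensor;
}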
////////////////////////////////////////////////////////////////////////////////
// Implementations
////////////////////////////////////////////////////////////////////////////////
namespace detail {
// Make space for new elements to be copied to the end of the repeated field.
// The new space is not guaranteed to be initialized.
template <typename T>
void ExtendRepeatedField(
google::protobuf::RepeatedField<T>* field,
size_t size) {
field->Reserve(field->size() + size);
#if GOOGLE_PROTOBUF_VERSION >= 3000000
field->AddNAlreadyReserved(size);
#else
// We unfortunately do still need to support old protobuf versions in some
// build configurations.
for (const auto i : c10::irange(size)) {
field->Add(0);
}
#endif
}
template <typename SrcType, typename DstType>
inline void CopyToProtoAsIs(
const size_t size,
const SrcType* src,
google::protobuf::RepeatedField<DstType>* field,
BaseContext* context) {
static_assert(
sizeof(SrcType) == sizeof(DstType),
"The source type and dest type cannot be copied as-is. Did "
"you mean CopyToProtoWithCast?");
ExtendRepeatedField(field, size);
context->template CopyToCPU<SrcType>(
size, src, reinterpret_cast<SrcType*>(field->mutable_data()));
// Make sure that we finish the copy into the protobuf.
context->FinishDeviceComputation();
}
template <typename SrcType, typename DstType>
inline void CopyToProtoWithCast(
const size_t size,
const SrcType* src,
google::protobuf::RepeatedField<DstType>* field,
BaseContext* context) {
// TODO: we are having one unnecessary copy here if the context is already
// CPUContext. Remove it if it is performance critical.
unique_ptr<SrcType[]> buffer(new SrcType[size]);
context->template CopyToCPU<SrcType>(size, src, buffer.get());
context->FinishDeviceComputation();
field->Reserve(size);
for (const auto i : c10::irange(size)) {
field->Add(static_cast<DstType>(buffer[i]));
}
}
template <typename SrcType, typename DstType>
inline void CopyFromProtoAsIs(
const size_t size,
const google::protobuf::RepeatedField<SrcType>& field,
DstType* dst,
BaseContext* context) {
static_assert(
sizeof(SrcType) == sizeof(DstType),
"The source type and dest type cannot be copied as-is. Did "
"you mean CopyFromProtoWithCast?");
CAFFE_ENFORCE_EQ(size, field.size(), "Incorrect proto field size.");
context->template CopyFromCPU<DstType>(
size, reinterpret_cast<const DstType*>(field.data()), dst);
}
template <typename SrcType, typename DstType>
inline void CopyFromProtoWithCast(
const size_t size,
const google::protobuf::RepeatedField<SrcType>& field,
DstType* dst,
BaseContext* context) {
CAFFE_ENFORCE_EQ(size, field.size(), "Incorrect proto field size.");
// TODO: we are having one unnecessary copy here if the context is already
// CPUContext. Remove it if it is performance critical.
unique_ptr<DstType[]> buffer(new DstType[size]);
const SrcType* src = field.data();
for (const auto i : c10::irange(size)) {
buffer[i] = static_cast<DstType>(src[i]);
}
context->template CopyFromCPU<DstType>(size, buffer.get(), dst);
}
} // namespace detail
////////////////////////////////////////////////////////////////////////////////
// Serialization Helpers
////////////////////////////////////////////////////////////////////////////////
// Converts MessageLite to string while also checking that SerializeAsString
// succeeds. Pass description of class/function of the call if you'd
// like it appended to the error message.
TORCH_API std::string SerializeAsString_EnforceCheck(
const google::protobuf::MessageLite&,
const char* error_location = nullptr);
// Convert BlobProto to string with success checks.
inline std::string SerializeBlobProtoAsString_EnforceCheck(
const BlobProto& blob) {
return SerializeAsString_EnforceCheck(blob, blob.name().c_str());
}
int64_t NumelFromTensorProto(const TensorProto& tensor_proto);
c10::IntArrayRef DimsFromTensorProto(const TensorProto& proto);
TypeMeta GetDataType(const TensorProto& tensor_proto);
std::unique_ptr<BaseContext> ContextFromProto(const TensorProto& tensor_proto);
} // namespace caffe2
#endif // CAFFE2_CORE_BLOB_SERIALIZATION_H_
| 10,832
| 34.172078
| 80
|
h
|
null |
pytorch-main/caffe2/core/blob_serializer_base.h
|
#pragma once
#include <string>
#include <functional>
#include <c10/util/Registry.h>
#include <c10/util/string_view.h>
#include "caffe2/core/common.h"
#include "caffe2/proto/caffe2_pb.h"
namespace caffe2 {
class Blob;
// Constants for use in the BlobSerializationOptions chunk_size field.
// These should ideally be defined in caffe2.proto so they can be exposed across
// languages, but protobuf does not appear to allow defining constants.
constexpr int kDefaultChunkSize = 0;
constexpr int kNoChunking = -1;
/**
* @brief BlobSerializerBase is an abstract class that serializes a blob to a
* string.
*
* This class exists purely for the purpose of registering type-specific
* serialization code. If you need to serialize a specific type, you should
* write your own Serializer class, and then register it using
* REGISTER_BLOB_SERIALIZER. For a detailed example, see TensorSerializer for
* details.
*/
class BlobSerializerBase {
public:
virtual ~BlobSerializerBase() {}
using SerializationAcceptor =
std::function<void(const std::string& blobName, std::string&& data)>;
/**
* @brief The virtual function that returns a serialized string for the input
* blob.
* @param blob
* the input blob to be serialized.
* @param name
* the blob name to be used in the serialization implementation. It is up
* to the implementation whether this name field is going to be used or
* not.
* @param acceptor
* a lambda which accepts key value pairs to save them to storage.
   *    serializer can use it to save the blob in several chunks
* acceptor should be thread-safe
*/
virtual void Serialize(
const void* pointer,
TypeMeta typeMeta,
const std::string& name,
SerializationAcceptor acceptor) = 0;
virtual void SerializeWithOptions(
const void* pointer,
TypeMeta typeMeta,
const std::string& name,
SerializationAcceptor acceptor,
const BlobSerializationOptions& /*options*/) {
// Base implementation.
Serialize(pointer, typeMeta, name, acceptor);
}
virtual size_t EstimateSerializedBlobSize(
const void* /*pointer*/,
TypeMeta /*typeMeta*/,
c10::string_view /*name*/,
const BlobSerializationOptions& /*options*/) {
// Base implementation.
// This returns 0 just to allow us to roll this out without needing to
// define an implementation for all serializer types. Returning a size of 0
// for less-commonly used blob types is acceptable for now. Eventually it
// would be nice to ensure that this method is implemented for all
// serializers and then make this method virtual.
return 0;
}
};
// The Blob serialization registry and serializer creator functions.
C10_DECLARE_TYPED_REGISTRY(
BlobSerializerRegistry,
TypeIdentifier,
BlobSerializerBase,
std::unique_ptr);
#define REGISTER_BLOB_SERIALIZER(id, ...) \
C10_REGISTER_TYPED_CLASS(BlobSerializerRegistry, id, __VA_ARGS__)
// Creates an operator with the given operator definition.
inline unique_ptr<BlobSerializerBase> CreateSerializer(TypeIdentifier id) {
return BlobSerializerRegistry()->Create(id);
}
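// ---------------------------------------------------------------------------
// Illustrative sketch (added for this document, not upstream code): how a
// caller typically drives a serializer obtained from the registry. Type-
// specific serializers subclass BlobSerializerBase and register themselves
// with REGISTER_BLOB_SERIALIZER; real callers hand every produced chunk to
// storage via the acceptor, whereas concatenating the chunks below is an
// assumption made for the example.
inline std::string ExampleSerializeViaRegistry(
    const void* pointer,
    TypeMeta typeMeta,
    const std::string& name) {
  std::string serialized;
  auto serializer = CreateSerializer(typeMeta.id());
  serializer->Serialize(
      pointer,
      typeMeta,
      name,
      [&serialized](const std::string& /* blobName */, std::string&& data) {
        serialized += data;
      });
  return serialized;
}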
/**
* @brief BlobDeserializerBase is an abstract class that deserializes a blob
* from a BlobProto or a TensorProto.
*/
class TORCH_API BlobDeserializerBase {
public:
virtual ~BlobDeserializerBase() {}
// Deserializes from a BlobProto object.
virtual void Deserialize(const BlobProto& proto, Blob* blob) = 0;
};
C10_DECLARE_REGISTRY(BlobDeserializerRegistry, BlobDeserializerBase);
#define REGISTER_BLOB_DESERIALIZER(name, ...) \
C10_REGISTER_CLASS(BlobDeserializerRegistry, name, __VA_ARGS__)
// Creates an operator with the given operator definition.
inline unique_ptr<BlobDeserializerBase> CreateDeserializer(const string& type) {
return BlobDeserializerRegistry()->Create(type);
}
} // namespace caffe2
pytorch-main/caffe2/core/blob_stats.h
#pragma once
#include "c10/util/Registry.h"
#include "caffe2/core/blob.h"
#include <c10/util/typeid.h>
#include <unordered_map>
namespace caffe2 {
struct BlobStatGetter {
virtual size_t sizeBytes(const Blob& blob) const = 0;
virtual ~BlobStatGetter() {}
};
struct BlobStatRegistry {
private:
std::unordered_map<TypeIdentifier, std::unique_ptr<BlobStatGetter>> map_;
void doRegister(TypeIdentifier id, std::unique_ptr<BlobStatGetter>&& v);
public:
template <typename T, typename Getter>
struct Registrar {
Registrar() {
BlobStatRegistry::instance().doRegister(
TypeMeta::Id<T>(), std::unique_ptr<Getter>(new Getter));
}
};
const BlobStatGetter* get(TypeIdentifier id);
static BlobStatRegistry& instance();
};
#define REGISTER_BLOB_STAT_GETTER(Type, BlobStatGetterClass) \
static BlobStatRegistry::Registrar<Type, BlobStatGetterClass> \
C10_ANONYMOUS_VARIABLE(BlobStatRegistry)
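// Example (sketch): reporting memory usage for a hypothetical type Foo so
// that BlobStat::sizeBytes() can account for it. Foo and FooStatGetter are
// illustrative names; Foo::nbytes() is an assumed accessor.
//
//   struct FooStatGetter : BlobStatGetter {
//     size_t sizeBytes(const Blob& blob) const override {
//       return blob.Get<Foo>().nbytes();
//     }
//   };
//   REGISTER_BLOB_STAT_GETTER(Foo, FooStatGetter);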
namespace BlobStat {
/**
* Return size in bytes of the blob, if available for a blob of given type.
* If not available, return 0.
*/
TORCH_API size_t sizeBytes(const Blob& blob);
}
}
pytorch-main/caffe2/core/common.h
#ifndef CAFFE2_CORE_COMMON_H_
#define CAFFE2_CORE_COMMON_H_
#include <algorithm>
#include <cmath>
#include <map>
#include <memory>
#include <numeric>
#include <set>
#include <sstream>
#include <string>
#include <type_traits>
#include <vector>
#ifdef __APPLE__
#include <TargetConditionals.h>
#endif
#if defined(_MSC_VER)
#include <io.h>
#else
#include <unistd.h>
#endif
// Macros used during the build of this caffe2 instance. This header file
// is automatically generated by the cmake script during build.
#include "caffe2/core/macros.h"
#include <c10/macros/Macros.h>
#include "c10/util/string_utils.h"
namespace caffe2 {
// Note(Yangqing): NVCC does not play well with unordered_map on some platforms,
// forcing us to use std::map instead of unordered_map. This may affect speed
// in some cases, but in most of the computation code we do not access map very
// often, so it should be fine for us. I am putting a CaffeMap alias so we can
// change it more easily if things work out for unordered_map down the road.
template <typename Key, typename Value>
using CaffeMap = std::map<Key, Value>;
// using CaffeMap = std::unordered_map;
// Using statements for common classes that we refer to in caffe2 very often.
// Note that we only place it inside caffe2 so the global namespace is not
// polluted.
/* using override */
using std::set;
using std::string;
using std::unique_ptr;
using std::vector;
// Just in order to mark things as not implemented. Do not use in final code.
#define CAFFE_NOT_IMPLEMENTED CAFFE_THROW("Not Implemented.")
// suppress an unused variable.
#if defined(_MSC_VER) && !defined(__clang__)
#define CAFFE2_UNUSED __pragma(warning(suppress : 4100 4101))
#define CAFFE2_USED
#else
#define CAFFE2_UNUSED __attribute__((__unused__))
#define CAFFE2_USED __attribute__((__used__))
#endif //_MSC_VER
// Define alignment macro that is cross platform
#if defined(_MSC_VER) && !defined(__clang__)
#define CAFFE2_ALIGNED(x) __declspec(align(x))
#else
#define CAFFE2_ALIGNED(x) __attribute__((aligned(x)))
#endif
#if (defined _MSC_VER && !defined NOMINMAX)
#define NOMINMAX
#endif
#if defined(__has_cpp_attribute)
#if __has_cpp_attribute(nodiscard)
#define CAFFE2_NODISCARD [[nodiscard]]
#endif
#endif
#if !defined(CAFFE2_NODISCARD)
#define CAFFE2_NODISCARD
#endif
using std::make_unique;
#if defined(__ANDROID__) && !defined(__NDK_MAJOR__)
using ::round;
#else
using std::round;
#endif // defined(__ANDROID__) && !defined(__NDK_MAJOR__)
// dynamic cast reroute: if RTTI is disabled, go to reinterpret_cast
template <typename Dst, typename Src>
inline Dst dynamic_cast_if_rtti(Src ptr) {
#ifdef __GXX_RTTI
return dynamic_cast<Dst>(ptr);
#else
return static_cast<Dst>(ptr);
#endif
}
// SkipIndices are used in operator_fallback_gpu.h and operator_fallback_mkl.h
// as utility functions that marks input / output indices to skip when we use a
// CPU operator as the fallback of GPU/MKL operator option.
template <int... values>
class SkipIndices {
private:
template <int V>
static inline bool ContainsInternal(const int i) {
return (i == V);
}
template <int First, int Second, int... Rest>
static inline bool ContainsInternal(const int i) {
return (i == First) || ContainsInternal<Second, Rest...>(i);
}
public:
static inline bool Contains(const int i) {
return ContainsInternal<values...>(i);
}
};
template <>
class SkipIndices<> {
public:
static inline bool Contains(const int /*i*/) {
return false;
}
};
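// A minimal usage sketch: SkipIndices<0, 2> marks indices 0 and 2 as "skip".
//
//   using Skip = SkipIndices<0, 2>;
//   bool a = Skip::Contains(0);  // true
//   bool b = Skip::Contains(1);  // false
//   bool c = Skip::Contains(2);  // true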
// HasCudaRuntime() tells the program whether the binary has Cuda runtime
// linked. This function should not be used in static initialization functions
// as the underlying boolean variable is going to be switched on when one
// loads libtorch_gpu.so.
TORCH_API bool HasCudaRuntime();
TORCH_API bool HasHipRuntime();
namespace internal {
// Sets the Cuda Runtime flag that is used by HasCudaRuntime(). You should
// never use this function - it is only used by the Caffe2 gpu code to notify
// Caffe2 core that cuda runtime has been loaded.
TORCH_API void SetCudaRuntimeFlag();
TORCH_API void SetHipRuntimeFlag();
} // namespace internal
// Returns which setting Caffe2 was configured and built with (exported from
// CMake)
TORCH_API const std::map<string, string>& GetBuildOptions();
} // namespace caffe2
#endif // CAFFE2_CORE_COMMON_H_
pytorch-main/caffe2/core/common_cudnn.h
#ifndef CAFFE2_CORE_COMMON_CUDNN_H_
#define CAFFE2_CORE_COMMON_CUDNN_H_
#include <array>
#include <mutex>
#include "caffe2/core/common.h"
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/types.h"
#ifndef CAFFE2_USE_CUDNN
#error("This Caffe2 install is not built with cudnn, so you should not include this file.");
#endif
#include <cudnn.h>
static_assert(
CUDNN_VERSION >= 5000,
"Caffe2 requires cudnn version 5.0 or above.");
#if CUDNN_VERSION < 6000
#pragma message "CUDNN version under 6.0 is supported at best effort."
#pragma message "We strongly encourage you to move to 6.0 and above."
#pragma message "This message is intended to annoy you enough to update."
#endif // CUDNN_VERSION < 6000
#define CUDNN_VERSION_MIN(major, minor, patch) \
(CUDNN_VERSION >= ((major) * 1000 + (minor) * 100 + (patch)))
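// A minimal usage sketch: guarding code that requires cuDNN 7.1.4 or newer.
//
//   #if CUDNN_VERSION_MIN(7, 1, 4)
//   // ... use APIs that only exist in cuDNN >= 7.1.4 ...
//   #endif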
namespace caffe2 {
namespace internal {
/**
* A helper function to obtain cudnn error strings.
*/
inline const char* cudnnGetErrorString(cudnnStatus_t status) {
switch (status) {
case CUDNN_STATUS_SUCCESS:
return "CUDNN_STATUS_SUCCESS";
case CUDNN_STATUS_NOT_INITIALIZED:
return "CUDNN_STATUS_NOT_INITIALIZED";
case CUDNN_STATUS_ALLOC_FAILED:
return "CUDNN_STATUS_ALLOC_FAILED";
case CUDNN_STATUS_BAD_PARAM:
return "CUDNN_STATUS_BAD_PARAM";
case CUDNN_STATUS_INTERNAL_ERROR:
return "CUDNN_STATUS_INTERNAL_ERROR";
case CUDNN_STATUS_INVALID_VALUE:
return "CUDNN_STATUS_INVALID_VALUE";
case CUDNN_STATUS_ARCH_MISMATCH:
return "CUDNN_STATUS_ARCH_MISMATCH";
case CUDNN_STATUS_MAPPING_ERROR:
return "CUDNN_STATUS_MAPPING_ERROR";
case CUDNN_STATUS_EXECUTION_FAILED:
return "CUDNN_STATUS_EXECUTION_FAILED";
case CUDNN_STATUS_NOT_SUPPORTED:
return "CUDNN_STATUS_NOT_SUPPORTED";
case CUDNN_STATUS_LICENSE_ERROR:
return "CUDNN_STATUS_LICENSE_ERROR";
default:
return "Unknown cudnn error number";
}
}
} // namespace internal
// A macro that wraps around a cudnn statement so we can check if the cudnn
// execution finishes or not.
#define CUDNN_ENFORCE(condition) \
do { \
cudnnStatus_t status = condition; \
CAFFE_ENFORCE_EQ( \
status, \
CUDNN_STATUS_SUCCESS, \
", Error at: ", \
__FILE__, \
":", \
__LINE__, \
": ", \
::caffe2::internal::cudnnGetErrorString(status)); \
} while (0)
#define CUDNN_CHECK(condition) \
do { \
cudnnStatus_t status = condition; \
CHECK(status == CUDNN_STATUS_SUCCESS) \
<< ::caffe2::internal::cudnnGetErrorString(status); \
} while (0)
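// A minimal usage sketch of the two macros above: CUDNN_ENFORCE turns a
// failing cudnn call into an exception, CUDNN_CHECK into a fatal log.
//
//   cudnnTensorDescriptor_t desc;
//   CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&desc));
//   // ... use desc ...
//   CUDNN_CHECK(cudnnDestroyTensorDescriptor(desc));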
// report the version of cuDNN Caffe2 was compiled with
inline size_t cudnnCompiledVersion() {
return CUDNN_VERSION;
}
// report the runtime version of cuDNN
inline size_t cudnnRuntimeVersion() {
return cudnnGetVersion();
}
// Check compatibility of compiled and runtime cuDNN versions
inline void CheckCuDNNVersions() {
// Version format is major*1000 + minor*100 + patch
// If compiled with version < 7, major, minor and patch must all match
// If compiled with version >= 7, then either
  //    runtime_version >= compiled_version, or
  //    major and minor versions match
bool version_match = cudnnCompiledVersion() == cudnnRuntimeVersion();
bool compiled_with_7 = cudnnCompiledVersion() >= 7000;
bool backwards_compatible_7 = compiled_with_7 && cudnnRuntimeVersion() >= cudnnCompiledVersion();
bool patch_compatible = compiled_with_7 && (cudnnRuntimeVersion() / 100) == (cudnnCompiledVersion() / 100);
CAFFE_ENFORCE(version_match || backwards_compatible_7 || patch_compatible,
"cuDNN compiled (", cudnnCompiledVersion(), ") and "
"runtime (", cudnnRuntimeVersion(), ") versions mismatch");
}
/**
* cudnnTypeWrapper is a wrapper class that allows us to refer to the cudnn type
* in a template function. The class is specialized explicitly for different
* data types below.
*/
template <typename T>
class cudnnTypeWrapper;
template <>
class cudnnTypeWrapper<float> {
public:
static const cudnnDataType_t type = CUDNN_DATA_FLOAT;
typedef const float ScalingParamType;
typedef float BNParamType;
static ScalingParamType* kOne() {
static ScalingParamType v = 1.0;
return &v;
}
static const ScalingParamType* kZero() {
static ScalingParamType v = 0.0;
return &v;
}
};
#if CUDNN_VERSION_MIN(6, 0, 0)
template <>
class cudnnTypeWrapper<int> {
public:
static const cudnnDataType_t type = CUDNN_DATA_INT32;
typedef const int ScalingParamType;
typedef int BNParamType;
static ScalingParamType* kOne() {
static ScalingParamType v = 1;
return &v;
}
static const ScalingParamType* kZero() {
static ScalingParamType v = 0;
return &v;
}
};
#endif // CUDNN_VERSION_MIN(6, 0, 0)
template <>
class cudnnTypeWrapper<double> {
public:
static const cudnnDataType_t type = CUDNN_DATA_DOUBLE;
typedef const double ScalingParamType;
typedef double BNParamType;
static ScalingParamType* kOne() {
static ScalingParamType v = 1.0;
return &v;
}
static ScalingParamType* kZero() {
static ScalingParamType v = 0.0;
return &v;
}
};
template <>
class cudnnTypeWrapper<at::Half> {
public:
static const cudnnDataType_t type = CUDNN_DATA_HALF;
typedef const float ScalingParamType;
typedef float BNParamType;
static ScalingParamType* kOne() {
static ScalingParamType v = 1.0;
return &v;
}
static ScalingParamType* kZero() {
static ScalingParamType v = 0.0;
return &v;
}
};
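// Example (sketch): in code templated on the data type T, cudnnTypeWrapper<T>
// provides the matching cudnn enum and correctly-typed alpha/beta constants.
// Here handle, bias_desc, bias_data, out_desc, and out_data are assumed to be
// set up by the caller.
//
//   CUDNN_ENFORCE(cudnnAddTensor(
//       handle,
//       cudnnTypeWrapper<T>::kOne(),
//       bias_desc,
//       bias_data,
//       cudnnTypeWrapper<T>::kOne(),
//       out_desc,
//       out_data));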
/**
* A wrapper function to convert the Caffe storage order to cudnn storage order
* enum values.
*/
inline cudnnTensorFormat_t GetCudnnTensorFormat(const StorageOrder& order) {
switch (order) {
case StorageOrder::NHWC:
return CUDNN_TENSOR_NHWC;
case StorageOrder::NCHW:
return CUDNN_TENSOR_NCHW;
default:
LOG(FATAL) << "Unknown cudnn equivalent for order: " << order;
}
// Just to suppress compiler warnings
return CUDNN_TENSOR_NCHW;
}
/**
* cudnnTensorDescWrapper is the placeholder that wraps around a
 * cudnnTensorDescriptor_t, allowing us to change the descriptor as needed at
 * runtime.
*/
class cudnnTensorDescWrapper {
public:
cudnnTensorDescWrapper() {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&desc_));
}
~cudnnTensorDescWrapper() noexcept {
CUDNN_CHECK(cudnnDestroyTensorDescriptor(desc_));
}
inline cudnnTensorDescriptor_t Descriptor(
const cudnnTensorFormat_t format,
const cudnnDataType_t type,
const vector<int>& dims,
bool* changed) {
if (type_ == type && format_ == format && dims_ == dims) {
// if not changed, simply return the current descriptor.
if (changed)
*changed = false;
return desc_;
}
CAFFE_ENFORCE_EQ(
dims.size(), 4U, "Currently only 4-dimensional descriptor supported.");
format_ = format;
type_ = type;
dims_ = dims;
CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
desc_,
format,
type,
dims_[0],
(format == CUDNN_TENSOR_NCHW ? dims_[1] : dims_[3]),
(format == CUDNN_TENSOR_NCHW ? dims_[2] : dims_[1]),
(format == CUDNN_TENSOR_NCHW ? dims_[3] : dims_[2])));
if (changed)
*changed = true;
return desc_;
}
template <typename T>
inline cudnnTensorDescriptor_t Descriptor(
const StorageOrder& order,
const vector<int>& dims) {
return Descriptor(
GetCudnnTensorFormat(order), cudnnTypeWrapper<T>::type, dims, nullptr);
}
private:
cudnnTensorDescriptor_t desc_;
cudnnTensorFormat_t format_;
cudnnDataType_t type_;
vector<int> dims_;
C10_DISABLE_COPY_AND_ASSIGN(cudnnTensorDescWrapper);
};
class cudnnFilterDescWrapper {
public:
cudnnFilterDescWrapper() {
CUDNN_ENFORCE(cudnnCreateFilterDescriptor(&desc_));
}
~cudnnFilterDescWrapper() noexcept {
CUDNN_CHECK(cudnnDestroyFilterDescriptor(desc_));
}
inline cudnnFilterDescriptor_t Descriptor(
const StorageOrder& order,
const cudnnDataType_t type,
const vector<int>& dims,
bool* changed) {
if (type_ == type && order_ == order && dims_ == dims) {
// if not changed, simply return the current descriptor.
if (changed)
*changed = false;
return desc_;
}
CAFFE_ENFORCE_EQ(
dims.size(), 4U, "Currently only 4-dimensional descriptor supported.");
order_ = order;
type_ = type;
dims_ = dims;
CUDNN_ENFORCE(cudnnSetFilter4dDescriptor(
desc_,
type,
GetCudnnTensorFormat(order),
dims_[0],
// TODO - confirm that this is correct for NHWC
(order == StorageOrder::NCHW ? dims_[1] : dims_[3]),
(order == StorageOrder::NCHW ? dims_[2] : dims_[1]),
(order == StorageOrder::NCHW ? dims_[3] : dims_[2])));
if (changed)
*changed = true;
return desc_;
}
template <typename T>
inline cudnnFilterDescriptor_t Descriptor(
const StorageOrder& order,
const vector<int>& dims) {
return Descriptor(order, cudnnTypeWrapper<T>::type, dims, nullptr);
}
private:
cudnnFilterDescriptor_t desc_;
StorageOrder order_;
cudnnDataType_t type_;
vector<int> dims_;
C10_DISABLE_COPY_AND_ASSIGN(cudnnFilterDescWrapper);
};
} // namespace caffe2
#endif // CAFFE2_CORE_COMMON_CUDNN_H_
pytorch-main/caffe2/core/common_gpu.h
#ifndef CAFFE2_CORE_COMMON_GPU_H_
#define CAFFE2_CORE_COMMON_GPU_H_
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#if !defined(USE_ROCM)
#ifdef __GNUC__
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic push
#endif
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif // __GNUC__
#endif // USE_ROCM
#include <cublas_v2.h>
#include <curand.h>
#include <driver_types.h>
#include "caffe2/core/common.h"
#include "caffe2/core/logging.h"
#include "c10/cuda/CUDAMacros.h"
#include "c10/cuda/CUDAMathCompat.h"
#include <c10/cuda/CUDAGuard.h>
#define CAFFE2_CUDA_EXPORT C10_EXPORT
// CAFFE2_CUDA_API gets translated to CAFFE2_HIP_API in hipify script, which
// causes a macro redefinition issue with the later definition of
// CAFFE2_HIP_API, so we exclude this definition when HIP is specified
#if !defined(USE_ROCM)
#define CAFFE2_CUDA_API TORCH_CUDA_CPP_API
#endif // USE_ROCM
//TODO: [ROCm] Need to remove this after CUDA->HIP mapping is updated.
#define CAFFE2_HIP_EXPORT C10_EXPORT
#define CAFFE2_HIP_API TORCH_HIP_API
// This is a macro defined for cuda fp16 support. By default, cuda fp16 is
// supported by NVCC 7.5, but it is also included in the Tegra X1 platform with
// a (custom?) NVCC 7.0. As a result, we would normally just check the cuda
// version here, but would also allow a user to pass in the flag
// CAFFE_HAS_CUDA_FP16 manually.
#ifndef CAFFE_HAS_CUDA_FP16
#define CAFFE_HAS_CUDA_FP16
#endif // CAFFE_HAS_CUDA_FP16
#ifdef CAFFE_HAS_CUDA_FP16
#include <cuda_fp16.h>
#endif
// cuda major revision number below which fp16 compute is not supported
#if !defined(USE_ROCM)
constexpr int kFp16CUDADevicePropMajor = 6;
#else
constexpr int kFp16CUDADevicePropMajor = 3;
#endif
// Re-enable strict aliasing diagnostic if it was disabled.
#if !defined(USE_ROCM)
#ifdef __GNUC__
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic pop
#endif
#endif // __GNUC__
#endif // USE_ROCM
/**
* The maximum number of peers that each gpu can have when doing p2p setup.
* Currently, according to NVidia documentation, each device can support a
* system-wide maximum of eight peer connections.
* When Caffe2 sets up peer access resources, if we have more than 8 gpus,
* we will enable peer access in groups of 8.
*/
#define CAFFE2_CUDA_MAX_PEER_SIZE 8
namespace caffe2 {
#if !defined(USE_ROCM)
/**
* Empty class to identify TensorCore-based math
*/
class TensorCoreEngine {};
#endif // USE_ROCM
#if !defined(USE_ROCM)
#define CAFFE2_CUDA_PTRATTR_MEMTYPE type
#else
#define CAFFE2_CUDA_PTRATTR_MEMTYPE memoryType
#endif
/**
* A runtime function to report the cuda version that Caffe2 is built with.
*/
inline int CudaVersion() {
#if defined(USE_ROCM)
return ROCM_VERSION;
#else
return CUDA_VERSION;
#endif
}
/**
* Returns the number of devices.
*/
CAFFE2_CUDA_API int NumCudaDevices();
/**
* Check if the current running session has a cuda gpu present.
*
* Note that this is different from having caffe2 built with cuda. Building
* Caffe2 with cuda only guarantees that this function exists. If there are no
* cuda gpus present in the machine, or there are hardware configuration
* problems like an insufficient driver, this function will still return false,
* meaning that there is no usable GPU present.
*
* In the open source build, it is possible that Caffe2's GPU code is
* dynamically loaded, and as a result a library could be only linked to the
* CPU code, but want to test if cuda is later available or not. In this case,
* one should use HasCudaRuntime() from common.h.
*/
inline bool HasCudaGPU() {
return NumCudaDevices() > 0;
}
/**
* Gets the current GPU id. This is a simple wrapper around cudaGetDevice().
*/
CAFFE2_CUDA_API int CaffeCudaGetDevice();
/**
* Gets the current GPU id. This is a simple wrapper around cudaGetDevice().
*/
CAFFE2_CUDA_API void CaffeCudaSetDevice(const int id);
/**
* Gets the GPU id that the current pointer is located at.
*/
CAFFE2_CUDA_API int GetGPUIDForPointer(const void* ptr);
/**
* Gets the device property for the given device. This function is thread safe.
* The initial run on this function is ~1ms/device; however, the results are
* cached so subsequent runs should be much faster.
*/
CAFFE2_CUDA_API const cudaDeviceProp& GetDeviceProperty(const int device);
/**
* Runs a device query function and prints out the results to LOG(INFO).
*/
CAFFE2_CUDA_API void DeviceQuery(const int deviceid);
/**
* Return a peer access pattern by returning a matrix (in the format of a
* nested vector) of boolean values specifying whether peer access is possible.
*
* This function returns false if anything wrong happens during the query of
* the GPU access pattern.
*/
CAFFE2_CUDA_API bool GetCudaPeerAccessPattern(vector<vector<bool>>* pattern);
/**
* Return the availability of TensorCores for math
*/
CAFFE2_CUDA_API bool TensorCoreAvailable();
/**
* Return a human readable cublas error string.
*/
CAFFE2_CUDA_API const char* cublasGetErrorString(cublasStatus_t error);
/**
* Return a human readable curand error string.
*/
CAFFE2_CUDA_API const char* curandGetErrorString(curandStatus_t error);
// CUDA: various checks for different function calls.
#define CUDA_ENFORCE(condition, ...) \
do { \
cudaError_t error = condition; \
CAFFE_ENFORCE_EQ( \
error, \
cudaSuccess, \
"Error at: ", \
__FILE__, \
":", \
__LINE__, \
": ", \
cudaGetErrorString(error), \
##__VA_ARGS__); \
} while (0)
#define CUDA_CHECK(condition) \
do { \
cudaError_t error = condition; \
CHECK(error == cudaSuccess) << cudaGetErrorString(error); \
} while (0)
#define CUDA_DRIVERAPI_ENFORCE(condition) \
do { \
CUresult result = condition; \
if (result != CUDA_SUCCESS) { \
const char* msg; \
cuGetErrorName(result, &msg); \
CAFFE_THROW("Error at: ", __FILE__, ":", __LINE__, ": ", msg); \
} \
} while (0)
#define CUDA_DRIVERAPI_CHECK(condition) \
do { \
CUresult result = condition; \
if (result != CUDA_SUCCESS) { \
const char* msg; \
cuGetErrorName(result, &msg); \
LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": " \
<< msg; \
} \
} while (0)
#define CUBLAS_ENFORCE(condition) \
do { \
cublasStatus_t status = condition; \
CAFFE_ENFORCE_EQ( \
status, \
CUBLAS_STATUS_SUCCESS, \
"Error at: ", \
__FILE__, \
":", \
__LINE__, \
": ", \
::caffe2::cublasGetErrorString(status)); \
} while (0)
#define CUBLAS_CHECK(condition) \
do { \
cublasStatus_t status = condition; \
CHECK(status == CUBLAS_STATUS_SUCCESS) \
<< ::caffe2::cublasGetErrorString(status); \
} while (0)
#define CURAND_ENFORCE(condition) \
do { \
curandStatus_t status = condition; \
CAFFE_ENFORCE_EQ( \
status, \
CURAND_STATUS_SUCCESS, \
"Error at: ", \
__FILE__, \
":", \
__LINE__, \
": ", \
::caffe2::curandGetErrorString(status)); \
} while (0)
#define CURAND_CHECK(condition) \
do { \
curandStatus_t status = condition; \
CHECK(status == CURAND_STATUS_SUCCESS) \
<< ::caffe2::curandGetErrorString(status); \
} while (0)
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
#define CUDA_2D_KERNEL_LOOP(i, n, j, m) \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x) \
for (size_t j = blockIdx.y * blockDim.y + threadIdx.y; j < (m); \
j += blockDim.y * gridDim.y)
// The following helper functions are here so that you can write a kernel call
// when you are not particularly interested in maxing out the kernels'
// performance. Usually, this will give you a reasonable speed, but if you
// really want to find the best performance, it is advised that you tune the
// size of the blocks and grids more reasonably.
// A legacy note: this is derived from the good old Caffe days, when I simply
// hard-coded the number of threads and wanted to keep backward compatibility
// for different computation capabilities.
// For more info on CUDA compute capabilities, visit the NVidia website at:
// http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#compute-capabilities
// The number of cuda threads to use. Since work is assigned to SMs at the
// granularity of a block, 128 is chosen to allow utilizing more SMs for
// smaller input sizes.
// 1D grid
constexpr int CAFFE_CUDA_NUM_THREADS = 128;
// 2D grid
constexpr int CAFFE_CUDA_NUM_THREADS_2D_DIMX = 16;
constexpr int CAFFE_CUDA_NUM_THREADS_2D_DIMY = 16;
// The maximum number of blocks to use in the default kernel call. We set it to
// 4096 which would work for compute capability 2.x (where 65536 is the limit).
// This number is very carelessly chosen. Ideally, one would like to look at
// the hardware at runtime, and pick the number of blocks that makes most
// sense for the specific runtime environment. This is a todo item.
// 1D grid
constexpr int CAFFE_MAXIMUM_NUM_BLOCKS = 4096;
// 2D grid
constexpr int CAFFE_MAXIMUM_NUM_BLOCKS_2D_DIMX = 128;
constexpr int CAFFE_MAXIMUM_NUM_BLOCKS_2D_DIMY = 128;
constexpr int kCUDAGridDimMaxX = 2147483647;
constexpr int kCUDAGridDimMaxY = 65535;
constexpr int kCUDAGridDimMaxZ = 65535;
/**
* @brief Compute the number of blocks needed to run N threads.
*/
inline int CAFFE_GET_BLOCKS(const int N) {
return std::max(
std::min(
(N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS,
CAFFE_MAXIMUM_NUM_BLOCKS),
// Use at least 1 block, since CUDA does not allow empty block
1);
}
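// Example (sketch): an elementwise kernel written and launched with the
// helpers above. `context` is assumed to be a CUDAContext* (declared in
// context_gpu.h); n, x, and y are caller-provided.
//
//   template <typename T>
//   __global__ void AddOneKernel(const int n, const T* x, T* y) {
//     CUDA_1D_KERNEL_LOOP(i, n) {
//       y[i] = x[i] + T(1);
//     }
//   }
//
//   AddOneKernel<float>
//       <<<CAFFE_GET_BLOCKS(n),
//          CAFFE_CUDA_NUM_THREADS,
//          0,
//          context->cuda_stream()>>>(n, x, y);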
/**
* @brief Compute the number of blocks needed to run N threads for a 2D grid
*/
inline dim3 CAFFE_GET_BLOCKS_2D(const int N, const int /* M */) {
dim3 grid;
// Not calling the 1D version for each dim to keep all constants as literals
grid.x = std::max(
std::min(
(N + CAFFE_CUDA_NUM_THREADS_2D_DIMX - 1) /
CAFFE_CUDA_NUM_THREADS_2D_DIMX,
CAFFE_MAXIMUM_NUM_BLOCKS_2D_DIMX),
// Use at least 1 block, since CUDA does not allow empty block
1);
grid.y = std::max(
std::min(
(N + CAFFE_CUDA_NUM_THREADS_2D_DIMY - 1) /
CAFFE_CUDA_NUM_THREADS_2D_DIMY,
CAFFE_MAXIMUM_NUM_BLOCKS_2D_DIMY),
// Use at least 1 block, since CUDA does not allow empty block
1);
return grid;
}
using CUDAGuard = c10::cuda::CUDAGuard;
template <typename T, int N>
struct SimpleArray {
T data[N];
};
constexpr int kCUDATensorMaxDims = 8;
#define DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(val, Func, T, ...) \
do { \
CAFFE_ENFORCE_LE(val, kCUDATensorMaxDims); \
switch (val) { \
case 1: { \
Func<T, 1>(__VA_ARGS__); \
break; \
} \
case 2: { \
Func<T, 2>(__VA_ARGS__); \
break; \
} \
case 3: { \
Func<T, 3>(__VA_ARGS__); \
break; \
} \
case 4: { \
Func<T, 4>(__VA_ARGS__); \
break; \
} \
case 5: { \
Func<T, 5>(__VA_ARGS__); \
break; \
} \
case 6: { \
Func<T, 6>(__VA_ARGS__); \
break; \
} \
case 7: { \
Func<T, 7>(__VA_ARGS__); \
break; \
} \
case 8: { \
Func<T, 8>(__VA_ARGS__); \
break; \
} \
default: { \
break; \
} \
} \
} while (false)
#define DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2(val, Func, T1, T2, ...) \
do { \
CAFFE_ENFORCE_LE(val, kCUDATensorMaxDims); \
switch (val) { \
case 1: { \
Func<T1, T2, 1>(__VA_ARGS__); \
break; \
} \
case 2: { \
Func<T1, T2, 2>(__VA_ARGS__); \
break; \
} \
case 3: { \
Func<T1, T2, 3>(__VA_ARGS__); \
break; \
} \
case 4: { \
Func<T1, T2, 4>(__VA_ARGS__); \
break; \
} \
case 5: { \
Func<T1, T2, 5>(__VA_ARGS__); \
break; \
} \
case 6: { \
Func<T1, T2, 6>(__VA_ARGS__); \
break; \
} \
case 7: { \
Func<T1, T2, 7>(__VA_ARGS__); \
break; \
} \
case 8: { \
Func<T1, T2, 8>(__VA_ARGS__); \
break; \
} \
default: { \
break; \
} \
} \
} while (false)
#define DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_3(val, Func, T1, T2, T3, ...) \
do { \
CAFFE_ENFORCE_LE(val, kCUDATensorMaxDims); \
switch (val) { \
case 1: { \
Func<T1, T2, T3, 1>(__VA_ARGS__); \
break; \
} \
case 2: { \
Func<T1, T2, T3, 2>(__VA_ARGS__); \
break; \
} \
case 3: { \
Func<T1, T2, T3, 3>(__VA_ARGS__); \
break; \
} \
case 4: { \
Func<T1, T2, T3, 4>(__VA_ARGS__); \
break; \
} \
case 5: { \
Func<T1, T2, T3, 5>(__VA_ARGS__); \
break; \
} \
case 6: { \
Func<T1, T2, T3, 6>(__VA_ARGS__); \
break; \
} \
case 7: { \
Func<T1, T2, T3, 7>(__VA_ARGS__); \
break; \
} \
case 8: { \
Func<T1, T2, T3, 8>(__VA_ARGS__); \
break; \
} \
default: { \
break; \
} \
} \
} while (false)
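// A minimal usage sketch for the dispatch macros above: pick the compile-time
// rank of a kernel from a runtime dimension count. ComputeImpl is a
// hypothetical function template of the form ComputeImpl<T, kNumDims>(...).
//
//   DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
//       ndim, ComputeImpl, float, dims, x_data, y_data, &context);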
} // namespace caffe2
#endif // CAFFE2_CORE_COMMON_GPU_H_
pytorch-main/caffe2/core/context.h
#ifndef CAFFE2_CORE_CONTEXT_H_
#define CAFFE2_CORE_CONTEXT_H_
#include <cstdlib>
#include <ctime>
#include <random>
#include <unordered_map>
#include <c10/util/typeid.h>
#include "caffe2/core/allocator.h"
#include "caffe2/core/context_base.h"
#include "caffe2/core/event.h"
#include "caffe2/core/logging.h"
#include "caffe2/proto/caffe2_pb.h"
#include <c10/util/ArrayRef.h>
#if !defined(CAFFE2_IS_XPLAT_BUILD) && !defined(C10_MOBILE)
#include <c10/core/GeneratorImpl.h>
#include <c10/util/irange.h>
#include <ATen/core/DistributionsHelper.h>
#include <ATen/core/MT19937RNGEngine.h>
#else
#include "caffe2/core/distributions_stubs.h"
#endif
C10_DECLARE_bool(caffe2_report_cpu_memory_usage);
namespace caffe2 {
/**
 * A function to generate a random number seed that is unique on a best-effort
* basis, using an ever-incrementing seed and the current time.
*/
TORCH_API uint32_t RandomNumberSeed();
/**
* The CPU Context, representing the bare minimum of what a Context class in
* Caffe2 should implement.
*
* // TODO modify docs
* See operator.h, especially Operator<Context>, for how Context are used in
* actual operator implementations that are associated with specific devices.
* In general, the Context class is passed in as a template argument, and
* the operator can use the functions defined in the context to execute whatever
* computation it has.
*
*/
class TORCH_API CPUContext final : public BaseContext {
public:
#if !defined(CAFFE2_IS_XPLAT_BUILD) && !defined(C10_MOBILE)
class rand_gen_type {
public:
explicit rand_gen_type(uint64_t seed_in = default_rng_seed_val)
: engine_{seed_in} {}
uint32_t random() {
return engine_();
}
uint64_t random64() {
uint32_t random1 = engine_();
uint32_t random2 = engine_();
return (static_cast<uint64_t>(random1) << 32) | random2;
}
c10::optional<float> next_float_normal_sample() {
return next_float_normal_sample_;
}
c10::optional<double> next_double_normal_sample() {
return next_double_normal_sample_;
}
void set_next_float_normal_sample(c10::optional<float> randn) {
next_float_normal_sample_ = randn;
}
void set_next_double_normal_sample(c10::optional<double> randn) {
next_double_normal_sample_ = randn;
}
private:
at::mt19937 engine_;
c10::optional<float> next_float_normal_sample_;
c10::optional<double> next_double_normal_sample_;
};
#else
typedef std::mt19937 rand_gen_type;
#endif
CPUContext() {}
explicit CPUContext(const DeviceOption& option)
: random_seed_(option.has_random_seed() ? option.random_seed() : 1701),
random_seed_set_(option.has_random_seed() ? true : false) {
CAFFE_ENFORCE_EQ(option.device_type(), PROTO_CPU);
}
explicit CPUContext(const at::Device& device)
: CPUContext(DeviceToOption(device)) {}
~CPUContext() noexcept override {}
inline void SwitchToDevice(int64_t /*stream_id*/) override {}
using BaseContext::SwitchToDevice;
inline void WaitEvent(const Event& ev) override {
ev.Wait(CPU, this);
}
inline void Record(Event* ev, const char* err_msg = nullptr) const override {
CAFFE_ENFORCE(ev, "Event must not be null.");
ev->Record(CPU, this, err_msg);
}
inline void FinishDeviceComputation() override {}
inline rand_gen_type* RandGenerator() {
if (!random_generator_.get()) {
random_generator_.reset(new rand_gen_type(RandSeed()));
}
return random_generator_.get();
}
inline uint32_t RandSeed() {
if (!random_seed_set_) {
random_seed_ = RandomNumberSeed();
random_seed_set_ = true;
}
return static_cast<uint32_t>(random_seed_);
}
inline static at::DataPtr New(size_t nbytes) {
return GetCPUAllocator()->allocate(nbytes);
}
void CopyBytesSameDevice(size_t nbytes, const void* src, void* dst) override;
void CopyBytesFromCPU(size_t nbytes, const void* src, void* dst) override {
CopyBytesSameDevice(nbytes, src, dst);
}
void CopyBytesToCPU(size_t nbytes, const void* src, void* dst) override {
CopyBytesSameDevice(nbytes, src, dst);
}
bool SupportsNonFundamentalTypes() const override {
    // CPU non-fundamental type copy is OK
return true;
}
template <class SrcContext, class DstContext>
inline void CopyBytes(size_t nbytes, const void* src, void* dst);
template <typename T, class SrcContext, class DstContext>
inline void Copy(size_t n, const T* src, T* dst) {
if (c10::guts::is_fundamental<T>::value) {
CopyBytes<SrcContext, DstContext>(
n * sizeof(T),
static_cast<const void*>(src),
static_cast<void*>(dst));
} else {
for (const auto i : c10::irange(n)) {
dst[i] = src[i];
}
}
}
template <class SrcContext, class DstContext>
inline void
CopyItems(const TypeMeta meta, size_t n, const void* src, void* dst) {
if (meta.copy()) {
meta.copy()(src, dst, n);
} else {
CopyBytes<SrcContext, DstContext>(n * meta.itemsize(), src, dst);
}
}
// By default CPU operators don't have async device parts
static bool HasAsyncPartDefault() {
return false;
}
static bool SupportsAsyncScheduling() {
return false;
}
// CPU streams are not implemented and are silently ignored by CPU ops,
  // so return true to signal the executor to schedule a CPU op
static bool IsStreamFree(
const DeviceOption& /* option */,
int /* stream_id */) {
return true;
}
at::Device device() const override {
// TODO: numa?
return at::Device(CPU);
}
DeviceType device_type() const override {
return CPU;
}
static constexpr DeviceType GetDeviceType() {
return CPU;
}
protected:
// TODO(jiayq): instead of hard-coding a generator, make it more flexible.
int random_seed_{1701};
bool random_seed_set_{false};
std::unique_ptr<rand_gen_type> random_generator_;
};
template <>
inline void CPUContext::CopyBytes<CPUContext, CPUContext>(
size_t nbytes,
const void* src,
void* dst) {
if (nbytes == 0) {
return;
}
CAFFE_ENFORCE(src);
CAFFE_ENFORCE(dst);
memcpy(dst, src, nbytes);
}
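// Example (sketch): standalone CPUContext usage; n, src, and dst are assumed
// caller-provided buffers of n floats.
//
//   CPUContext context;
//   context.SwitchToDevice();
//   context.Copy<float, CPUContext, CPUContext>(n, src, dst);
//   uint32_t r = context.RandGenerator()->random();  // context-owned RNG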
} // namespace caffe2
#endif // CAFFE2_CORE_CONTEXT_H_
pytorch-main/caffe2/core/context_base.h
#pragma once
#include <array>
#include <cstdlib>
#include <ctime>
#include <memory>
#include <unordered_map>
#include <c10/macros/Macros.h>
#include <c10/core/Allocator.h>
#include <c10/util/typeid.h>
#include <c10/util/Exception.h>
#include <c10/util/Registry.h>
#include <c10/core/CopyBytes.h>
#include "caffe2/core/common.h"
#include "caffe2/core/logging.h"
#include "caffe2/proto/caffe2_pb.h"
namespace caffe2 {
class Event;
} // namespace caffe2
namespace at {
class BaseContext;
/**
* Virtual interface for the Context class in Caffe2.
*
* A Context defines all the necessities to run an operator on a specific
* device. Specific Context classes needs to implement all the pure virtual
* functions in the BaseContext class.
* TODO: add docs after this is finalized.
*/
class TORCH_API BaseContext {
public:
virtual ~BaseContext() noexcept {}
virtual Device device() const = 0;
/* Sorry for the naming, will get rid of this in future diff */
virtual DeviceType device_type() const = 0;
virtual void SwitchToDevice(int64_t /*stream_id*/) = 0;
inline void SwitchToDevice() {
SwitchToDevice(0);
}
virtual void WaitEvent(const caffe2::Event& ev) = 0;
virtual void Record(caffe2::Event* ev, const char* err_msg = nullptr)
const = 0;
virtual void FinishDeviceComputation() = 0;
// This used to be arbitrary cross-device copy, but it turns out everyone
// did direct CPU-X copy, so we just make three functions for it (to avoid
// double dispatch). This will get obsoleted by C10. where copies
// will be proper operators (and get to rely on multiple dispatch there.)
virtual void CopyBytesSameDevice(
size_t nbytes,
const void* src,
void* dst) = 0;
virtual void CopyBytesFromCPU(size_t nbytes, const void* src, void* dst) = 0;
virtual void CopyBytesToCPU(size_t nbytes, const void* src, void* dst) = 0;
template <typename T>
inline void CopySameDevice(size_t n, const T* src, T* dst) {
static_assert(
c10::guts::is_fundamental<T>::value,
"CopySameDevice requires fundamental types");
CopyBytesSameDevice(
n * sizeof(T), static_cast<const void*>(src), static_cast<void*>(dst));
}
template <typename T>
inline void CopyFromCPU(size_t n, const T* src, T* dst) {
static_assert(
c10::guts::is_fundamental<T>::value,
"CopyFromCPU requires fundamental types");
CopyBytesFromCPU(
n * sizeof(T), static_cast<const void*>(src), static_cast<void*>(dst));
}
template <typename T>
inline void CopyToCPU(size_t n, const T* src, T* dst) {
static_assert(
c10::guts::is_fundamental<T>::value, "CopyToCPU requires fundamental types");
CopyBytesToCPU(
n * sizeof(T), static_cast<const void*>(src), static_cast<void*>(dst));
}
virtual bool SupportsNonFundamentalTypes() const {
return false;
}
inline void EnforceMetaCopyOK() {
AT_ASSERTM(
SupportsNonFundamentalTypes(), "Context requires fundamental types");
}
void CopyItemsSameDevice(
const caffe2::TypeMeta meta,
size_t n,
const void* src,
void* dst) {
if (meta.copy()) {
EnforceMetaCopyOK();
meta.copy()(src, dst, n);
} else {
CopyBytesSameDevice(n * meta.itemsize(), src, dst);
}
}
void CopyItemsFromCPU(
const caffe2::TypeMeta meta,
size_t n,
const void* src,
void* dst) {
if (meta.copy()) {
EnforceMetaCopyOK();
meta.copy()(src, dst, n);
} else {
CopyBytesFromCPU(n * meta.itemsize(), src, dst);
}
}
void CopyItemsToCPU(
const caffe2::TypeMeta meta,
size_t n,
const void* src,
void* dst) {
if (meta.copy()) {
EnforceMetaCopyOK();
meta.copy()(src, dst, n);
} else {
CopyBytesToCPU(n * meta.itemsize(), src, dst);
}
}
};
// Context constructor registry
C10_DECLARE_TYPED_REGISTRY(
ContextRegistry,
at::DeviceType,
at::BaseContext,
std::unique_ptr,
at::Device);
#define REGISTER_CONTEXT(type, ...) \
C10_REGISTER_TYPED_CLASS(ContextRegistry, type, __VA_ARGS__)
inline std::unique_ptr<at::BaseContext> CreateContext(
const at::Device& device) {
return at::ContextRegistry()->Create(device.type(), device);
}
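// A minimal usage sketch: create a context polymorphically from a device and
// drive it through the BaseContext interface.
//
//   std::unique_ptr<at::BaseContext> ctx =
//       at::CreateContext(at::Device(at::kCPU));
//   ctx->SwitchToDevice();
//   ctx->FinishDeviceComputation();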
} // namespace at
namespace caffe2 {
using at::BaseContext;
using at::CreateContext;
} // namespace caffe2
pytorch-main/caffe2/core/context_gpu.h
#ifndef CAFFE2_CORE_CONTEXT_GPU_H_
#define CAFFE2_CORE_CONTEXT_GPU_H_
#include <ctime>
#include <mutex>
#include "caffe2/core/common.h"
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context.h"
#include "caffe2/core/context_base.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/numa.h"
#include "caffe2/core/tensor.h"
#include "caffe2/core/types.h"
#include "caffe2/proto/caffe2_pb.h"
// Since we are using the macro CAFFE2_USE_CUDNN, we will need to include this
// file after common.h is included.
#ifdef CAFFE2_USE_CUDNN
#include "caffe2/core/common_cudnn.h"
#endif // CAFFE2_USE_CUDNN
#include <c10/core/Device.h>
#include <c10/core/Stream.h>
#include <c10/cuda/CUDAStream.h>
#include <c10/cuda/CUDAGuard.h>
namespace caffe2 {
enum class CudaMemoryPoolType {
NONE = 0,
CUB = 1,
THC = 2,
};
/**
* Gets the current memory pool type used by Caffe2.
*
* The memory pool is set up during caffe2's global initialization time.
*/
CAFFE2_CUDA_API CudaMemoryPoolType GetCudaMemoryPoolType();
/**
* A struct to host thread-local cuda objects.
*
* In Caffe2, each thread has its own non-default cuda stream as well as
* related objects such as cublas and curand handles. This is achieved by
* having the ThreadLocalCUDAObjects wrapper that takes care of allocating
* and deallocating these objects at the thread scope. This class is solely
* used inside CUDAContext and should not be used externally.
*
 * This class manages the mapping from logical stream IDs (the int stream_id
 * passed around in Caffe2) to CUDAStream objects. We intend to eventually
* deprecate the logical stream ID interface, but not for now.
*/
class CAFFE2_CUDA_API ThreadLocalCUDAObjects {
friend class CUDAContext;
private:
ThreadLocalCUDAObjects() {
for (DeviceIndex i = 0; i < C10_COMPILE_TIME_MAX_GPUS; ++i) {
cuda_streams_[i] = vector<c10::cuda::CUDAStream>();
}
}
// Record current stream id for the current thread.
// This is the new API we're trying to migrate use cases to and get rid of
// explicit stream id passing. For now it's invoked in
// CUDAContext::SwitchToDevice
void SetCurrentStreamId(DeviceIndex gpu, StreamId stream_id) {
// TODO: use current device id from thread local instead of passing gpu in
if (stream_id != -1) {
c10::cuda::setCurrentCUDAStream(GetCUDAStream(gpu, stream_id));
}
}
// Retrieves the CUDAStream corresponding to a logical stream ID, ensuring
// that it exists in cuda_streams_ if it has not been allocated yet.
c10::cuda::CUDAStream GetCUDAStream(DeviceIndex gpu, StreamId stream_id) {
vector<c10::cuda::CUDAStream>& gpu_streams = cuda_streams_[gpu];
while (gpu_streams.size() <= static_cast<size_t>(stream_id)) {
      // NB: These streams are not guaranteed to be unique; we'll
// wrap around once we run out of streams in the pool.
gpu_streams.emplace_back(c10::cuda::getStreamFromPool(/* high priority */ false, gpu));
}
return gpu_streams[stream_id];
}
// Uses the logical stream id from the thread local to pick the stream
  // We're going to migrate all usages to this API instead of passing the
// stream id directly
cudaStream_t GetStream(DeviceIndex gpu) {
return c10::cuda::getCurrentCUDAStream(gpu).stream();
}
cudaStream_t GetStream(DeviceIndex gpu, StreamId stream_id) {
return GetCUDAStream(gpu, stream_id).stream();
}
// Uses the logical stream id from the thread local to pick the stream
  // We're going to migrate all usages to this API instead of passing the
// stream id directly
cublasHandle_t GetHandle(DeviceIndex gpu) {
return GetHandle(c10::cuda::getCurrentCUDAStream(gpu));
}
cublasHandle_t GetHandle(c10::cuda::CUDAStream cuda_stream) {
CUDAGuard guard(cuda_stream.device_index());
// Default construct in the map if it doesn't exist, and return a mutable
// reference to it.
auto& r = cublas_handles_[cuda_stream];
if (r == nullptr) {
CUBLAS_ENFORCE(cublasCreate(&r));
// The default is CUBLAS_POINTER_MODE_HOST. You can override
// it after obtaining the cublas handle, but do that with
// caution.
CUBLAS_ENFORCE(cublasSetPointerMode(r, CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSetStream(r, cuda_stream));
}
return r;
}
#ifdef CAFFE2_USE_CUDNN
// Uses the logical stream id from the thread local to pick the stream
  // We're going to migrate all usages to this API instead of passing the
// stream id directly
cudnnHandle_t GetCudnnHandle(DeviceIndex gpu) {
return GetCudnnHandle(c10::cuda::getCurrentCUDAStream(gpu));
}
cudnnHandle_t GetCudnnHandle(c10::cuda::CUDAStream cuda_stream) {
CUDAGuard guard(cuda_stream.device_index());
auto& r = cudnn_handles_[cuda_stream];
if (r == nullptr) {
CUDNN_ENFORCE(cudnnCreate(&r));
CUDNN_ENFORCE(cudnnSetStream(r, cuda_stream));
}
return r;
}
#endif // CAFFE2_USE_CUDNN
~ThreadLocalCUDAObjects() noexcept {
for (auto element : cublas_handles_) {
if (element.second) {
CUBLAS_CHECK(cublasDestroy(element.second));
}
}
#ifdef CAFFE2_USE_CUDNN
for (auto element : cudnn_handles_) {
if (element.second) {
#ifdef _WIN32
// this is because of something dumb in the ordering of
// destruction. Sometimes at exit, the cuda context would already
// be destroyed by the time this gets destroyed. This happens on
          // Windows with CUDA 11 and CUDA 12.
cudnnDestroy(element.second);
#else
CUDNN_CHECK(cudnnDestroy(element.second));
#endif // _WIN32
}
}
#endif // CAFFE2_USE_CUDNN
}
// WARNING: mapping from logical stream ID to c10::cuda::CUDAStream
// is NOT bijective; multiple logical stream IDs may map to the
// same underlying stream ID.
vector<c10::cuda::CUDAStream> cuda_streams_[C10_COMPILE_TIME_MAX_GPUS];
std::unordered_map<c10::cuda::CUDAStream, cublasHandle_t> cublas_handles_;
#ifdef CAFFE2_USE_CUDNN
std::unordered_map<c10::cuda::CUDAStream, cudnnHandle_t> cudnn_handles_;
#endif // CAFFE2_USE_CUDNN
};
class CAFFE2_CUDA_API CUDAContext final : public BaseContext {
public:
// The default cuda context constructor.
explicit CUDAContext(DeviceIndex gpu_id = -1);
explicit CUDAContext(const DeviceOption& option);
explicit CUDAContext(Device device)
: CUDAContext(DeviceToOption(device)) {}
~CUDAContext() override;
inline void SwitchToDevice(StreamId stream_id) override {
getCudaObjects().SetCurrentStreamId(gpu_id_, stream_id);
CaffeCudaSetDevice(gpu_id_);
}
// void SwitchToDevice()
using BaseContext::SwitchToDevice;
inline void WaitEvent(const Event& ev) override {
ev.Wait(CUDA, this);
}
inline void Record(Event* ev, const char* err_msg = nullptr) const override {
CAFFE_ENFORCE(ev, "Event must not be null.");
ev->Record(CUDA, this, err_msg);
}
// Note on current use cases:
// FinishDeviceComputation must be called on the same cpu thread as
// SwitchToDevice()
void FinishDeviceComputation() override {
CUDA_ENFORCE(cudaStreamSynchronize(getCudaObjects().GetStream(gpu_id_)));
}
inline int device_id() const {
return gpu_id_;
}
inline c10::cuda::CUDAStream stream() const {
return at::cuda::getStreamFromExternal(getCudaObjects().GetStream(gpu_id_), gpu_id_);
}
inline cudaStream_t cuda_stream() const {
return getCudaObjects().GetStream(gpu_id_);
}
static cudaStream_t cuda_stream(DeviceIndex gpu_id, StreamId stream_id) {
return getCudaObjects().GetStream(gpu_id, stream_id);
}
cublasHandle_t cublas_handle() {
return getCudaObjects().GetHandle(gpu_id_);
}
#ifdef CAFFE2_USE_CUDNN
cudnnHandle_t cudnn_handle() {
return getCudaObjects().GetCudnnHandle(gpu_id_);
}
#endif // CAFFE2_USE_CUDNN
curandGenerator_t& curand_generator() {
if (!curand_generator_) {
CUDAGuard guard(gpu_id_);
CURAND_ENFORCE(
curandCreateGenerator(&curand_generator_, CURAND_RNG_PSEUDO_DEFAULT));
CURAND_ENFORCE(
curandSetPseudoRandomGeneratorSeed(curand_generator_, random_seed_));
TORCH_CHECK_NOTNULL(curand_generator_);
}
CURAND_ENFORCE(curandSetStream(curand_generator_, cuda_stream()));
return curand_generator_;
}
inline static at::DataPtr New(size_t nbytes) {
return GetAllocator(CUDA)->allocate(nbytes);
}
// Get a mutex to lock out cudaMalloc / cudaFree calls when
// NCCL kernels are being launched. Should remove threat of
// deadlocks
static std::mutex& mutex();
// Functions to query memory stats. Only available if flag
// --caffe2_gpu_memory_tracking is enabled.
static std::vector<long> TotalMemoryByGpu();
static std::vector<long> MaxMemoryByGpu();
template <class SrcContext, class DstContext>
inline void CopyBytes(size_t nbytes, const void* src, void* dst) {
CUDA_ENFORCE(cudaMemcpyAsync(
dst,
src,
nbytes,
cudaMemcpyDefault,
getCudaObjects().GetStream(gpu_id_)));
}
void CopyBytesSameDevice(size_t nbytes, const void* src, void* dst) override {
CopyBytes<CUDAContext, CUDAContext>(nbytes, src, dst);
}
void CopyBytesToCPU(size_t nbytes, const void* src, void* dst) override {
CopyBytes<CUDAContext, CPUContext>(nbytes, src, dst);
}
void CopyBytesFromCPU(size_t nbytes, const void* src, void* dst) override {
CopyBytes<CPUContext, CUDAContext>(nbytes, src, dst);
}
template <typename T, class SrcContext, class DstContext>
inline void Copy(int n, const T* src, T* dst) {
CopyBytes<SrcContext, DstContext>(n * sizeof(T),
static_cast<const void*>(src),
static_cast<void*>(dst));
}
template <class SrcContext, class DstContext>
inline void
CopyItems(const TypeMeta meta, size_t n, const void* src, void* dst) {
CAFFE_ENFORCE(!meta.copy(), "CUDAContext requires fundamental types.");
CopyBytes<SrcContext, DstContext>(n * meta.itemsize(), src, dst);
}
static void CopyBytesAsync(
size_t nbytes,
const void* src,
Device src_device,
void* dst,
Device dst_device);
static void CopyBytesSync(
size_t nbytes,
const void* src,
Device src_device,
void* dst,
Device dst_device);
// By default CUDA operators have async device parts
static bool HasAsyncPartDefault() {
return true;
}
static bool SupportsAsyncScheduling() {
return true;
}
static bool IsStreamFree(const DeviceOption& option, StreamId stream_id) {
const auto stream = CUDAContext::cuda_stream(option.device_id(), stream_id);
const auto status = C10_CUDA_ERROR_HANDLED(cudaStreamQuery(stream));
if (status == cudaErrorNotReady) {
// ignore and clear the error if not ready
C10_CUDA_CLEAR_ERROR();
} else {
C10_CUDA_CHECK(status); // Reraise error
}
return status == cudaSuccess;
}
at::Device device() const override {
return at::Device(CUDA, gpu_id_);
}
DeviceType device_type() const override {
return CUDA;
}
static constexpr DeviceType GetDeviceType() {
return CUDA;
}
protected:
int gpu_id_;
int random_seed_;
curandGenerator_t curand_generator_{nullptr};
static ThreadLocalCUDAObjects& getCudaObjects();
};
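// Example (sketch): typical CUDAContext usage in a GPU operator body; nbytes,
// src (host), and dst (device) are assumed caller-provided.
//
//   CUDAContext context(0);             // bind to GPU 0
//   context.SwitchToDevice();
//   context.CopyBytes<CPUContext, CUDAContext>(nbytes, src, dst);
//   context.FinishDeviceComputation();  // sync this thread's stream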
using TensorCUDA = Tensor;
} // namespace caffe2
#endif // CAFFE2_CORE_CONTEXT_GPU_H_
pytorch-main/caffe2/core/cudnn_wrappers.h
// Copyright 2004-present Facebook. All Rights Reserved.
#ifndef CAFFE2_CORE_CUDNN_WRAPPERS_H_
#define CAFFE2_CORE_CUDNN_WRAPPERS_H_
#include "caffe2/core/common_cudnn.h"
#include "caffe2/core/context_gpu.h"
// Note [What is CuDNNWrapper good for?]
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Suppose you are writing a kernel that calls into CuDNN, and
// you need a cudnnHandle_t to pass to the kernel call. How should
// you go about getting one of those handles? You'd prefer not
// to make a new cudnnHandle_t every call; this can be somewhat
// expensive (1-2%, according to some measurements in TensorFlow.)
// But cudnnHandle_t is not thread-safe, so we can't just have
// a single global cudnnHandle_t that everyone uses.
//
// Thus, the most common method in Caffe2 for getting a CuDNN handle
// is to get a per-thread, per-stream CuDNN handle from CUDAContext
// (which knows what the current thread and stream are). The idiomatic
// way to do this in Caffe2 today is to make a CuDNNWrapper and then call
// inline_cudnn_handle(), although you didn't really need the
// CuDNNWrapper at all (you could have gotten it directly from
// CUDAContext.)
//
// So, what's all this business about CuDNNWrapper? In theory, it was
// designed with a more specialized use-case in mind, where you need to
// make multiple calls to CuDNN in parallel; e.g., when manually
// computing group convolution. By using with_cudnn_state(), you can
// get separate cudnnHandle_t and CUDA stream per parallel thread of
// execution, and run all of the cuDNN calls in parallel. CuDNNWrapper
// handles the business of synchronizing with the stream prior to this
// call.
//
// (By the way, this is why no such CUBLASWrapper exists; there isn't
// ever any reason you need to call cublas in parallel, since most
// cublas operations have batched variants.)
//
// Now, that's the theory... in practice, this is only ever used when
// multiple operators are run in parallel, and not to actually
// parallelize multiple CuDNN calls (for example, group convolution is
// now supported natively in CuDNN.) So... while the kit provided here
// might be useful for someone else in the future, it's not really used
// now. So we might consider deleting it, or unifying this mechanism
// with PyTorch's own CuDNN handle pool (which is its own thing).
namespace caffe2 {
class CuDNNWrapper;
/**
* CuDNNWorkspace is a wrapper around a raw cuda pointer that holds the cudnn
* scratch space. This struct is meant to be only used in CuDNNWrapper to
* provide a program-wide scratch space for CuDNN. The reason behind it is that
* cudnn function calls are usually very efficient, hence one probably does not
* want to run multiple cudnn calls at the same time. As a result, one should
* not need more than one cudnn workspace per device.
*/
struct CuDNNWorkspace {
~CuDNNWorkspace() noexcept {}
void* get(size_t nbytes) {
if (nbytes_ < nbytes) {
reset();
data_ = CUDAContext::New(nbytes);
nbytes_ = nbytes;
}
CAFFE_ENFORCE_GE(nbytes_, nbytes);
return data_.get();
}
void reset() {
data_.clear();
nbytes_ = 0;
}
private:
at::DataPtr data_{nullptr, nullptr, &NoDelete, at::Device(CUDA)};
size_t nbytes_{0};
};
// CuDNNState is the owner of the CuDNNWorkspace, and serializes all
// executions of operations that use the state onto its own stream
// (so multiple Net workers can reuse the same workspace from
// different threads and CUDA streams).
class CuDNNState {
public:
explicit CuDNNState(size_t gpu_id) : gpu_id_(gpu_id) {
CUDAGuard g(gpu_id_);
CUDNN_ENFORCE(cudnnCreate(&cudnn_handle_));
CUDA_ENFORCE(cudaEventCreate(&before_));
CUDA_ENFORCE(cudaEventCreate(&after_));
CUDA_ENFORCE(cudaStreamCreate(&stream_));
CUDNN_ENFORCE(cudnnSetStream(cudnn_handle_, stream_));
}
~CuDNNState() noexcept {
CUDAGuard g(gpu_id_);
CUDNN_CHECK(cudnnDestroy(cudnn_handle_));
CUDA_CHECK(cudaStreamDestroy(stream_));
CUDA_CHECK(cudaEventDestroy(after_));
CUDA_CHECK(cudaEventDestroy(before_));
}
cudnnHandle_t& cudnn_handle() {
return cudnn_handle_;
}
CuDNNWorkspace& workspace() {
return workspace_;
}
template <typename F>
void execute(cudaStream_t stream, F&& f) {
CUDA_ENFORCE(cudaEventRecord(before_, stream));
CUDA_ENFORCE(cudaStreamWaitEvent(stream_, before_, 0));
f(this);
CUDA_ENFORCE(cudaEventRecord(after_, stream_));
CUDA_ENFORCE(cudaStreamWaitEvent(stream, after_, 0));
}
private:
cudnnHandle_t cudnn_handle_{nullptr};
cudaEvent_t before_{nullptr};
cudaEvent_t after_{nullptr};
cudaStream_t stream_{nullptr};
CuDNNWorkspace workspace_;
size_t gpu_id_{0};
C10_DISABLE_COPY_AND_ASSIGN(CuDNNState);
};
/**
* CuDNNWrapper is a class that wraps the cudnn handles and cudnn workspaces.
*
* The wrapper ensures that for each thread and each gpu, there is one
* identical cudnn handle, which is also associated with the thread-local
* per-device cuda stream. The wrapper also hosts the device-specific cudnn
* workspace (scratch space for some cudnn functions).
*
*/
class CuDNNWrapper {
public:
/**
* Creates a cudnn wrapper associated with a CUDAContext object. Note that
* the CUDAContext object should outlive the CuDNNWrapper.
*/
explicit CuDNNWrapper(CUDAContext* context) : context_(context) {}
/**
* Returns the inline cudnn handle that executes on the current
* thread's cuda_stream.
*/
cudnnHandle_t inline_cudnn_handle() {
return context_->cudnn_handle();
}
// Executes the closure F on the CuDNNState associated with state_idx
template <typename F>
void with_cudnn_state(size_t state_idx, F&& f) {
CAFFE_ENFORCE(
state_idx < CAFFE2_COMPILE_TIME_MAX_CUDNN_STATES, "Invalid state_idx");
auto& sync_state = cudnn_states()[context_->device_id()][state_idx];
CUDAGuard dg(context_->device_id());
// We need to serialize execution on the CuDNNState as we can't
// allow multiple threads to race through the cudaEventRecord
// calls (so a worker thread might wait on another worker thread's
// execution)
std::lock_guard<std::mutex> g(sync_state.mutex);
if (!sync_state.state.get()) {
sync_state.state.reset(new CuDNNState(context_->device_id()));
}
TORCH_CHECK_NOTNULL(sync_state.state.get())->execute(context_->cuda_stream(), f);
}
protected:
// Pointer to an external cuda context that the cudnn wrapper will use.
CUDAContext* context_;
static constexpr size_t CAFFE2_COMPILE_TIME_MAX_CUDNN_STATES = 4;
struct SyncedCuDNNState {
std::mutex mutex;
std::unique_ptr<CuDNNState> state;
};
using PerGPUCuDNNStates = std::array<
std::array<SyncedCuDNNState, CAFFE2_COMPILE_TIME_MAX_CUDNN_STATES>,
C10_COMPILE_TIME_MAX_GPUS>;
static PerGPUCuDNNStates& cudnn_states();
C10_DISABLE_COPY_AND_ASSIGN(CuDNNWrapper);
};
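// Example (sketch): running cudnn work on a dedicated per-state handle and
// stream. `wrapper` is assumed to be a CuDNNWrapper member of an operator and
// workspace_nbytes an assumed, caller-computed scratch size.
//
//   wrapper.with_cudnn_state(0, [&](CuDNNState* state) {
//     cudnnHandle_t handle = state->cudnn_handle();
//     void* scratch = state->workspace().get(workspace_nbytes);
//     // ... issue cudnn calls on `handle`, passing `scratch` as workspace ...
//   });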
}; // namespace caffe2
#endif
pytorch-main/caffe2/core/db.h
#ifndef CAFFE2_CORE_DB_H_
#define CAFFE2_CORE_DB_H_
#include <mutex>
#include <c10/util/Registry.h>
#include <c10/util/irange.h>
#include <c10/util/string_view.h>
#include "caffe2/core/blob_serialization.h"
#include "caffe2/proto/caffe2_pb.h"
namespace caffe2 {
namespace db {
/**
* The mode of the database, whether we are doing a read, write, or creating
* a new database.
*/
enum Mode { READ, WRITE, NEW };
/**
* An abstract class for the cursor of the database while reading.
*/
class TORCH_API Cursor {
public:
Cursor() {}
virtual ~Cursor() {}
/**
* Seek to a specific key (or if the key does not exist, seek to the
* immediate next). This is optional for dbs, and in default, SupportsSeek()
* returns false meaning that the db cursor does not support it.
*/
virtual void Seek(const string& key) = 0;
virtual bool SupportsSeek() {
return false;
}
/**
* Seek to the first key in the database.
*/
virtual void SeekToFirst() = 0;
/**
* Go to the next location in the database.
*/
virtual void Next() = 0;
/**
* Returns the current key.
*/
virtual string key() = 0;
/**
* Returns the current value.
*/
virtual string value() = 0;
/**
* Returns whether the current location is valid - for example, if we have
* reached the end of the database, return false.
*/
virtual bool Valid() = 0;
C10_DISABLE_COPY_AND_ASSIGN(Cursor);
};
/**
* An abstract class for the current database transaction while writing.
*/
class TORCH_API Transaction {
public:
Transaction() {}
virtual ~Transaction() {}
/**
* Puts the key value pair to the database.
*/
virtual void Put(const std::string& key, std::string&& value) = 0;
/**
* Commits the current writes.
*/
virtual void Commit() = 0;
C10_DISABLE_COPY_AND_ASSIGN(Transaction);
};
/**
* An abstract class for accessing a database of key-value pairs.
*/
class TORCH_API DB {
public:
DB(const string& /*source*/, Mode mode) : mode_(mode) {}
virtual ~DB() {}
/**
* Closes the database.
*/
virtual void Close() = 0;
/**
* Returns a cursor to read the database. The caller takes the ownership of
* the pointer.
*/
virtual std::unique_ptr<Cursor> NewCursor() = 0;
/**
* Returns a transaction to write data to the database. The caller takes the
* ownership of the pointer.
*/
virtual std::unique_ptr<Transaction> NewTransaction() = 0;
/**
* Set DB options.
*
* These options should apply for the lifetime of the DB, or until a
* subsequent SetOptions() call overrides them.
*
* This is used by the Save operator to allow the client to pass in
* DB-specific options to control the behavior. This is an opaque string,
* where the format is specific to the DB type. DB types may pass in a
* serialized protobuf message here if desired.
*/
virtual void SetOptions(c10::string_view /* options */) {}
protected:
Mode mode_;
C10_DISABLE_COPY_AND_ASSIGN(DB);
};
// Database classes are registered by their names so we can do optional
// dependencies.
C10_DECLARE_REGISTRY(Caffe2DBRegistry, DB, const string&, Mode);
#define REGISTER_CAFFE2_DB(name, ...) \
C10_REGISTER_CLASS(Caffe2DBRegistry, name, __VA_ARGS__)
/**
* Returns a database object of the given database type, source and mode. The
* caller takes the ownership of the pointer. If the database type is not
* supported, a nullptr is returned. The caller is responsible for examining the
* validity of the pointer.
*/
inline unique_ptr<DB>
CreateDB(const string& db_type, const string& source, Mode mode) {
auto result = Caffe2DBRegistry()->Create(db_type, source, mode);
VLOG(1) << ((!result) ? "not found db " : "found db ") << db_type;
return result;
}
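// Example (sketch): creating a database and writing two records. "minidb" is
// one of the registered db types; availability depends on the build.
//
//   std::unique_ptr<DB> db = CreateDB("minidb", "/tmp/example.minidb", NEW);
//   std::unique_ptr<Transaction> txn = db->NewTransaction();
//   txn->Put("key1", std::string("value1"));
//   txn->Put("key2", std::string("value2"));
//   txn->Commit();
//   db->Close();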
/**
* Returns whether or not a database exists given the database type and path.
*/
inline bool DBExists(const string& db_type, const string& full_db_name) {
  // Warning! We assume that creating a DB throws an exception if the DB
  // does not exist. If the DB constructor does not follow this design
  // pattern, the returned output (the existence tensor) can be wrong.
try {
std::unique_ptr<DB> db(
caffe2::db::CreateDB(db_type, full_db_name, caffe2::db::READ));
return true;
} catch (...) {
return false;
}
}
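// Illustrative usage sketch (comment only): the backend name and path are
// assumptions for illustration.
//
//   if (DBExists("leveldb", "/path/to/db")) {
//     // safe to open the db for reading
//   }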
/**
* A reader wrapper for DB that also allows us to serialize it.
*/
class TORCH_API DBReader {
public:
friend class DBReaderSerializer;
DBReader() {}
DBReader(
const string& db_type,
const string& source,
const int32_t num_shards = 1,
const int32_t shard_id = 0) {
Open(db_type, source, num_shards, shard_id);
}
explicit DBReader(const DBReaderProto& proto) {
Open(proto.db_type(), proto.source());
if (proto.has_key()) {
CAFFE_ENFORCE(
cursor_->SupportsSeek(),
"Encountering a proto that needs seeking but the db type "
"does not support it.");
cursor_->Seek(proto.key());
}
num_shards_ = 1;
shard_id_ = 0;
}
explicit DBReader(std::unique_ptr<DB> db)
: db_type_("<memory-type>"),
source_("<memory-source>"),
db_(std::move(db)) {
CAFFE_ENFORCE(db_.get(), "Passed null db");
cursor_ = db_->NewCursor();
}
void Open(
const string& db_type,
const string& source,
const int32_t num_shards = 1,
const int32_t shard_id = 0) {
// Note(jiayq): resetting is needed when we re-open e.g. leveldb where no
// concurrent access is allowed.
cursor_.reset();
db_.reset();
db_type_ = db_type;
source_ = source;
db_ = CreateDB(db_type_, source_, READ);
CAFFE_ENFORCE(
db_,
"Cannot find db implementation of type ",
db_type,
" (while trying to open ",
source_,
")");
InitializeCursor(num_shards, shard_id);
}
void Open(
unique_ptr<DB>&& db,
const int32_t num_shards = 1,
const int32_t shard_id = 0) {
cursor_.reset();
db_.reset();
db_ = std::move(db);
CAFFE_ENFORCE(db_.get(), "Passed null db");
InitializeCursor(num_shards, shard_id);
}
public:
/**
   * Read a key-value pair from the db and advance to the next entry.
   * Thread safe.
*
* The string objects key and value must be created by the caller and
* explicitly passed in to this function. This saves one additional object
* copy.
*
* If the cursor reaches its end, the reader will go back to the head of
* the db. This function can be used to enable multiple input ops to read
* the same db.
*
* Note(jiayq): we loosen the definition of a const function here a little
* bit: the state of the cursor is actually changed. However, this allows
* us to pass in a DBReader to an Operator without the need of a duplicated
* output blob.
*/
void Read(string* key, string* value) const {
CAFFE_ENFORCE(cursor_ != nullptr, "Reader not initialized.");
std::unique_lock<std::mutex> mutex_lock(reader_mutex_);
*key = cursor_->key();
*value = cursor_->value();
// In sharded mode, each read skips num_shards_ records
for (const auto s : c10::irange(num_shards_)) {
(void)s; // Suppress unused variable
cursor_->Next();
if (!cursor_->Valid()) {
MoveToBeginning();
break;
}
}
}
/**
* @brief Seeks to the first key. Thread safe.
*/
void SeekToFirst() const {
CAFFE_ENFORCE(cursor_ != nullptr, "Reader not initialized.");
std::unique_lock<std::mutex> mutex_lock(reader_mutex_);
MoveToBeginning();
}
/**
* Returns the underlying cursor of the db reader.
*
* Note that if you directly use the cursor, the read will not be thread
* safe, because there is no mechanism to stop multiple threads from
* accessing the same cursor. You should consider using Read() explicitly.
*/
inline Cursor* cursor() const {
VLOG(1) << "Usually for a DBReader you should use Read() to be "
"thread safe. Consider refactoring your code.";
return cursor_.get();
}
private:
void InitializeCursor(const int32_t num_shards, const int32_t shard_id) {
CAFFE_ENFORCE(num_shards >= 1);
CAFFE_ENFORCE(shard_id >= 0);
CAFFE_ENFORCE(shard_id < num_shards);
num_shards_ = num_shards;
shard_id_ = shard_id;
cursor_ = db_->NewCursor();
SeekToFirst();
}
void MoveToBeginning() const {
cursor_->SeekToFirst();
for (const auto s : c10::irange(shard_id_)) {
(void)s; // Suppress unused variable
cursor_->Next();
CAFFE_ENFORCE(
cursor_->Valid(), "Db has fewer rows than shard id: ", s, shard_id_);
}
}
string db_type_;
string source_;
unique_ptr<DB> db_;
unique_ptr<Cursor> cursor_;
mutable std::mutex reader_mutex_;
uint32_t num_shards_{};
uint32_t shard_id_{};
C10_DISABLE_COPY_AND_ASSIGN(DBReader);
};
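// Illustrative usage sketch (comment only): reading records with a DBReader,
// optionally sharded across workers. The backend name, path, shard
// configuration, and record count are assumptions for illustration.
//
//   // Worker 1 of 4: reads every 4th record, starting at offset 1, and
//   // wraps around to the beginning when the end of the db is reached.
//   DBReader reader("leveldb", "/path/to/db", /*num_shards=*/4, /*shard_id=*/1);
//   std::string key, value;
//   for (int i = 0; i < 100; ++i) {
//     reader.Read(&key, &value);
//     // ... consume (key, value) ...
//   }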
class TORCH_API DBReaderSerializer : public BlobSerializerBase {
public:
  /**
   * Serializes a DBReader. Note that the blob has to contain a DBReader;
   * otherwise this function produces a fatal error.
   */
void Serialize(
const void* pointer,
TypeMeta typeMeta,
const string& name,
BlobSerializerBase::SerializationAcceptor acceptor) override;
};
class TORCH_API DBReaderDeserializer : public BlobDeserializerBase {
public:
void Deserialize(const BlobProto& proto, Blob* blob) override;
};
} // namespace db
} // namespace caffe2
#endif // CAFFE2_CORE_DB_H_