diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/ApproximateClock.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/ApproximateClock.h new file mode 100644 index 0000000000000000000000000000000000000000..7de498cebed6a436eac4b25ffdabadbebd700459 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/ApproximateClock.h @@ -0,0 +1,121 @@ +// Copyright 2023-present Facebook. All Rights Reserved. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#ifndef _WIN32 +#include +#endif +#if defined(C10_IOS) && defined(C10_MOBILE) +#include // for gettimeofday() +#endif + +#if defined(__i386__) || defined(__x86_64__) || defined(__amd64__) +#define C10_RDTSC +#if defined(_MSC_VER) +#include +#elif defined(__CUDACC__) || defined(__HIPCC__) +#undef C10_RDTSC +#elif defined(__clang__) +// `__rdtsc` is available by default. +// NB: This has to be first, because Clang will also define `__GNUC__` +#elif defined(__GNUC__) +#include +#else +#undef C10_RDTSC +#endif +#endif + +namespace c10 { + +using time_t = int64_t; +using steady_clock_t = std::conditional< + std::chrono::high_resolution_clock::is_steady, + std::chrono::high_resolution_clock, + std::chrono::steady_clock>::type; + +inline time_t getTimeSinceEpoch() { + auto now = std::chrono::system_clock::now().time_since_epoch(); + return std::chrono::duration_cast(now).count(); +} + +inline time_t getTime(bool allow_monotonic = false) { +#if defined(C10_IOS) && defined(C10_MOBILE) + // clock_gettime is only available on iOS 10.0 or newer. 
Unlike OS X, iOS + // can't rely on CLOCK_REALTIME, as it is defined no matter if clock_gettime + // is implemented or not + struct timeval now; + gettimeofday(&now, NULL); + return static_cast(now.tv_sec) * 1000000000 + + static_cast(now.tv_usec) * 1000; +#elif defined(_WIN32) || defined(__MACH__) + return std::chrono::duration_cast( + steady_clock_t::now().time_since_epoch()) + .count(); +#else + // clock_gettime is *much* faster than std::chrono implementation on Linux + struct timespec t {}; + auto mode = CLOCK_REALTIME; + if (allow_monotonic) { + mode = CLOCK_MONOTONIC; + } + clock_gettime(mode, &t); + return static_cast(t.tv_sec) * 1000000000 + + static_cast(t.tv_nsec); +#endif +} + +// We often do not need to capture true wall times. If a fast mechanism such +// as TSC is available we can use that instead and convert back to epoch time +// during post processing. This greatly reduce the clock's contribution to +// profiling. +// http://btorpey.github.io/blog/2014/02/18/clock-sources-in-linux/ +// https://quick-bench.com/q/r8opkkGZSJMu9wM_XTbDouq-0Io +// TODO: We should use +// `https://github.com/google/benchmark/blob/main/src/cycleclock.h` +inline auto getApproximateTime() { +#if defined(C10_RDTSC) + return static_cast(__rdtsc()); +#else + return getTime(); +#endif +} + +using approx_time_t = decltype(getApproximateTime()); +static_assert( + std::is_same::value || + std::is_same::value, + "Expected either int64_t (`getTime`) or uint64_t (some TSC reads)."); + +// Convert `getCount` results to Nanoseconds since unix epoch. 
+class C10_API ApproximateClockToUnixTimeConverter final { + public: + ApproximateClockToUnixTimeConverter(); + std::function makeConverter(); + + struct UnixAndApproximateTimePair { + time_t t_; + approx_time_t approx_t_; + }; + static UnixAndApproximateTimePair measurePair(); + + private: + static constexpr size_t replicates = 1001; + using time_pairs = std::array; + time_pairs measurePairs(); + + time_pairs start_times_; +}; + +} // namespace c10 diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/BFloat16.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/BFloat16.h new file mode 100644 index 0000000000000000000000000000000000000000..4a431285973a20034d2b72a8c11e23b9fe10900e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/BFloat16.h @@ -0,0 +1,116 @@ +#pragma once + +// Defines the bloat16 type (brain floating-point). This representation uses +// 1 bit for the sign, 8 bits for the exponent and 7 bits for the mantissa. + +#include +#include +#include + +#if defined(__CUDACC__) && !defined(USE_ROCM) +#include +#endif + +#if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS) +#if defined(CL_SYCL_LANGUAGE_VERSION) +#include // for SYCL 1.2.1 +#else +#include // for SYCL 2020 +#endif +#include +#endif + +namespace c10 { + +namespace detail { +inline C10_HOST_DEVICE float f32_from_bits(uint16_t src) { + float res = 0; + uint32_t tmp = src; + tmp <<= 16; + +#if defined(USE_ROCM) + float* tempRes; + + // We should be using memcpy in order to respect the strict aliasing rule + // but it fails in the HIP environment. + tempRes = reinterpret_cast(&tmp); + res = *tempRes; +#else + std::memcpy(&res, &tmp, sizeof(tmp)); +#endif + + return res; +} + +inline C10_HOST_DEVICE uint16_t bits_from_f32(float src) { + uint32_t res = 0; + +#if defined(USE_ROCM) + // We should be using memcpy in order to respect the strict aliasing rule + // but it fails in the HIP environment. 
+ uint32_t* tempRes = reinterpret_cast(&src); + res = *tempRes; +#else + std::memcpy(&res, &src, sizeof(res)); +#endif + + return res >> 16; +} + +inline C10_HOST_DEVICE uint16_t round_to_nearest_even(float src) { +#if defined(USE_ROCM) + if (src != src) { +#elif defined(_MSC_VER) + if (isnan(src)) { +#else + if (std::isnan(src)) { +#endif + return UINT16_C(0x7FC0); + } else { + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + union { + uint32_t U32; + float F32; + }; + + F32 = src; + uint32_t rounding_bias = ((U32 >> 16) & 1) + UINT32_C(0x7FFF); + return static_cast((U32 + rounding_bias) >> 16); + } +} +} // namespace detail + +struct alignas(2) BFloat16 { + uint16_t x; + + // HIP wants __host__ __device__ tag, CUDA does not +#if defined(USE_ROCM) + C10_HOST_DEVICE BFloat16() = default; +#else + BFloat16() = default; +#endif + + struct from_bits_t {}; + static constexpr C10_HOST_DEVICE from_bits_t from_bits() { + return from_bits_t(); + } + + constexpr C10_HOST_DEVICE BFloat16(unsigned short bits, from_bits_t) + : x(bits){}; + inline C10_HOST_DEVICE BFloat16(float value); + inline C10_HOST_DEVICE operator float() const; + +#if defined(__CUDACC__) && !defined(USE_ROCM) + inline C10_HOST_DEVICE BFloat16(const __nv_bfloat16& value); + explicit inline C10_HOST_DEVICE operator __nv_bfloat16() const; +#endif + +#if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS) + inline C10_HOST_DEVICE BFloat16(const sycl::ext::oneapi::bfloat16& value); + explicit inline C10_HOST_DEVICE operator sycl::ext::oneapi::bfloat16() const; +#endif +}; + +} // namespace c10 + +#include // IWYU pragma: keep diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Bitset.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Bitset.h new file mode 100644 index 0000000000000000000000000000000000000000..fedca4f02aeab35fd75fdf722b5940bc51d1f69f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Bitset.h @@ -0,0 +1,120 @@ 
+#pragma once + +#include +#include +#include +#if defined(_MSC_VER) +#include +#endif + +namespace c10 { +namespace utils { + +/** + * This is a simple bitset class with sizeof(long long int) bits. + * You can set bits, unset bits, query bits by index, + * and query for the first set bit. + * Before using this class, please also take a look at std::bitset, + * which has more functionality and is more generic. It is probably + * a better fit for your use case. The sole reason for c10::utils::bitset + * to exist is that std::bitset misses a find_first_set() method. + */ +struct bitset final { + private: +#if defined(_MSC_VER) + // MSVCs _BitScanForward64 expects int64_t + using bitset_type = int64_t; +#else + // POSIX ffsll expects long long int + using bitset_type = long long int; +#endif + public: + static constexpr size_t NUM_BITS() { + return 8 * sizeof(bitset_type); + } + + constexpr bitset() noexcept = default; + constexpr bitset(const bitset&) noexcept = default; + constexpr bitset(bitset&&) noexcept = default; + // there is an issure for gcc 5.3.0 when define default function as constexpr + // see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=68754. + bitset& operator=(const bitset&) noexcept = default; + bitset& operator=(bitset&&) noexcept = default; + + constexpr void set(size_t index) noexcept { + bitset_ |= (static_cast(1) << index); + } + + constexpr void unset(size_t index) noexcept { + bitset_ &= ~(static_cast(1) << index); + } + + constexpr bool get(size_t index) const noexcept { + return bitset_ & (static_cast(1) << index); + } + + constexpr bool is_entirely_unset() const noexcept { + return 0 == bitset_; + } + + // Call the given functor with the index of each bit that is set + template + void for_each_set_bit(Func&& func) const { + bitset cur = *this; + size_t index = cur.find_first_set(); + while (0 != index) { + // -1 because find_first_set() is not one-indexed. 
+ index -= 1; + func(index); + cur.unset(index); + index = cur.find_first_set(); + } + } + + private: + // Return the index of the first set bit. The returned index is one-indexed + // (i.e. if the very first bit is set, this function returns '1'), and a + // return of '0' means that there was no bit set. + size_t find_first_set() const { +#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64)) + unsigned long result; + bool has_bits_set = (0 != _BitScanForward64(&result, bitset_)); + if (!has_bits_set) { + return 0; + } + return result + 1; +#elif defined(_MSC_VER) && defined(_M_IX86) + unsigned long result; + if (static_cast(bitset_) != 0) { + bool has_bits_set = + (0 != _BitScanForward(&result, static_cast(bitset_))); + if (!has_bits_set) { + return 0; + } + return result + 1; + } else { + bool has_bits_set = + (0 != _BitScanForward(&result, static_cast(bitset_ >> 32))); + if (!has_bits_set) { + return 32; + } + return result + 33; + } +#else + return __builtin_ffsll(bitset_); +#endif + } + + friend bool operator==(bitset lhs, bitset rhs) noexcept { + return lhs.bitset_ == rhs.bitset_; + } + + bitset_type bitset_{0}; +}; + +inline bool operator!=(bitset lhs, bitset rhs) noexcept { + return !(lhs == rhs); +} + +} // namespace utils +} // namespace c10 diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/DeadlockDetection.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/DeadlockDetection.h new file mode 100644 index 0000000000000000000000000000000000000000..da177995ad74e9dd5841e0deb1fcbf05f8408358 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/DeadlockDetection.h @@ -0,0 +1,50 @@ +#pragma once + +#include + +/// This file provides some simple utilities for detecting common deadlocks in +/// PyTorch. For now, we focus exclusively on detecting Python GIL deadlocks, +/// as the GIL is a wide ranging lock that is taken out in many situations. 
+/// The basic strategy is before performing an operation that may block, you +/// can use TORCH_ASSERT_NO_GIL_WITHOUT_PYTHON_DEP() to assert that the GIL is +/// not held. This macro is to be used in contexts where no static dependency +/// on Python is available (we will handle indirecting a virtual call for you). +/// +/// If the GIL is held by a torchdeploy interpreter, we always report false. +/// If you are in a context where Python bindings are available, it's better +/// to directly assert on PyGILState_Check (as it avoids a vcall and also +/// works correctly with torchdeploy.) + +namespace c10 { + +#define TORCH_ASSERT_NO_GIL_WITHOUT_PYTHON_DEP() \ + TORCH_INTERNAL_ASSERT( \ + !c10::impl::check_python_gil(), \ + "Holding GIL before a blocking operation! Please release the GIL before blocking, or see https://github.com/pytorch/pytorch/issues/56297 for how to release the GIL for destructors of objects") + +namespace impl { + +C10_API bool check_python_gil(); + +struct C10_API PythonGILHooks { + virtual ~PythonGILHooks() = default; + // Returns true if we hold the GIL. If not linked against Python we + // always return false. + virtual bool check_python_gil() const = 0; +}; + +C10_API void SetPythonGILHooks(PythonGILHooks* factory); + +// DO NOT call this registerer from a torch deploy instance! 
You will clobber +// other registrations +struct C10_API PythonGILHooksRegisterer { + explicit PythonGILHooksRegisterer(PythonGILHooks* factory) { + SetPythonGILHooks(factory); + } + ~PythonGILHooksRegisterer() { + SetPythonGILHooks(nullptr); + } +}; + +} // namespace impl +} // namespace c10 diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Deprecated.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Deprecated.h new file mode 100644 index 0000000000000000000000000000000000000000..88440a0242eb4e9e87433278006863fd38c5450d --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Deprecated.h @@ -0,0 +1,102 @@ +#pragma once + +/** + * This file provides portable macros for marking declarations + * as deprecated. You should generally use C10_DEPRECATED, + * except when marking 'using' declarations as deprecated, + * in which case you should use C10_DEFINE_DEPRECATED_USING + * (due to portability concerns). + */ + +// Sample usage: +// +// C10_DEPRECATED void bad_func(); +// struct C10_DEPRECATED BadStruct { +// ... +// }; + +// NB: __cplusplus doesn't work for MSVC, so for now MSVC always uses +// the "__declspec(deprecated)" implementation and not the C++14 +// "[[deprecated]]" attribute. We tried enabling "[[deprecated]]" for C++14 on +// MSVC, but ran into issues with some older MSVC versions. +#if (defined(__cplusplus) && __cplusplus >= 201402L) +#define C10_DEPRECATED [[deprecated]] +#define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]] +#elif defined(__GNUC__) +#define C10_DEPRECATED __attribute__((deprecated)) +// TODO Is there some way to implement this? 
+#define C10_DEPRECATED_MESSAGE(message) __attribute__((deprecated)) + +#elif defined(_MSC_VER) +#define C10_DEPRECATED __declspec(deprecated) +#define C10_DEPRECATED_MESSAGE(message) __declspec(deprecated(message)) +#else +#warning "You need to implement C10_DEPRECATED for this compiler" +#define C10_DEPRECATED +#endif + +// Sample usage: +// +// C10_DEFINE_DEPRECATED_USING(BadType, int) +// +// which is the portable version of +// +// using BadType [[deprecated]] = int; + +// technically [[deprecated]] syntax is from c++14 standard, but it works in +// many compilers. +#if defined(__has_cpp_attribute) +#if __has_cpp_attribute(deprecated) && !defined(__CUDACC__) +#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \ + using TypeName [[deprecated]] = TypeThingy; +#endif +#endif + +#if defined(_MSC_VER) +#if defined(__CUDACC__) +// neither [[deprecated]] nor __declspec(deprecated) work on nvcc on Windows; +// you get the error: +// +// error: attribute does not apply to any entity +// +// So we just turn the macro off in this case. +#if defined(C10_DEFINE_DEPRECATED_USING) +#undef C10_DEFINE_DEPRECATED_USING +#endif +#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \ + using TypeName = TypeThingy; +#else +// [[deprecated]] does work in windows without nvcc, though msc doesn't support +// `__has_cpp_attribute` when c++14 is supported, otherwise +// __declspec(deprecated) is used as the alternative. +#ifndef C10_DEFINE_DEPRECATED_USING +#if defined(_MSVC_LANG) && _MSVC_LANG >= 201402L +#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \ + using TypeName [[deprecated]] = TypeThingy; +#else +#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \ + using TypeName = __declspec(deprecated) TypeThingy; +#endif +#endif +#endif +#endif + +#if !defined(C10_DEFINE_DEPRECATED_USING) && defined(__GNUC__) +// nvcc has a bug where it doesn't understand __attribute__((deprecated)) +// declarations even when the host compiler supports it. 
We'll only use this gcc +// attribute when not cuda, and when using a GCC compiler that doesn't support +// the c++14 syntax we checked for above (available in __GNUC__ >= 5) +#if !defined(__CUDACC__) +#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \ + using TypeName __attribute__((deprecated)) = TypeThingy; +#else +// using cuda + gcc < 5, neither deprecated syntax is available so turning off. +#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \ + using TypeName = TypeThingy; +#endif +#endif + +#if !defined(C10_DEFINE_DEPRECATED_USING) +#warning "You need to implement C10_DEFINE_DEPRECATED_USING for this compiler" +#define C10_DEFINE_DEPRECATED_USING +#endif diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Exception.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Exception.h new file mode 100644 index 0000000000000000000000000000000000000000..6cf142c46095c5516de83530efef8da86df100fe --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Exception.h @@ -0,0 +1,715 @@ +#ifndef C10_UTIL_EXCEPTION_H_ +#define C10_UTIL_EXCEPTION_H_ + +#include +#include + +#include +#include +#include +#include +#include + +#if defined(_MSC_VER) && _MSC_VER <= 1900 +#define __func__ __FUNCTION__ +#endif + +namespace c10 { + +/// The primary ATen error class. +/// Provides a complete error message with source location information via +/// `what()`, and a more concise message via `what_without_backtrace()`. +/// Don't throw this directly; use TORCH_CHECK/TORCH_INTERNAL_ASSERT instead. +/// +/// NB: c10::Error is handled specially by the default torch to suppress the +/// backtrace, see torch/csrc/Exceptions.h +class C10_API Error : public std::exception { + // The actual error message. + std::string msg_; + + // Context for the message (in order of decreasing specificity). 
Context will + // be automatically formatted appropriately, so it is not necessary to add + // extra leading/trailing newlines to strings inside this vector + std::vector context_; + + // The C++ backtrace at the point when this exception was raised. This + // may be empty if there is no valid backtrace. (We don't use optional + // here to reduce the dependencies this file has.) + std::string backtrace_; + + // These two are derived fields from msg_stack_ and backtrace_, but we need + // fields for the strings so that we can return a const char* (as the + // signature of std::exception requires). Currently, the invariant + // is that these fields are ALWAYS populated consistently with respect + // to msg_stack_ and backtrace_. + std::string what_; + std::string what_without_backtrace_; + + // This is a little debugging trick: you can stash a relevant pointer + // in caller, and then when you catch the exception, you can compare + // against pointers you have on hand to get more information about + // where the exception came from. In Caffe2, this is used to figure + // out which operator raised an exception. + const void* caller_; + + public: + // PyTorch-style Error constructor. NB: the implementation of this + // is actually in Logging.cpp + Error(SourceLocation source_location, std::string msg); + + // Caffe2-style error message + Error( + const char* file, + const uint32_t line, + const char* condition, + const std::string& msg, + const std::string& backtrace, + const void* caller = nullptr); + + // Base constructor + Error(std::string msg, std::string backtrace, const void* caller = nullptr); + + // Add some new context to the message stack. The last added context + // will be formatted at the end of the context list upon printing. + // WARNING: This method is O(n) in the size of the stack, so don't go + // wild adding a ridiculous amount of context to error messages. 
+ void add_context(std::string msg); + + const std::string& msg() const { + return msg_; + } + + const std::vector& context() const { + return context_; + } + + const std::string& backtrace() const { + return backtrace_; + } + + /// Returns the complete error message, including the source location. + /// The returned pointer is invalidated if you call add_context() on + /// this object. + const char* what() const noexcept override { + return what_.c_str(); + } + + const void* caller() const noexcept { + return caller_; + } + + /// Returns only the error message string, without source location. + /// The returned pointer is invalidated if you call add_context() on + /// this object. + virtual const char* what_without_backtrace() const noexcept { + return what_without_backtrace_.c_str(); + } + + private: + void refresh_what(); + std::string compute_what(bool include_backtrace) const; +}; + +class C10_API Warning { + public: + class C10_API UserWarning {}; + class C10_API DeprecationWarning {}; + + using warning_variant_t = std::variant; + + Warning( + warning_variant_t type, + const SourceLocation& source_location, + std::string msg, + bool verbatim); + + Warning( + warning_variant_t type, + SourceLocation source_location, + const char* msg, + bool verbatim); + + Warning( + warning_variant_t type, + SourceLocation source_location, + ::c10::detail::CompileTimeEmptyString msg, + bool verbatim); + + // Getters for members + warning_variant_t type() const; + const SourceLocation& source_location() const; + const std::string& msg() const; + bool verbatim() const; + + private: + // The type of warning + warning_variant_t type_; + + // Where the warning happened. + SourceLocation source_location_; + + // The actual warning message. + std::string msg_; + + // See note: [Verbatim Warnings] + bool verbatim_; +}; + +using UserWarning = Warning::UserWarning; +using DeprecationWarning = Warning::DeprecationWarning; + +// Issue a warning with a given message. 
Dispatched to the current +// warning handler. +void C10_API warn(const Warning& warning); + +class C10_API WarningHandler { + public: + virtual ~WarningHandler() = default; + /// The default warning handler. Prints the message to stderr. + virtual void process(const Warning& warning); +}; + +namespace WarningUtils { + +// Note: [Verbatim Warnings] +// Warnings originating in C++ code can appear out-of-place to Python users: +// a user runs a line in Python, but the warning references a line in C++. +// Some parts of PyTorch, like the JIT, are cognizant of this mismatch +// and take care to map warnings back to the user's program, but most +// of PyTorch simply throws a context-free warning. To allow warning +// handlers to add context where appropriate, warn takes the +// "verbatim" flag. When this is false a warning handler might append +// the C++ warning to a Python warning message that relates the warning +// back to the user's program. Callers who have already accounted for +// context in their warnings should set verbatim to true so their warnings +// appear without modification. + +/// Sets the global warning handler. This is not thread-safe, so it should +/// generally be called once during initialization or while holding the GIL +/// for programs that use python. +/// User is responsible for keeping the WarningHandler alive until +/// it is not needed. +C10_API void set_warning_handler(WarningHandler* handler) noexcept(true); +/// Gets the global warning handler. +C10_API WarningHandler* get_warning_handler() noexcept(true); + +class C10_API WarningHandlerGuard { + WarningHandler* prev_handler_; + + public: + WarningHandlerGuard(WarningHandler* new_handler) + : prev_handler_(c10::WarningUtils::get_warning_handler()) { + c10::WarningUtils::set_warning_handler(new_handler); + } + ~WarningHandlerGuard() { + c10::WarningUtils::set_warning_handler(prev_handler_); + } +}; + +/// The TORCH_WARN_ONCE macro is difficult to test for. 
Use +/// setWarnAlways(true) to turn it into TORCH_WARN, which can be +/// tested for more easily. +C10_API void set_warnAlways(bool) noexcept(true); +C10_API bool get_warnAlways() noexcept(true); + +// A RAII guard that sets warn_always (not thread-local) on +// construction, and sets it back to the original value upon destruction. +struct C10_API WarnAlways { + public: + explicit WarnAlways(bool setting = true); + ~WarnAlways(); + + private: + bool prev_setting; +}; + +} // namespace WarningUtils + +// Like Error, but we always report the C++ backtrace, instead of only +// reporting when TORCH_SHOW_CPP_STACKTRACES +class C10_API ErrorAlwaysShowCppStacktrace : public Error { + using Error::Error; + const char* what_without_backtrace() const noexcept override { + return what(); + } +}; + +// Used in ATen for out-of-bound indices that can reasonably only be detected +// lazily inside a kernel (See: advanced indexing). These turn into +// IndexError when they cross to Python. +class C10_API IndexError : public Error { + using Error::Error; +}; + +// Used in ATen for invalid values. These turn into +// ValueError when they cross to Python. +class C10_API ValueError : public Error { + using Error::Error; +}; + +// Used in ATen for invalid types. These turn into +// TypeError when they cross to Python. +class C10_API TypeError : public Error { + using Error::Error; +}; + +// Used in ATen for functionality that is not implemented. These turn into +// NotImplementedError when they cross to Python. +class C10_API NotImplementedError : public Error { + using Error::Error; +}; + +// Used in ATen for non finite indices. These turn into +// ExitException when they cross to Python. +class C10_API EnforceFiniteError : public Error { + using Error::Error; +}; + +// Used in Onnxifi backend lowering. These turn into +// ExitException when they cross to Python. 
+class C10_API OnnxfiBackendSystemError : public Error { + using Error::Error; +}; + +// Used for numerical errors from the linalg module. These +// turn into LinAlgError when they cross into Python. +class C10_API LinAlgError : public Error { + using Error::Error; +}; + +class C10_API OutOfMemoryError : public Error { + using Error::Error; +}; + +// Base error type for all distributed errors. +// These turn into DistError when they cross into Python. +class C10_API DistError : public Error { + using Error::Error; +}; + +// Used for collective communication library errors from the distributed module. +// These turn into DistBackendError when they cross into Python. +class C10_API DistBackendError : public DistError { + using DistError::DistError; +}; + +// Used for errors originating from the store. +// These turn into DistStoreError when they cross into Python. +class C10_API DistStoreError : public DistError { + using DistError::DistError; +}; + +// Used for errors originating from the TCP/IP stack and not from collective +// libraries. These turn into DistNetworkError when they cross into Python. +class C10_API DistNetworkError : public DistError { + using DistError::DistError; +}; + +// A utility function to return an exception std::string by prepending its +// exception type before its what() content +C10_API std::string GetExceptionString(const std::exception& e); + +} // namespace c10 + +// Private helper macro for implementing TORCH_INTERNAL_ASSERT and TORCH_CHECK +// +// Note: In the debug build With MSVC, __LINE__ might be of long type (a.k.a +// int32_t), which is different from the definition of `SourceLocation` that +// requires unsigned int (a.k.a uint32_t) and may cause a compile error with the +// message: error C2397: conversion from 'long' to 'uint32_t' requires a +// narrowing conversion Here the static cast is used to pass the build. 
if this +// is used inside a lambda the __func__ macro expands to operator(), which isn't +// very useful, but hard to fix in a macro so suppressing the warning. +#define C10_THROW_ERROR(err_type, msg) \ + throw ::c10::err_type( \ + {__func__, __FILE__, static_cast(__LINE__)}, msg) + +#define C10_BUILD_ERROR(err_type, msg) \ + ::c10::err_type({__func__, __FILE__, static_cast(__LINE__)}, msg) + +// Private helper macro for workaround MSVC misexpansion of nested macro +// invocations involving __VA_ARGS__. See +// https://stackoverflow.com/questions/5134523/msvc-doesnt-expand-va-args-correctly +#define C10_EXPAND_MSVC_WORKAROUND(x) x + +// On nvcc, C10_UNLIKELY thwarts missing return statement analysis. In cases +// where the unlikely expression may be a constant, use this macro to ensure +// return statement analysis keeps working (at the cost of not getting the +// likely/unlikely annotation on nvcc). +// https://github.com/pytorch/pytorch/issues/21418 +// +// Currently, this is only used in the error reporting macros below. If you +// want to use it more generally, move me to Macros.h +// +// TODO: Brian Vaughan observed that we might be able to get this to work on +// nvcc by writing some sort of C++ overload that distinguishes constexpr inputs +// from non-constexpr. Since there isn't any evidence that losing C10_UNLIKELY +// in nvcc is causing us perf problems, this is not yet implemented, but this +// might be an interesting piece of C++ code for an intrepid bootcamper to +// write. +#if defined(__CUDACC__) +#define C10_UNLIKELY_OR_CONST(e) e +#else +#define C10_UNLIKELY_OR_CONST(e) C10_UNLIKELY(e) +#endif + +// ---------------------------------------------------------------------------- +// Error reporting macros +// ---------------------------------------------------------------------------- + +#ifdef STRIP_ERROR_MESSAGES +#define TORCH_RETHROW(e, ...) throw +#else +#define TORCH_RETHROW(e, ...) 
\ + do { \ + e.add_context(::c10::str(__VA_ARGS__)); \ + throw; \ + } while (false) +#endif + +// A utility macro to provide assert()-like functionality; that is, enforcement +// of internal invariants in code. It supports an arbitrary number of extra +// arguments (evaluated only on failure), which will be printed in the assert +// failure message using operator<< (this is useful to print some variables +// which may be useful for debugging.) +// +// Usage: +// TORCH_INTERNAL_ASSERT(should_be_true); +// TORCH_INTERNAL_ASSERT(x == 0, "x = ", x); +// +// Assuming no bugs in PyTorch, the conditions tested by this macro should +// always be true; e.g., it should be possible to disable all of these +// conditions without changing observable user behavior. If you would like to +// do error reporting for user input, please use TORCH_CHECK instead. +// +// NOTE: It is SAFE to use this macro in production code; on failure, this +// simply raises an exception, it does NOT unceremoniously quit the process +// (unlike assert()). +// +#ifdef STRIP_ERROR_MESSAGES +#define TORCH_INTERNAL_ASSERT(cond, ...) \ + if (C10_UNLIKELY_OR_CONST(!(cond))) { \ + ::c10::detail::torchCheckFail( \ + __func__, \ + __FILE__, \ + static_cast(__LINE__), \ + #cond " INTERNAL ASSERT FAILED at " C10_STRINGIZE(__FILE__)); \ + } +#else +// It would be nice if we could build a combined string literal out of +// the TORCH_INTERNAL_ASSERT prefix and a user-provided string literal +// as the first argument, but there doesn't seem to be any good way to +// do that while still supporting having a first argument that isn't a +// string literal. +#define TORCH_INTERNAL_ASSERT(cond, ...) \ + if (C10_UNLIKELY_OR_CONST(!(cond))) { \ + ::c10::detail::torchInternalAssertFail( \ + __func__, \ + __FILE__, \ + static_cast(__LINE__), \ + #cond \ + " INTERNAL ASSERT FAILED at " C10_STRINGIZE(__FILE__) ":" C10_STRINGIZE( \ + __LINE__) ", please report a bug to PyTorch. 
", \ + c10::str(__VA_ARGS__)); \ + } +#endif + +// A utility macro to make it easier to test for error conditions from user +// input. Like TORCH_INTERNAL_ASSERT, it supports an arbitrary number of extra +// arguments (evaluated only on failure), which will be printed in the error +// message using operator<< (e.g., you can pass any object which has +// operator<< defined. Most objects in PyTorch have these definitions!) +// +// Usage: +// TORCH_CHECK(should_be_true); // A default error message will be provided +// // in this case; but we recommend writing an +// // explicit error message, as it is more +// // user friendly. +// TORCH_CHECK(x == 0, "Expected x to be 0, but got ", x); +// +// On failure, this macro will raise an exception. If this exception propagates +// to Python, it will convert into a Python RuntimeError. +// +// NOTE: It is SAFE to use this macro in production code; on failure, this +// simply raises an exception, it does NOT unceremoniously quit the process +// (unlike CHECK() from glog.) +// +#define TORCH_CHECK_WITH(error_t, cond, ...) \ + TORCH_CHECK_WITH_MSG(error_t, cond, "", __VA_ARGS__) + +#ifdef STRIP_ERROR_MESSAGES +#define TORCH_CHECK_MSG(cond, type, ...) \ + (#cond #type " CHECK FAILED at " C10_STRINGIZE(__FILE__)) +#define TORCH_CHECK_WITH_MSG(error_t, cond, type, ...) \ + if (C10_UNLIKELY_OR_CONST(!(cond))) { \ + C10_THROW_ERROR(Error, TORCH_CHECK_MSG(cond, type, __VA_ARGS__)); \ + } +#else +namespace c10 { +namespace detail { +template +decltype(auto) torchCheckMsgImpl(const char* /*msg*/, const Args&... args) { + return ::c10::str(args...); +} +inline C10_API const char* torchCheckMsgImpl(const char* msg) { + return msg; +} +// If there is just 1 user-provided C-string argument, use it. +inline C10_API const char* torchCheckMsgImpl( + const char* /*msg*/, + const char* args) { + return args; +} +} // namespace detail +} // namespace c10 + +#define TORCH_CHECK_MSG(cond, type, ...) 
\ + (::c10::detail::torchCheckMsgImpl( \ + "Expected " #cond \ + " to be true, but got false. " \ + "(Could this error message be improved? If so, " \ + "please report an enhancement request to PyTorch.)", \ + ##__VA_ARGS__)) +#define TORCH_CHECK_WITH_MSG(error_t, cond, type, ...) \ + if (C10_UNLIKELY_OR_CONST(!(cond))) { \ + C10_THROW_ERROR(error_t, TORCH_CHECK_MSG(cond, type, __VA_ARGS__)); \ + } +#endif + +namespace c10 { +namespace detail { + +[[noreturn]] C10_API void torchCheckFail( + const char* func, + const char* file, + uint32_t line, + const std::string& msg); +[[noreturn]] C10_API void torchCheckFail( + const char* func, + const char* file, + uint32_t line, + const char* msg); + +// The c10::str() call that creates userMsg can have 1 of 3 return +// types depending on the number and types of arguments passed to +// TORCH_INTERNAL_ASSERT. 0 arguments will get a +// CompileTimeEmptyString, 1 const char * will be passed straight +// through, and anything else will get converted to std::string. +[[noreturn]] C10_API void torchInternalAssertFail( + const char* func, + const char* file, + uint32_t line, + const char* condMsg, + const char* userMsg); +[[noreturn]] inline C10_API void torchInternalAssertFail( + const char* func, + const char* file, + uint32_t line, + const char* condMsg, + ::c10::detail::CompileTimeEmptyString /*userMsg*/) { + torchCheckFail(func, file, line, condMsg); +} +[[noreturn]] C10_API void torchInternalAssertFail( + const char* func, + const char* file, + uint32_t line, + const char* condMsg, + const std::string& userMsg); + +} // namespace detail +} // namespace c10 + +#ifdef STRIP_ERROR_MESSAGES +#define TORCH_CHECK(cond, ...) \ + if (C10_UNLIKELY_OR_CONST(!(cond))) { \ + ::c10::detail::torchCheckFail( \ + __func__, \ + __FILE__, \ + static_cast(__LINE__), \ + TORCH_CHECK_MSG(cond, "", __VA_ARGS__)); \ + } +#else +#define TORCH_CHECK(cond, ...) 
\ + if (C10_UNLIKELY_OR_CONST(!(cond))) { \ + ::c10::detail::torchCheckFail( \ + __func__, \ + __FILE__, \ + static_cast(__LINE__), \ + TORCH_CHECK_MSG(cond, "", ##__VA_ARGS__)); \ + } +#endif + +// An utility macro that does what `TORCH_CHECK` does if compiled in the host +// code, otherwise does nothing. Supposed to be used in the code shared between +// host and device code as an alternative for `TORCH_CHECK`. +#if defined(__CUDACC__) || defined(__HIPCC__) +#define TORCH_CHECK_IF_NOT_ON_CUDA(cond, ...) +#else +#define TORCH_CHECK_IF_NOT_ON_CUDA(cond, ...) TORCH_CHECK(cond, ##__VA_ARGS__) +#endif + +// Debug only version of TORCH_INTERNAL_ASSERT. This macro only checks in debug +// build, and does nothing in release build. It is appropriate to use +// in situations where you want to add an assert to a hotpath, but it is +// too expensive to run this assert on production builds. +#ifdef NDEBUG +// Optimized version - generates no code. +#define TORCH_INTERNAL_ASSERT_DEBUG_ONLY(...) \ + while (false) \ + C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(__VA_ARGS__)) +#else +#define TORCH_INTERNAL_ASSERT_DEBUG_ONLY(...) \ + C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(__VA_ARGS__)) +#endif + +// TODO: We're going to get a lot of similar looking string literals +// this way; check if this actually affects binary size. + +// Like TORCH_CHECK, but raises LinAlgError instead of Error. +#define TORCH_CHECK_LINALG(cond, ...) \ + TORCH_CHECK_WITH_MSG(LinAlgError, cond, "LINALG", __VA_ARGS__) + +// Like TORCH_CHECK, but raises IndexErrors instead of Errors. +#define TORCH_CHECK_INDEX(cond, ...) \ + TORCH_CHECK_WITH_MSG(IndexError, cond, "INDEX", __VA_ARGS__) + +// Like TORCH_CHECK, but raises ValueErrors instead of Errors. +#define TORCH_CHECK_VALUE(cond, ...) \ + TORCH_CHECK_WITH_MSG(ValueError, cond, "VALUE", __VA_ARGS__) + +// Like TORCH_CHECK, but raises TypeErrors instead of Errors. +#define TORCH_CHECK_TYPE(cond, ...) 
\ + TORCH_CHECK_WITH_MSG(TypeError, cond, "TYPE", __VA_ARGS__) + +// Like TORCH_CHECK, but raises NotImplementedErrors instead of Errors. +#define TORCH_CHECK_NOT_IMPLEMENTED(cond, ...) \ + TORCH_CHECK_WITH_MSG(NotImplementedError, cond, "TYPE", __VA_ARGS__) + +#define TORCH_CHECK_ALWAYS_SHOW_CPP_STACKTRACE(cond, ...) \ + TORCH_CHECK_WITH_MSG( \ + ErrorAlwaysShowCppStacktrace, cond, "TYPE", ##__VA_ARGS__) + +#ifdef STRIP_ERROR_MESSAGES +#define WARNING_MESSAGE_STRING(...) \ + ::c10::detail::CompileTimeEmptyString {} +#else +#define WARNING_MESSAGE_STRING(...) ::c10::str(__VA_ARGS__) +#endif + +// Report a warning to the user. Accepts an arbitrary number of extra +// arguments which are concatenated into the warning message using operator<< +// +#ifdef DISABLE_WARN +#define _TORCH_WARN_WITH(...) ((void)0); +#else +#define _TORCH_WARN_WITH(warning_t, ...) \ + ::c10::warn(::c10::Warning( \ + warning_t(), \ + {__func__, __FILE__, static_cast(__LINE__)}, \ + WARNING_MESSAGE_STRING(__VA_ARGS__), \ + false)); +#endif + +#define TORCH_WARN(...) _TORCH_WARN_WITH(::c10::UserWarning, __VA_ARGS__); + +#define TORCH_WARN_DEPRECATION(...) \ + _TORCH_WARN_WITH(::c10::DeprecationWarning, __VA_ARGS__); + +// Report a warning to the user only once. Accepts an arbitrary number of extra +// arguments which are concatenated into the warning message using operator<< +// +#define _TORCH_WARN_ONCE(...) \ + C10_UNUSED static const auto C10_ANONYMOUS_VARIABLE(torch_warn_once_) = \ + [&] { \ + TORCH_WARN(__VA_ARGS__); \ + return true; \ + }() + +#ifdef DISABLE_WARN +#define TORCH_WARN_ONCE(...) ((void)0); +#else +#define TORCH_WARN_ONCE(...) \ + if (::c10::WarningUtils::get_warnAlways()) { \ + TORCH_WARN(__VA_ARGS__); \ + } else { \ + _TORCH_WARN_ONCE(__VA_ARGS__); \ + } +#endif + +// Report an error with a specific argument +// NOTE: using the argument name in TORCH_CHECK's message is preferred +#define TORCH_CHECK_ARG(cond, argN, ...) 
\ + TORCH_CHECK(cond, "invalid argument ", argN, ": ", __VA_ARGS__) + +// ---------------------------------------------------------------------------- +// Deprecated macros +// ---------------------------------------------------------------------------- + +namespace c10 { +namespace detail { + +/* +// Deprecation disabled until we fix sites in our codebase +C10_DEPRECATED_MESSAGE("AT_ERROR(msg) is deprecated, use TORCH_CHECK(false, msg) +instead.") +*/ +inline void deprecated_AT_ERROR() {} + +/* +// Deprecation disabled until we fix sites in our codebase +C10_DEPRECATED_MESSAGE("AT_ASSERT is deprecated, if you mean to indicate an +internal invariant failure, use " \ + "TORCH_INTERNAL_ASSERT instead; if you mean to do user +error checking, use " \ "TORCH_CHECK. See +https://github.com/pytorch/pytorch/issues/20287 for more details.") +*/ +inline void deprecated_AT_ASSERT() {} + +/* +// Deprecation disabled until we fix sites in our codebase +C10_DEPRECATED_MESSAGE("AT_ASSERTM is deprecated, if you mean to indicate an +internal invariant failure, use " \ + "TORCH_INTERNAL_ASSERT instead; if you mean to do user +error checking, use " \ "TORCH_CHECK. See +https://github.com/pytorch/pytorch/issues/20287 for more details.") +*/ +inline void deprecated_AT_ASSERTM() {} + +} // namespace detail +} // namespace c10 + +// Deprecated alias; this alias was deprecated because people kept mistakenly +// using it for user error checking. Use TORCH_INTERNAL_ASSERT or TORCH_CHECK +// instead. See https://github.com/pytorch/pytorch/issues/20287 for more +// details. +#define AT_ASSERT(...) \ + do { \ + ::c10::detail::deprecated_AT_ASSERT(); \ + C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(__VA_ARGS__)); \ + } while (false) + +// Deprecated alias, like AT_ASSERT. The new TORCH_INTERNAL_ASSERT macro +// supports both 0-ary and variadic calls, so having a separate +// message-accepting macro is not necessary. 
+// +// NB: we MUST include cond explicitly here, as MSVC will miscompile the macro +// expansion, shunting all of __VA_ARGS__ to cond. An alternate workaround +// can be seen at +// https://stackoverflow.com/questions/5134523/msvc-doesnt-expand-va-args-correctly +#define AT_ASSERTM(cond, ...) \ + do { \ + ::c10::detail::deprecated_AT_ASSERTM(); \ + C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(cond, __VA_ARGS__)); \ + } while (false) + +// Deprecated alias; this alias was deprecated because it represents extra API +// surface that makes it hard for people to understand what macro to use. +// Use TORCH_CHECK(false, ...) or TORCH_INTERNAL_ASSERT(false, ...) to +// unconditionally fail at a line of code. +#define AT_ERROR(...) \ + do { \ + ::c10::detail::deprecated_AT_ERROR(); \ + C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \ + } while (false) + +#endif // C10_UTIL_EXCEPTION_H_ diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/ExclusivelyOwnedTensorTraits.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/ExclusivelyOwnedTensorTraits.h new file mode 100644 index 0000000000000000000000000000000000000000..143b4df0a4e5f4623a0f9109e74c002064c49292 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/ExclusivelyOwnedTensorTraits.h @@ -0,0 +1,74 @@ +#pragma once + +#include + +#include + +namespace c10 { +// Shared ExclusivelyOwnedTraits implementation between caffe2::Tensor and +// at::TensorBase. +template +struct ExclusivelyOwnedTensorTraits { + using repr_type = TensorType; + using pointer_type = TensorType*; + using const_pointer_type = const TensorType*; + + static repr_type nullRepr() { + return TensorType(); + } + + template + static repr_type createInPlace(Args&&... 
args) { + return TensorType(std::forward(args)...); + } + + static repr_type moveToRepr(TensorType&& x) { + return std::move(x); + } + + static void destroyOwned(TensorType& x) { + TensorImpl* const toDestroy = x.unsafeReleaseTensorImpl(); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + toDestroy != nullptr, "Tensor somehow got null TensorImpl?"); + // May be 0 because UndefinedTensorImpl doesn't get its refcount + // incremented. + const bool isUndefined = toDestroy == UndefinedTensorImpl::singleton(); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + toDestroy->refcount_ == 1 || (toDestroy->refcount_ == 0 && isUndefined), + "ExclusivelyOwned destroyed with isUndefined ", + isUndefined, + " and refcount ", + toDestroy->refcount_, + ", expected 1 or, if isUndefined, 0!"); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + toDestroy->weakcount_ == 1 || + (toDestroy->weakcount_ == 0 && + toDestroy == UndefinedTensorImpl::singleton()), + "ExclusivelyOwned destroyed with isUndefined ", + isUndefined, + " and weakcount ", + toDestroy->weakcount_, + ", expected 1 or, if isUndefined, 0!"); + if (!isUndefined) { +#ifndef NDEBUG + // Needed to pass the debug assertions in ~intrusive_ptr_target. 
+ toDestroy->refcount_ = 0; + toDestroy->weakcount_ = 0; +#endif + delete toDestroy; + } + } + + static TensorType take(TensorType& x) { + return std::move(x); + } + + static pointer_type getImpl(repr_type& x) { + return &x; + } + + static const_pointer_type getImpl(const repr_type& x) { + return &x; + } +}; +} // namespace c10 diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fnuz-inl.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fnuz-inl.h new file mode 100644 index 0000000000000000000000000000000000000000..c1aab8bfe4dcc619546604ad8d268442ed28e923 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fnuz-inl.h @@ -0,0 +1,90 @@ +#pragma once + +#include +#include + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion") +#endif + +namespace c10 { + +/// Constructors + +C10_HOST_DEVICE inline Float8_e4m3fnuz::Float8_e4m3fnuz(float value) + : x(detail::fp8e4m3fnuz_from_fp32_value(value)) {} + +/// Implicit conversions + +C10_HOST_DEVICE inline Float8_e4m3fnuz::operator float() const { + return detail::fp8e4m3fnuz_to_fp32_value(x); +} + +/// Special values helper + +C10_HOST_DEVICE inline bool Float8_e4m3fnuz::isnan() const { + return x == 0b10000000; +} + +} // namespace c10 + +namespace std { + +template <> +class numeric_limits { + public: + static constexpr bool is_specialized = true; + static constexpr bool is_signed = true; + static constexpr bool is_integer = false; + static constexpr bool is_exact = false; + static constexpr bool has_infinity = false; + static constexpr bool has_quiet_NaN = true; + static constexpr bool has_signaling_NaN = false; + static constexpr auto has_denorm = true; + static constexpr auto has_denorm_loss = true; + static constexpr auto round_style = numeric_limits::round_style; + static constexpr bool is_iec559 = false; + 
static constexpr bool is_bounded = true; + static constexpr bool is_modulo = false; + static constexpr int digits = 4; + static constexpr int digits10 = 0; + static constexpr int max_digits10 = 3; + static constexpr int radix = 2; + static constexpr int min_exponent = -6; + static constexpr int min_exponent10 = -1; + static constexpr int max_exponent = 8; + static constexpr int max_exponent10 = 2; + static constexpr auto traps = numeric_limits::traps; + static constexpr auto tinyness_before = false; + + static constexpr c10::Float8_e4m3fnuz min() { + return c10::Float8_e4m3fnuz(0x08, c10::Float8_e4m3fnuz::from_bits()); + } + static constexpr c10::Float8_e4m3fnuz lowest() { + return c10::Float8_e4m3fnuz(0xFF, c10::Float8_e4m3fnuz::from_bits()); + } + static constexpr c10::Float8_e4m3fnuz max() { + return c10::Float8_e4m3fnuz(0x7F, c10::Float8_e4m3fnuz::from_bits()); + } + static constexpr c10::Float8_e4m3fnuz epsilon() { + return c10::Float8_e4m3fnuz(0x28, c10::Float8_e4m3fnuz::from_bits()); + } + static constexpr c10::Float8_e4m3fnuz round_error() { + return c10::Float8_e4m3fnuz(0x38, c10::Float8_e4m3fnuz::from_bits()); + } + static constexpr c10::Float8_e4m3fnuz infinity() { + // NaN (no infinities) + return c10::Float8_e4m3fnuz(0x80, c10::Float8_e4m3fnuz::from_bits()); + } + static constexpr c10::Float8_e4m3fnuz quiet_NaN() { + return c10::Float8_e4m3fnuz(0x80, c10::Float8_e4m3fnuz::from_bits()); + } + static constexpr c10::Float8_e4m3fnuz denorm_min() { + return c10::Float8_e4m3fnuz(0x01, c10::Float8_e4m3fnuz::from_bits()); + } +}; + +} // namespace std + +C10_CLANG_DIAGNOSTIC_POP() diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fnuz.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fnuz.h new file mode 100644 index 0000000000000000000000000000000000000000..0b42c062a280a9958b79a951149863cebae0544c --- /dev/null +++ 
b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fnuz.h @@ -0,0 +1,155 @@ +#pragma once + +/// Defines the Float8_e4m3fnuz type (8-bit floating-point) including +/// conversions to standard C types and basic arithmetic operations. Note that +/// arithmetic operations are implemented by converting to floating point and +/// performing the operation in float32. +/// +/// Binary configuration remains the same as Float8_e4m3fn: +/// s eeee mmm +/// 1 sign bit +/// 4 exponent bits +/// 3 mantissa bits +/// +/// The key differences versus Float8_e4m3fn are: +/// bias = 8 +/// no infinities or negative zero +/// NaN only when sign bit is 1, rest all 0s +/// +/// Implementation based on the paper https://arxiv.org/pdf/2206.02915.pdf and +/// the existing Float8_e4m3fn implementation. + +#include +#include +#include +#include + +#if defined(__cplusplus) && (__cplusplus >= 201103L) +#include +#elif !defined(__OPENCL_VERSION__) +#include +#include +#endif + +#include +#include + +namespace c10 { + +namespace detail { + +/* + * Convert a 8-bit floating-point number in fp8 E4M3FNUZ format, in bit + * representation, to a 32-bit floating-point number in IEEE single-precision + * format, in bit representation. + * + * @note The implementation doesn't use any floating-point operations. + */ +#if defined(__CUDA_ARCH__) || defined(__HIP__) +C10_HOST_DEVICE C10_API inline float fp8e4m3fnuz_to_fp32_value(uint8_t) { + CUDA_KERNEL_ASSERT(false && "e4m3fnuz is not supported by CUDA or HIP"); + return -1.0; +} +#else +C10_API float fp8e4m3fnuz_to_fp32_value(uint8_t input); +#endif + +/* + * Convert a 32-bit floating-point number in IEEE single-precision format to a + * 8-bit floating-point number in fp8 E4M3FNUZ format, in bit representation. + */ +C10_HOST_DEVICE inline uint8_t fp8e4m3fnuz_from_fp32_value(float f) { + /* + * Binary representation of 256.0f, which is the first value not representable + * (i.e. 
the first value which would overflow in to the sign bit, resulting in + * a NaN) in fp8e4m3fnuz range: + * 1 0000 000 - fp8e4m3fnuz + * 0 10000111 00000000000000000000000 - fp32 + */ + constexpr uint32_t fnuz_max = UINT32_C(0x87) << 23; + + /* + * A mask for converting fp32 numbers lower than fp8e4m3fnuz normal range + * into denormalized representation. + * magic number: ((127 - 8) + (23 - 3) + 1) + */ + constexpr uint32_t denorm_mask = UINT32_C(0x8C) << 23; + + uint32_t f_bits = fp32_to_bits(f); + + uint32_t result = 0u; + + /* + * Extract the sign of the input number into the high bit of the 32-bit word: + * + * +---+----------------------------------+ + * | S |0000000 00000000 00000000 00000000| + * +---+----------------------------------+ + * Bits 31 0-31 + */ + const uint32_t sign = f_bits & UINT32_C(0x80000000); + + /* + * Set sign bit to 0 + */ + f_bits ^= sign; + + if (f_bits >= fnuz_max) { + // NaN -- sign bit set to 1, rest 0s. + return 0x80; + } + + if (f_bits < (UINT32_C(0x78) << 23) /* 2^-7 in float32 */) { + // Input exponent is less than -7, the smallest e4m3fnuz exponent, so the + // number will become subnormal. + f_bits = fp32_to_bits(fp32_from_bits(f_bits) + fp32_from_bits(denorm_mask)); + result = static_cast(f_bits - denorm_mask); + if (result == 0) { + // fnuz types don't have negative zero. + return 0; + } + } else { + // resulting mantissa is odd + uint8_t mant_odd = (f_bits >> 20) & 1; + + // update exponent, rounding bias part 1 + f_bits += ((uint32_t)(8 - 127) << 23) + 0x7FFFF; + + // rounding bias part 2 + f_bits += mant_odd; + + // take the bits! 
+ result = static_cast(f_bits >> 20); + } + + result |= sign >> 24; + + return result; +} + +} // namespace detail + +struct alignas(1) Float8_e4m3fnuz { + uint8_t x; + + struct from_bits_t {}; + static constexpr C10_HOST_DEVICE from_bits_t from_bits() { + return from_bits_t(); + } + + Float8_e4m3fnuz() = default; + + constexpr C10_HOST_DEVICE Float8_e4m3fnuz(uint8_t bits, from_bits_t) + : x(bits){}; + inline C10_HOST_DEVICE Float8_e4m3fnuz(float value); + inline C10_HOST_DEVICE operator float() const; + inline C10_HOST_DEVICE bool isnan() const; +}; + +C10_API std::ostream& operator<<( + std::ostream& out, + const Float8_e4m3fnuz& value); + +} // namespace c10 + +#include // IWYU pragma: keep diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2fnuz.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2fnuz.h new file mode 100644 index 0000000000000000000000000000000000000000..ea549797bd458c7f2d905956480e85236b0e20f5 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2fnuz.h @@ -0,0 +1,154 @@ +#pragma once + +/// Defines the Float8_e5m2fnuz type (8-bit floating-point) including +/// conversions to standard C types and basic arithmetic operations. Note that +/// arithmetic operations are implemented by converting to floating point and +/// performing the operation in float32. +/// +/// Binary configuration remains the same as e5m2: +/// s eeeee mm +/// 1 sign bit +/// 5 exponent bits +/// 2 mantissa bits +/// +/// The key differences that e5m2fnuz brings are: +/// bias = 16 +/// no infinities or negative zero +/// NaN only when sign bit is 1, rest all 0s +/// +/// Implementation based on the paper https://arxiv.org/pdf/2206.02915.pdf and +/// the existing Float8_e4m3fn implementation. 
+ +#include +#include +#include +#include + +#if defined(__cplusplus) && (__cplusplus >= 201103L) +#include +#elif !defined(__OPENCL_VERSION__) +#include +#include +#endif + +#include +#include + +namespace c10 { + +namespace detail { + +/* + * Convert a 8-bit floating-point number in fp8 E5M2FNUZ format, in bit + * representation, to a 32-bit floating-point number in IEEE single-precision + * format, in bit representation. + * + * @note The implementation doesn't use any floating-point operations. + */ +#if defined(__CUDA_ARCH__) || defined(__HIP__) +C10_HOST_DEVICE C10_API inline float fp8e5m2fnuz_to_fp32_value(uint8_t) { + CUDA_KERNEL_ASSERT(false && "e5m2fnuz is not supported by CUDA or HIP"); + return -1.0; +} +#else +C10_API float fp8e5m2fnuz_to_fp32_value(uint8_t input); +#endif + +/* + * Convert a 32-bit floating-point number in IEEE single-precision format to a + * 8-bit floating-point number in fp8 E5M2 format, in bit representation. + */ +C10_HOST_DEVICE inline uint8_t fp8e5m2fnuz_from_fp32_value(float f) { + /* + * Binary representation of 65536.0f, which is the first value not + * representable (i.e. the first value which would overflow in to the sign + * bit, resulting in a NaN) in fp8e4m3fnuz range: + * 1 00000 00 - fp8e5m2fnuz + * 0 10001111 00000000000000000000000 - fp32 + */ + constexpr uint32_t fnuz_max = UINT32_C(0x8F) << 23; + + /* + * A mask for converting fp32 numbers lower than fp8e5m2fnuz normal range + * into denormalized representation. 
+ * magic number: ((127 - 16) + (23 - 2) + 1) + */ + constexpr uint32_t denorm_mask = UINT32_C(0x85) << 23; + + uint32_t f_bits = fp32_to_bits(f); + + uint32_t result = 0u; + + /* + * Extract the sign of the input number into the high bit of the 32-bit word: + * + * +---+----------------------------------+ + * | S |0000000 00000000 00000000 00000000| + * +---+----------------------------------+ + * Bits 31 0-31 + */ + const uint32_t sign = f_bits & UINT32_C(0x80000000); + + /* + * Set sign bit to 0 + */ + f_bits ^= sign; + + if (f_bits >= fnuz_max) { + // NaN -- sign bit set to 1, rest 0s + return 0x80; + } + + if (f_bits < (UINT32_C(0x70) << 23) /* 2^-15 in float32 */) { + // Input exponent is less than -15, the smallest e5m2fnuz exponent, so the + // number will become subnormal. + f_bits = fp32_to_bits(fp32_from_bits(f_bits) + fp32_from_bits(denorm_mask)); + result = static_cast(f_bits - denorm_mask); + if (result == 0) { + // fnuz types don't have negative zero. + return 0; + } + } else { + // resulting mantissa is odd + uint8_t mant_odd = (f_bits >> 21) & 1; + + // update exponent, rounding bias part 1 + f_bits += ((uint32_t)(16 - 127) << 23) + 0xFFFFF; + + // rounding bias part 2 + f_bits += mant_odd; + + // take the bits! 
+ result = static_cast(f_bits >> 21); + } + + result |= sign >> 24; + return result; +} + +} // namespace detail + +struct alignas(1) Float8_e5m2fnuz { + uint8_t x; + + struct from_bits_t {}; + static constexpr C10_HOST_DEVICE from_bits_t from_bits() { + return from_bits_t(); + } + + Float8_e5m2fnuz() = default; + + constexpr C10_HOST_DEVICE Float8_e5m2fnuz(uint8_t bits, from_bits_t) + : x(bits){}; + inline C10_HOST_DEVICE Float8_e5m2fnuz(float value); + inline C10_HOST_DEVICE operator float() const; + inline C10_HOST_DEVICE bool isnan() const; +}; + +C10_API std::ostream& operator<<( + std::ostream& out, + const Float8_e5m2fnuz& value); + +} // namespace c10 + +#include // IWYU pragma: keep diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Half-inl.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Half-inl.h new file mode 100644 index 0000000000000000000000000000000000000000..d2c836eecf7754288f1f38bc70d401ff0b2ec2ac --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Half-inl.h @@ -0,0 +1,343 @@ +#pragma once + +#include +#include + +#include +#include + +#ifdef __CUDACC__ +#include +#endif + +#ifdef __HIPCC__ +#include +#endif + +#if defined(CL_SYCL_LANGUAGE_VERSION) +#include // for SYCL 1.2.1 +#elif defined(SYCL_LANGUAGE_VERSION) +#include // for SYCL 2020 +#endif + +#if (defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)) && \ + !defined(__APPLE__) +#include +#endif + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion") +#endif + +namespace c10 { + +/// Constructors + +inline C10_HOST_DEVICE Half::Half(float value) + : +#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__) + x(__half_as_short(__float2half(value))) +#elif defined(__SYCL_DEVICE_ONLY__) + x(c10::bit_cast(sycl::half(value))) +#else +#if (defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)) 
&& \ + !defined(__APPLE__) + x(at::vec::float2half_scalar(value)) +#else + x(detail::fp16_ieee_from_fp32_value(value)) +#endif +#endif +{ +} + +/// Implicit conversions + +inline C10_HOST_DEVICE Half::operator float() const { +#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__) + return __half2float(*reinterpret_cast(&x)); +#elif defined(__SYCL_DEVICE_ONLY__) + return float(c10::bit_cast(x)); +#else +#if (defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)) && \ + !defined(__APPLE__) + return at::vec::half2float_scalar(x); +#else + return detail::fp16_ieee_to_fp32_value(x); +#endif +#endif +} + +#if defined(__CUDACC__) || defined(__HIPCC__) +inline C10_HOST_DEVICE Half::Half(const __half& value) { + x = *reinterpret_cast(&value); +} +inline C10_HOST_DEVICE Half::operator __half() const { + return *reinterpret_cast(&x); +} +#endif + +#ifdef SYCL_LANGUAGE_VERSION +inline C10_HOST_DEVICE Half::Half(const sycl::half& value) { + x = *reinterpret_cast(&value); +} +inline C10_HOST_DEVICE Half::operator sycl::half() const { + return *reinterpret_cast(&x); +} +#endif + +// CUDA intrinsics + +#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 350)) || \ + (defined(__clang__) && defined(__CUDA__)) +inline __device__ Half __ldg(const Half* ptr) { + return __ldg(reinterpret_cast(ptr)); +} +#endif + +/// Arithmetic + +inline C10_HOST_DEVICE Half operator+(const Half& a, const Half& b) { + return static_cast(a) + static_cast(b); +} + +inline C10_HOST_DEVICE Half operator-(const Half& a, const Half& b) { + return static_cast(a) - static_cast(b); +} + +inline C10_HOST_DEVICE Half operator*(const Half& a, const Half& b) { + return static_cast(a) * static_cast(b); +} + +inline C10_HOST_DEVICE Half operator/(const Half& a, const Half& b) + __ubsan_ignore_float_divide_by_zero__ { + return static_cast(a) / static_cast(b); +} + +inline C10_HOST_DEVICE Half operator-(const Half& a) { +#if (defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530) || \ + 
defined(__HIP_DEVICE_COMPILE__) + return __hneg(a); +#elif defined(__SYCL_DEVICE_ONLY__) + return -c10::bit_cast(a); +#else + return -static_cast(a); +#endif +} + +inline C10_HOST_DEVICE Half& operator+=(Half& a, const Half& b) { + a = a + b; + return a; +} + +inline C10_HOST_DEVICE Half& operator-=(Half& a, const Half& b) { + a = a - b; + return a; +} + +inline C10_HOST_DEVICE Half& operator*=(Half& a, const Half& b) { + a = a * b; + return a; +} + +inline C10_HOST_DEVICE Half& operator/=(Half& a, const Half& b) { + a = a / b; + return a; +} + +/// Arithmetic with floats + +inline C10_HOST_DEVICE float operator+(Half a, float b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE float operator-(Half a, float b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE float operator*(Half a, float b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE float operator/(Half a, float b) + __ubsan_ignore_float_divide_by_zero__ { + return static_cast(a) / b; +} + +inline C10_HOST_DEVICE float operator+(float a, Half b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE float operator-(float a, Half b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE float operator*(float a, Half b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE float operator/(float a, Half b) + __ubsan_ignore_float_divide_by_zero__ { + return a / static_cast(b); +} + +inline C10_HOST_DEVICE float& operator+=(float& a, const Half& b) { + return a += static_cast(b); +} +inline C10_HOST_DEVICE float& operator-=(float& a, const Half& b) { + return a -= static_cast(b); +} +inline C10_HOST_DEVICE float& operator*=(float& a, const Half& b) { + return a *= static_cast(b); +} +inline C10_HOST_DEVICE float& operator/=(float& a, const Half& b) { + return a /= static_cast(b); +} + +/// Arithmetic with doubles + +inline C10_HOST_DEVICE double operator+(Half a, double b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE double operator-(Half a, double b) { + return 
static_cast(a) - b; +} +inline C10_HOST_DEVICE double operator*(Half a, double b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE double operator/(Half a, double b) + __ubsan_ignore_float_divide_by_zero__ { + return static_cast(a) / b; +} + +inline C10_HOST_DEVICE double operator+(double a, Half b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE double operator-(double a, Half b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE double operator*(double a, Half b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE double operator/(double a, Half b) + __ubsan_ignore_float_divide_by_zero__ { + return a / static_cast(b); +} + +/// Arithmetic with ints + +inline C10_HOST_DEVICE Half operator+(Half a, int b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE Half operator-(Half a, int b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE Half operator*(Half a, int b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE Half operator/(Half a, int b) { + return a / static_cast(b); +} + +inline C10_HOST_DEVICE Half operator+(int a, Half b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE Half operator-(int a, Half b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE Half operator*(int a, Half b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE Half operator/(int a, Half b) { + return static_cast(a) / b; +} + +//// Arithmetic with int64_t + +inline C10_HOST_DEVICE Half operator+(Half a, int64_t b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE Half operator-(Half a, int64_t b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE Half operator*(Half a, int64_t b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE Half operator/(Half a, int64_t b) { + return a / static_cast(b); +} + +inline C10_HOST_DEVICE Half operator+(int64_t a, Half b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE Half operator-(int64_t a, Half b) { + return static_cast(a) - b; +} +inline 
C10_HOST_DEVICE Half operator*(int64_t a, Half b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE Half operator/(int64_t a, Half b) { + return static_cast(a) / b; +} + +/// NOTE: we do not define comparisons directly and instead rely on the implicit +/// conversion from c10::Half to float. + +} // namespace c10 + +namespace std { + +template <> +class numeric_limits { + public: + static constexpr bool is_specialized = true; + static constexpr bool is_signed = true; + static constexpr bool is_integer = false; + static constexpr bool is_exact = false; + static constexpr bool has_infinity = true; + static constexpr bool has_quiet_NaN = true; + static constexpr bool has_signaling_NaN = true; + static constexpr auto has_denorm = numeric_limits::has_denorm; + static constexpr auto has_denorm_loss = + numeric_limits::has_denorm_loss; + static constexpr auto round_style = numeric_limits::round_style; + static constexpr bool is_iec559 = true; + static constexpr bool is_bounded = true; + static constexpr bool is_modulo = false; + static constexpr int digits = 11; + static constexpr int digits10 = 3; + static constexpr int max_digits10 = 5; + static constexpr int radix = 2; + static constexpr int min_exponent = -13; + static constexpr int min_exponent10 = -4; + static constexpr int max_exponent = 16; + static constexpr int max_exponent10 = 4; + static constexpr auto traps = numeric_limits::traps; + static constexpr auto tinyness_before = + numeric_limits::tinyness_before; + static constexpr c10::Half min() { + return c10::Half(0x0400, c10::Half::from_bits()); + } + static constexpr c10::Half lowest() { + return c10::Half(0xFBFF, c10::Half::from_bits()); + } + static constexpr c10::Half max() { + return c10::Half(0x7BFF, c10::Half::from_bits()); + } + static constexpr c10::Half epsilon() { + return c10::Half(0x1400, c10::Half::from_bits()); + } + static constexpr c10::Half round_error() { + return c10::Half(0x3800, c10::Half::from_bits()); + } + static constexpr 
c10::Half infinity() { + return c10::Half(0x7C00, c10::Half::from_bits()); + } + static constexpr c10::Half quiet_NaN() { + return c10::Half(0x7E00, c10::Half::from_bits()); + } + static constexpr c10::Half signaling_NaN() { + return c10::Half(0x7D00, c10::Half::from_bits()); + } + static constexpr c10::Half denorm_min() { + return c10::Half(0x0001, c10::Half::from_bits()); + } +}; + +} // namespace std + +C10_CLANG_DIAGNOSTIC_POP() diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/LeftRight.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/LeftRight.h new file mode 100644 index 0000000000000000000000000000000000000000..b93877105a53b8ffa76285d63970c2478587d722 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/LeftRight.h @@ -0,0 +1,221 @@ +#include +#include +#include +#include +#include +#include +#include + +namespace c10 { + +namespace detail { + +struct IncrementRAII final { + public: + explicit IncrementRAII(std::atomic* counter) : _counter(counter) { + _counter->fetch_add(1); + } + + ~IncrementRAII() { + _counter->fetch_sub(1); + } + + private: + std::atomic* _counter; + + C10_DISABLE_COPY_AND_ASSIGN(IncrementRAII); +}; + +} // namespace detail + +// LeftRight wait-free readers synchronization primitive +// https://hal.archives-ouvertes.fr/hal-01207881/document +// +// LeftRight is quite easy to use (it can make an arbitrary +// data structure permit wait-free reads), but it has some +// particular performance characteristics you should be aware +// of if you're deciding to use it: +// +// - Reads still incur an atomic write (this is how LeftRight +// keeps track of how long it needs to keep around the old +// data structure) +// +// - Writes get executed twice, to keep both the left and right +// versions up to date. So if your write is expensive or +// nondeterministic, this is also an inappropriate structure +// +// LeftRight is used fairly rarely in PyTorch's codebase. 
If you +// are still not sure if you need it or not, consult your local +// C++ expert. +// +template +class LeftRight final { + public: + template + explicit LeftRight(const Args&... args) + : _counters{{{0}, {0}}}, + _foregroundCounterIndex(0), + _foregroundDataIndex(0), + _data{{T{args...}, T{args...}}}, + _writeMutex() {} + + // Copying and moving would not be threadsafe. + // Needs more thought and careful design to make that work. + LeftRight(const LeftRight&) = delete; + LeftRight(LeftRight&&) noexcept = delete; + LeftRight& operator=(const LeftRight&) = delete; + LeftRight& operator=(LeftRight&&) noexcept = delete; + + ~LeftRight() { + // wait until any potentially running writers are finished + { std::unique_lock lock(_writeMutex); } + + // wait until any potentially running readers are finished + while (_counters[0].load() != 0 || _counters[1].load() != 0) { + std::this_thread::yield(); + } + } + + template + auto read(F&& readFunc) const -> typename c10::invoke_result_t { + detail::IncrementRAII _increment_counter( + &_counters[_foregroundCounterIndex.load()]); + + return readFunc(_data[_foregroundDataIndex.load()]); + } + + // Throwing an exception in writeFunc is ok but causes the state to be either + // the old or the new state, depending on if the first or the second call to + // writeFunc threw. + template + auto write(F&& writeFunc) -> typename c10::invoke_result_t { + std::unique_lock lock(_writeMutex); + + return _write(writeFunc); + } + + private: + template + auto _write(const F& writeFunc) -> typename c10::invoke_result_t { + /* + * Assume, A is in background and B in foreground. In simplified terms, we + * want to do the following: + * 1. Write to A (old background) + * 2. Switch A/B + * 3. Write to B (new background) + * + * More detailed algorithm (explanations on why this is important are below + * in code): + * 1. Write to A + * 2. Switch A/B data pointers + * 3. Wait until A counter is zero + * 4. Switch A/B counters + * 5. 
Wait until B counter is zero + * 6. Write to B + */ + + auto localDataIndex = _foregroundDataIndex.load(); + + // 1. Write to A + _callWriteFuncOnBackgroundInstance(writeFunc, localDataIndex); + + // 2. Switch A/B data pointers + localDataIndex = localDataIndex ^ 1; + _foregroundDataIndex = localDataIndex; + + /* + * 3. Wait until A counter is zero + * + * In the previous write run, A was foreground and B was background. + * There was a time after switching _foregroundDataIndex (B to foreground) + * and before switching _foregroundCounterIndex, in which new readers could + * have read B but incremented A's counter. + * + * In this current run, we just switched _foregroundDataIndex (A back to + * foreground), but before writing to the new background B, we have to make + * sure A's counter was zero briefly, so all these old readers are gone. + */ + auto localCounterIndex = _foregroundCounterIndex.load(); + _waitForBackgroundCounterToBeZero(localCounterIndex); + + /* + * 4. Switch A/B counters + * + * Now that we know all readers on B are really gone, we can switch the + * counters and have new readers increment A's counter again, which is the + * correct counter since they're reading A. + */ + localCounterIndex = localCounterIndex ^ 1; + _foregroundCounterIndex = localCounterIndex; + + /* + * 5. Wait until B counter is zero + * + * This waits for all the readers on B that came in while both data and + * counter for B was in foreground, i.e. normal readers that happened + * outside of that brief gap between switching data and counter. + */ + _waitForBackgroundCounterToBeZero(localCounterIndex); + + // 6. Write to B + return _callWriteFuncOnBackgroundInstance(writeFunc, localDataIndex); + } + + template + auto _callWriteFuncOnBackgroundInstance( + const F& writeFunc, + uint8_t localDataIndex) -> typename c10::invoke_result_t { + try { + return writeFunc(_data[localDataIndex ^ 1]); + } catch (...) 
{ + // recover invariant by copying from the foreground instance + _data[localDataIndex ^ 1] = _data[localDataIndex]; + // rethrow + throw; + } + } + + void _waitForBackgroundCounterToBeZero(uint8_t counterIndex) { + while (_counters[counterIndex ^ 1].load() != 0) { + std::this_thread::yield(); + } + } + + mutable std::array, 2> _counters; + std::atomic _foregroundCounterIndex; + std::atomic _foregroundDataIndex; + std::array _data; + std::mutex _writeMutex; +}; + +// RWSafeLeftRightWrapper is API compatible with LeftRight and uses a +// read-write lock to protect T (data). +template +class RWSafeLeftRightWrapper final { + public: + template + explicit RWSafeLeftRightWrapper(const Args&... args) : data_{args...} {} + + // RWSafeLeftRightWrapper is not copyable or moveable since LeftRight + // is not copyable or moveable. + RWSafeLeftRightWrapper(const RWSafeLeftRightWrapper&) = delete; + RWSafeLeftRightWrapper(RWSafeLeftRightWrapper&&) noexcept = delete; + RWSafeLeftRightWrapper& operator=(const RWSafeLeftRightWrapper&) = delete; + RWSafeLeftRightWrapper& operator=(RWSafeLeftRightWrapper&&) noexcept = delete; + + template + auto read(F&& readFunc) const -> typename c10::invoke_result_t { + return data_.withLock( + [&readFunc](T const& data) { return readFunc(data); }); + } + + template + auto write(F&& writeFunc) -> typename c10::invoke_result_t { + return data_.withLock([&writeFunc](T& data) { return writeFunc(data); }); + } + + private: + c10::Synchronized data_; +}; + +} // namespace c10 diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Registry.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Registry.h new file mode 100644 index 0000000000000000000000000000000000000000..dac1ec69384c0f46dbe18f7d33575eeb72a55eb3 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Registry.h @@ -0,0 +1,327 @@ +#ifndef C10_UTIL_REGISTRY_H_ +#define C10_UTIL_REGISTRY_H_ + +/** + * Simple registry 
implementation that uses static variables to + * register object creators during program initialization time. + */ + +// NB: This Registry works poorly when you have other namespaces. +// Make all macro invocations from inside the at namespace. + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace c10 { + +template +inline std::string KeyStrRepr(const KeyType& /*key*/) { + return "[key type printing not supported]"; +} + +template <> +inline std::string KeyStrRepr(const std::string& key) { + return key; +} + +enum RegistryPriority { + REGISTRY_FALLBACK = 1, + REGISTRY_DEFAULT = 2, + REGISTRY_PREFERRED = 3, +}; + +/** + * @brief A template class that allows one to register classes by keys. + * + * The keys are usually a std::string specifying the name, but can be anything + * that can be used in a std::map. + * + * You should most likely not use the Registry class explicitly, but use the + * helper macros below to declare specific registries as well as registering + * objects. + */ +template +class Registry { + public: + typedef std::function Creator; + + Registry(bool warning = true) + : registry_(), priority_(), terminate_(true), warning_(warning) {} + + void Register( + const SrcType& key, + Creator creator, + const RegistryPriority priority = REGISTRY_DEFAULT) { + std::lock_guard lock(register_mutex_); + // The if statement below is essentially the same as the following line: + // TORCH_CHECK_EQ(registry_.count(key), 0) << "Key " << key + // << " registered twice."; + // However, TORCH_CHECK_EQ depends on google logging, and since registration + // is carried out at static initialization time, we do not want to have an + // explicit dependency on glog's initialization function. 
+ if (registry_.count(key) != 0) { + auto cur_priority = priority_[key]; + if (priority > cur_priority) { +#ifdef DEBUG + std::string warn_msg = + "Overwriting already registered item for key " + KeyStrRepr(key); + fprintf(stderr, "%s\n", warn_msg.c_str()); +#endif + registry_[key] = creator; + priority_[key] = priority; + } else if (priority == cur_priority) { + std::string err_msg = + "Key already registered with the same priority: " + KeyStrRepr(key); + fprintf(stderr, "%s\n", err_msg.c_str()); + if (terminate_) { + std::exit(1); + } else { + throw std::runtime_error(err_msg); + } + } else if (warning_) { + std::string warn_msg = + "Higher priority item already registered, skipping registration of " + + KeyStrRepr(key); + fprintf(stderr, "%s\n", warn_msg.c_str()); + } + } else { + registry_[key] = creator; + priority_[key] = priority; + } + } + + void Register( + const SrcType& key, + Creator creator, + const std::string& help_msg, + const RegistryPriority priority = REGISTRY_DEFAULT) { + Register(key, creator, priority); + help_message_[key] = help_msg; + } + + inline bool Has(const SrcType& key) { + return (registry_.count(key) != 0); + } + + ObjectPtrType Create(const SrcType& key, Args... args) { + auto it = registry_.find(key); + if (it == registry_.end()) { + // Returns nullptr if the key is not registered. + return nullptr; + } + return it->second(args...); + } + + /** + * Returns the keys currently registered as a std::vector. 
+ */ + std::vector Keys() const { + std::vector keys; + keys.reserve(registry_.size()); + for (const auto& it : registry_) { + keys.push_back(it.first); + } + return keys; + } + + inline const std::unordered_map& HelpMessage() const { + return help_message_; + } + + const char* HelpMessage(const SrcType& key) const { + auto it = help_message_.find(key); + if (it == help_message_.end()) { + return nullptr; + } + return it->second.c_str(); + } + + // Used for testing, if terminate is unset, Registry throws instead of + // calling std::exit + void SetTerminate(bool terminate) { + terminate_ = terminate; + } + + private: + std::unordered_map registry_; + std::unordered_map priority_; + bool terminate_; + const bool warning_; + std::unordered_map help_message_; + std::mutex register_mutex_; + + C10_DISABLE_COPY_AND_ASSIGN(Registry); +}; + +template +class Registerer { + public: + explicit Registerer( + const SrcType& key, + Registry* registry, + typename Registry::Creator creator, + const std::string& help_msg = "") { + registry->Register(key, creator, help_msg); + } + + explicit Registerer( + const SrcType& key, + const RegistryPriority priority, + Registry* registry, + typename Registry::Creator creator, + const std::string& help_msg = "") { + registry->Register(key, creator, help_msg, priority); + } + + template + static ObjectPtrType DefaultCreator(Args... args) { + return ObjectPtrType(new DerivedType(args...)); + } +}; + +/** + * C10_DECLARE_TYPED_REGISTRY is a macro that expands to a function + * declaration, as well as creating a convenient typename for its corresponding + * registerer. + */ +// Note on C10_IMPORT and C10_EXPORT below: we need to explicitly mark DECLARE +// as import and DEFINE as export, because these registry macros will be used +// in downstream shared libraries as well, and one cannot use *_API - the API +// macro will be defined on a per-shared-library basis. 
Semantically, when one +// declares a typed registry it is always going to be IMPORT, and when one +// defines a registry (which should happen ONLY ONCE and ONLY IN SOURCE FILE), +// the instantiation unit is always going to be exported. +// +// The only unique condition is when in the same file one does DECLARE and +// DEFINE - in Windows compilers, this generates a warning that dllimport and +// dllexport are mixed, but the warning is fine and linker will be properly +// exporting the symbol. Same thing happens in the gflags flag declaration and +// definition caes. +#define C10_DECLARE_TYPED_REGISTRY( \ + RegistryName, SrcType, ObjectType, PtrType, ...) \ + C10_API ::c10::Registry, ##__VA_ARGS__>* \ + RegistryName(); \ + typedef ::c10::Registerer, ##__VA_ARGS__> \ + Registerer##RegistryName + +#define TORCH_DECLARE_TYPED_REGISTRY( \ + RegistryName, SrcType, ObjectType, PtrType, ...) \ + TORCH_API ::c10::Registry, ##__VA_ARGS__>* \ + RegistryName(); \ + typedef ::c10::Registerer, ##__VA_ARGS__> \ + Registerer##RegistryName + +#define C10_DEFINE_TYPED_REGISTRY( \ + RegistryName, SrcType, ObjectType, PtrType, ...) \ + C10_EXPORT ::c10::Registry, ##__VA_ARGS__>* \ + RegistryName() { \ + static ::c10::Registry, ##__VA_ARGS__>* \ + registry = new ::c10:: \ + Registry, ##__VA_ARGS__>(); \ + return registry; \ + } + +#define C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING( \ + RegistryName, SrcType, ObjectType, PtrType, ...) \ + C10_EXPORT ::c10::Registry, ##__VA_ARGS__>* \ + RegistryName() { \ + static ::c10::Registry, ##__VA_ARGS__>* \ + registry = \ + new ::c10::Registry, ##__VA_ARGS__>( \ + false); \ + return registry; \ + } + +// Note(Yangqing): The __VA_ARGS__ below allows one to specify a templated +// creator with comma in its templated arguments. +#define C10_REGISTER_TYPED_CREATOR(RegistryName, key, ...) 
\ + static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \ + key, RegistryName(), ##__VA_ARGS__); + +#define C10_REGISTER_TYPED_CREATOR_WITH_PRIORITY( \ + RegistryName, key, priority, ...) \ + static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \ + key, priority, RegistryName(), ##__VA_ARGS__); + +#define C10_REGISTER_TYPED_CLASS(RegistryName, key, ...) \ + static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \ + key, \ + RegistryName(), \ + Registerer##RegistryName::DefaultCreator<__VA_ARGS__>, \ + ::c10::demangle_type<__VA_ARGS__>()); + +#define C10_REGISTER_TYPED_CLASS_WITH_PRIORITY( \ + RegistryName, key, priority, ...) \ + static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \ + key, \ + priority, \ + RegistryName(), \ + Registerer##RegistryName::DefaultCreator<__VA_ARGS__>, \ + ::c10::demangle_type<__VA_ARGS__>()); + +// C10_DECLARE_REGISTRY and C10_DEFINE_REGISTRY are hard-wired to use +// std::string as the key type, because that is the most commonly used cases. +#define C10_DECLARE_REGISTRY(RegistryName, ObjectType, ...) \ + C10_DECLARE_TYPED_REGISTRY( \ + RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__) + +#define TORCH_DECLARE_REGISTRY(RegistryName, ObjectType, ...) \ + TORCH_DECLARE_TYPED_REGISTRY( \ + RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__) + +#define C10_DEFINE_REGISTRY(RegistryName, ObjectType, ...) \ + C10_DEFINE_TYPED_REGISTRY( \ + RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__) + +#define C10_DEFINE_REGISTRY_WITHOUT_WARNING(RegistryName, ObjectType, ...) \ + C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING( \ + RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__) + +#define C10_DECLARE_SHARED_REGISTRY(RegistryName, ObjectType, ...) 
\ + C10_DECLARE_TYPED_REGISTRY( \ + RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__) + +#define TORCH_DECLARE_SHARED_REGISTRY(RegistryName, ObjectType, ...) \ + TORCH_DECLARE_TYPED_REGISTRY( \ + RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__) + +#define C10_DEFINE_SHARED_REGISTRY(RegistryName, ObjectType, ...) \ + C10_DEFINE_TYPED_REGISTRY( \ + RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__) + +#define C10_DEFINE_SHARED_REGISTRY_WITHOUT_WARNING( \ + RegistryName, ObjectType, ...) \ + C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING( \ + RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__) + +// C10_REGISTER_CREATOR and C10_REGISTER_CLASS are hard-wired to use std::string +// as the key +// type, because that is the most commonly used cases. +#define C10_REGISTER_CREATOR(RegistryName, key, ...) \ + C10_REGISTER_TYPED_CREATOR(RegistryName, #key, __VA_ARGS__) + +#define C10_REGISTER_CREATOR_WITH_PRIORITY(RegistryName, key, priority, ...) \ + C10_REGISTER_TYPED_CREATOR_WITH_PRIORITY( \ + RegistryName, #key, priority, __VA_ARGS__) + +#define C10_REGISTER_CLASS(RegistryName, key, ...) \ + C10_REGISTER_TYPED_CLASS(RegistryName, #key, __VA_ARGS__) + +#define C10_REGISTER_CLASS_WITH_PRIORITY(RegistryName, key, priority, ...) \ + C10_REGISTER_TYPED_CLASS_WITH_PRIORITY( \ + RegistryName, #key, priority, __VA_ARGS__) + +} // namespace c10 + +#endif // C10_UTIL_REGISTRY_H_ diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/SmallBuffer.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/SmallBuffer.h new file mode 100644 index 0000000000000000000000000000000000000000..1fc7c6f16be70612f8d1fa35ac2ea48efa157ba8 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/SmallBuffer.h @@ -0,0 +1,88 @@ +#pragma once +#include +#include + +/** Helper class for allocating temporary fixed size arrays with SBO. 
+ * + * This is intentionally much simpler than SmallVector, to improve performance + * at the expense of many features: + * - No zero-initialization for numeric types + * - No resizing after construction + * - No copy/move + * - No non-trivial types + */ + +namespace c10 { + +template +class SmallBuffer { + static_assert( + std::is_trivial::value, + "SmallBuffer is intended for POD types"); + + std::array storage_; + size_t size_{}; + T* data_{}; + + public: + SmallBuffer(size_t size) : size_(size) { + if (size > N) { + data_ = new T[size]; + } else { + data_ = &storage_[0]; + } + } + + SmallBuffer(const SmallBuffer&) = delete; + SmallBuffer& operator=(const SmallBuffer&) = delete; + + // move constructor is needed in function return + SmallBuffer(SmallBuffer&& rhs) noexcept : size_{rhs.size_} { + rhs.size_ = 0; + if (size_ > N) { + data_ = rhs.data_; + rhs.data_ = nullptr; + } else { + storage_ = std::move(rhs.storage_); + data_ = &storage_[0]; + } + } + + SmallBuffer& operator=(SmallBuffer&&) = delete; + + ~SmallBuffer() { + if (size_ > N) { + delete[] data_; + } + } + + T& operator[](int64_t idx) { + return data()[idx]; + } + const T& operator[](int64_t idx) const { + return data()[idx]; + } + T* data() { + return data_; + } + const T* data() const { + return data_; + } + size_t size() const { + return size_; + } + T* begin() { + return data_; + } + const T* begin() const { + return data_; + } + T* end() { + return data_ + size_; + } + const T* end() const { + return data_ + size_; + } +}; + +} // namespace c10 diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/SmallVector.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/SmallVector.h new file mode 100644 index 0000000000000000000000000000000000000000..a5f6d7fc53e0044e76c0c1fee23b3f61642946ab --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/SmallVector.h @@ -0,0 +1,1477 @@ +//===- llvm/ADT/SmallVector.h - 'Normally small' vectors 
--------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the SmallVector class. +// +//===----------------------------------------------------------------------===// + +// ATen: modified from llvm::SmallVector. +// used std::is_trivially_{copy,move}_constructible +// replaced iterator_range constructor with inline Container&& constructor +// replaced LLVM_NODISCARD, LLVM_LIKELY, and LLVM_UNLIKELY with c10 equivalents +// removed LLVM_GSL_OWNER +// added SmallVector::at +// added operator<< for std::ostream +// added C10_API to export SmallVectorBase + +#pragma once + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wshorten-64-to-32") +#endif + +namespace c10 { + +/// This is all the stuff common to all SmallVectors. +/// +/// The template parameter specifies the type which should be used to hold the +/// Size and Capacity of the SmallVector, so it can be adjusted. +/// Using 32 bit size is desirable to shrink the size of the SmallVector. +/// Using 64 bit size is desirable for cases like SmallVector, where a +/// 32 bit size would limit the vector to ~4GB. SmallVectors are used for +/// buffering bitcode output - which can exceed 4GB. +template +class C10_API SmallVectorBase { + protected: + void* BeginX; + Size_T Size = 0, Capacity; + + /// The maximum value of the Size_T used. 
+ static constexpr size_t SizeTypeMax() { + return std::numeric_limits::max(); + } + + SmallVectorBase(void* FirstEl, size_t TotalCapacity) + : BeginX(FirstEl), Capacity(TotalCapacity) {} + + /// This is a helper for \a grow() that's out of line to reduce code + /// duplication. This function will report a fatal error if it can't grow at + /// least to \p MinSize. + void* mallocForGrow(size_t MinSize, size_t TSize, size_t& NewCapacity); + + /// This is an implementation of the grow() method which only works + /// on POD-like data types and is out of line to reduce code duplication. + /// This function will report a fatal error if it cannot increase capacity. + void grow_pod(void* FirstEl, size_t MinSize, size_t TSize); + + public: + SmallVectorBase() = delete; + size_t size() const { + return Size; + } + size_t capacity() const { + return Capacity; + } + + C10_NODISCARD bool empty() const { + return !Size; + } + + /// Set the array size to \p N, which the current array must have enough + /// capacity for. + /// + /// This does not construct or destroy any elements in the vector. + /// + /// Clients can use this in conjunction with capacity() to write past the end + /// of the buffer when they know that more elements are available, and only + /// update the size later. This avoids the cost of value initializing elements + /// which will only be overwritten. + void set_size(size_t N) { + assert(N <= capacity()); + Size = N; + } +}; + +template +using SmallVectorSizeType = typename std:: + conditional= 8, uint64_t, uint32_t>::type; + +/// Figure out the offset of the first element. +template +struct SmallVectorAlignmentAndSize { + alignas(SmallVectorBase>) char Base[sizeof( + SmallVectorBase>)]; + alignas(T) char FirstEl[sizeof(T)]; +}; + +/// This is the part of SmallVectorTemplateBase which does not depend on whether +/// the type T is a POD. The extra dummy template argument is used by ArrayRef +/// to avoid unnecessarily requiring T to be complete. 
+template +class SmallVectorTemplateCommon + : public SmallVectorBase> { + using Base = SmallVectorBase>; + + /// Find the address of the first element. For this pointer math to be valid + /// with small-size of 0 for T with lots of alignment, it's important that + /// SmallVectorStorage is properly-aligned even for small-size of 0. + void* getFirstEl() const { + return const_cast(reinterpret_cast( + reinterpret_cast(this) + + offsetof(SmallVectorAlignmentAndSize, FirstEl))); + } + // Space after 'FirstEl' is clobbered, do not add any instance vars after it. + + protected: + SmallVectorTemplateCommon(size_t Size) : Base(getFirstEl(), Size) {} + + void grow_pod(size_t MinSize, size_t TSize) { + Base::grow_pod(getFirstEl(), MinSize, TSize); + } + + /// Return true if this is a smallvector which has not had dynamic + /// memory allocated for it. + bool isSmall() const { + return this->BeginX == getFirstEl(); + } + + /// Put this vector in a state of being small. + void resetToSmall() { + this->BeginX = getFirstEl(); + this->Size = this->Capacity = 0; // FIXME: Setting Capacity to 0 is suspect. + } + + /// Return true if V is an internal reference to the given range. + bool isReferenceToRange(const void* V, const void* First, const void* Last) + const { + // Use std::less to avoid UB. + std::less<> LessThan; + return !LessThan(V, First) && LessThan(V, Last); + } + + /// Return true if V is an internal reference to this vector. + bool isReferenceToStorage(const void* V) const { + return isReferenceToRange(V, this->begin(), this->end()); + } + + /// Return true if First and Last form a valid (possibly empty) range in this + /// vector's storage. + bool isRangeInStorage(const void* First, const void* Last) const { + // Use std::less to avoid UB. + std::less<> LessThan; + return !LessThan(First, this->begin()) && !LessThan(Last, First) && + !LessThan(this->end(), Last); + } + + /// Return true unless Elt will be invalidated by resizing the vector to + /// NewSize. 
+ bool isSafeToReferenceAfterResize(const void* Elt, size_t NewSize) { + // Past the end. + if (C10_LIKELY(!isReferenceToStorage(Elt))) + return true; + + // Return false if Elt will be destroyed by shrinking. + if (NewSize <= this->size()) + return Elt < this->begin() + NewSize; + + // Return false if we need to grow. + return NewSize <= this->capacity(); + } + + /// Check whether Elt will be invalidated by resizing the vector to NewSize. + void assertSafeToReferenceAfterResize(const void* Elt, size_t NewSize) { + (void)Elt; // Suppress unused variable warning + (void)NewSize; // Suppress unused variable warning + assert( + isSafeToReferenceAfterResize(Elt, NewSize) && + "Attempting to reference an element of the vector in an operation " + "that invalidates it"); + } + + /// Check whether Elt will be invalidated by increasing the size of the + /// vector by N. + void assertSafeToAdd(const void* Elt, size_t N = 1) { + this->assertSafeToReferenceAfterResize(Elt, this->size() + N); + } + + /// Check whether any part of the range will be invalidated by clearing. + void assertSafeToReferenceAfterClear(const T* From, const T* To) { + if (From == To) + return; + this->assertSafeToReferenceAfterResize(From, 0); + this->assertSafeToReferenceAfterResize(To - 1, 0); + } + template < + class ItTy, + std::enable_if_t< + !std::is_same, T*>::value, + bool> = false> + void assertSafeToReferenceAfterClear(ItTy, ItTy) {} + + /// Check whether any part of the range will be invalidated by growing. + void assertSafeToAddRange(const T* From, const T* To) { + if (From == To) + return; + this->assertSafeToAdd(From, To - From); + this->assertSafeToAdd(To - 1, To - From); + } + template < + class ItTy, + std::enable_if_t< + !std::is_same, T*>::value, + bool> = false> + void assertSafeToAddRange(ItTy, ItTy) {} + + /// Reserve enough space to add one element, and return the updated element + /// pointer in case it was a reference to the storage. 
+ template + static const T* reserveForParamAndGetAddressImpl( + U* This, + const T& Elt, + size_t N) { + size_t NewSize = This->size() + N; + if (C10_LIKELY(NewSize <= This->capacity())) + return &Elt; + + bool ReferencesStorage = false; + int64_t Index = -1; + if (!U::TakesParamByValue) { + if (C10_UNLIKELY(This->isReferenceToStorage(&Elt))) { + ReferencesStorage = true; + Index = &Elt - This->begin(); + } + } + This->grow(NewSize); + return ReferencesStorage ? This->begin() + Index : &Elt; + } + + public: + using size_type = size_t; + using difference_type = ptrdiff_t; + using value_type = T; + using iterator = T*; + using const_iterator = const T*; + + using const_reverse_iterator = std::reverse_iterator; + using reverse_iterator = std::reverse_iterator; + + using reference = T&; + using const_reference = const T&; + using pointer = T*; + using const_pointer = const T*; + + using Base::capacity; + using Base::empty; + using Base::size; + + // forward iterator creation methods. + iterator begin() { + return (iterator)this->BeginX; + } + const_iterator begin() const { + return (const_iterator)this->BeginX; + } + iterator end() { + return begin() + size(); + } + const_iterator end() const { + return begin() + size(); + } + + // reverse iterator creation methods. + reverse_iterator rbegin() { + return reverse_iterator(end()); + } + const_reverse_iterator rbegin() const { + return const_reverse_iterator(end()); + } + reverse_iterator rend() { + return reverse_iterator(begin()); + } + const_reverse_iterator rend() const { + return const_reverse_iterator(begin()); + } + + size_type size_in_bytes() const { + return size() * sizeof(T); + } + size_type max_size() const { + return std::min(this->SizeTypeMax(), size_type(-1) / sizeof(T)); + } + + size_t capacity_in_bytes() const { + return capacity() * sizeof(T); + } + + /// Return a pointer to the vector's buffer, even if empty(). 
+ pointer data() { + return pointer(begin()); + } + /// Return a pointer to the vector's buffer, even if empty(). + const_pointer data() const { + return const_pointer(begin()); + } + + // SmallVector::at is NOT from LLVM. + reference at(size_type idx) { + assert(idx < size()); + return begin()[idx]; + } + const_reference at(size_type idx) const { + assert(idx < size()); + return begin()[idx]; + } + reference operator[](size_type idx) { + assert(idx < size()); + return begin()[idx]; + } + const_reference operator[](size_type idx) const { + assert(idx < size()); + return begin()[idx]; + } + + reference front() { + assert(!empty()); + return begin()[0]; + } + const_reference front() const { + assert(!empty()); + return begin()[0]; + } + + reference back() { + assert(!empty()); + return end()[-1]; + } + const_reference back() const { + assert(!empty()); + return end()[-1]; + } +}; + +/// SmallVectorTemplateBase - This is where we put +/// method implementations that are designed to work with non-trivial T's. +/// +/// We approximate is_trivially_copyable with trivial move/copy construction and +/// trivial destruction. While the standard doesn't specify that you're allowed +/// copy these types with memcpy, there is no way for the type to observe this. +/// This catches the important case of std::pair, which is not +/// trivially assignable. 
+/// +/// XXX: if build fails here fall back to C10_IS_TRIVIALLY_COPYABLE and make a +/// note +template < + typename T, + bool = (std::is_trivially_copy_constructible::value) && + (std::is_trivially_move_constructible::value) && + std::is_trivially_destructible::value> +class SmallVectorTemplateBase : public SmallVectorTemplateCommon { + friend class SmallVectorTemplateCommon; + + protected: + static constexpr bool TakesParamByValue = false; + using ValueParamT = const T&; + + SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon(Size) {} + + static void destroy_range(T* S, T* E) { + while (S != E) { + --E; + E->~T(); + } + } + + /// Move the range [I, E) into the uninitialized memory starting with "Dest", + /// constructing elements as needed. + template + static void uninitialized_move(It1 I, It1 E, It2 Dest) { + std::uninitialized_copy( + std::make_move_iterator(I), std::make_move_iterator(E), Dest); + } + + /// Copy the range [I, E) onto the uninitialized memory starting with "Dest", + /// constructing elements as needed. + template + static void uninitialized_copy(It1 I, It1 E, It2 Dest) { + std::uninitialized_copy(I, E, Dest); + } + + /// Grow the allocated memory (without initializing new elements), doubling + /// the size of the allocated memory. Guarantees space for at least one more + /// element, or MinSize more elements if specified. + void grow(size_t MinSize = 0); + + /// Create a new allocation big enough for \p MinSize and pass back its size + /// in \p NewCapacity. This is the first section of \a grow(). + T* mallocForGrow(size_t MinSize, size_t& NewCapacity) { + return static_cast( + SmallVectorBase>::mallocForGrow( + MinSize, sizeof(T), NewCapacity)); + } + + /// Move existing elements over to the new allocation \p NewElts, the middle + /// section of \a grow(). + void moveElementsForGrow(T* NewElts); + + /// Transfer ownership of the allocation, finishing up \a grow(). 
+ void takeAllocationForGrow(T* NewElts, size_t NewCapacity); + + /// Reserve enough space to add one element, and return the updated element + /// pointer in case it was a reference to the storage. + const T* reserveForParamAndGetAddress(const T& Elt, size_t N = 1) { + return this->reserveForParamAndGetAddressImpl(this, Elt, N); + } + + /// Reserve enough space to add one element, and return the updated element + /// pointer in case it was a reference to the storage. + T* reserveForParamAndGetAddress(T& Elt, size_t N = 1) { + return const_cast(this->reserveForParamAndGetAddressImpl(this, Elt, N)); + } + + static T&& forward_value_param(T&& V) { + return std::move(V); + } + static const T& forward_value_param(const T& V) { + return V; + } + + void growAndAssign(size_t NumElts, const T& Elt) { + // Grow manually in case Elt is an internal reference. + size_t NewCapacity = 0; + T* NewElts = mallocForGrow(NumElts, NewCapacity); + std::uninitialized_fill_n(NewElts, NumElts, Elt); + this->destroy_range(this->begin(), this->end()); + takeAllocationForGrow(NewElts, NewCapacity); + this->set_size(NumElts); + } + + template + T& growAndEmplaceBack(ArgTypes&&... Args) { + // Grow manually in case one of Args is an internal reference. 
+ size_t NewCapacity = 0; + T* NewElts = mallocForGrow(0, NewCapacity); + ::new ((void*)(NewElts + this->size())) T(std::forward(Args)...); + moveElementsForGrow(NewElts); + takeAllocationForGrow(NewElts, NewCapacity); + this->set_size(this->size() + 1); + return this->back(); + } + + public: + void push_back(const T& Elt) { + const T* EltPtr = reserveForParamAndGetAddress(Elt); + ::new ((void*)this->end()) T(*EltPtr); + this->set_size(this->size() + 1); + } + + void push_back(T&& Elt) { + T* EltPtr = reserveForParamAndGetAddress(Elt); + ::new ((void*)this->end()) T(::std::move(*EltPtr)); + this->set_size(this->size() + 1); + } + + void pop_back() { + this->set_size(this->size() - 1); + this->end()->~T(); + } +}; + +// Define this out-of-line to dissuade the C++ compiler from inlining it. +template +void SmallVectorTemplateBase::grow(size_t MinSize) { + size_t NewCapacity = 0; + T* NewElts = mallocForGrow(MinSize, NewCapacity); + moveElementsForGrow(NewElts); + takeAllocationForGrow(NewElts, NewCapacity); +} + +// Define this out-of-line to dissuade the C++ compiler from inlining it. +template +void SmallVectorTemplateBase::moveElementsForGrow( + T* NewElts) { + // Move the elements over. + this->uninitialized_move(this->begin(), this->end(), NewElts); + + // Destroy the original elements. + destroy_range(this->begin(), this->end()); +} + +// Define this out-of-line to dissuade the C++ compiler from inlining it. +template +void SmallVectorTemplateBase::takeAllocationForGrow( + T* NewElts, + size_t NewCapacity) { + // If this wasn't grown from the inline copy, deallocate the old space. + if (!this->isSmall()) + free(this->begin()); + + this->BeginX = NewElts; + this->Capacity = NewCapacity; +} + +/// SmallVectorTemplateBase - This is where we put +/// method implementations that are designed to work with trivially copyable +/// T's. This allows using memcpy in place of copy/move construction and +/// skipping destruction. 
+template +class SmallVectorTemplateBase : public SmallVectorTemplateCommon { + friend class SmallVectorTemplateCommon; + + protected: + /// True if it's cheap enough to take parameters by value. Doing so avoids + /// overhead related to mitigations for reference invalidation. + static constexpr bool TakesParamByValue = sizeof(T) <= 2 * sizeof(void*); + + /// Either const T& or T, depending on whether it's cheap enough to take + /// parameters by value. + using ValueParamT = + typename std::conditional::type; + + SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon(Size) {} + + // No need to do a destroy loop for POD's. + static void destroy_range(T*, T*) {} + + /// Move the range [I, E) onto the uninitialized memory + /// starting with "Dest", constructing elements into it as needed. + template + static void uninitialized_move(It1 I, It1 E, It2 Dest) { + // Just do a copy. + uninitialized_copy(I, E, Dest); + } + + /// Copy the range [I, E) onto the uninitialized memory + /// starting with "Dest", constructing elements into it as needed. + template + static void uninitialized_copy(It1 I, It1 E, It2 Dest) { + // Arbitrary iterator types; just use the basic implementation. + std::uninitialized_copy(I, E, Dest); + } + + /// Copy the range [I, E) onto the uninitialized memory + /// starting with "Dest", constructing elements into it as needed. + template + static void uninitialized_copy( + T1* I, + T1* E, + T2* Dest, + std::enable_if_t< + std::is_same::type, T2>::value>* = + nullptr) { + // Use memcpy for PODs iterated by pointers (which includes SmallVector + // iterators): std::uninitialized_copy optimizes to memmove, but we can + // use memcpy here. Note that I and E are iterators and thus might be + // invalid for memcpy if they are equal. + if (I != E) + memcpy(reinterpret_cast(Dest), I, (E - I) * sizeof(T)); + } + + /// Double the size of the allocated memory, guaranteeing space for at + /// least one more element or MinSize if specified. 
+ void grow(size_t MinSize = 0) { + this->grow_pod(MinSize, sizeof(T)); + } + + /// Reserve enough space to add one element, and return the updated element + /// pointer in case it was a reference to the storage. + const T* reserveForParamAndGetAddress(const T& Elt, size_t N = 1) { + return this->reserveForParamAndGetAddressImpl(this, Elt, N); + } + + /// Reserve enough space to add one element, and return the updated element + /// pointer in case it was a reference to the storage. + T* reserveForParamAndGetAddress(T& Elt, size_t N = 1) { + return const_cast(this->reserveForParamAndGetAddressImpl(this, Elt, N)); + } + + /// Copy \p V or return a reference, depending on \a ValueParamT. + static ValueParamT forward_value_param(ValueParamT V) { + return V; + } + + void growAndAssign(size_t NumElts, T Elt) { + // Elt has been copied in case it's an internal reference, side-stepping + // reference invalidation problems without losing the realloc optimization. + this->set_size(0); + this->grow(NumElts); + std::uninitialized_fill_n(this->begin(), NumElts, Elt); + this->set_size(NumElts); + } + + template + T& growAndEmplaceBack(ArgTypes&&... Args) { + // Use push_back with a copy in case Args has an internal reference, + // side-stepping reference invalidation problems without losing the realloc + // optimization. + push_back(T(std::forward(Args)...)); + return this->back(); + } + + public: + void push_back(ValueParamT Elt) { + const T* EltPtr = reserveForParamAndGetAddress(Elt); + memcpy(reinterpret_cast(this->end()), EltPtr, sizeof(T)); + this->set_size(this->size() + 1); + } + + void pop_back() { + this->set_size(this->size() - 1); + } +}; + +/// This class consists of common code factored out of the SmallVector class to +/// reduce code duplication based on the SmallVector 'N' template parameter. 
+template +class SmallVectorImpl : public SmallVectorTemplateBase { + using SuperClass = SmallVectorTemplateBase; + + public: + using iterator = typename SuperClass::iterator; + using const_iterator = typename SuperClass::const_iterator; + using reference = typename SuperClass::reference; + using size_type = typename SuperClass::size_type; + + protected: + using SmallVectorTemplateBase::TakesParamByValue; + using ValueParamT = typename SuperClass::ValueParamT; + + // Default ctor - Initialize to empty. + explicit SmallVectorImpl(unsigned N) : SmallVectorTemplateBase(N) {} + + public: + SmallVectorImpl(const SmallVectorImpl&) = delete; + + ~SmallVectorImpl() { + // Subclass has already destructed this vector's elements. + // If this wasn't grown from the inline copy, deallocate the old space. + if (!this->isSmall()) + free(this->begin()); + } + + void clear() { + this->destroy_range(this->begin(), this->end()); + this->Size = 0; + } + + private: + template + void resizeImpl(size_type N) { + if (N < this->size()) { + this->pop_back_n(this->size() - N); + } else if (N > this->size()) { + this->reserve(N); + for (auto I = this->end(), E = this->begin() + N; I != E; ++I) + if (ForOverwrite) + new (&*I) T; + else + new (&*I) T(); + this->set_size(N); + } + } + + public: + void resize(size_type N) { + resizeImpl(N); + } + + /// Like resize, but \ref T is POD, the new values won't be initialized. + void resize_for_overwrite(size_type N) { + resizeImpl(N); + } + + void resize(size_type N, ValueParamT NV) { + if (N == this->size()) + return; + + if (N < this->size()) { + this->pop_back_n(this->size() - N); + return; + } + + // N > this->size(). Defer to append. 
+ this->append(N - this->size(), NV); + } + + void reserve(size_type N) { + if (this->capacity() < N) + this->grow(N); + } + + void pop_back_n(size_type NumItems) { + assert(this->size() >= NumItems); + this->destroy_range(this->end() - NumItems, this->end()); + this->set_size(this->size() - NumItems); + } + + C10_NODISCARD T pop_back_val() { + T Result = ::std::move(this->back()); + this->pop_back(); + return Result; + } + + void swap(SmallVectorImpl& RHS); + + /// Add the specified range to the end of the SmallVector. + template < + typename in_iter, + typename = std::enable_if_t::iterator_category, + std::input_iterator_tag>::value>> + void append(in_iter in_start, in_iter in_end) { + this->assertSafeToAddRange(in_start, in_end); + size_type NumInputs = std::distance(in_start, in_end); + this->reserve(this->size() + NumInputs); + this->uninitialized_copy(in_start, in_end, this->end()); + this->set_size(this->size() + NumInputs); + } + + /// Append \p NumInputs copies of \p Elt to the end. + void append(size_type NumInputs, ValueParamT Elt) { + const T* EltPtr = this->reserveForParamAndGetAddress(Elt, NumInputs); + std::uninitialized_fill_n(this->end(), NumInputs, *EltPtr); + this->set_size(this->size() + NumInputs); + } + + void append(std::initializer_list IL) { + append(IL.begin(), IL.end()); + } + + void append(const SmallVectorImpl& RHS) { + append(RHS.begin(), RHS.end()); + } + + void assign(size_type NumElts, ValueParamT Elt) { + // Note that Elt could be an internal reference. + if (NumElts > this->capacity()) { + this->growAndAssign(NumElts, Elt); + return; + } + + // Assign over existing elements. 
+ std::fill_n(this->begin(), std::min(NumElts, this->size()), Elt); + if (NumElts > this->size()) + std::uninitialized_fill_n(this->end(), NumElts - this->size(), Elt); + else if (NumElts < this->size()) + this->destroy_range(this->begin() + NumElts, this->end()); + this->set_size(NumElts); + } + + // FIXME: Consider assigning over existing elements, rather than clearing & + // re-initializing them - for all assign(...) variants. + + template < + typename in_iter, + typename = std::enable_if_t::iterator_category, + std::input_iterator_tag>::value>> + void assign(in_iter in_start, in_iter in_end) { + this->assertSafeToReferenceAfterClear(in_start, in_end); + clear(); + append(in_start, in_end); + } + + void assign(std::initializer_list IL) { + clear(); + append(IL); + } + + void assign(const SmallVectorImpl& RHS) { + assign(RHS.begin(), RHS.end()); + } + + iterator erase(const_iterator CI) { + // Just cast away constness because this is a non-const member function. + iterator I = const_cast(CI); + + assert( + this->isReferenceToStorage(CI) && + "Iterator to erase is out of bounds."); + + iterator N = I; + // Shift all elts down one. + std::move(I + 1, this->end(), I); + // Drop the last elt. + this->pop_back(); + return (N); + } + + iterator erase(const_iterator CS, const_iterator CE) { + // Just cast away constness because this is a non-const member function. + iterator S = const_cast(CS); + iterator E = const_cast(CE); + + assert(this->isRangeInStorage(S, E) && "Range to erase is out of bounds."); + + iterator N = S; + // Shift all elts down. + iterator I = std::move(E, this->end(), S); + // Drop the last elts. + this->destroy_range(I, this->end()); + this->set_size(I - this->begin()); + return (N); + } + + private: + template + iterator insert_one_impl(iterator I, ArgType&& Elt) { + // Callers ensure that ArgType is derived from T. 
+ static_assert( + std::is_same>, T>:: + value, + "ArgType must be derived from T!"); + + if (I == this->end()) { // Important special case for empty vector. + this->push_back(::std::forward(Elt)); + return this->end() - 1; + } + + assert( + this->isReferenceToStorage(I) && + "Insertion iterator is out of bounds."); + + // Grow if necessary. + size_t Index = I - this->begin(); + std::remove_reference_t* EltPtr = + this->reserveForParamAndGetAddress(Elt); + I = this->begin() + Index; + + ::new ((void*)this->end()) T(::std::move(this->back())); + // Push everything else over. + std::move_backward(I, this->end() - 1, this->end()); + this->set_size(this->size() + 1); + + // If we just moved the element we're inserting, be sure to update + // the reference (never happens if TakesParamByValue). + static_assert( + !TakesParamByValue || std::is_same::value, + "ArgType must be 'T' when taking by value!"); + if (!TakesParamByValue && this->isReferenceToRange(EltPtr, I, this->end())) + ++EltPtr; + + *I = ::std::forward(*EltPtr); + return I; + } + + public: + iterator insert(iterator I, T&& Elt) { + return insert_one_impl(I, this->forward_value_param(std::move(Elt))); + } + + iterator insert(iterator I, const T& Elt) { + return insert_one_impl(I, this->forward_value_param(Elt)); + } + + iterator insert(iterator I, size_type NumToInsert, ValueParamT Elt) { + // Convert iterator to elt# to avoid invalidating iterator when we reserve() + size_t InsertElt = I - this->begin(); + + if (I == this->end()) { // Important special case for empty vector. + append(NumToInsert, Elt); + return this->begin() + InsertElt; + } + + assert( + this->isReferenceToStorage(I) && + "Insertion iterator is out of bounds."); + + // Ensure there is enough space, and get the (maybe updated) address of + // Elt. + const T* EltPtr = this->reserveForParamAndGetAddress(Elt, NumToInsert); + + // Uninvalidate the iterator. 
+ I = this->begin() + InsertElt; + + // If there are more elements between the insertion point and the end of the + // range than there are being inserted, we can use a simple approach to + // insertion. Since we already reserved space, we know that this won't + // reallocate the vector. + if (size_t(this->end() - I) >= NumToInsert) { + T* OldEnd = this->end(); + append( + std::move_iterator(this->end() - NumToInsert), + std::move_iterator(this->end())); + + // Copy the existing elements that get replaced. + std::move_backward(I, OldEnd - NumToInsert, OldEnd); + + // If we just moved the element we're inserting, be sure to update + // the reference (never happens if TakesParamByValue). + if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end()) + EltPtr += NumToInsert; + + std::fill_n(I, NumToInsert, *EltPtr); + return I; + } + + // Otherwise, we're inserting more elements than exist already, and we're + // not inserting at the end. + + // Move over the elements that we're about to overwrite. + T* OldEnd = this->end(); + this->set_size(this->size() + NumToInsert); + size_t NumOverwritten = OldEnd - I; + this->uninitialized_move(I, OldEnd, this->end() - NumOverwritten); + + // If we just moved the element we're inserting, be sure to update + // the reference (never happens if TakesParamByValue). + if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end()) + EltPtr += NumToInsert; + + // Replace the overwritten part. + std::fill_n(I, NumOverwritten, *EltPtr); + + // Insert the non-overwritten middle part. + std::uninitialized_fill_n(OldEnd, NumToInsert - NumOverwritten, *EltPtr); + return I; + } + + template < + typename ItTy, + typename = std::enable_if_t::iterator_category, + std::input_iterator_tag>::value>> + iterator insert(iterator I, ItTy From, ItTy To) { + // Convert iterator to elt# to avoid invalidating iterator when we reserve() + size_t InsertElt = I - this->begin(); + + if (I == this->end()) { // Important special case for empty vector. 
+ append(From, To); + return this->begin() + InsertElt; + } + + assert( + this->isReferenceToStorage(I) && + "Insertion iterator is out of bounds."); + + // Check that the reserve that follows doesn't invalidate the iterators. + this->assertSafeToAddRange(From, To); + + size_t NumToInsert = std::distance(From, To); + + // Ensure there is enough space. + reserve(this->size() + NumToInsert); + + // Uninvalidate the iterator. + I = this->begin() + InsertElt; + + // If there are more elements between the insertion point and the end of the + // range than there are being inserted, we can use a simple approach to + // insertion. Since we already reserved space, we know that this won't + // reallocate the vector. + if (size_t(this->end() - I) >= NumToInsert) { + T* OldEnd = this->end(); + append( + std::move_iterator(this->end() - NumToInsert), + std::move_iterator(this->end())); + + // Copy the existing elements that get replaced. + std::move_backward(I, OldEnd - NumToInsert, OldEnd); + + std::copy(From, To, I); + return I; + } + + // Otherwise, we're inserting more elements than exist already, and we're + // not inserting at the end. + + // Move over the elements that we're about to overwrite. + T* OldEnd = this->end(); + this->set_size(this->size() + NumToInsert); + size_t NumOverwritten = OldEnd - I; + this->uninitialized_move(I, OldEnd, this->end() - NumOverwritten); + + // Replace the overwritten part. + for (T* J = I; NumOverwritten > 0; --NumOverwritten) { + *J = *From; + ++J; + ++From; + } + + // Insert the non-overwritten middle part. + this->uninitialized_copy(From, To, OldEnd); + return I; + } + + void insert(iterator I, std::initializer_list IL) { + insert(I, IL.begin(), IL.end()); + } + + template + reference emplace_back(ArgTypes&&... 
Args) { + if (C10_UNLIKELY(this->size() >= this->capacity())) + return this->growAndEmplaceBack(std::forward(Args)...); + + ::new ((void*)this->end()) T(std::forward(Args)...); + this->set_size(this->size() + 1); + return this->back(); + } + + SmallVectorImpl& operator=(const SmallVectorImpl& RHS); + + SmallVectorImpl& operator=(SmallVectorImpl&& RHS) noexcept( + std::is_nothrow_move_constructible_v&& + std::is_nothrow_destructible_v); + + bool operator==(const SmallVectorImpl& RHS) const { + if (this->size() != RHS.size()) + return false; + return std::equal(this->begin(), this->end(), RHS.begin()); + } + bool operator!=(const SmallVectorImpl& RHS) const { + return !(*this == RHS); + } + + bool operator<(const SmallVectorImpl& RHS) const { + return std::lexicographical_compare( + this->begin(), this->end(), RHS.begin(), RHS.end()); + } +}; + +template +void SmallVectorImpl::swap(SmallVectorImpl& RHS) { + if (this == &RHS) + return; + + // We can only avoid copying elements if neither vector is small. + if (!this->isSmall() && !RHS.isSmall()) { + std::swap(this->BeginX, RHS.BeginX); + std::swap(this->Size, RHS.Size); + std::swap(this->Capacity, RHS.Capacity); + return; + } + this->reserve(RHS.size()); + RHS.reserve(this->size()); + + // Swap the shared elements. + size_t NumShared = this->size(); + if (NumShared > RHS.size()) + NumShared = RHS.size(); + for (size_type i = 0; i != NumShared; ++i) + std::swap((*this)[i], RHS[i]); + + // Copy over the extra elts. 
+ if (this->size() > RHS.size()) { + size_t EltDiff = this->size() - RHS.size(); + this->uninitialized_copy(this->begin() + NumShared, this->end(), RHS.end()); + RHS.set_size(RHS.size() + EltDiff); + this->destroy_range(this->begin() + NumShared, this->end()); + this->set_size(NumShared); + } else if (RHS.size() > this->size()) { + size_t EltDiff = RHS.size() - this->size(); + this->uninitialized_copy(RHS.begin() + NumShared, RHS.end(), this->end()); + this->set_size(this->size() + EltDiff); + this->destroy_range(RHS.begin() + NumShared, RHS.end()); + RHS.set_size(NumShared); + } +} + +template +SmallVectorImpl& SmallVectorImpl::operator=( + const SmallVectorImpl& RHS) { + // Avoid self-assignment. + if (this == &RHS) + return *this; + + // If we already have sufficient space, assign the common elements, then + // destroy any excess. + size_t RHSSize = RHS.size(); + size_t CurSize = this->size(); + if (CurSize >= RHSSize) { + // Assign common elements. + iterator NewEnd; + if (RHSSize) + NewEnd = std::copy(RHS.begin(), RHS.begin() + RHSSize, this->begin()); + else + NewEnd = this->begin(); + + // Destroy excess elements. + this->destroy_range(NewEnd, this->end()); + + // Trim. + this->set_size(RHSSize); + return *this; + } + + // If we have to grow to have enough elements, destroy the current elements. + // This allows us to avoid copying them during the grow. + // FIXME: don't do this if they're efficiently moveable. + if (this->capacity() < RHSSize) { + // Destroy current elements. + this->clear(); + CurSize = 0; + this->grow(RHSSize); + } else if (CurSize) { + // Otherwise, use assignment for the already-constructed elements. + std::copy(RHS.begin(), RHS.begin() + CurSize, this->begin()); + } + + // Copy construct the new elements in place. + this->uninitialized_copy( + RHS.begin() + CurSize, RHS.end(), this->begin() + CurSize); + + // Set end. 
+ this->set_size(RHSSize); + return *this; +} + +template +SmallVectorImpl& SmallVectorImpl::operator=( + SmallVectorImpl&& RHS) noexcept(std::is_nothrow_move_constructible_v&& + std::is_nothrow_destructible_v) { + // Avoid self-assignment. + if (this == &RHS) + return *this; + + // If the RHS isn't small, clear this vector and then steal its buffer. + if (!RHS.isSmall()) { + this->destroy_range(this->begin(), this->end()); + if (!this->isSmall()) + free(this->begin()); + this->BeginX = RHS.BeginX; + this->Size = RHS.Size; + this->Capacity = RHS.Capacity; + RHS.resetToSmall(); + return *this; + } + + // If we already have sufficient space, assign the common elements, then + // destroy any excess. + size_t RHSSize = RHS.size(); + size_t CurSize = this->size(); + if (CurSize >= RHSSize) { + // Assign common elements. + iterator NewEnd = this->begin(); + if (RHSSize) + NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd); + + // Destroy excess elements and trim the bounds. + this->destroy_range(NewEnd, this->end()); + this->set_size(RHSSize); + + // Clear the RHS. + RHS.clear(); + + return *this; + } + + // If we have to grow to have enough elements, destroy the current elements. + // This allows us to avoid copying them during the grow. + // FIXME: this may not actually make any sense if we can efficiently move + // elements. + if (this->capacity() < RHSSize) { + // Destroy current elements. + this->clear(); + CurSize = 0; + this->grow(RHSSize); + } else if (CurSize) { + // Otherwise, use assignment for the already-constructed elements. + std::move(RHS.begin(), RHS.begin() + CurSize, this->begin()); + } + + // Move-construct the new elements in place. + this->uninitialized_move( + RHS.begin() + CurSize, RHS.end(), this->begin() + CurSize); + + // Set end. + this->set_size(RHSSize); + + RHS.clear(); + return *this; +} + +/// Storage for the SmallVector elements. This is specialized for the N=0 case +/// to avoid allocating unnecessary storage. 
template <typename T, unsigned N>
struct SmallVectorStorage {
  alignas(T) char InlineElts[N * sizeof(T)];
};

/// We need the storage to be properly aligned even for small-size of 0 so that
/// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is
/// well-defined.
template <typename T>
struct alignas(T) SmallVectorStorage<T, 0> {};

/// Forward declaration of SmallVector so that
/// calculateSmallVectorDefaultInlinedElements can reference
/// `sizeof(SmallVector<T, 0>)`.
template <typename T, unsigned N>
class /* LLVM_GSL_OWNER */ SmallVector;

/// Helper class for calculating the default number of inline elements for
/// `SmallVector<T>`.
///
/// This should be migrated to a constexpr function when our minimum
/// compiler support is enough for multi-statement constexpr functions.
template <typename T>
struct CalculateSmallVectorDefaultInlinedElements {
  // Parameter controlling the default number of inlined elements
  // for `SmallVector<T>`.
  //
  // The default number of inlined elements ensures that
  // 1. There is at least one inlined element.
  // 2. `sizeof(SmallVector<T>) <= kPreferredSmallVectorSizeof` unless
  // it contradicts 1.
  static constexpr size_t kPreferredSmallVectorSizeof = 64;

  // static_assert that sizeof(T) is not "too big".
  //
  // Because our policy guarantees at least one inlined element, it is possible
  // for an arbitrarily large inlined element to allocate an arbitrarily large
  // amount of inline storage. We generally consider it an antipattern for a
  // SmallVector to allocate an excessive amount of inline storage, so we want
  // to call attention to these cases and make sure that users are making an
  // intentional decision if they request a lot of inline storage.
  //
  // We want this assertion to trigger in pathological cases, but otherwise
  // not be too easy to hit. To accomplish that, the cutoff is actually
  // somewhat larger than kPreferredSmallVectorSizeof (otherwise,
  // `SmallVector<SmallVector<T>>` would be one easy way to trip it, and that
  // pattern seems useful in practice).
  //
  // One wrinkle is that this assertion is in theory non-portable, since
  // sizeof(T) is in general platform-dependent. However, we don't expect this
  // to be much of an issue, because most LLVM development happens on 64-bit
  // hosts, and therefore sizeof(T) is expected to *decrease* when compiled for
  // 32-bit hosts, dodging the issue. The reverse situation, where development
  // happens on a 32-bit host and then fails due to sizeof(T) *increasing* on a
  // 64-bit host, is expected to be very rare.
  static_assert(
      sizeof(T) <= 256,
      "You are trying to use a default number of inlined elements for "
      "`SmallVector<T>` but `sizeof(T)` is really big! Please use an "
      "explicit number of inlined elements with `SmallVector<T, N>` to make "
      "sure you really want that much inline storage.");

  // Discount the size of the header itself when calculating the maximum inline
  // bytes.
  static constexpr size_t PreferredInlineBytes =
      kPreferredSmallVectorSizeof - sizeof(SmallVector<T, 0>);
  static constexpr size_t NumElementsThatFit = PreferredInlineBytes / sizeof(T);
  static constexpr size_t value =
      NumElementsThatFit == 0 ? 1 : NumElementsThatFit;
};
This will choose a default number of inlined elements +/// reasonable for allocation on the stack (for example, trying to keep \c +/// sizeof(SmallVector) around 64 bytes). +/// +/// \warning This does not attempt to be exception safe. +/// +/// \see https://llvm.org/docs/ProgrammersManual.html#llvm-adt-smallvector-h +template < + typename T, + unsigned N = CalculateSmallVectorDefaultInlinedElements::value> +class /* LLVM_GSL_OWNER */ SmallVector : public SmallVectorImpl, + SmallVectorStorage { + public: + SmallVector() : SmallVectorImpl(N) {} + + ~SmallVector() { + // Destroy the constructed elements in the vector. + this->destroy_range(this->begin(), this->end()); + } + + explicit SmallVector(size_t Size, const T& Value = T()) + : SmallVectorImpl(N) { + this->assign(Size, Value); + } + + template < + typename ItTy, + typename = std::enable_if_t::iterator_category, + std::input_iterator_tag>::value>> + SmallVector(ItTy S, ItTy E) : SmallVectorImpl(N) { + this->append(S, E); + } + + // note: The enable_if restricts Container to types that have a .begin() and + // .end() that return valid input iterators. 
+ template < + typename Container, + std::enable_if_t< + std::is_convertible< + typename std::iterator_traits< + decltype(std::declval() + .begin())>::iterator_category, + std::input_iterator_tag>::value && + std::is_convertible< + typename std::iterator_traits< + decltype(std::declval() + .end())>::iterator_category, + std::input_iterator_tag>::value, + int> = 0> + explicit SmallVector(Container&& c) : SmallVectorImpl(N) { + this->append(c.begin(), c.end()); + } + + SmallVector(std::initializer_list IL) : SmallVectorImpl(N) { + this->assign(IL); + } + + SmallVector(const SmallVector& RHS) : SmallVectorImpl(N) { + if (!RHS.empty()) + SmallVectorImpl::operator=(RHS); + } + + SmallVector& operator=(const SmallVector& RHS) { + SmallVectorImpl::operator=(RHS); + return *this; + } + + SmallVector(SmallVector&& RHS) noexcept( + std::is_nothrow_move_assignable_v>) + : SmallVectorImpl(N) { + if (!RHS.empty()) + SmallVectorImpl::operator=(::std::move(RHS)); + } + + // note: The enable_if restricts Container to types that have a .begin() and + // .end() that return valid input iterators. 
+ template < + typename Container, + std::enable_if_t< + std::is_convertible< + typename std::iterator_traits< + decltype(std::declval() + .begin())>::iterator_category, + std::input_iterator_tag>::value && + std::is_convertible< + typename std::iterator_traits< + decltype(std::declval() + .end())>::iterator_category, + std::input_iterator_tag>::value, + int> = 0> + SmallVector& operator=(const Container& RHS) { + this->assign(RHS.begin(), RHS.end()); + return *this; + } + + SmallVector(SmallVectorImpl&& RHS) noexcept( + std::is_nothrow_move_assignable_v>) + : SmallVectorImpl(N) { + if (!RHS.empty()) + SmallVectorImpl::operator=(::std::move(RHS)); + } + + SmallVector& operator=(SmallVector&& RHS) noexcept( + std::is_nothrow_move_assignable_v>) { + SmallVectorImpl::operator=(::std::move(RHS)); + return *this; + } + + SmallVector& operator=(SmallVectorImpl&& RHS) noexcept( + std::is_nothrow_move_constructible_v>) { + SmallVectorImpl::operator=(::std::move(RHS)); + return *this; + } + + // note: The enable_if restricts Container to types that have a .begin() and + // .end() that return valid input iterators. 
+ template < + typename Container, + std::enable_if_t< + std::is_convertible< + typename std::iterator_traits< + decltype(std::declval() + .begin())>::iterator_category, + std::input_iterator_tag>::value && + std::is_convertible< + typename std::iterator_traits< + decltype(std::declval() + .end())>::iterator_category, + std::input_iterator_tag>::value, + int> = 0> + SmallVector& operator=(Container&& C) { + this->assign(C.begin(), C.end()); + return *this; + } + + SmallVector& operator=(std::initializer_list IL) { + this->assign(IL); + return *this; + } +}; + +template +inline size_t capacity_in_bytes(const SmallVector& X) { + return X.capacity_in_bytes(); +} + +template +std::ostream& operator<<(std::ostream& out, const SmallVector& list) { + int i = 0; + out << "["; + for (auto e : list) { + if (i++ > 0) + out << ", "; + out << e; + } + out << "]"; + return out; +} + +template +using ValueTypeFromRangeType = + typename std::remove_const()))>::type>::type; + +/// Given a range of type R, iterate the entire range and return a +/// SmallVector with elements of the vector. This is useful, for example, +/// when you want to iterate a range and then sort the results. +template +SmallVector, Size> to_vector(R&& Range) { + return {std::begin(Range), std::end(Range)}; +} +template +SmallVector< + ValueTypeFromRangeType, + CalculateSmallVectorDefaultInlinedElements< + ValueTypeFromRangeType>::value> +to_vector(R&& Range) { + return {std::begin(Range), std::end(Range)}; +} + +} // end namespace c10 + +namespace std { + +/// Implement std::swap in terms of SmallVector swap. +template +inline void swap(c10::SmallVectorImpl& LHS, c10::SmallVectorImpl& RHS) { + LHS.swap(RHS); +} + +/// Implement std::swap in terms of SmallVector swap. 
+template +inline void swap(c10::SmallVector& LHS, c10::SmallVector& RHS) { + LHS.swap(RHS); +} + +} // end namespace std + +C10_CLANG_DIAGNOSTIC_POP() diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/ThreadLocalDebugInfo.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/ThreadLocalDebugInfo.h new file mode 100644 index 0000000000000000000000000000000000000000..8820d35ac47ba311eee50e31e18441463155e528 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/ThreadLocalDebugInfo.h @@ -0,0 +1,83 @@ +#pragma once + +#include + +#include +#include + +namespace c10 { + +enum class C10_API_ENUM DebugInfoKind : uint8_t { + PRODUCER_INFO = 0, + MOBILE_RUNTIME_INFO, + PROFILER_STATE, + INFERENCE_CONTEXT, // for inference usage + PARAM_COMMS_INFO, + + TEST_INFO, // used only in tests + TEST_INFO_2, // used only in tests +}; + +class C10_API DebugInfoBase { + public: + DebugInfoBase() = default; + virtual ~DebugInfoBase() = default; +}; + +// Thread local debug information is propagated across the forward +// (including async fork tasks) and backward passes and is supposed +// to be utilized by the user's code to pass extra information from +// the higher layers (e.g. model id) down to the lower levels +// (e.g. 
to the operator observers used for debugging, logging, +// profiling, etc) +class C10_API ThreadLocalDebugInfo { + public: + static DebugInfoBase* get(DebugInfoKind kind); + + // Get current ThreadLocalDebugInfo + static std::shared_ptr current(); + + // Internal, use DebugInfoGuard/ThreadLocalStateGuard + static void _forceCurrentDebugInfo( + std::shared_ptr info); + + // Push debug info struct of a given kind + static void _push(DebugInfoKind kind, std::shared_ptr info); + // Pop debug info, throws in case the last pushed + // debug info is not of a given kind + static std::shared_ptr _pop(DebugInfoKind kind); + // Peek debug info, throws in case the last pushed debug info is not of the + // given kind + static std::shared_ptr _peek(DebugInfoKind kind); + + private: + std::shared_ptr info_; + DebugInfoKind kind_; + std::shared_ptr parent_info_; + + friend class DebugInfoGuard; +}; + +// DebugInfoGuard is used to set debug information, +// ThreadLocalDebugInfo is semantically immutable, the values are set +// through the scope-based guard object. +// Nested DebugInfoGuard adds/overrides existing values in the scope, +// restoring the original values after exiting the scope. 
+// Users can access the values through the ThreadLocalDebugInfo::get() call; +class C10_API DebugInfoGuard { + public: + DebugInfoGuard(DebugInfoKind kind, std::shared_ptr info); + + explicit DebugInfoGuard(std::shared_ptr info); + + ~DebugInfoGuard(); + + DebugInfoGuard(const DebugInfoGuard&) = delete; + DebugInfoGuard(DebugInfoGuard&&) = delete; + + private: + bool active_ = false; + std::shared_ptr prev_info_ = nullptr; +}; + +} // namespace c10 diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/TypeCast.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/TypeCast.h new file mode 100644 index 0000000000000000000000000000000000000000..29ed70464d1f238d57b11c447a6dc18c6eb835a6 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/TypeCast.h @@ -0,0 +1,168 @@ +#pragma once +#include +#include +#include +#include +#include +#include +#include + +#include + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-float-conversion") +#endif +#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion") +#endif + +namespace c10 { + +template +struct needs_real { + constexpr static bool value = + (is_complex::value && !is_complex::value); +}; + +template +struct maybe_real { + C10_HOST_DEVICE static inline src_t apply(src_t src) { + return src; + } +}; + +template +struct maybe_real { + C10_HOST_DEVICE static inline decltype(auto) apply(src_t src) { + return src.real(); + } +}; + +// Note: deliberately ignores undefined behavior, consistent with NumPy. +// PyTorch's type conversions can cause a variety of undefined behavior, +// including float to integral overflow and signed to unsigned integer overflow. +// Some of this undefined behavior is addressed below. 
+template +struct static_cast_with_inter_type { + C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline dest_t apply( + src_t src) { + constexpr bool real = needs_real::value; + auto r = maybe_real::apply(src); + return static_cast(r); + } +}; + +// Partial template instantiation for casting to uint8. +// Note: Converting from negative float values to unsigned integer types is +// undefined behavior in C++, and current CPU and GPU compilers exhibit +// divergent behavior. Casting from negative float values to signed +// integer types and then to unsigned integer types is not undefined, +// however, so this cast improves the consistency of type conversions +// to uint8 across compilers. +// Further note: Type conversions across compilers still have other undefined +// and divergent behavior. +template +struct static_cast_with_inter_type { + C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline uint8_t apply( + src_t src) { + constexpr bool real = needs_real::value; + return static_cast( + static_cast(maybe_real::apply(src))); + } +}; + +template <> +struct static_cast_with_inter_type, c10::BFloat16> { + C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex< + c10::Half> + apply(c10::BFloat16 src) { + return static_cast>(c10::complex{src}); + } +}; + +template <> +struct static_cast_with_inter_type, c10::Float8_e5m2> { + C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex< + c10::Half> + apply(c10::Float8_e5m2 src) { + return static_cast>(c10::complex{src}); + } +}; + +template <> +struct static_cast_with_inter_type< + c10::complex, + c10::Float8_e5m2fnuz> { + C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex< + c10::Half> + apply(c10::Float8_e5m2fnuz src) { + return static_cast>(c10::complex{src}); + } +}; + +template <> +struct static_cast_with_inter_type< + c10::complex, + c10::Float8_e4m3fn> { + C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex< + c10::Half> + apply(c10::Float8_e4m3fn 
src) { + return static_cast>(c10::complex{src}); + } +}; + +template <> +struct static_cast_with_inter_type< + c10::complex, + c10::Float8_e4m3fnuz> { + C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex< + c10::Half> + apply(c10::Float8_e4m3fnuz src) { + return static_cast>(c10::complex{src}); + } +}; + +template <> +struct static_cast_with_inter_type, c10::Half> { + C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex< + c10::Half> + apply(c10::Half src) { + return static_cast>(c10::complex{src}); + } +}; + +template <> +struct static_cast_with_inter_type< + c10::complex, + c10::complex> { + C10_HOST_DEVICE __ubsan_ignore_undefined__ static inline c10::complex< + c10::Half> + apply(c10::complex src) { + return static_cast>( + static_cast>(src)); + } +}; + +template +C10_HOST_DEVICE To convert(From f) { + return static_cast_with_inter_type::apply(f); +} + +// Define separately to avoid being inlined and prevent code-size bloat +C10_API void report_overflow(const char* name); + +template +To checked_convert(From f, const char* name) { + // Converting to bool can't overflow so we exclude this case from checking. + if (!std::is_same::value && overflows(f)) { + report_overflow(name); + } + return convert(f); +} + +} // namespace c10 + +C10_CLANG_DIAGNOSTIC_POP() + +// Trigger tests for D25440771. TODO: Remove this line any time you want. 
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/TypeIndex.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/TypeIndex.h new file mode 100644 index 0000000000000000000000000000000000000000..a8a8d4568921aad859d7610a3e29a33ec84c3f92 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/TypeIndex.h @@ -0,0 +1,196 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace c10 { +namespace util { + +// TODO Make it work for more compilers + +// Intel compiler works +#if defined(__INTEL_COMPILER) +#define C10_TYPENAME_SUPPORTS_CONSTEXPR 0 +#define C10_TYPENAME_CONSTEXPR + +// Clang works +#elif defined(__clang__) + +// except for NVCC +#if defined(__CUDACC__) +#define C10_TYPENAME_SUPPORTS_CONSTEXPR 0 +#define C10_TYPENAME_CONSTEXPR +#else +#define C10_TYPENAME_SUPPORTS_CONSTEXPR 1 +#define C10_TYPENAME_CONSTEXPR constexpr +#endif + +// Windows works +#elif defined(_MSC_VER) + +// except for NVCC +#if defined(__CUDACC__) +#define C10_TYPENAME_SUPPORTS_CONSTEXPR 0 +#define C10_TYPENAME_CONSTEXPR +#else +#define C10_TYPENAME_SUPPORTS_CONSTEXPR 1 +#define C10_TYPENAME_CONSTEXPR constexpr +#endif + +// GCC works +#elif defined(__GNUC__) + +// except when gcc < 9 +#if (__GNUC__ < 9) || defined(__CUDACC__) +#define C10_TYPENAME_SUPPORTS_CONSTEXPR 0 +#define C10_TYPENAME_CONSTEXPR +#else +#define C10_TYPENAME_SUPPORTS_CONSTEXPR 1 +#define C10_TYPENAME_CONSTEXPR constexpr +#endif + +// some other compiler we don't know about +#else +#define C10_TYPENAME_SUPPORTS_CONSTEXPR 1 +#define C10_TYPENAME_CONSTEXPR constexpr +#endif + +struct type_index final : IdWrapper { + constexpr explicit type_index(uint64_t checksum) : IdWrapper(checksum) {} + + // Allow usage in std::map / std::set + // TODO Disallow this and rather use std::unordered_map/set everywhere + friend constexpr bool operator<(type_index lhs, type_index rhs) noexcept { + return lhs.underlyingId() < 
rhs.underlyingId(); + } + + friend std::ostream& operator<<(std::ostream& stream, type_index typeId) { + return stream << typeId.underlyingId(); + } +}; + +namespace detail { + +#if !defined(__clang__) && !defined(_MSC_VER) && defined(__GNUC__) && \ + __GNUC__ < 5 +// Getting __PRETTY_FUNCTION__ at compile time only works with GCC >= 5 +#error "You're running a too old version of GCC. We need GCC 5 or later." +#endif + +#if defined(__clang__) && __clang_major__ < 4 +// Getting __PRETTY_FUNCTION__ at compile time only works with Clang >= 4 +#error "You're running a too old version of Clang. We need Clang 4 or later." +#endif + +inline constexpr string_view extract( + string_view prefix, + string_view suffix, + string_view str) { +#if !defined(__CUDA_ARCH__) // CUDA doesn't like std::logic_error in device code + return (!str.starts_with(prefix) || !str.ends_with(suffix)) + ? (throw std::logic_error("Invalid pattern"), string_view()) + : str.substr(prefix.size(), str.size() - prefix.size() - suffix.size()); +#else + return str.substr(prefix.size(), str.size() - prefix.size() - suffix.size()); +#endif +} + +template +inline C10_TYPENAME_CONSTEXPR c10::string_view fully_qualified_type_name_impl() { +#if defined(_MSC_VER) && !defined(__clang__) +#if defined(__NVCC__) + return extract( + "c10::basic_string_view c10::util::detail::fully_qualified_type_name_impl<", + ">()", + __FUNCSIG__); +#else + return extract( + "class c10::basic_string_view __cdecl c10::util::detail::fully_qualified_type_name_impl<", + ">(void)", + __FUNCSIG__); +#endif +#elif defined(__clang__) + return extract( + "c10::string_view c10::util::detail::fully_qualified_type_name_impl() [T = ", + "]", + __PRETTY_FUNCTION__); +#elif defined(__GNUC__) + return extract( +#if C10_TYPENAME_SUPPORTS_CONSTEXPR + "constexpr c10::string_view c10::util::detail::fully_qualified_type_name_impl() [with T = ", +#else + "c10::string_view c10::util::detail::fully_qualified_type_name_impl() [with T = ", +#endif + "; 
c10::string_view = c10::basic_string_view]", + __PRETTY_FUNCTION__); +#endif +} + +#if !defined(__CUDA_ARCH__) +template +inline constexpr uint64_t type_index_impl() { +// Idea: __PRETTY_FUNCTION__ (or __FUNCSIG__ on msvc) contains a qualified name +// of this function, including its template parameter, i.e. including the +// type we want an id for. We use this name and run crc64 on it to get a type +// id. +#if defined(_MSC_VER) && !defined(__clang__) + return crc64(__FUNCSIG__, sizeof(__FUNCSIG__)).checksum(); +#elif defined(__clang__) + return crc64(__PRETTY_FUNCTION__, sizeof(__PRETTY_FUNCTION__)).checksum(); +#elif defined(__GNUC__) + return crc64(__PRETTY_FUNCTION__, sizeof(__PRETTY_FUNCTION__)).checksum(); +#endif +} +#endif + +} // namespace detail + +template +inline constexpr type_index get_type_index() { +#if !defined(__CUDA_ARCH__) + // To enforce that this is really computed at compile time, we pass the + // type index through std::integral_constant. + return type_index{std::integral_constant< + uint64_t, + detail::type_index_impl>()>::value}; +#else + // There's nothing in theory preventing us from running this on device code + // except for nvcc throwing a compiler error if we enable it. + return (abort(), type_index(0)); +#endif +} + +#if !defined(TORCH_PEDANTIC) +// Use precomputed hashsum for std::string +// Needed to workaround ambiguity in class name resolution +// into __PRETTY_FUNCTION__ when abovementioned class is defined in inlined +// namespace. 
In multi-ABI C++ library, `std::string` is an alias to +// `std::__cxx11::basic_string` which depending on compiler flags can be +// resolved to `basic_string` either in `std` namespace or in +// `std::__cxx11` one (`__cxx11` is an inline namespace) +template <> +inline constexpr type_index get_type_index() { + // hashsum for std::basic_string + return type_index{4193213214807308375ULL}; +} +#endif + +template +inline C10_TYPENAME_CONSTEXPR string_view +get_fully_qualified_type_name() noexcept { +#if C10_TYPENAME_SUPPORTS_CONSTEXPR + constexpr +#else + static +#endif + string_view name = detail::fully_qualified_type_name_impl(); + return name; +} +} // namespace util +} // namespace c10 + +C10_DEFINE_HASH_FOR_IDWRAPPER(c10::util::type_index); diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Unroll.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Unroll.h new file mode 100644 index 0000000000000000000000000000000000000000..ee7aa9649a814b7274259362595c8a6c96322f8e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/Unroll.h @@ -0,0 +1,29 @@ +#pragma once +#include + +// Utility to guarantee complete unrolling of a loop where the bounds are known +// at compile time. Various pragmas achieve similar effects, but are not as +// portable across compilers. 
+ +// Example: c10::ForcedUnroll<4>{}(f); is equivalent to f(0); f(1); f(2); f(3); + +namespace c10 { + +template +struct ForcedUnroll { + template + C10_ALWAYS_INLINE void operator()(const Func& f) const { + ForcedUnroll{}(f); + f(n - 1); + } +}; + +template <> +struct ForcedUnroll<1> { + template + C10_ALWAYS_INLINE void operator()(const Func& f) const { + f(0); + } +}; + +} // namespace c10 diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/env.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/env.h new file mode 100644 index 0000000000000000000000000000000000000000..e354b28d9c8a72df88a2eef0bac936017289a546 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/env.h @@ -0,0 +1,42 @@ +#pragma once + +#include +#include +#include + +namespace c10 { +namespace utils { +// Reads an environment variable and returns +// - optional, if set equal to "1" +// - optional, if set equal to "0" +// - nullopt, otherwise +// +// NB: +// Issues a warning if the value of the environment variable is not 0 or 1. 
+inline optional check_env(const char* name) { +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4996) +#endif + auto envar = std::getenv(name); +#ifdef _MSC_VER +#pragma warning(pop) +#endif + if (envar) { + if (strcmp(envar, "0") == 0) { + return false; + } + if (strcmp(envar, "1") == 0) { + return true; + } + TORCH_WARN( + "Ignoring invalid value for boolean flag ", + name, + ": ", + envar, + "valid values are 0 or 1."); + } + return c10::nullopt; +} +} // namespace utils +} // namespace c10 diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/generic_math.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/generic_math.h new file mode 100644 index 0000000000000000000000000000000000000000..a12d59aef78224a9522731ab1faff39d5c6af568 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/generic_math.h @@ -0,0 +1,61 @@ +#pragma once + +#include +#include +#include + +// The functions in this file should be header-only as it is used under +// ABI-compatibility mode. + +namespace c10 { + +// NOTE: [Floor Division in Python] +// Python's __floordiv__ operator is more complicated than just floor(a / b). +// It aims to maintain the property: a == (a // b) * b + remainder(a, b) +// which can otherwise fail due to rounding errors in the remainder. +// So, instead it is calculated as: a // b = (a - remainder(a, b)) / b +// With some additional fix-ups added to the result. 
+// +// For reference, see CPython's implementation: +// https://github.com/python/cpython/blob/ace008c531dd685a30c1dd68f9b5ba35f20171cf/Objects/floatobject.c#L636 + +template +inline C10_HOST_DEVICE scalar_t div_floor_floating(scalar_t a, scalar_t b) + __ubsan_ignore_float_divide_by_zero__ { + if (C10_UNLIKELY(b == 0)) { + // Divide by zero: return standard IEEE result + return a / b; + } + + auto mod = std::fmod(a, b); + auto div = (a - mod) / b; + if ((mod != 0) && (b < 0) != (mod < 0)) { + div -= scalar_t(1); + } + + scalar_t floordiv; + if (div != 0) { + floordiv = std::floor(div); + if (div - floordiv > scalar_t(0.5)) { + floordiv += scalar_t(1.0); + } + } else { + floordiv = compat_copysign(scalar_t(0), a / b); + } + return floordiv; +} + +template +inline C10_HOST_DEVICE scalar_t div_floor_integer(scalar_t a, scalar_t b) { + if (c10::signs_differ(a, b)) { + // Subtracts one from the results of truncation division if the + // divisor and dividend have different sign(bit)s and the remainder of + // the division is nonzero + const auto quot = a / b; + const auto rem = a % b; + return rem ? quot - 1 : quot; + } + return a / b; +} + +} // namespace c10 diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/logging_is_not_google_glog.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/logging_is_not_google_glog.h new file mode 100644 index 0000000000000000000000000000000000000000..3b1b869756eb57246e670a0ae8610e379604d2f9 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/logging_is_not_google_glog.h @@ -0,0 +1,258 @@ +#ifndef C10_UTIL_LOGGING_IS_NOT_GOOGLE_GLOG_H_ +#define C10_UTIL_LOGGING_IS_NOT_GOOGLE_GLOG_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +const char CAFFE2_SEVERITY_PREFIX[] = "FEWIV"; + +namespace c10 { + +// Log severity level constants. 
+const int GLOG_FATAL = 3; +const int GLOG_ERROR = 2; +const int GLOG_WARNING = 1; +const int GLOG_INFO = 0; + +class C10_API MessageLogger { + public: + MessageLogger(const char* file, int line, int severity); + ~MessageLogger(); + // Return the stream associated with the logger object. + std::stringstream& stream() { + return stream_; + } + + private: + // When there is a fatal log, we simply abort. + void DealWithFatal() { + abort(); + } + + const char* tag_; + std::stringstream stream_; + int severity_; +}; + +// This class is used to explicitly ignore values in the conditional +// logging macros. This avoids compiler warnings like "value computed +// is not used" and "statement has no effect". +class C10_API LoggerVoidify { + public: + LoggerVoidify() = default; + // This has to be an operator with a precedence lower than << but + // higher than ?: + void operator&(const std::ostream& s) {} +}; + +// Log a message and terminate. +template +void LogMessageFatal(const char* file, int line, const T& message) { + MessageLogger(file, line, GLOG_FATAL).stream() << message; +} + +// Helpers for TORCH_CHECK_NOTNULL(). Two are necessary to support both raw +// pointers and smart pointers. 
+template +T& CheckNotNullCommon(const char* file, int line, const char* names, T& t) { + if (t == nullptr) { + LogMessageFatal(file, line, std::string(names)); + } + return t; +} + +template +T* CheckNotNull(const char* file, int line, const char* names, T* t) { + return CheckNotNullCommon(file, line, names, t); +} + +template +T& CheckNotNull(const char* file, int line, const char* names, T& t) { + return CheckNotNullCommon(file, line, names, t); +} +} // namespace c10 + +// ---------------------- Logging Macro definitions -------------------------- + +static_assert( + CAFFE2_LOG_THRESHOLD <= ::c10::GLOG_FATAL, + "CAFFE2_LOG_THRESHOLD should at most be GLOG_FATAL."); +// If n is under the compile time caffe log threshold, The _CAFFE_LOG(n) +// should not generate anything in optimized code. +#define LOG(n) \ + if (::c10::GLOG_##n >= CAFFE2_LOG_THRESHOLD) \ + ::c10::MessageLogger(__FILE__, __LINE__, ::c10::GLOG_##n).stream() +#define VLOG(n) \ + if (-n >= CAFFE2_LOG_THRESHOLD) \ + ::c10::MessageLogger(__FILE__, __LINE__, -n).stream() + +#define LOG_IF(n, condition) \ + if (::c10::GLOG_##n >= CAFFE2_LOG_THRESHOLD && (condition)) \ + ::c10::MessageLogger(__FILE__, __LINE__, ::c10::GLOG_##n).stream() +#define VLOG_IF(n, condition) \ + if (-n >= CAFFE2_LOG_THRESHOLD && (condition)) \ + ::c10::MessageLogger(__FILE__, __LINE__, -n).stream() + +#define VLOG_IS_ON(verboselevel) (CAFFE2_LOG_THRESHOLD <= -(verboselevel)) + +// Log with source location information override (to be used in generic +// warning/error handlers implemented as functions, not macros) +#define LOG_AT_FILE_LINE(n, file, line) \ + if (::c10::GLOG_##n >= CAFFE2_LOG_THRESHOLD) \ + ::c10::MessageLogger(file, line, ::c10::GLOG_##n).stream() + +// Log only if condition is met. Otherwise evaluates to void. +#define FATAL_IF(condition) \ + condition ? (void)0 \ + : ::c10::LoggerVoidify() & \ + ::c10::MessageLogger(__FILE__, __LINE__, ::c10::GLOG_FATAL).stream() + +// Check for a given boolean condition. 
+#define CHECK(condition) FATAL_IF(condition) << "Check failed: " #condition " " + +#ifndef NDEBUG +// Debug only version of CHECK +#define DCHECK(condition) FATAL_IF(condition) << "Check failed: " #condition " " +#define DLOG(severity) LOG(severity) +#else // NDEBUG +// Optimized version - generates no code. +#define DCHECK(condition) \ + while (false) \ + CHECK(condition) + +#define DLOG(n) \ + true ? (void)0 \ + : ::c10::LoggerVoidify() & \ + ::c10::MessageLogger(__FILE__, __LINE__, ::c10::GLOG_##n).stream() +#endif // NDEBUG + +#define TORCH_CHECK_OP(val1, val2, op) \ + FATAL_IF(((val1)op(val2))) << "Check failed: " #val1 " " #op " " #val2 " (" \ + << (val1) << " vs. " << (val2) << ") " + +// TORCH_CHECK_OP macro definitions +#define TORCH_CHECK_EQ(val1, val2) TORCH_CHECK_OP(val1, val2, ==) +#define TORCH_CHECK_NE(val1, val2) TORCH_CHECK_OP(val1, val2, !=) +#define TORCH_CHECK_LE(val1, val2) TORCH_CHECK_OP(val1, val2, <=) +#define TORCH_CHECK_LT(val1, val2) TORCH_CHECK_OP(val1, val2, <) +#define TORCH_CHECK_GE(val1, val2) TORCH_CHECK_OP(val1, val2, >=) +#define TORCH_CHECK_GT(val1, val2) TORCH_CHECK_OP(val1, val2, >) + +#ifndef NDEBUG +// Debug only versions of TORCH_CHECK_OP macros. +#define TORCH_DCHECK_EQ(val1, val2) TORCH_CHECK_OP(val1, val2, ==) +#define TORCH_DCHECK_NE(val1, val2) TORCH_CHECK_OP(val1, val2, !=) +#define TORCH_DCHECK_LE(val1, val2) TORCH_CHECK_OP(val1, val2, <=) +#define TORCH_DCHECK_LT(val1, val2) TORCH_CHECK_OP(val1, val2, <) +#define TORCH_DCHECK_GE(val1, val2) TORCH_CHECK_OP(val1, val2, >=) +#define TORCH_DCHECK_GT(val1, val2) TORCH_CHECK_OP(val1, val2, >) +#else // !NDEBUG +// These versions generate no code in optimized mode. 
+#define TORCH_DCHECK_EQ(val1, val2) \ + while (false) \ + TORCH_CHECK_OP(val1, val2, ==) +#define TORCH_DCHECK_NE(val1, val2) \ + while (false) \ + TORCH_CHECK_OP(val1, val2, !=) +#define TORCH_DCHECK_LE(val1, val2) \ + while (false) \ + TORCH_CHECK_OP(val1, val2, <=) +#define TORCH_DCHECK_LT(val1, val2) \ + while (false) \ + TORCH_CHECK_OP(val1, val2, <) +#define TORCH_DCHECK_GE(val1, val2) \ + while (false) \ + TORCH_CHECK_OP(val1, val2, >=) +#define TORCH_DCHECK_GT(val1, val2) \ + while (false) \ + TORCH_CHECK_OP(val1, val2, >) +#endif // NDEBUG + +// Check that a pointer is not null. +#define TORCH_CHECK_NOTNULL(val) \ + ::c10::CheckNotNull( \ + __FILE__, __LINE__, "Check failed: '" #val "' Must be non NULL", (val)) + +#ifndef NDEBUG +// Debug only version of TORCH_CHECK_NOTNULL +#define TORCH_DCHECK_NOTNULL(val) \ + ::c10::CheckNotNull( \ + __FILE__, __LINE__, "Check failed: '" #val "' Must be non NULL", (val)) +#else // !NDEBUG +// Optimized version - generates no code. +#define TORCH_DCHECK_NOTNULL(val) \ + while (false) \ + TORCH_CHECK_NOTNULL(val) +#endif // NDEBUG + +// ---------------------- Support for std objects -------------------------- +// These are adapted from glog to support a limited set of logging capability +// for STL objects. + +namespace std { +// Forward declare these two, and define them after all the container streams +// operators so that we can recurse from pair -> container -> container -> pair +// properly. 
+template +std::ostream& operator<<(std::ostream& out, const std::pair& p); +} // namespace std + +namespace c10 { +template +void PrintSequence(std::ostream& ss, Iter begin, Iter end); +} // namespace c10 + +namespace std { +#define INSTANTIATE_FOR_CONTAINER(container) \ + template \ + std::ostream& operator<<( \ + std::ostream& out, const container& seq) { \ + c10::PrintSequence(out, seq.begin(), seq.end()); \ + return out; \ + } + +INSTANTIATE_FOR_CONTAINER(std::vector) +INSTANTIATE_FOR_CONTAINER(std::map) +INSTANTIATE_FOR_CONTAINER(std::set) +#undef INSTANTIATE_FOR_CONTAINER + +template +inline std::ostream& operator<<( + std::ostream& out, + const std::pair& p) { + out << '(' << p.first << ", " << p.second << ')'; + return out; +} + +inline std::ostream& operator<<(std::ostream& out, const std::nullptr_t&) { + out << "(null)"; + return out; +} +} // namespace std + +namespace c10 { +template +inline void PrintSequence(std::ostream& out, Iter begin, Iter end) { + // Output at most 100 elements -- appropriate if used for logging. 
+ for (int i = 0; begin != end && i < 100; ++i, ++begin) { + if (i > 0) + out << ' '; + out << *begin; + } + if (begin != end) { + out << " ..."; + } +} +} // namespace c10 + +#endif // C10_UTIL_LOGGING_IS_NOT_GOOGLE_GLOG_H_ diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/order_preserving_flat_hash_map.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/order_preserving_flat_hash_map.h new file mode 100644 index 0000000000000000000000000000000000000000..30652a547f8cfd202753bff6226a12be0964aa6d --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/order_preserving_flat_hash_map.h @@ -0,0 +1,2232 @@ +// Taken from +// https://github.com/skarupke/flat_hash_map/blob/2c4687431f978f02a3780e24b8b701d22aa32d9c/flat_hash_map.hpp +// with fixes applied: +// - https://github.com/skarupke/flat_hash_map/pull/25 +// - https://github.com/skarupke/flat_hash_map/pull/26 +// - replace size_t with uint64_t to fix it for 32bit +// - add "GCC diagnostic" pragma to ignore -Wshadow +// - make sherwood_v3_table::convertible_to_iterator public because GCC5 seems +// to have issues with it otherwise +// - fix compiler warnings in operator templated_iterator + +// Copyright Malte Skarupke 2017. +// Distributed under the Boost Software License, Version 1.0. +// (See http://www.boost.org/LICENSE_1_0.txt) + +// Modified to maintain insertion and deletion order through a doubly-linked +// list + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion") +#endif + +#ifdef _MSC_VER +#define SKA_NOINLINE(...) __declspec(noinline) __VA_ARGS__ +#else +#define SKA_NOINLINE(...) 
__VA_ARGS__ __attribute__((noinline)) +#endif + +namespace ska_ordered { + +struct prime_number_hash_policy; +struct power_of_two_hash_policy; +struct fibonacci_hash_policy; + +namespace detailv3 { +template +struct functor_storage : Functor { + functor_storage() = default; + functor_storage(const Functor& functor) : Functor(functor) {} + template + Result operator()(Args&&... args) { + return static_cast(*this)(std::forward(args)...); + } + template + Result operator()(Args&&... args) const { + return static_cast(*this)(std::forward(args)...); + } +}; +template +struct functor_storage { + typedef Result (*function_ptr)(Args...); + function_ptr function; + functor_storage(function_ptr function) : function(function) {} + Result operator()(Args... args) const { + return function(std::forward(args)...); + } + operator function_ptr&() { + return function; + } + operator const function_ptr&() { + return function; + } +}; +template +struct KeyOrValueHasher : functor_storage { + typedef functor_storage hasher_storage; + KeyOrValueHasher() = default; + KeyOrValueHasher(const hasher& hash) : hasher_storage(hash) {} + uint64_t operator()(const key_type& key) { + return static_cast(*this)(key); + } + uint64_t operator()(const key_type& key) const { + return static_cast(*this)(key); + } + uint64_t operator()(const value_type& value) { + return static_cast(*this)(value.first); + } + uint64_t operator()(const value_type& value) const { + return static_cast(*this)(value.first); + } + template + uint64_t operator()(const std::pair& value) { + return static_cast(*this)(value.first); + } + template + uint64_t operator()(const std::pair& value) const { + return static_cast(*this)(value.first); + } +}; +template +struct KeyOrValueEquality : functor_storage { + typedef functor_storage equality_storage; + KeyOrValueEquality() = default; + KeyOrValueEquality(const key_equal& equality) : equality_storage(equality) {} + bool operator()(const key_type& lhs, const key_type& rhs) { + return 
static_cast(*this)(lhs, rhs); + } + bool operator()(const key_type& lhs, const value_type& rhs) { + return static_cast(*this)(lhs, rhs.first); + } + bool operator()(const value_type& lhs, const key_type& rhs) { + return static_cast(*this)(lhs.first, rhs); + } + bool operator()(const value_type& lhs, const value_type& rhs) { + return static_cast(*this)(lhs.first, rhs.first); + } + template + bool operator()(const key_type& lhs, const std::pair& rhs) { + return static_cast(*this)(lhs, rhs.first); + } + template + bool operator()(const std::pair& lhs, const key_type& rhs) { + return static_cast(*this)(lhs.first, rhs); + } + template + bool operator()(const value_type& lhs, const std::pair& rhs) { + return static_cast(*this)(lhs.first, rhs.first); + } + template + bool operator()(const std::pair& lhs, const value_type& rhs) { + return static_cast(*this)(lhs.first, rhs.first); + } + template + bool operator()(const std::pair& lhs, const std::pair& rhs) { + return static_cast(*this)(lhs.first, rhs.first); + } +}; +static constexpr int8_t min_lookups = 4; +template +struct sherwood_v3_entry { + sherwood_v3_entry() {} + sherwood_v3_entry(int8_t distance_from_desired) + : distance_from_desired(distance_from_desired) {} + ~sherwood_v3_entry() {} + + bool has_value() const { + return distance_from_desired >= 0; + } + bool is_empty() const { + return distance_from_desired < 0; + } + bool is_at_desired_position() const { + return distance_from_desired <= 0; + } + template + void emplace(int8_t distance, Args&&... 
args) { + new (std::addressof(value)) T(std::forward(args)...); + distance_from_desired = distance; + } + + void destroy_value() { + value.~T(); + distance_from_desired = -1; + } + + sherwood_v3_entry* prev = nullptr; + sherwood_v3_entry* next = nullptr; + int8_t distance_from_desired = -1; + static constexpr int8_t special_end_value = 0; + union { + T value; + }; +}; + +inline int8_t log2(uint64_t value) { + static constexpr int8_t table[64] = { + 63, 0, 58, 1, 59, 47, 53, 2, 60, 39, 48, 27, 54, 33, 42, 3, + 61, 51, 37, 40, 49, 18, 28, 20, 55, 30, 34, 11, 43, 14, 22, 4, + 62, 57, 46, 52, 38, 26, 32, 41, 50, 36, 17, 19, 29, 10, 13, 21, + 56, 45, 25, 31, 35, 16, 9, 12, 44, 24, 15, 8, 23, 7, 6, 5}; + value |= value >> 1; + value |= value >> 2; + value |= value >> 4; + value |= value >> 8; + value |= value >> 16; + value |= value >> 32; + return table[((value - (value >> 1)) * 0x07EDD5E59A4E28C2) >> 58]; +} + +template +struct AssignIfTrue { + void operator()(T& lhs, const T& rhs) { + lhs = rhs; + } + void operator()(T& lhs, T&& rhs) { + lhs = std::move(rhs); + } +}; +template +struct AssignIfTrue { + void operator()(T&, const T&) {} + void operator()(T&, T&&) {} +}; + +inline uint64_t next_power_of_two(uint64_t i) { + --i; + i |= i >> 1; + i |= i >> 2; + i |= i >> 4; + i |= i >> 8; + i |= i >> 16; + i |= i >> 32; + ++i; + return i; +} + +// Implementation taken from http://en.cppreference.com/w/cpp/types/void_t +// (it takes CWG1558 into account and also works for older compilers) +template +struct make_void { + typedef void type; +}; +template +using void_t = typename make_void::type; + +template +struct HashPolicySelector { + typedef fibonacci_hash_policy type; +}; +template +struct HashPolicySelector> { + typedef typename T::hash_policy type; +}; + +template < + typename T, + typename FindKey, + typename ArgumentHash, + typename Hasher, + typename ArgumentEqual, + typename Equal, + typename ArgumentAlloc, + typename EntryAlloc> +class sherwood_v3_table : private 
EntryAlloc, private Hasher, private Equal { + using Entry = detailv3::sherwood_v3_entry; + using AllocatorTraits = std::allocator_traits; + using EntryPointer = typename AllocatorTraits::pointer; + + public: + struct convertible_to_iterator; + + using value_type = T; + using size_type = uint64_t; + using difference_type = std::ptrdiff_t; + using hasher = ArgumentHash; + using key_equal = ArgumentEqual; + using allocator_type = EntryAlloc; + using reference = value_type&; + using const_reference = const value_type&; + using pointer = value_type*; + using const_pointer = const value_type*; + + sherwood_v3_table() = default; + explicit sherwood_v3_table( + size_type bucket_count, + const ArgumentHash& hash = ArgumentHash(), + const ArgumentEqual& equal = ArgumentEqual(), + const ArgumentAlloc& alloc = ArgumentAlloc()) + : EntryAlloc(alloc), Hasher(hash), Equal(equal) { + rehash(bucket_count); + } + sherwood_v3_table(size_type bucket_count, const ArgumentAlloc& alloc) + : sherwood_v3_table( + bucket_count, + ArgumentHash(), + ArgumentEqual(), + alloc) {} + sherwood_v3_table( + size_type bucket_count, + const ArgumentHash& hash, + const ArgumentAlloc& alloc) + : sherwood_v3_table(bucket_count, hash, ArgumentEqual(), alloc) {} + explicit sherwood_v3_table(const ArgumentAlloc& alloc) : EntryAlloc(alloc) {} + template + sherwood_v3_table( + It first, + It last, + size_type bucket_count = 0, + const ArgumentHash& hash = ArgumentHash(), + const ArgumentEqual& equal = ArgumentEqual(), + const ArgumentAlloc& alloc = ArgumentAlloc()) + : sherwood_v3_table(bucket_count, hash, equal, alloc) { + insert(first, last); + } + template + sherwood_v3_table( + It first, + It last, + size_type bucket_count, + const ArgumentAlloc& alloc) + : sherwood_v3_table( + first, + last, + bucket_count, + ArgumentHash(), + ArgumentEqual(), + alloc) {} + template + sherwood_v3_table( + It first, + It last, + size_type bucket_count, + const ArgumentHash& hash, + const ArgumentAlloc& alloc) + : 
sherwood_v3_table( + first, + last, + bucket_count, + hash, + ArgumentEqual(), + alloc) {} + sherwood_v3_table( + std::initializer_list il, + size_type bucket_count = 0, + const ArgumentHash& hash = ArgumentHash(), + const ArgumentEqual& equal = ArgumentEqual(), + const ArgumentAlloc& alloc = ArgumentAlloc()) + : sherwood_v3_table(bucket_count, hash, equal, alloc) { + if (bucket_count == 0) + rehash(il.size()); + insert(il.begin(), il.end()); + } + sherwood_v3_table( + std::initializer_list il, + size_type bucket_count, + const ArgumentAlloc& alloc) + : sherwood_v3_table( + il, + bucket_count, + ArgumentHash(), + ArgumentEqual(), + alloc) {} + sherwood_v3_table( + std::initializer_list il, + size_type bucket_count, + const ArgumentHash& hash, + const ArgumentAlloc& alloc) + : sherwood_v3_table(il, bucket_count, hash, ArgumentEqual(), alloc) {} + sherwood_v3_table(const sherwood_v3_table& other) + : sherwood_v3_table( + other, + AllocatorTraits::select_on_container_copy_construction( + other.get_allocator())) {} + sherwood_v3_table(const sherwood_v3_table& other, const ArgumentAlloc& alloc) + : EntryAlloc(alloc), + Hasher(other), + Equal(other), + _max_load_factor(other._max_load_factor) { + rehash_for_other_container(other); + try { + insert(other.begin(), other.end()); + } catch (...) 
{ + clear(); + deallocate_data(entries, num_slots_minus_one, max_lookups); + throw; + } + } + sherwood_v3_table(sherwood_v3_table&& other) noexcept + : EntryAlloc(std::move(other)), + Hasher(std::move(other)), + Equal(std::move(other)) { + swap_pointers(other); + } + sherwood_v3_table( + sherwood_v3_table&& other, + const ArgumentAlloc& alloc) noexcept + : EntryAlloc(alloc), Hasher(std::move(other)), Equal(std::move(other)) { + swap_pointers(other); + } + sherwood_v3_table& operator=(const sherwood_v3_table& other) { + if (this == std::addressof(other)) + return *this; + + clear(); + if (AllocatorTraits::propagate_on_container_copy_assignment::value) { + if (static_cast(*this) != + static_cast(other)) { + reset_to_empty_state(); + } + AssignIfTrue< + EntryAlloc, + AllocatorTraits::propagate_on_container_copy_assignment::value>()( + *this, other); + } + _max_load_factor = other._max_load_factor; + static_cast(*this) = other; + static_cast(*this) = other; + rehash_for_other_container(other); + insert(other.begin(), other.end()); + return *this; + } + sherwood_v3_table& operator=(sherwood_v3_table&& other) noexcept { + if (this == std::addressof(other)) + return *this; + else if (AllocatorTraits::propagate_on_container_move_assignment::value) { + clear(); + reset_to_empty_state(); + AssignIfTrue< + EntryAlloc, + AllocatorTraits::propagate_on_container_move_assignment::value>()( + *this, std::move(other)); + swap_pointers(other); + } else if ( + static_cast(*this) == static_cast(other)) { + swap_pointers(other); + } else { + clear(); + _max_load_factor = other._max_load_factor; + rehash_for_other_container(other); + for (T& elem : other) + emplace(std::move(elem)); + other.clear(); + } + static_cast(*this) = std::move(other); + static_cast(*this) = std::move(other); + return *this; + } + ~sherwood_v3_table() { + clear(); + deallocate_data(entries, num_slots_minus_one, max_lookups); + } + + const allocator_type& get_allocator() const { + return static_cast(*this); + } + 
const ArgumentEqual& key_eq() const { + return static_cast(*this); + } + const ArgumentHash& hash_function() const { + return static_cast(*this); + } + + template + struct templated_iterator { + templated_iterator() = default; + templated_iterator(EntryPointer current) : current(current) {} + EntryPointer current = EntryPointer(); + + using iterator_category = std::forward_iterator_tag; + using value_type = ValueType; + using difference_type = ptrdiff_t; + using pointer = ValueType*; + using reference = ValueType&; + + friend bool operator==( + const templated_iterator& lhs, + const templated_iterator& rhs) { + return lhs.current == rhs.current; + } + friend bool operator!=( + const templated_iterator& lhs, + const templated_iterator& rhs) { + return !(lhs == rhs); + } + + templated_iterator& operator++() { + current = current->next; + return *this; + } + templated_iterator operator++(int) { + templated_iterator copy(*this); + ++*this; + return copy; + } + + ValueType& operator*() const { + return current->value; + } + ValueType* operator->() const { + return std::addressof(current->value); + } + + // the template automatically disables the operator when value_type is + // already const, because that would cause a lot of compiler warnings + // otherwise. 
+ template < + class target_type = const value_type, + class = typename std::enable_if< + std::is_same::value && + !std::is_same::value>::type> + operator templated_iterator() const { + return {current}; + } + }; + using iterator = templated_iterator; + using const_iterator = templated_iterator; + + iterator begin() { + return sentinel->next; + } + const_iterator begin() const { + return sentinel->next; + } + const_iterator cbegin() const { + return begin(); + } + iterator end() { + return sentinel; + } + const_iterator end() const { + return sentinel; + } + const_iterator cend() const { + return end(); + } + + iterator find(const FindKey& key) { + uint64_t index = + hash_policy.index_for_hash(hash_object(key), num_slots_minus_one); + EntryPointer it = entries + ptrdiff_t(index); + for (int8_t distance = 0; it->distance_from_desired >= distance; + ++distance, ++it) { + if (compares_equal(key, it->value)) + return {it}; + } + return end(); + } + const_iterator find(const FindKey& key) const { + return const_cast(this)->find(key); + } + uint64_t count(const FindKey& key) const { + return find(key) == end() ? 0 : 1; + } + std::pair equal_range(const FindKey& key) { + iterator found = find(key); + if (found == end()) + return {found, found}; + else + return {found, std::next(found)}; + } + std::pair equal_range( + const FindKey& key) const { + const_iterator found = find(key); + if (found == end()) + return {found, found}; + else + return {found, std::next(found)}; + } + + template + std::pair emplace(Key&& key, Args&&... 
args) { + uint64_t index = + hash_policy.index_for_hash(hash_object(key), num_slots_minus_one); + EntryPointer current_entry = entries + ptrdiff_t(index); + int8_t distance_from_desired = 0; + for (; current_entry->distance_from_desired >= distance_from_desired; + ++current_entry, ++distance_from_desired) { + // insertion of an existing key does not change ordering + if (compares_equal(key, current_entry->value)) + return {{current_entry}, false}; + } + return emplace_new_key( + distance_from_desired, + current_entry, + std::forward(key), + std::forward(args)...); + } + + std::pair insert(const value_type& value) { + return emplace(value); + } + std::pair insert(value_type&& value) { + return emplace(std::move(value)); + } + template + iterator emplace_hint(const_iterator, Args&&... args) { + return emplace(std::forward(args)...).first; + } + iterator insert(const_iterator, const value_type& value) { + return emplace(value).first; + } + iterator insert(const_iterator, value_type&& value) { + return emplace(std::move(value)).first; + } + + template + void insert(It begin, It end) { + for (; begin != end; ++begin) { + emplace(*begin); + } + } + void insert(std::initializer_list il) { + insert(il.begin(), il.end()); + } + + void rehash(uint64_t num_buckets) { + num_buckets = std::max( + num_buckets, + static_cast( + std::ceil(num_elements / static_cast(_max_load_factor)))); + if (num_buckets == 0) { + reset_to_empty_state(); + return; + } + auto new_prime_index = hash_policy.next_size_over(num_buckets); + if (num_buckets == bucket_count()) + return; + int8_t new_max_lookups = compute_max_lookups(num_buckets); + EntryPointer new_buckets( + AllocatorTraits::allocate(*this, num_buckets + new_max_lookups)); + EntryPointer special_end_item = + new_buckets + static_cast(num_buckets + new_max_lookups - 1); + for (EntryPointer it = new_buckets; it != special_end_item; ++it) + it->distance_from_desired = -1; + special_end_item->distance_from_desired = Entry::special_end_value; 
+ std::swap(entries, new_buckets); + std::swap(num_slots_minus_one, num_buckets); + --num_slots_minus_one; + hash_policy.commit(new_prime_index); + int8_t old_max_lookups = max_lookups; + max_lookups = new_max_lookups; + num_elements = 0; + + auto start = sentinel->next; + // point sentinel to itself; + reset_list(); + // reinsert list + for (EntryPointer it = start; it != sentinel;) { + auto next = it->next; + emplace(std::move(it->value)); + it->destroy_value(); + it = next; + } + + deallocate_data(new_buckets, num_buckets, old_max_lookups); + } + + void reserve(uint64_t num_elements_) { + uint64_t required_buckets = num_buckets_for_reserve(num_elements_); + if (required_buckets > bucket_count()) + rehash(required_buckets); + } + + void replace_linked_list_position( + EntryPointer to_be_replaced, + EntryPointer new_node) { + remove_from_list(new_node); + insert_after(new_node, to_be_replaced->prev); + remove_from_list(to_be_replaced); + } + + // the return value is a type that can be converted to an iterator + // the reason for doing this is that it's not free to find the + // iterator pointing at the next element. if you care about the + // next iterator, turn the return value into an iterator + convertible_to_iterator erase(const_iterator to_erase) { + EntryPointer current = to_erase.current; + remove_from_list(current); + current->destroy_value(); + --num_elements; + + for (EntryPointer next = current + ptrdiff_t(1); + !next->is_at_desired_position(); + ++current, ++next) { + // if an entry is being removed, and there are other entries with the + // same hash, the other entries get moved to their desired position by + // reinserting. 
+ current->emplace(next->distance_from_desired - 1, std::move(next->value)); + replace_linked_list_position(next, current); + next->destroy_value(); + } + return {to_erase.current}; + } + + iterator erase(const_iterator begin_it, const_iterator end_it) { + // whenever an entry is removed and there are other entries with the same + // hash, the other entries must get moved to their desired position. + // any reference to a moved entry is invalidated. + // here, we iterate through the range, and make sure that we update + // the pointer to our next entry in the list or the end of the iterator + // when it is invalidated. + + auto curr_iter = begin_it.current; + auto next_iter = curr_iter->next; + auto end_iter = end_it.current; + + while (curr_iter != end_iter) { + remove_from_list(curr_iter); + curr_iter->destroy_value(); + --num_elements; + + for (EntryPointer next_hash_slot = curr_iter + ptrdiff_t(1); + !next_hash_slot->is_at_desired_position(); + ++curr_iter, ++next_hash_slot) { + curr_iter->emplace( + next_hash_slot->distance_from_desired - 1, + std::move(next_hash_slot->value)); + replace_linked_list_position(next_hash_slot, curr_iter); + next_hash_slot->destroy_value(); + + // we are invalidating next_iter or end_iter + if (next_hash_slot == end_iter) { + end_iter = curr_iter; + } else if (next_hash_slot == next_iter) { + next_iter = curr_iter; + } + } + curr_iter = next_iter; + next_iter = curr_iter->next; + } + + return {end_iter}; + } + + uint64_t erase(const FindKey& key) { + auto found = find(key); + if (found == end()) + return 0; + else { + erase(found); + return 1; + } + } + + void clear() { + for (EntryPointer it = entries, + end = it + + static_cast(num_slots_minus_one + max_lookups); + it != end; + ++it) { + if (it->has_value()) + it->destroy_value(); + } + reset_list(); + num_elements = 0; + } + + void shrink_to_fit() { + rehash_for_other_container(*this); + } + + void swap(sherwood_v3_table& other) { + using std::swap; + swap_pointers(other); + 
swap(static_cast(*this), static_cast(other)); + swap( + static_cast(*this), static_cast(other)); + if (AllocatorTraits::propagate_on_container_swap::value) + swap(static_cast(*this), static_cast(other)); + } + + uint64_t size() const { + return num_elements; + } + uint64_t max_size() const { + return (AllocatorTraits::max_size(*this)) / sizeof(Entry); + } + uint64_t bucket_count() const { + return num_slots_minus_one ? num_slots_minus_one + 1 : 0; + } + size_type max_bucket_count() const { + return (AllocatorTraits::max_size(*this) - min_lookups) / sizeof(Entry); + } + uint64_t bucket(const FindKey& key) const { + return hash_policy.index_for_hash(hash_object(key), num_slots_minus_one); + } + float load_factor() const { + uint64_t buckets = bucket_count(); + if (buckets) + return static_cast(num_elements) / bucket_count(); + else + return 0; + } + void max_load_factor(float value) { + _max_load_factor = value; + } + float max_load_factor() const { + return _max_load_factor; + } + + bool empty() const { + return num_elements == 0; + } + + private: + EntryPointer entries = empty_default_table(); + uint64_t num_slots_minus_one = 0; + typename HashPolicySelector::type hash_policy; + int8_t max_lookups = detailv3::min_lookups - 1; + float _max_load_factor = 0.5f; + uint64_t num_elements = 0; + std::unique_ptr> sentinel_val; + + // head of doubly linked list + EntryPointer sentinel = initSentinel(); + + EntryPointer initSentinel() { + // needs to be a pointer so that hash map can be used with forward declared + // types + sentinel_val = std::make_unique>(); + sentinel = sentinel_val.get(); + reset_list(); + return sentinel; + } + + EntryPointer empty_default_table() { + EntryPointer result = + AllocatorTraits::allocate(*this, detailv3::min_lookups); + EntryPointer special_end_item = + result + static_cast(detailv3::min_lookups - 1); + for (EntryPointer it = result; it != special_end_item; ++it) + it->distance_from_desired = -1; + special_end_item->distance_from_desired = 
Entry::special_end_value; + return result; + } + + static int8_t compute_max_lookups(uint64_t num_buckets) { + int8_t desired = detailv3::log2(num_buckets); + return std::max(detailv3::min_lookups, desired); + } + + uint64_t num_buckets_for_reserve(uint64_t num_elements_) const { + return static_cast(std::ceil( + num_elements_ / std::min(0.5, static_cast(_max_load_factor)))); + } + void rehash_for_other_container(const sherwood_v3_table& other) { + rehash( + std::min(num_buckets_for_reserve(other.size()), other.bucket_count())); + } + + void swap_pointers(sherwood_v3_table& other) { + using std::swap; + swap(hash_policy, other.hash_policy); + swap(entries, other.entries); + swap(num_slots_minus_one, other.num_slots_minus_one); + swap(num_elements, other.num_elements); + swap(max_lookups, other.max_lookups); + swap(_max_load_factor, other._max_load_factor); + swap(sentinel, other.sentinel); + swap(sentinel_val, other.sentinel_val); + } + + void reset_list() { + sentinel->next = sentinel; + sentinel->prev = sentinel; + } + + void remove_from_list(EntryPointer elem) { + elem->prev->next = elem->next; + elem->next->prev = elem->prev; + } + + void insert_after(EntryPointer new_elem, EntryPointer prev) { + auto next = prev->next; + + prev->next = new_elem; + new_elem->prev = prev; + + new_elem->next = next; + next->prev = new_elem; + } + + void swap_adjacent_nodes(EntryPointer before, EntryPointer after) { + // sentinel stays constant, so before->prev cannot equal after + auto before_prev = before->prev; + auto after_next = after->next; + + before_prev->next = after; + after->prev = before_prev; + + after_next->prev = before; + before->next = after_next; + + before->prev = after; + after->next = before; + } + + void swap_positions(EntryPointer p1, EntryPointer p2) { + if (p1 == p2) { + return; + } + if (p1->next == p2) { + return swap_adjacent_nodes(p1, p2); + } else if (p2->next == p1) { + return swap_adjacent_nodes(p2, p1); + } + + auto p1_prev = p1->prev; + auto 
p1_next = p1->next; + + auto p2_prev = p2->prev; + auto p2_next = p2->next; + + p1_prev->next = p2; + p2->prev = p1_prev; + + p1_next->prev = p2; + p2->next = p1_next; + + p2_prev->next = p1; + p1->prev = p2_prev; + + p2_next->prev = p1; + p1->next = p2_next; + } + + void append_to_list(EntryPointer new_tail) { + insert_after(new_tail, sentinel->prev); + } + + template + SKA_NOINLINE(std::pair) + emplace_new_key( + int8_t distance_from_desired, + EntryPointer current_entry, + Key&& key, + Args&&... args) { + using std::swap; + if (num_slots_minus_one == 0 || distance_from_desired == max_lookups || + num_elements + 1 > + (num_slots_minus_one + 1) * static_cast(_max_load_factor)) { + grow(); + return emplace(std::forward(key), std::forward(args)...); + } else if (current_entry->is_empty()) { + current_entry->emplace( + distance_from_desired, + std::forward(key), + std::forward(args)...); + ++num_elements; + append_to_list(current_entry); + return {{current_entry}, true}; + } + value_type to_insert(std::forward(key), std::forward(args)...); + swap(distance_from_desired, current_entry->distance_from_desired); + // We maintain the invariant that: + // - result.current_entry contains the new value we're inserting + // and is in the LinkedList position of to_insert + // - to_insert contains the value that represents the position of + // result.current_entry + swap(to_insert, current_entry->value); + iterator result = {current_entry}; + for (++distance_from_desired, ++current_entry;; ++current_entry) { + if (current_entry->is_empty()) { + current_entry->emplace(distance_from_desired, std::move(to_insert)); + append_to_list(current_entry); + // now we can swap back the displaced value to its correct position, + // putting the new value we're inserting to the front of the list + swap_positions(current_entry, result.current); + ++num_elements; + return {result, true}; + } else if (current_entry->distance_from_desired < distance_from_desired) { + swap(distance_from_desired, 
current_entry->distance_from_desired); + swap(to_insert, current_entry->value); + // to maintain our invariants we need to swap positions + // of result.current & current_entry: + swap_positions(result.current, current_entry); + ++distance_from_desired; + } else { + ++distance_from_desired; + if (distance_from_desired == max_lookups) { + // the displaced element gets put back into its correct position + // we grow the hash table, and then try again to reinsert the new + // element + swap(to_insert, result.current->value); + grow(); + return emplace(std::move(to_insert)); + } + } + } + } + + void grow() { + rehash(std::max(uint64_t(4), 2 * bucket_count())); + } + + void deallocate_data( + EntryPointer begin, + uint64_t num_slots_minus_one_, + int8_t max_lookups_) { + AllocatorTraits::deallocate( + *this, begin, num_slots_minus_one_ + max_lookups_ + 1); + } + + void reset_to_empty_state() { + deallocate_data(entries, num_slots_minus_one, max_lookups); + entries = empty_default_table(); + num_slots_minus_one = 0; + hash_policy.reset(); + max_lookups = detailv3::min_lookups - 1; + } + + template + uint64_t hash_object(const U& key) { + return static_cast(*this)(key); + } + template + uint64_t hash_object(const U& key) const { + return static_cast(*this)(key); + } + template + bool compares_equal(const L& lhs, const R& rhs) { + return static_cast(*this)(lhs, rhs); + } + + public: + struct convertible_to_iterator { + EntryPointer it; + + operator iterator() { + if (it->has_value()) + return {it}; + else + return ++iterator{it}; + } + operator const_iterator() { + if (it->has_value()) + return {it}; + else + return ++const_iterator{it}; + } + }; +}; +} // namespace detailv3 + +struct prime_number_hash_policy { + static uint64_t mod0(uint64_t) { + return 0llu; + } + static uint64_t mod2(uint64_t hash) { + return hash % 2llu; + } + static uint64_t mod3(uint64_t hash) { + return hash % 3llu; + } + static uint64_t mod5(uint64_t hash) { + return hash % 5llu; + } + static 
uint64_t mod7(uint64_t hash) { + return hash % 7llu; + } + static uint64_t mod11(uint64_t hash) { + return hash % 11llu; + } + static uint64_t mod13(uint64_t hash) { + return hash % 13llu; + } + static uint64_t mod17(uint64_t hash) { + return hash % 17llu; + } + static uint64_t mod23(uint64_t hash) { + return hash % 23llu; + } + static uint64_t mod29(uint64_t hash) { + return hash % 29llu; + } + static uint64_t mod37(uint64_t hash) { + return hash % 37llu; + } + static uint64_t mod47(uint64_t hash) { + return hash % 47llu; + } + static uint64_t mod59(uint64_t hash) { + return hash % 59llu; + } + static uint64_t mod73(uint64_t hash) { + return hash % 73llu; + } + static uint64_t mod97(uint64_t hash) { + return hash % 97llu; + } + static uint64_t mod127(uint64_t hash) { + return hash % 127llu; + } + static uint64_t mod151(uint64_t hash) { + return hash % 151llu; + } + static uint64_t mod197(uint64_t hash) { + return hash % 197llu; + } + static uint64_t mod251(uint64_t hash) { + return hash % 251llu; + } + static uint64_t mod313(uint64_t hash) { + return hash % 313llu; + } + static uint64_t mod397(uint64_t hash) { + return hash % 397llu; + } + static uint64_t mod499(uint64_t hash) { + return hash % 499llu; + } + static uint64_t mod631(uint64_t hash) { + return hash % 631llu; + } + static uint64_t mod797(uint64_t hash) { + return hash % 797llu; + } + static uint64_t mod1009(uint64_t hash) { + return hash % 1009llu; + } + static uint64_t mod1259(uint64_t hash) { + return hash % 1259llu; + } + static uint64_t mod1597(uint64_t hash) { + return hash % 1597llu; + } + static uint64_t mod2011(uint64_t hash) { + return hash % 2011llu; + } + static uint64_t mod2539(uint64_t hash) { + return hash % 2539llu; + } + static uint64_t mod3203(uint64_t hash) { + return hash % 3203llu; + } + static uint64_t mod4027(uint64_t hash) { + return hash % 4027llu; + } + static uint64_t mod5087(uint64_t hash) { + return hash % 5087llu; + } + static uint64_t mod6421(uint64_t hash) { + return hash 
% 6421llu; + } + static uint64_t mod8089(uint64_t hash) { + return hash % 8089llu; + } + static uint64_t mod10193(uint64_t hash) { + return hash % 10193llu; + } + static uint64_t mod12853(uint64_t hash) { + return hash % 12853llu; + } + static uint64_t mod16193(uint64_t hash) { + return hash % 16193llu; + } + static uint64_t mod20399(uint64_t hash) { + return hash % 20399llu; + } + static uint64_t mod25717(uint64_t hash) { + return hash % 25717llu; + } + static uint64_t mod32401(uint64_t hash) { + return hash % 32401llu; + } + static uint64_t mod40823(uint64_t hash) { + return hash % 40823llu; + } + static uint64_t mod51437(uint64_t hash) { + return hash % 51437llu; + } + static uint64_t mod64811(uint64_t hash) { + return hash % 64811llu; + } + static uint64_t mod81649(uint64_t hash) { + return hash % 81649llu; + } + static uint64_t mod102877(uint64_t hash) { + return hash % 102877llu; + } + static uint64_t mod129607(uint64_t hash) { + return hash % 129607llu; + } + static uint64_t mod163307(uint64_t hash) { + return hash % 163307llu; + } + static uint64_t mod205759(uint64_t hash) { + return hash % 205759llu; + } + static uint64_t mod259229(uint64_t hash) { + return hash % 259229llu; + } + static uint64_t mod326617(uint64_t hash) { + return hash % 326617llu; + } + static uint64_t mod411527(uint64_t hash) { + return hash % 411527llu; + } + static uint64_t mod518509(uint64_t hash) { + return hash % 518509llu; + } + static uint64_t mod653267(uint64_t hash) { + return hash % 653267llu; + } + static uint64_t mod823117(uint64_t hash) { + return hash % 823117llu; + } + static uint64_t mod1037059(uint64_t hash) { + return hash % 1037059llu; + } + static uint64_t mod1306601(uint64_t hash) { + return hash % 1306601llu; + } + static uint64_t mod1646237(uint64_t hash) { + return hash % 1646237llu; + } + static uint64_t mod2074129(uint64_t hash) { + return hash % 2074129llu; + } + static uint64_t mod2613229(uint64_t hash) { + return hash % 2613229llu; + } + static uint64_t 
mod3292489(uint64_t hash) { + return hash % 3292489llu; + } + static uint64_t mod4148279(uint64_t hash) { + return hash % 4148279llu; + } + static uint64_t mod5226491(uint64_t hash) { + return hash % 5226491llu; + } + static uint64_t mod6584983(uint64_t hash) { + return hash % 6584983llu; + } + static uint64_t mod8296553(uint64_t hash) { + return hash % 8296553llu; + } + static uint64_t mod10453007(uint64_t hash) { + return hash % 10453007llu; + } + static uint64_t mod13169977(uint64_t hash) { + return hash % 13169977llu; + } + static uint64_t mod16593127(uint64_t hash) { + return hash % 16593127llu; + } + static uint64_t mod20906033(uint64_t hash) { + return hash % 20906033llu; + } + static uint64_t mod26339969(uint64_t hash) { + return hash % 26339969llu; + } + static uint64_t mod33186281(uint64_t hash) { + return hash % 33186281llu; + } + static uint64_t mod41812097(uint64_t hash) { + return hash % 41812097llu; + } + static uint64_t mod52679969(uint64_t hash) { + return hash % 52679969llu; + } + static uint64_t mod66372617(uint64_t hash) { + return hash % 66372617llu; + } + static uint64_t mod83624237(uint64_t hash) { + return hash % 83624237llu; + } + static uint64_t mod105359939(uint64_t hash) { + return hash % 105359939llu; + } + static uint64_t mod132745199(uint64_t hash) { + return hash % 132745199llu; + } + static uint64_t mod167248483(uint64_t hash) { + return hash % 167248483llu; + } + static uint64_t mod210719881(uint64_t hash) { + return hash % 210719881llu; + } + static uint64_t mod265490441(uint64_t hash) { + return hash % 265490441llu; + } + static uint64_t mod334496971(uint64_t hash) { + return hash % 334496971llu; + } + static uint64_t mod421439783(uint64_t hash) { + return hash % 421439783llu; + } + static uint64_t mod530980861(uint64_t hash) { + return hash % 530980861llu; + } + static uint64_t mod668993977(uint64_t hash) { + return hash % 668993977llu; + } + static uint64_t mod842879579(uint64_t hash) { + return hash % 842879579llu; + } + 
static uint64_t mod1061961721(uint64_t hash) { + return hash % 1061961721llu; + } + static uint64_t mod1337987929(uint64_t hash) { + return hash % 1337987929llu; + } + static uint64_t mod1685759167(uint64_t hash) { + return hash % 1685759167llu; + } + static uint64_t mod2123923447(uint64_t hash) { + return hash % 2123923447llu; + } + static uint64_t mod2675975881(uint64_t hash) { + return hash % 2675975881llu; + } + static uint64_t mod3371518343(uint64_t hash) { + return hash % 3371518343llu; + } + static uint64_t mod4247846927(uint64_t hash) { + return hash % 4247846927llu; + } + static uint64_t mod5351951779(uint64_t hash) { + return hash % 5351951779llu; + } + static uint64_t mod6743036717(uint64_t hash) { + return hash % 6743036717llu; + } + static uint64_t mod8495693897(uint64_t hash) { + return hash % 8495693897llu; + } + static uint64_t mod10703903591(uint64_t hash) { + return hash % 10703903591llu; + } + static uint64_t mod13486073473(uint64_t hash) { + return hash % 13486073473llu; + } + static uint64_t mod16991387857(uint64_t hash) { + return hash % 16991387857llu; + } + static uint64_t mod21407807219(uint64_t hash) { + return hash % 21407807219llu; + } + static uint64_t mod26972146961(uint64_t hash) { + return hash % 26972146961llu; + } + static uint64_t mod33982775741(uint64_t hash) { + return hash % 33982775741llu; + } + static uint64_t mod42815614441(uint64_t hash) { + return hash % 42815614441llu; + } + static uint64_t mod53944293929(uint64_t hash) { + return hash % 53944293929llu; + } + static uint64_t mod67965551447(uint64_t hash) { + return hash % 67965551447llu; + } + static uint64_t mod85631228929(uint64_t hash) { + return hash % 85631228929llu; + } + static uint64_t mod107888587883(uint64_t hash) { + return hash % 107888587883llu; + } + static uint64_t mod135931102921(uint64_t hash) { + return hash % 135931102921llu; + } + static uint64_t mod171262457903(uint64_t hash) { + return hash % 171262457903llu; + } + static uint64_t 
mod215777175787(uint64_t hash) { + return hash % 215777175787llu; + } + static uint64_t mod271862205833(uint64_t hash) { + return hash % 271862205833llu; + } + static uint64_t mod342524915839(uint64_t hash) { + return hash % 342524915839llu; + } + static uint64_t mod431554351609(uint64_t hash) { + return hash % 431554351609llu; + } + static uint64_t mod543724411781(uint64_t hash) { + return hash % 543724411781llu; + } + static uint64_t mod685049831731(uint64_t hash) { + return hash % 685049831731llu; + } + static uint64_t mod863108703229(uint64_t hash) { + return hash % 863108703229llu; + } + static uint64_t mod1087448823553(uint64_t hash) { + return hash % 1087448823553llu; + } + static uint64_t mod1370099663459(uint64_t hash) { + return hash % 1370099663459llu; + } + static uint64_t mod1726217406467(uint64_t hash) { + return hash % 1726217406467llu; + } + static uint64_t mod2174897647073(uint64_t hash) { + return hash % 2174897647073llu; + } + static uint64_t mod2740199326961(uint64_t hash) { + return hash % 2740199326961llu; + } + static uint64_t mod3452434812973(uint64_t hash) { + return hash % 3452434812973llu; + } + static uint64_t mod4349795294267(uint64_t hash) { + return hash % 4349795294267llu; + } + static uint64_t mod5480398654009(uint64_t hash) { + return hash % 5480398654009llu; + } + static uint64_t mod6904869625999(uint64_t hash) { + return hash % 6904869625999llu; + } + static uint64_t mod8699590588571(uint64_t hash) { + return hash % 8699590588571llu; + } + static uint64_t mod10960797308051(uint64_t hash) { + return hash % 10960797308051llu; + } + static uint64_t mod13809739252051(uint64_t hash) { + return hash % 13809739252051llu; + } + static uint64_t mod17399181177241(uint64_t hash) { + return hash % 17399181177241llu; + } + static uint64_t mod21921594616111(uint64_t hash) { + return hash % 21921594616111llu; + } + static uint64_t mod27619478504183(uint64_t hash) { + return hash % 27619478504183llu; + } + static uint64_t 
mod34798362354533(uint64_t hash) { + return hash % 34798362354533llu; + } + static uint64_t mod43843189232363(uint64_t hash) { + return hash % 43843189232363llu; + } + static uint64_t mod55238957008387(uint64_t hash) { + return hash % 55238957008387llu; + } + static uint64_t mod69596724709081(uint64_t hash) { + return hash % 69596724709081llu; + } + static uint64_t mod87686378464759(uint64_t hash) { + return hash % 87686378464759llu; + } + static uint64_t mod110477914016779(uint64_t hash) { + return hash % 110477914016779llu; + } + static uint64_t mod139193449418173(uint64_t hash) { + return hash % 139193449418173llu; + } + static uint64_t mod175372756929481(uint64_t hash) { + return hash % 175372756929481llu; + } + static uint64_t mod220955828033581(uint64_t hash) { + return hash % 220955828033581llu; + } + static uint64_t mod278386898836457(uint64_t hash) { + return hash % 278386898836457llu; + } + static uint64_t mod350745513859007(uint64_t hash) { + return hash % 350745513859007llu; + } + static uint64_t mod441911656067171(uint64_t hash) { + return hash % 441911656067171llu; + } + static uint64_t mod556773797672909(uint64_t hash) { + return hash % 556773797672909llu; + } + static uint64_t mod701491027718027(uint64_t hash) { + return hash % 701491027718027llu; + } + static uint64_t mod883823312134381(uint64_t hash) { + return hash % 883823312134381llu; + } + static uint64_t mod1113547595345903(uint64_t hash) { + return hash % 1113547595345903llu; + } + static uint64_t mod1402982055436147(uint64_t hash) { + return hash % 1402982055436147llu; + } + static uint64_t mod1767646624268779(uint64_t hash) { + return hash % 1767646624268779llu; + } + static uint64_t mod2227095190691797(uint64_t hash) { + return hash % 2227095190691797llu; + } + static uint64_t mod2805964110872297(uint64_t hash) { + return hash % 2805964110872297llu; + } + static uint64_t mod3535293248537579(uint64_t hash) { + return hash % 3535293248537579llu; + } + static uint64_t 
mod4454190381383713(uint64_t hash) { + return hash % 4454190381383713llu; + } + static uint64_t mod5611928221744609(uint64_t hash) { + return hash % 5611928221744609llu; + } + static uint64_t mod7070586497075177(uint64_t hash) { + return hash % 7070586497075177llu; + } + static uint64_t mod8908380762767489(uint64_t hash) { + return hash % 8908380762767489llu; + } + static uint64_t mod11223856443489329(uint64_t hash) { + return hash % 11223856443489329llu; + } + static uint64_t mod14141172994150357(uint64_t hash) { + return hash % 14141172994150357llu; + } + static uint64_t mod17816761525534927(uint64_t hash) { + return hash % 17816761525534927llu; + } + static uint64_t mod22447712886978529(uint64_t hash) { + return hash % 22447712886978529llu; + } + static uint64_t mod28282345988300791(uint64_t hash) { + return hash % 28282345988300791llu; + } + static uint64_t mod35633523051069991(uint64_t hash) { + return hash % 35633523051069991llu; + } + static uint64_t mod44895425773957261(uint64_t hash) { + return hash % 44895425773957261llu; + } + static uint64_t mod56564691976601587(uint64_t hash) { + return hash % 56564691976601587llu; + } + static uint64_t mod71267046102139967(uint64_t hash) { + return hash % 71267046102139967llu; + } + static uint64_t mod89790851547914507(uint64_t hash) { + return hash % 89790851547914507llu; + } + static uint64_t mod113129383953203213(uint64_t hash) { + return hash % 113129383953203213llu; + } + static uint64_t mod142534092204280003(uint64_t hash) { + return hash % 142534092204280003llu; + } + static uint64_t mod179581703095829107(uint64_t hash) { + return hash % 179581703095829107llu; + } + static uint64_t mod226258767906406483(uint64_t hash) { + return hash % 226258767906406483llu; + } + static uint64_t mod285068184408560057(uint64_t hash) { + return hash % 285068184408560057llu; + } + static uint64_t mod359163406191658253(uint64_t hash) { + return hash % 359163406191658253llu; + } + static uint64_t mod452517535812813007(uint64_t 
hash) { + return hash % 452517535812813007llu; + } + static uint64_t mod570136368817120201(uint64_t hash) { + return hash % 570136368817120201llu; + } + static uint64_t mod718326812383316683(uint64_t hash) { + return hash % 718326812383316683llu; + } + static uint64_t mod905035071625626043(uint64_t hash) { + return hash % 905035071625626043llu; + } + static uint64_t mod1140272737634240411(uint64_t hash) { + return hash % 1140272737634240411llu; + } + static uint64_t mod1436653624766633509(uint64_t hash) { + return hash % 1436653624766633509llu; + } + static uint64_t mod1810070143251252131(uint64_t hash) { + return hash % 1810070143251252131llu; + } + static uint64_t mod2280545475268481167(uint64_t hash) { + return hash % 2280545475268481167llu; + } + static uint64_t mod2873307249533267101(uint64_t hash) { + return hash % 2873307249533267101llu; + } + static uint64_t mod3620140286502504283(uint64_t hash) { + return hash % 3620140286502504283llu; + } + static uint64_t mod4561090950536962147(uint64_t hash) { + return hash % 4561090950536962147llu; + } + static uint64_t mod5746614499066534157(uint64_t hash) { + return hash % 5746614499066534157llu; + } + static uint64_t mod7240280573005008577(uint64_t hash) { + return hash % 7240280573005008577llu; + } + static uint64_t mod9122181901073924329(uint64_t hash) { + return hash % 9122181901073924329llu; + } + static uint64_t mod11493228998133068689(uint64_t hash) { + return hash % 11493228998133068689llu; + } + static uint64_t mod14480561146010017169(uint64_t hash) { + return hash % 14480561146010017169llu; + } + static uint64_t mod18446744073709551557(uint64_t hash) { + return hash % 18446744073709551557llu; + } + + using mod_function = uint64_t (*)(uint64_t); + + mod_function next_size_over(uint64_t& size) const { + // prime numbers generated by the following method: + // 1. start with a prime p = 2 + // 2. go to wolfram alpha and get p = NextPrime(2 * p) + // 3. repeat 2. 
until you overflow 64 bits + // you now have large gaps which you would hit if somebody called reserve() + // with an unlucky number. + // 4. to fill the gaps for every prime p go to wolfram alpha and get + // ClosestPrime(p * 2^(1/3)) and ClosestPrime(p * 2^(2/3)) and put those in + // the gaps + // 5. get PrevPrime(2^64) and put it at the end + static constexpr const uint64_t prime_list[] = { + 2llu, + 3llu, + 5llu, + 7llu, + 11llu, + 13llu, + 17llu, + 23llu, + 29llu, + 37llu, + 47llu, + 59llu, + 73llu, + 97llu, + 127llu, + 151llu, + 197llu, + 251llu, + 313llu, + 397llu, + 499llu, + 631llu, + 797llu, + 1009llu, + 1259llu, + 1597llu, + 2011llu, + 2539llu, + 3203llu, + 4027llu, + 5087llu, + 6421llu, + 8089llu, + 10193llu, + 12853llu, + 16193llu, + 20399llu, + 25717llu, + 32401llu, + 40823llu, + 51437llu, + 64811llu, + 81649llu, + 102877llu, + 129607llu, + 163307llu, + 205759llu, + 259229llu, + 326617llu, + 411527llu, + 518509llu, + 653267llu, + 823117llu, + 1037059llu, + 1306601llu, + 1646237llu, + 2074129llu, + 2613229llu, + 3292489llu, + 4148279llu, + 5226491llu, + 6584983llu, + 8296553llu, + 10453007llu, + 13169977llu, + 16593127llu, + 20906033llu, + 26339969llu, + 33186281llu, + 41812097llu, + 52679969llu, + 66372617llu, + 83624237llu, + 105359939llu, + 132745199llu, + 167248483llu, + 210719881llu, + 265490441llu, + 334496971llu, + 421439783llu, + 530980861llu, + 668993977llu, + 842879579llu, + 1061961721llu, + 1337987929llu, + 1685759167llu, + 2123923447llu, + 2675975881llu, + 3371518343llu, + 4247846927llu, + 5351951779llu, + 6743036717llu, + 8495693897llu, + 10703903591llu, + 13486073473llu, + 16991387857llu, + 21407807219llu, + 26972146961llu, + 33982775741llu, + 42815614441llu, + 53944293929llu, + 67965551447llu, + 85631228929llu, + 107888587883llu, + 135931102921llu, + 171262457903llu, + 215777175787llu, + 271862205833llu, + 342524915839llu, + 431554351609llu, + 543724411781llu, + 685049831731llu, + 863108703229llu, + 1087448823553llu, + 1370099663459llu, 
+ 1726217406467llu, + 2174897647073llu, + 2740199326961llu, + 3452434812973llu, + 4349795294267llu, + 5480398654009llu, + 6904869625999llu, + 8699590588571llu, + 10960797308051llu, + 13809739252051llu, + 17399181177241llu, + 21921594616111llu, + 27619478504183llu, + 34798362354533llu, + 43843189232363llu, + 55238957008387llu, + 69596724709081llu, + 87686378464759llu, + 110477914016779llu, + 139193449418173llu, + 175372756929481llu, + 220955828033581llu, + 278386898836457llu, + 350745513859007llu, + 441911656067171llu, + 556773797672909llu, + 701491027718027llu, + 883823312134381llu, + 1113547595345903llu, + 1402982055436147llu, + 1767646624268779llu, + 2227095190691797llu, + 2805964110872297llu, + 3535293248537579llu, + 4454190381383713llu, + 5611928221744609llu, + 7070586497075177llu, + 8908380762767489llu, + 11223856443489329llu, + 14141172994150357llu, + 17816761525534927llu, + 22447712886978529llu, + 28282345988300791llu, + 35633523051069991llu, + 44895425773957261llu, + 56564691976601587llu, + 71267046102139967llu, + 89790851547914507llu, + 113129383953203213llu, + 142534092204280003llu, + 179581703095829107llu, + 226258767906406483llu, + 285068184408560057llu, + 359163406191658253llu, + 452517535812813007llu, + 570136368817120201llu, + 718326812383316683llu, + 905035071625626043llu, + 1140272737634240411llu, + 1436653624766633509llu, + 1810070143251252131llu, + 2280545475268481167llu, + 2873307249533267101llu, + 3620140286502504283llu, + 4561090950536962147llu, + 5746614499066534157llu, + 7240280573005008577llu, + 9122181901073924329llu, + 11493228998133068689llu, + 14480561146010017169llu, + 18446744073709551557llu}; + static constexpr uint64_t (*const mod_functions[])(uint64_t) = { + &mod0, + &mod2, + &mod3, + &mod5, + &mod7, + &mod11, + &mod13, + &mod17, + &mod23, + &mod29, + &mod37, + &mod47, + &mod59, + &mod73, + &mod97, + &mod127, + &mod151, + &mod197, + &mod251, + &mod313, + &mod397, + &mod499, + &mod631, + &mod797, + &mod1009, + &mod1259, + &mod1597, 
+ &mod2011, + &mod2539, + &mod3203, + &mod4027, + &mod5087, + &mod6421, + &mod8089, + &mod10193, + &mod12853, + &mod16193, + &mod20399, + &mod25717, + &mod32401, + &mod40823, + &mod51437, + &mod64811, + &mod81649, + &mod102877, + &mod129607, + &mod163307, + &mod205759, + &mod259229, + &mod326617, + &mod411527, + &mod518509, + &mod653267, + &mod823117, + &mod1037059, + &mod1306601, + &mod1646237, + &mod2074129, + &mod2613229, + &mod3292489, + &mod4148279, + &mod5226491, + &mod6584983, + &mod8296553, + &mod10453007, + &mod13169977, + &mod16593127, + &mod20906033, + &mod26339969, + &mod33186281, + &mod41812097, + &mod52679969, + &mod66372617, + &mod83624237, + &mod105359939, + &mod132745199, + &mod167248483, + &mod210719881, + &mod265490441, + &mod334496971, + &mod421439783, + &mod530980861, + &mod668993977, + &mod842879579, + &mod1061961721, + &mod1337987929, + &mod1685759167, + &mod2123923447, + &mod2675975881, + &mod3371518343, + &mod4247846927, + &mod5351951779, + &mod6743036717, + &mod8495693897, + &mod10703903591, + &mod13486073473, + &mod16991387857, + &mod21407807219, + &mod26972146961, + &mod33982775741, + &mod42815614441, + &mod53944293929, + &mod67965551447, + &mod85631228929, + &mod107888587883, + &mod135931102921, + &mod171262457903, + &mod215777175787, + &mod271862205833, + &mod342524915839, + &mod431554351609, + &mod543724411781, + &mod685049831731, + &mod863108703229, + &mod1087448823553, + &mod1370099663459, + &mod1726217406467, + &mod2174897647073, + &mod2740199326961, + &mod3452434812973, + &mod4349795294267, + &mod5480398654009, + &mod6904869625999, + &mod8699590588571, + &mod10960797308051, + &mod13809739252051, + &mod17399181177241, + &mod21921594616111, + &mod27619478504183, + &mod34798362354533, + &mod43843189232363, + &mod55238957008387, + &mod69596724709081, + &mod87686378464759, + &mod110477914016779, + &mod139193449418173, + &mod175372756929481, + &mod220955828033581, + &mod278386898836457, + &mod350745513859007, + &mod441911656067171, + 
&mod556773797672909, + &mod701491027718027, + &mod883823312134381, + &mod1113547595345903, + &mod1402982055436147, + &mod1767646624268779, + &mod2227095190691797, + &mod2805964110872297, + &mod3535293248537579, + &mod4454190381383713, + &mod5611928221744609, + &mod7070586497075177, + &mod8908380762767489, + &mod11223856443489329, + &mod14141172994150357, + &mod17816761525534927, + &mod22447712886978529, + &mod28282345988300791, + &mod35633523051069991, + &mod44895425773957261, + &mod56564691976601587, + &mod71267046102139967, + &mod89790851547914507, + &mod113129383953203213, + &mod142534092204280003, + &mod179581703095829107, + &mod226258767906406483, + &mod285068184408560057, + &mod359163406191658253, + &mod452517535812813007, + &mod570136368817120201, + &mod718326812383316683, + &mod905035071625626043, + &mod1140272737634240411, + &mod1436653624766633509, + &mod1810070143251252131, + &mod2280545475268481167, + &mod2873307249533267101, + &mod3620140286502504283, + &mod4561090950536962147, + &mod5746614499066534157, + &mod7240280573005008577, + &mod9122181901073924329, + &mod11493228998133068689, + &mod14480561146010017169, + &mod18446744073709551557}; + const uint64_t* found = std::lower_bound( + std::begin(prime_list), std::end(prime_list) - 1, size); + size = *found; + return mod_functions[1 + found - prime_list]; + } + void commit(mod_function new_mod_function) { + current_mod_function = new_mod_function; + } + void reset() { + current_mod_function = &mod0; + } + + uint64_t index_for_hash(uint64_t hash, uint64_t /*num_slots_minus_one*/) + const { + return current_mod_function(hash); + } + uint64_t keep_in_range(uint64_t index, uint64_t num_slots_minus_one) const { + return index > num_slots_minus_one ? 
current_mod_function(index) : index; + } + + private: + mod_function current_mod_function = &mod0; +}; + +struct power_of_two_hash_policy { + uint64_t index_for_hash(uint64_t hash, uint64_t num_slots_minus_one) const { + return hash & num_slots_minus_one; + } + uint64_t keep_in_range(uint64_t index, uint64_t num_slots_minus_one) const { + return index_for_hash(index, num_slots_minus_one); + } + int8_t next_size_over(uint64_t& size) const { + size = detailv3::next_power_of_two(size); + return 0; + } + void commit(int8_t) {} + void reset() {} +}; + +struct fibonacci_hash_policy { + uint64_t index_for_hash(uint64_t hash, uint64_t /*num_slots_minus_one*/) + const { + return (11400714819323198485ull * hash) >> shift; + } + uint64_t keep_in_range(uint64_t index, uint64_t num_slots_minus_one) const { + return index & num_slots_minus_one; + } + + int8_t next_size_over(uint64_t& size) const { + size = std::max(uint64_t(2), detailv3::next_power_of_two(size)); + return 64 - detailv3::log2(size); + } + void commit(int8_t shift_) { + shift = shift_; + } + void reset() { + shift = 63; + } + + private: + int8_t shift = 63; +}; + +template < + typename K, + typename V, + typename H = std::hash, + typename E = std::equal_to, + typename A = std::allocator>> +class order_preserving_flat_hash_map + : public detailv3::sherwood_v3_table< + std::pair, + K, + H, + detailv3::KeyOrValueHasher, H>, + E, + detailv3::KeyOrValueEquality, E>, + A, + typename std::allocator_traits::template rebind_alloc< + detailv3::sherwood_v3_entry>>> { + using Table = detailv3::sherwood_v3_table< + std::pair, + K, + H, + detailv3::KeyOrValueHasher, H>, + E, + detailv3::KeyOrValueEquality, E>, + A, + typename std::allocator_traits::template rebind_alloc< + detailv3::sherwood_v3_entry>>>; + + public: + using key_type = K; + using mapped_type = V; + + using Table::Table; + order_preserving_flat_hash_map() = default; + + inline V& operator[](const K& key) { + return emplace(key, 
convertible_to_value()).first->second; + } + inline V& operator[](K&& key) { + return emplace(std::move(key), convertible_to_value()).first->second; + } + V& at(const K& key) { + auto found = this->find(key); + if (found == this->end()) + throw std::out_of_range("Argument passed to at() was not in the map."); + return found->second; + } + const V& at(const K& key) const { + auto found = this->find(key); + if (found == this->end()) + throw std::out_of_range("Argument passed to at() was not in the map."); + return found->second; + } + + using Table::emplace; + std::pair emplace() { + return emplace(key_type(), convertible_to_value()); + } + template + std::pair insert_or_assign( + const key_type& key, + M&& m) { + auto emplace_result = emplace(key, std::forward(m)); + if (!emplace_result.second) + emplace_result.first->second = std::forward(m); + return emplace_result; + } + template + std::pair insert_or_assign( + key_type&& key, + M&& m) { + auto emplace_result = emplace(std::move(key), std::forward(m)); + if (!emplace_result.second) + emplace_result.first->second = std::forward(m); + return emplace_result; + } + template + typename Table::iterator insert_or_assign( + typename Table::const_iterator, + const key_type& key, + M&& m) { + return insert_or_assign(key, std::forward(m)).first; + } + template + typename Table::iterator insert_or_assign( + typename Table::const_iterator, + key_type&& key, + M&& m) { + return insert_or_assign(std::move(key), std::forward(m)).first; + } + + friend bool operator==( + const order_preserving_flat_hash_map& lhs, + const order_preserving_flat_hash_map& rhs) { + if (lhs.size() != rhs.size()) + return false; + for (const typename Table::value_type& value : lhs) { + auto found = rhs.find(value.first); + if (found == rhs.end()) + return false; + else if (value.second != found->second) + return false; + } + return true; + } + friend bool operator!=( + const order_preserving_flat_hash_map& lhs, + const order_preserving_flat_hash_map& 
rhs) { + return !(lhs == rhs); + } + + private: + struct convertible_to_value { + operator V() const { + return V(); + } + }; +}; + +template < + typename T, + typename H = std::hash, + typename E = std::equal_to, + typename A = std::allocator> +class flat_hash_set + : public detailv3::sherwood_v3_table< + T, + T, + H, + detailv3::functor_storage, + E, + detailv3::functor_storage, + A, + typename std::allocator_traits::template rebind_alloc< + detailv3::sherwood_v3_entry>> { + using Table = detailv3::sherwood_v3_table< + T, + T, + H, + detailv3::functor_storage, + E, + detailv3::functor_storage, + A, + typename std::allocator_traits::template rebind_alloc< + detailv3::sherwood_v3_entry>>; + + public: + using key_type = T; + + using Table::Table; + flat_hash_set() = default; + + template + std::pair emplace(Args&&... args) { + return Table::emplace(T(std::forward(args)...)); + } + std::pair emplace(const key_type& arg) { + return Table::emplace(arg); + } + std::pair emplace(key_type& arg) { + return Table::emplace(arg); + } + std::pair emplace(const key_type&& arg) { + return Table::emplace(std::move(arg)); + } + std::pair emplace(key_type&& arg) { + return Table::emplace(std::move(arg)); + } + + friend bool operator==(const flat_hash_set& lhs, const flat_hash_set& rhs) { + if (lhs.size() != rhs.size()) + return false; + for (const T& value : lhs) { + if (rhs.find(value) == rhs.end()) + return false; + } + return true; + } + friend bool operator!=(const flat_hash_set& lhs, const flat_hash_set& rhs) { + return !(lhs == rhs); + } +}; + +template +struct power_of_two_std_hash : std::hash { + typedef ska_ordered::power_of_two_hash_policy hash_policy; +}; + +} // namespace ska_ordered + +C10_CLANG_DIAGNOSTIC_POP() diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/qint32.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/qint32.h new file mode 100644 index 
0000000000000000000000000000000000000000..d0a4867d62bbb115f6ce0a1d4521774146de97f7 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/qint32.h @@ -0,0 +1,18 @@ +#pragma once +#include + +#include + +namespace c10 { + +/** + * qint32 is for signed 32 bit quantized Tensors + */ +struct alignas(4) qint32 { + using underlying = int32_t; + int32_t val_; + qint32() = default; + C10_HOST_DEVICE explicit qint32(int32_t val) : val_(val) {} +}; + +} // namespace c10 diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/quint8.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/quint8.h new file mode 100644 index 0000000000000000000000000000000000000000..3aeb09b6397c02e057c208bc8d1ac1add9271e31 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/quint8.h @@ -0,0 +1,18 @@ +#pragma once +#include + +#include + +namespace c10 { + +/** + * quint8 is for unsigned 8 bit quantized Tensors + */ +struct alignas(1) quint8 { + using underlying = uint8_t; + uint8_t val_; + quint8() = default; + C10_HOST_DEVICE explicit quint8(uint8_t val) : val_(val) {} +}; + +} // namespace c10 diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/static_tracepoint.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/static_tracepoint.h new file mode 100644 index 0000000000000000000000000000000000000000..4dee97306d14326ba99072feb08b80badda77352 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/static_tracepoint.h @@ -0,0 +1,34 @@ +#pragma once + +#if defined(__ELF__) && (defined(__x86_64__) || defined(__i386__)) && \ + !(defined(TORCH_DISABLE_SDT) && TORCH_DISABLE_SDT) + +#define TORCH_HAVE_SDT 1 + +#include + +#define TORCH_SDT(name, ...) 
\ + TORCH_SDT_PROBE_N( \ + pytorch, name, 0, TORCH_SDT_NARG(0, ##__VA_ARGS__), ##__VA_ARGS__) +// Use TORCH_SDT_DEFINE_SEMAPHORE(name) to define the semaphore +// as global variable before using the TORCH_SDT_WITH_SEMAPHORE macro +#define TORCH_SDT_WITH_SEMAPHORE(name, ...) \ + TORCH_SDT_PROBE_N( \ + pytorch, name, 1, TORCH_SDT_NARG(0, ##__VA_ARGS__), ##__VA_ARGS__) +#define TORCH_SDT_IS_ENABLED(name) (TORCH_SDT_SEMAPHORE(pytorch, name) > 0) + +#else + +#define TORCH_HAVE_SDT 0 + +#define TORCH_SDT(name, ...) \ + do { \ + } while (0) +#define TORCH_SDT_WITH_SEMAPHORE(name, ...) \ + do { \ + } while (0) +#define TORCH_SDT_IS_ENABLED(name) (false) +#define TORCH_SDT_DEFINE_SEMAPHORE(name) +#define TORCH_SDT_DECLARE_SEMAPHORE(name) + +#endif diff --git a/videollama2/lib/python3.10/site-packages/torch/include/c10/util/win32-headers.h b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/win32-headers.h new file mode 100644 index 0000000000000000000000000000000000000000..6a3e8bc2caf42f02fba1925970dd33770a40abfa --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/c10/util/win32-headers.h @@ -0,0 +1,60 @@ +#pragma once + +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#ifndef NOMINMAX +#define NOMINMAX +#endif +#ifndef NOKERNEL +#define NOKERNEL +#endif +#ifndef NOUSER +#define NOUSER +#endif +#ifndef NOSERVICE +#define NOSERVICE +#endif +#ifndef NOSOUND +#define NOSOUND +#endif +#ifndef NOMCX +#define NOMCX +#endif +#ifndef NOGDI +#define NOGDI +#endif +#ifndef NOMSG +#define NOMSG +#endif +#ifndef NOMB +#define NOMB +#endif +#ifndef NOCLIPBOARD +#define NOCLIPBOARD +#endif + +// dbghelp seems to require windows.h. 
+// clang-format off +#include +#include +// clang-format on + +#undef VOID +#undef DELETE +#undef IN +#undef THIS +#undef CONST +#undef NAN +#undef UNKNOWN +#undef NONE +#undef ANY +#undef IGNORE +#undef STRICT +#undef GetObject +#undef CreateSemaphore +#undef Yield +#undef RotateRight32 +#undef RotateLeft32 +#undef RotateRight64 +#undef RotateLeft64 diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_base.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_base.h new file mode 100644 index 0000000000000000000000000000000000000000..e4af67d7e5f0b77d50a1b781ed7805834f22c22c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_base.h @@ -0,0 +1,317 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/array/data.h" +#include "arrow/buffer.h" +#include "arrow/compare.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" +#include "arrow/visitor.h" + +namespace arrow { + +// ---------------------------------------------------------------------- +// User array accessor types + +/// \brief Array base type +/// Immutable data array with some logical type and some length. +/// +/// Any memory is owned by the respective Buffer instance (or its parents). +/// +/// The base class is only required to have a null bitmap buffer if the null +/// count is greater than 0 +/// +/// If known, the null count can be provided in the base Array constructor. If +/// the null count is not known, pass -1 to indicate that the null count is to +/// be computed on the first call to null_count() +class ARROW_EXPORT Array { + public: + virtual ~Array() = default; + + /// \brief Return true if value at index is null. Does not boundscheck + bool IsNull(int64_t i) const { return !IsValid(i); } + + /// \brief Return true if value at index is valid (not null). Does not + /// boundscheck + bool IsValid(int64_t i) const { + if (null_bitmap_data_ != NULLPTR) { + return bit_util::GetBit(null_bitmap_data_, i + data_->offset); + } + // Dispatching with a few conditionals like this makes IsNull more + // efficient for how it is used in practice. Making IsNull virtual + // would add a vtable lookup to every call and prevent inlining + + // a potential inner-branch removal. 
+ if (type_id() == Type::SPARSE_UNION) { + return !internal::IsNullSparseUnion(*data_, i); + } + if (type_id() == Type::DENSE_UNION) { + return !internal::IsNullDenseUnion(*data_, i); + } + if (type_id() == Type::RUN_END_ENCODED) { + return !internal::IsNullRunEndEncoded(*data_, i); + } + return data_->null_count != data_->length; + } + + /// \brief Return a Scalar containing the value of this array at i + Result> GetScalar(int64_t i) const; + + /// Size in the number of elements this array contains. + int64_t length() const { return data_->length; } + + /// A relative position into another array's data, to enable zero-copy + /// slicing. This value defaults to zero + int64_t offset() const { return data_->offset; } + + /// The number of null entries in the array. If the null count was not known + /// at time of construction (and set to a negative value), then the null + /// count will be computed and cached on the first invocation of this + /// function + int64_t null_count() const; + + /// \brief Computes the logical null count for arrays of all types including + /// those that do not have a validity bitmap like union and run-end encoded + /// arrays + /// + /// If the array has a validity bitmap, this function behaves the same as + /// null_count(). For types that have no validity bitmap, this function will + /// recompute the null count every time it is called. + /// + /// \see GetNullCount + int64_t ComputeLogicalNullCount() const; + + const std::shared_ptr& type() const { return data_->type; } + Type::type type_id() const { return data_->type->id(); } + + /// Buffer for the validity (null) bitmap, if any. Note that Union types + /// never have a null bitmap. + /// + /// Note that for `null_count == 0` or for null type, this will be null. + /// This buffer does not account for any slice offset + const std::shared_ptr& null_bitmap() const { return data_->buffers[0]; } + + /// Raw pointer to the null bitmap. 
+ /// + /// Note that for `null_count == 0` or for null type, this will be null. + /// This buffer does not account for any slice offset + const uint8_t* null_bitmap_data() const { return null_bitmap_data_; } + + /// Equality comparison with another array + bool Equals(const Array& arr, const EqualOptions& = EqualOptions::Defaults()) const; + bool Equals(const std::shared_ptr& arr, + const EqualOptions& = EqualOptions::Defaults()) const; + + /// \brief Return the formatted unified diff of arrow::Diff between this + /// Array and another Array + std::string Diff(const Array& other) const; + + /// Approximate equality comparison with another array + /// + /// epsilon is only used if this is FloatArray or DoubleArray + bool ApproxEquals(const std::shared_ptr& arr, + const EqualOptions& = EqualOptions::Defaults()) const; + bool ApproxEquals(const Array& arr, + const EqualOptions& = EqualOptions::Defaults()) const; + + /// Compare if the range of slots specified are equal for the given array and + /// this array. end_idx exclusive. This methods does not bounds check. + bool RangeEquals(int64_t start_idx, int64_t end_idx, int64_t other_start_idx, + const Array& other, + const EqualOptions& = EqualOptions::Defaults()) const; + bool RangeEquals(int64_t start_idx, int64_t end_idx, int64_t other_start_idx, + const std::shared_ptr& other, + const EqualOptions& = EqualOptions::Defaults()) const; + bool RangeEquals(const Array& other, int64_t start_idx, int64_t end_idx, + int64_t other_start_idx, + const EqualOptions& = EqualOptions::Defaults()) const; + bool RangeEquals(const std::shared_ptr& other, int64_t start_idx, + int64_t end_idx, int64_t other_start_idx, + const EqualOptions& = EqualOptions::Defaults()) const; + + /// \brief Apply the ArrayVisitor::Visit() method specialized to the array type + Status Accept(ArrayVisitor* visitor) const; + + /// Construct a zero-copy view of this array with the given type. 
+ /// + /// This method checks if the types are layout-compatible. + /// Nested types are traversed in depth-first order. Data buffers must have + /// the same item sizes, even though the logical types may be different. + /// An error is returned if the types are not layout-compatible. + Result> View(const std::shared_ptr& type) const; + + /// \brief Construct a copy of the array with all buffers on destination + /// Memory Manager + /// + /// This method recursively copies the array's buffers and those of its children + /// onto the destination MemoryManager device and returns the new Array. + Result> CopyTo(const std::shared_ptr& to) const; + + /// \brief Construct a new array attempting to zero-copy view if possible. + /// + /// Like CopyTo this method recursively goes through all of the array's buffers + /// and those of it's children and first attempts to create zero-copy + /// views on the destination MemoryManager device. If it can't, it falls back + /// to performing a copy. See Buffer::ViewOrCopy. + Result> ViewOrCopyTo( + const std::shared_ptr& to) const; + + /// Construct a zero-copy slice of the array with the indicated offset and + /// length + /// + /// \param[in] offset the position of the first element in the constructed + /// slice + /// \param[in] length the length of the slice. 
If there are not enough + /// elements in the array, the length will be adjusted accordingly + /// + /// \return a new object wrapped in std::shared_ptr + std::shared_ptr Slice(int64_t offset, int64_t length) const; + + /// Slice from offset until end of the array + std::shared_ptr Slice(int64_t offset) const; + + /// Input-checking variant of Array::Slice + Result> SliceSafe(int64_t offset, int64_t length) const; + /// Input-checking variant of Array::Slice + Result> SliceSafe(int64_t offset) const; + + const std::shared_ptr& data() const { return data_; } + + int num_fields() const { return static_cast(data_->child_data.size()); } + + /// \return PrettyPrint representation of array suitable for debugging + std::string ToString() const; + + /// \brief Perform cheap validation checks to determine obvious inconsistencies + /// within the array's internal data. + /// + /// This is O(k) where k is the number of descendents. + /// + /// \return Status + Status Validate() const; + + /// \brief Perform extensive validation checks to determine inconsistencies + /// within the array's internal data. + /// + /// This is potentially O(k*n) where k is the number of descendents and n + /// is the array length. + /// + /// \return Status + Status ValidateFull() const; + + /// \brief Return the device_type that this array's data is allocated on + /// + /// This just delegates to calling device_type on the underlying ArrayData + /// object which backs this Array. + /// + /// \return DeviceAllocationType + DeviceAllocationType device_type() const { return data_->device_type(); } + + /// \brief Return the statistics of this Array + /// + /// This just delegates to calling statistics on the underlying ArrayData + /// object which backs this Array. 
+ /// + /// \return const ArrayStatistics& + std::shared_ptr statistics() const { return data_->statistics; } + + protected: + Array() = default; + ARROW_DEFAULT_MOVE_AND_ASSIGN(Array); + + std::shared_ptr data_; + const uint8_t* null_bitmap_data_ = NULLPTR; + + /// Protected method for constructors + void SetData(const std::shared_ptr& data) { + if (data->buffers.size() > 0) { + null_bitmap_data_ = data->GetValuesSafe(0, /*offset=*/0); + } else { + null_bitmap_data_ = NULLPTR; + } + data_ = data; + } + + private: + ARROW_DISALLOW_COPY_AND_ASSIGN(Array); + + ARROW_FRIEND_EXPORT friend void PrintTo(const Array& x, std::ostream* os); +}; + +static inline std::ostream& operator<<(std::ostream& os, const Array& x) { + os << x.ToString(); + return os; +} + +/// Base class for non-nested arrays +class ARROW_EXPORT FlatArray : public Array { + protected: + using Array::Array; +}; + +/// Base class for arrays of fixed-size logical types +class ARROW_EXPORT PrimitiveArray : public FlatArray { + public: + PrimitiveArray(const std::shared_ptr& type, int64_t length, + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// Does not account for any slice offset + const std::shared_ptr& values() const { return data_->buffers[1]; } + + protected: + PrimitiveArray() : raw_values_(NULLPTR) {} + + void SetData(const std::shared_ptr& data) { + this->Array::SetData(data); + raw_values_ = data->GetValuesSafe(1, /*offset=*/0); + } + + explicit PrimitiveArray(const std::shared_ptr& data) { SetData(data); } + + const uint8_t* raw_values_; +}; + +/// Degenerate null type Array +class ARROW_EXPORT NullArray : public FlatArray { + public: + using TypeClass = NullType; + + explicit NullArray(const std::shared_ptr& data) { SetData(data); } + explicit NullArray(int64_t length); + + private: + void SetData(const std::shared_ptr& data) { + null_bitmap_data_ = NULLPTR; + data->null_count = data->length; + 
data_ = data; + } +}; + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_binary.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_binary.h new file mode 100644 index 0000000000000000000000000000000000000000..63903eac46d413c24ccaeb048273e8f5e6c8d3c6 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_binary.h @@ -0,0 +1,321 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Array accessor classes for Binary, LargeBinary, String, LargeString, +// FixedSizeBinary + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/array/array_base.h" +#include "arrow/array/data.h" +#include "arrow/buffer.h" +#include "arrow/stl_iterator.h" +#include "arrow/type.h" +#include "arrow/util/checked_cast.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \addtogroup binary-arrays +/// +/// @{ + +// ---------------------------------------------------------------------- +// Binary and String + +/// Base class for variable-sized binary arrays, regardless of offset size +/// and logical interpretation. 
+template +class BaseBinaryArray : public FlatArray { + public: + using TypeClass = TYPE; + using offset_type = typename TypeClass::offset_type; + using IteratorType = stl::ArrayIterator>; + + /// Return the pointer to the given elements bytes + // XXX should GetValue(int64_t i) return a string_view? + const uint8_t* GetValue(int64_t i, offset_type* out_length) const { + const offset_type pos = raw_value_offsets_[i]; + *out_length = raw_value_offsets_[i + 1] - pos; + return raw_data_ + pos; + } + + /// \brief Get binary value as a string_view + /// + /// \param i the value index + /// \return the view over the selected value + std::string_view GetView(int64_t i) const { + const offset_type pos = raw_value_offsets_[i]; + return std::string_view(reinterpret_cast(raw_data_ + pos), + raw_value_offsets_[i + 1] - pos); + } + + std::optional operator[](int64_t i) const { + return *IteratorType(*this, i); + } + + /// \brief Get binary value as a string_view + /// Provided for consistency with other arrays. + /// + /// \param i the value index + /// \return the view over the selected value + std::string_view Value(int64_t i) const { return GetView(i); } + + /// \brief Get binary value as a std::string + /// + /// \param i the value index + /// \return the value copied into a std::string + std::string GetString(int64_t i) const { return std::string(GetView(i)); } + + /// Note that this buffer does not account for any slice offset + std::shared_ptr value_offsets() const { return data_->buffers[1]; } + + /// Note that this buffer does not account for any slice offset + std::shared_ptr value_data() const { return data_->buffers[2]; } + + const offset_type* raw_value_offsets() const { return raw_value_offsets_; } + + const uint8_t* raw_data() const { return raw_data_; } + + /// \brief Return the data buffer absolute offset of the data for the value + /// at the passed index. 
+ /// + /// Does not perform boundschecking + offset_type value_offset(int64_t i) const { return raw_value_offsets_[i]; } + + /// \brief Return the length of the data for the value at the passed index. + /// + /// Does not perform boundschecking + offset_type value_length(int64_t i) const { + return raw_value_offsets_[i + 1] - raw_value_offsets_[i]; + } + + /// \brief Return the total length of the memory in the data buffer + /// referenced by this array. If the array has been sliced then this may be + /// less than the size of the data buffer (data_->buffers[2]). + offset_type total_values_length() const { + if (data_->length > 0) { + return raw_value_offsets_[data_->length] - raw_value_offsets_[0]; + } else { + return 0; + } + } + + IteratorType begin() const { return IteratorType(*this); } + + IteratorType end() const { return IteratorType(*this, length()); } + + protected: + // For subclasses + BaseBinaryArray() = default; + + // Protected method for constructors + void SetData(const std::shared_ptr& data) { + this->Array::SetData(data); + raw_value_offsets_ = data->GetValuesSafe(1); + raw_data_ = data->GetValuesSafe(2, /*offset=*/0); + } + + const offset_type* raw_value_offsets_ = NULLPTR; + const uint8_t* raw_data_ = NULLPTR; +}; + +/// Concrete Array class for variable-size binary data +class ARROW_EXPORT BinaryArray : public BaseBinaryArray { + public: + explicit BinaryArray(const std::shared_ptr& data); + + BinaryArray(int64_t length, const std::shared_ptr& value_offsets, + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + protected: + // For subclasses such as StringArray + BinaryArray() : BaseBinaryArray() {} +}; + +/// Concrete Array class for variable-size string (utf-8) data +class ARROW_EXPORT StringArray : public BinaryArray { + public: + using TypeClass = StringType; + + explicit StringArray(const std::shared_ptr& data); + + StringArray(int64_t length, 
const std::shared_ptr& value_offsets, + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Validate that this array contains only valid UTF8 entries + /// + /// This check is also implied by ValidateFull() + Status ValidateUTF8() const; +}; + +/// Concrete Array class for large variable-size binary data +class ARROW_EXPORT LargeBinaryArray : public BaseBinaryArray { + public: + explicit LargeBinaryArray(const std::shared_ptr& data); + + LargeBinaryArray(int64_t length, const std::shared_ptr& value_offsets, + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + protected: + // For subclasses such as LargeStringArray + LargeBinaryArray() : BaseBinaryArray() {} +}; + +/// Concrete Array class for large variable-size string (utf-8) data +class ARROW_EXPORT LargeStringArray : public LargeBinaryArray { + public: + using TypeClass = LargeStringType; + + explicit LargeStringArray(const std::shared_ptr& data); + + LargeStringArray(int64_t length, const std::shared_ptr& value_offsets, + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Validate that this array contains only valid UTF8 entries + /// + /// This check is also implied by ValidateFull() + Status ValidateUTF8() const; +}; + +// ---------------------------------------------------------------------- +// BinaryView and StringView + +/// Concrete Array class for variable-size binary view data using the +/// BinaryViewType::c_type struct to reference in-line or out-of-line string values +class ARROW_EXPORT BinaryViewArray : public FlatArray { + public: + using TypeClass = BinaryViewType; + using IteratorType = stl::ArrayIterator; + using c_type = BinaryViewType::c_type; + + explicit BinaryViewArray(std::shared_ptr data); + + 
BinaryViewArray(std::shared_ptr type, int64_t length, + std::shared_ptr views, BufferVector data_buffers, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + // For API compatibility with BinaryArray etc. + std::string_view GetView(int64_t i) const; + std::string GetString(int64_t i) const { return std::string{GetView(i)}; } + + const auto& values() const { return data_->buffers[1]; } + const c_type* raw_values() const { return raw_values_; } + + std::optional operator[](int64_t i) const { + return *IteratorType(*this, i); + } + + IteratorType begin() const { return IteratorType(*this); } + IteratorType end() const { return IteratorType(*this, length()); } + + protected: + using FlatArray::FlatArray; + + void SetData(std::shared_ptr data) { + FlatArray::SetData(std::move(data)); + raw_values_ = data_->GetValuesSafe(1); + } + + const c_type* raw_values_; +}; + +/// Concrete Array class for variable-size string view (utf-8) data using +/// BinaryViewType::c_type to reference in-line or out-of-line string values +class ARROW_EXPORT StringViewArray : public BinaryViewArray { + public: + using TypeClass = StringViewType; + + explicit StringViewArray(std::shared_ptr data); + + using BinaryViewArray::BinaryViewArray; + + /// \brief Validate that this array contains only valid UTF8 entries + /// + /// This check is also implied by ValidateFull() + Status ValidateUTF8() const; +}; + +// ---------------------------------------------------------------------- +// Fixed width binary + +/// Concrete Array class for fixed-size binary data +class ARROW_EXPORT FixedSizeBinaryArray : public PrimitiveArray { + public: + using TypeClass = FixedSizeBinaryType; + using IteratorType = stl::ArrayIterator; + + explicit FixedSizeBinaryArray(const std::shared_ptr& data); + + FixedSizeBinaryArray(const std::shared_ptr& type, int64_t length, + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t 
null_count = kUnknownNullCount, int64_t offset = 0); + + const uint8_t* GetValue(int64_t i) const { return values_ + i * byte_width_; } + const uint8_t* Value(int64_t i) const { return GetValue(i); } + + std::string_view GetView(int64_t i) const { + return std::string_view(reinterpret_cast(GetValue(i)), byte_width_); + } + + std::optional operator[](int64_t i) const { + return *IteratorType(*this, i); + } + + std::string GetString(int64_t i) const { return std::string(GetView(i)); } + + int32_t byte_width() const { return byte_width_; } + + const uint8_t* raw_values() const { return values_; } + + IteratorType begin() const { return IteratorType(*this); } + + IteratorType end() const { return IteratorType(*this, length()); } + + protected: + void SetData(const std::shared_ptr& data) { + this->PrimitiveArray::SetData(data); + byte_width_ = + internal::checked_cast(*type()).byte_width(); + values_ = raw_values_ + data_->offset * byte_width_; + } + + const uint8_t* values_; + int32_t byte_width_; +}; + +/// @} + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_decimal.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_decimal.h new file mode 100644 index 0000000000000000000000000000000000000000..2f10bb842999640a8cada703ff12ea29c0e5f718 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_decimal.h @@ -0,0 +1,104 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/array/array_binary.h" +#include "arrow/array/data.h" +#include "arrow/type.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \addtogroup numeric-arrays +/// +/// @{ + +// ---------------------------------------------------------------------- +// Decimal32Array + +/// Concrete Array class for 32-bit decimal data +class ARROW_EXPORT Decimal32Array : public FixedSizeBinaryArray { + public: + using TypeClass = Decimal32Type; + + using FixedSizeBinaryArray::FixedSizeBinaryArray; + + /// \brief Construct Decimal32Array from ArrayData instance + explicit Decimal32Array(const std::shared_ptr& data); + + std::string FormatValue(int64_t i) const; +}; + +// ---------------------------------------------------------------------- +// Decimal64Array + +/// Concrete Array class for 64-bit decimal data +class ARROW_EXPORT Decimal64Array : public FixedSizeBinaryArray { + public: + using TypeClass = Decimal64Type; + + using FixedSizeBinaryArray::FixedSizeBinaryArray; + + /// \brief Construct Decimal64Array from ArrayData instance + explicit Decimal64Array(const std::shared_ptr& data); + + std::string FormatValue(int64_t i) const; +}; + +// ---------------------------------------------------------------------- +// Decimal128Array + +/// Concrete Array class for 128-bit decimal data +class ARROW_EXPORT Decimal128Array : public FixedSizeBinaryArray { + public: + using TypeClass = Decimal128Type; + + using FixedSizeBinaryArray::FixedSizeBinaryArray; + + /// \brief Construct 
Decimal128Array from ArrayData instance + explicit Decimal128Array(const std::shared_ptr& data); + + std::string FormatValue(int64_t i) const; +}; + +// Backward compatibility +using DecimalArray = Decimal128Array; + +// ---------------------------------------------------------------------- +// Decimal256Array + +/// Concrete Array class for 256-bit decimal data +class ARROW_EXPORT Decimal256Array : public FixedSizeBinaryArray { + public: + using TypeClass = Decimal256Type; + + using FixedSizeBinaryArray::FixedSizeBinaryArray; + + /// \brief Construct Decimal256Array from ArrayData instance + explicit Decimal256Array(const std::shared_ptr& data); + + std::string FormatValue(int64_t i) const; +}; + +/// @} + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_dict.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_dict.h new file mode 100644 index 0000000000000000000000000000000000000000..bf376b51f8c9470d2b4e4c7ed950c9a513fddc9b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_dict.h @@ -0,0 +1,182 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include + +#include "arrow/array/array_base.h" +#include "arrow/array/data.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +// ---------------------------------------------------------------------- +// DictionaryArray + +/// \brief Array type for dictionary-encoded data with a +/// data-dependent dictionary +/// +/// A dictionary array contains an array of non-negative integers (the +/// "dictionary indices") along with a data type containing a "dictionary" +/// corresponding to the distinct values represented in the data. +/// +/// For example, the array +/// +/// ["foo", "bar", "foo", "bar", "foo", "bar"] +/// +/// with dictionary ["bar", "foo"], would have dictionary array representation +/// +/// indices: [1, 0, 1, 0, 1, 0] +/// dictionary: ["bar", "foo"] +/// +/// The indices in principle may be any integer type. +class ARROW_EXPORT DictionaryArray : public Array { + public: + using TypeClass = DictionaryType; + + explicit DictionaryArray(const std::shared_ptr& data); + + DictionaryArray(const std::shared_ptr& type, + const std::shared_ptr& indices, + const std::shared_ptr& dictionary); + + /// \brief Construct DictionaryArray from dictionary and indices + /// array and validate + /// + /// This function does the validation of the indices and input type. It checks if + /// all indices are non-negative and smaller than the size of the dictionary. 
+ /// + /// \param[in] type a dictionary type + /// \param[in] dictionary the dictionary with same value type as the + /// type object + /// \param[in] indices an array of non-negative integers smaller than the + /// size of the dictionary + static Result> FromArrays( + const std::shared_ptr& type, const std::shared_ptr& indices, + const std::shared_ptr& dictionary); + + static Result> FromArrays( + const std::shared_ptr& indices, const std::shared_ptr& dictionary) { + return FromArrays(::arrow::dictionary(indices->type(), dictionary->type()), indices, + dictionary); + } + + /// \brief Transpose this DictionaryArray + /// + /// This method constructs a new dictionary array with the given dictionary + /// type, transposing indices using the transpose map. The type and the + /// transpose map are typically computed using DictionaryUnifier. + /// + /// \param[in] type the new type object + /// \param[in] dictionary the new dictionary + /// \param[in] transpose_map transposition array of this array's indices + /// into the target array's indices + /// \param[in] pool a pool to allocate the array data from + Result> Transpose( + const std::shared_ptr& type, const std::shared_ptr& dictionary, + const int32_t* transpose_map, MemoryPool* pool = default_memory_pool()) const; + + Result> Compact(MemoryPool* pool = default_memory_pool()) const; + + /// \brief Determine whether dictionary arrays may be compared without unification + bool CanCompareIndices(const DictionaryArray& other) const; + + /// \brief Return the dictionary for this array, which is stored as + /// a member of the ArrayData internal structure + const std::shared_ptr& dictionary() const; + const std::shared_ptr& indices() const; + + /// \brief Return the ith value of indices, cast to int64_t. Not recommended + /// for use in performance-sensitive code. Does not validate whether the + /// value is null or out-of-bounds. 
+ int64_t GetValueIndex(int64_t i) const; + + const DictionaryType* dict_type() const { return dict_type_; } + + private: + void SetData(const std::shared_ptr& data); + const DictionaryType* dict_type_; + std::shared_ptr indices_; + + // Lazily initialized when invoking dictionary() + mutable std::shared_ptr dictionary_; +}; + +/// \brief Helper class for incremental dictionary unification +class ARROW_EXPORT DictionaryUnifier { + public: + virtual ~DictionaryUnifier() = default; + + /// \brief Construct a DictionaryUnifier + /// \param[in] value_type the data type of the dictionaries + /// \param[in] pool MemoryPool to use for memory allocations + static Result> Make( + std::shared_ptr value_type, MemoryPool* pool = default_memory_pool()); + + /// \brief Unify dictionaries across array chunks + /// + /// The dictionaries in the array chunks will be unified, their indices + /// accordingly transposed. + /// + /// Only dictionaries with a primitive value type are currently supported. + /// However, dictionaries nested inside a more complex type are correctly unified. + static Result> UnifyChunkedArray( + const std::shared_ptr& array, + MemoryPool* pool = default_memory_pool()); + + /// \brief Unify dictionaries across the chunks of each table column + /// + /// The dictionaries in each table column will be unified, their indices + /// accordingly transposed. + /// + /// Only dictionaries with a primitive value type are currently supported. + /// However, dictionaries nested inside a more complex type are correctly unified. 
+ static Result> UnifyTable( + const Table& table, MemoryPool* pool = default_memory_pool()); + + /// \brief Append dictionary to the internal memo + virtual Status Unify(const Array& dictionary) = 0; + + /// \brief Append dictionary and compute transpose indices + /// \param[in] dictionary the dictionary values to unify + /// \param[out] out_transpose a Buffer containing computed transpose indices + /// as int32_t values equal in length to the passed dictionary. The value in + /// each slot corresponds to the new index value for each original index + /// for a DictionaryArray with the old dictionary + virtual Status Unify(const Array& dictionary, + std::shared_ptr* out_transpose) = 0; + + /// \brief Return a result DictionaryType with the smallest possible index + /// type to accommodate the unified dictionary. The unifier cannot be used + /// after this is called + virtual Status GetResult(std::shared_ptr* out_type, + std::shared_ptr* out_dict) = 0; + + /// \brief Return a unified dictionary with the given index type. If + /// the index type is not large enough then an invalid status will be returned. + /// The unifier cannot be used after this is called + virtual Status GetResultWithIndexType(const std::shared_ptr& index_type, + std::shared_ptr* out_dict) = 0; +}; + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_run_end.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_run_end.h new file mode 100644 index 0000000000000000000000000000000000000000..b46b0855ab36776eec4e22cef1a35112e2d18fa8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_run_end.h @@ -0,0 +1,133 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Array accessor classes run-end encoded arrays + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/array/array_base.h" +#include "arrow/array/data.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/type_fwd.h" +#include "arrow/util/checked_cast.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \addtogroup run-end-encoded-arrays +/// +/// @{ + +// ---------------------------------------------------------------------- +// RunEndEncoded + +/// \brief Array type for run-end encoded data +class ARROW_EXPORT RunEndEncodedArray : public Array { + private: + std::shared_ptr run_ends_array_; + std::shared_ptr values_array_; + + public: + using TypeClass = RunEndEncodedType; + + explicit RunEndEncodedArray(const std::shared_ptr& data); + + /// \brief Construct a RunEndEncodedArray from all parameters + /// + /// The length and offset parameters refer to the dimensions of the logical + /// array which is the array we would get after expanding all the runs into + /// repeated values. As such, length can be much greater than the length of + /// the child run_ends and values arrays. 
+ RunEndEncodedArray(const std::shared_ptr& type, int64_t length, + const std::shared_ptr& run_ends, + const std::shared_ptr& values, int64_t offset = 0); + + /// \brief Construct a RunEndEncodedArray from all parameters + /// + /// The length and offset parameters refer to the dimensions of the logical + /// array which is the array we would get after expanding all the runs into + /// repeated values. As such, length can be much greater than the length of + /// the child run_ends and values arrays. + static Result> Make( + const std::shared_ptr& type, int64_t logical_length, + const std::shared_ptr& run_ends, const std::shared_ptr& values, + int64_t logical_offset = 0); + + /// \brief Construct a RunEndEncodedArray from values and run ends arrays + /// + /// The data type is automatically inferred from the arguments. + /// The run_ends and values arrays must have the same length. + static Result> Make( + int64_t logical_length, const std::shared_ptr& run_ends, + const std::shared_ptr& values, int64_t logical_offset = 0); + + protected: + void SetData(const std::shared_ptr& data); + + public: + /// \brief Returns an array holding the logical indexes of each run-end + /// + /// The physical offset to the array is applied. + const std::shared_ptr& run_ends() const { return run_ends_array_; } + + /// \brief Returns an array holding the values of each run + /// + /// The physical offset to the array is applied. + const std::shared_ptr& values() const { return values_array_; } + + /// \brief Returns an array holding the logical indexes of each run end + /// + /// If a non-zero logical offset is set, this function allocates a new + /// array and rewrites all the run end values to be relative to the logical + /// offset and cuts the end of the array to the logical length. 
+ Result> LogicalRunEnds(MemoryPool* pool) const; + + /// \brief Returns an array holding the values of each run + /// + /// If a non-zero logical offset is set, this function allocates a new + /// array containing only the values within the logical range. + std::shared_ptr LogicalValues() const; + + /// \brief Find the physical offset of this REE array + /// + /// This function uses binary-search, so it has a O(log N) cost. + int64_t FindPhysicalOffset() const; + + /// \brief Find the physical length of this REE array + /// + /// The physical length of an REE is the number of physical values (and + /// run-ends) necessary to represent the logical range of values from offset + /// to length. + /// + /// Avoid calling this function if the physical length can be established in + /// some other way (e.g. when iterating over the runs sequentially until the + /// end). This function uses binary-search, so it has a O(log N) cost. + int64_t FindPhysicalLength() const; +}; + +/// @} + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_adaptive.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_adaptive.h new file mode 100644 index 0000000000000000000000000000000000000000..0cea571be3e3244741f3df15f87c8958eedddf76 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_adaptive.h @@ -0,0 +1,215 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/array/builder_base.h" +#include "arrow/buffer.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \addtogroup numeric-builders +/// +/// @{ + +namespace internal { + +class ARROW_EXPORT AdaptiveIntBuilderBase : public ArrayBuilder { + public: + AdaptiveIntBuilderBase(uint8_t start_int_size, MemoryPool* pool, + int64_t alignment = kDefaultBufferAlignment); + + explicit AdaptiveIntBuilderBase(MemoryPool* pool, + int64_t alignment = kDefaultBufferAlignment) + : AdaptiveIntBuilderBase(sizeof(uint8_t), pool, alignment) {} + + /// \brief Append multiple nulls + /// \param[in] length the number of nulls to append + Status AppendNulls(int64_t length) final { + ARROW_RETURN_NOT_OK(CommitPendingData()); + if (ARROW_PREDICT_TRUE(length > 0)) { + ARROW_RETURN_NOT_OK(Reserve(length)); + memset(data_->mutable_data() + length_ * int_size_, 0, int_size_ * length); + UnsafeSetNull(length); + } + return Status::OK(); + } + + Status AppendNull() final { + pending_data_[pending_pos_] = 0; + pending_valid_[pending_pos_] = 0; + pending_has_nulls_ = true; + ++pending_pos_; + ++length_; + ++null_count_; + + if (ARROW_PREDICT_FALSE(pending_pos_ >= pending_size_)) { + return CommitPendingData(); + } + return Status::OK(); + } + + Status AppendEmptyValues(int64_t length) final { + ARROW_RETURN_NOT_OK(CommitPendingData()); + if (ARROW_PREDICT_TRUE(length > 0)) { + ARROW_RETURN_NOT_OK(Reserve(length)); + 
memset(data_->mutable_data() + length_ * int_size_, 0, int_size_ * length); + UnsafeSetNotNull(length); + } + return Status::OK(); + } + + Status AppendEmptyValue() final { + pending_data_[pending_pos_] = 0; + pending_valid_[pending_pos_] = 1; + ++pending_pos_; + ++length_; + + if (ARROW_PREDICT_FALSE(pending_pos_ >= pending_size_)) { + return CommitPendingData(); + } + return Status::OK(); + } + + void Reset() override; + Status Resize(int64_t capacity) override; + + protected: + Status AppendInternal(const uint64_t val) { + pending_data_[pending_pos_] = val; + pending_valid_[pending_pos_] = 1; + ++pending_pos_; + ++length_; + + if (ARROW_PREDICT_FALSE(pending_pos_ >= pending_size_)) { + return CommitPendingData(); + } + return Status::OK(); + } + + virtual Status CommitPendingData() = 0; + + template + typename std::enable_if= sizeof(new_type), Status>::type + ExpandIntSizeInternal(); + template + typename std::enable_if<(sizeof(old_type) < sizeof(new_type)), Status>::type + ExpandIntSizeInternal(); + + std::shared_ptr data_; + uint8_t* raw_data_ = NULLPTR; + + const uint8_t start_int_size_; + uint8_t int_size_; + + static constexpr int32_t pending_size_ = 1024; + uint8_t pending_valid_[pending_size_]; + uint64_t pending_data_[pending_size_]; + int32_t pending_pos_ = 0; + bool pending_has_nulls_ = false; +}; + +} // namespace internal + +class ARROW_EXPORT AdaptiveUIntBuilder : public internal::AdaptiveIntBuilderBase { + public: + explicit AdaptiveUIntBuilder(uint8_t start_int_size, + MemoryPool* pool = default_memory_pool()); + + explicit AdaptiveUIntBuilder(MemoryPool* pool = default_memory_pool()) + : AdaptiveUIntBuilder(sizeof(uint8_t), pool) {} + + using internal::AdaptiveIntBuilderBase::Reset; + + /// Scalar append + Status Append(const uint64_t val) { return AppendInternal(val); } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a contiguous C array of values + /// \param[in] length the number of values to append + /// 
\param[in] valid_bytes an optional sequence of bytes where non-zero + /// indicates a valid (non-null) value + /// \return Status + Status AppendValues(const uint64_t* values, int64_t length, + const uint8_t* valid_bytes = NULLPTR); + + Status FinishInternal(std::shared_ptr* out) override; + + std::shared_ptr type() const override; + + protected: + Status CommitPendingData() override; + Status ExpandIntSize(uint8_t new_int_size); + + Status AppendValuesInternal(const uint64_t* values, int64_t length, + const uint8_t* valid_bytes); + + template + Status ExpandIntSizeN(); +}; + +class ARROW_EXPORT AdaptiveIntBuilder : public internal::AdaptiveIntBuilderBase { + public: + explicit AdaptiveIntBuilder(uint8_t start_int_size, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment); + + explicit AdaptiveIntBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : AdaptiveIntBuilder(sizeof(uint8_t), pool, alignment) {} + + using internal::AdaptiveIntBuilderBase::Reset; + + /// Scalar append + Status Append(const int64_t val) { return AppendInternal(static_cast(val)); } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a contiguous C array of values + /// \param[in] length the number of values to append + /// \param[in] valid_bytes an optional sequence of bytes where non-zero + /// indicates a valid (non-null) value + /// \return Status + Status AppendValues(const int64_t* values, int64_t length, + const uint8_t* valid_bytes = NULLPTR); + + Status FinishInternal(std::shared_ptr* out) override; + + std::shared_ptr type() const override; + + protected: + Status CommitPendingData() override; + Status ExpandIntSize(uint8_t new_int_size); + + Status AppendValuesInternal(const int64_t* values, int64_t length, + const uint8_t* valid_bytes); + + template + Status ExpandIntSizeN(); +}; + +/// @} + +} // namespace arrow diff --git 
a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_base.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_base.h new file mode 100644 index 0000000000000000000000000000000000000000..ecd2136f5d20ba126bd359977ea17f76c4fe23ed --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_base.h @@ -0,0 +1,371 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include // IWYU pragma: keep +#include +#include +#include +#include +#include + +#include "arrow/array/array_base.h" +#include "arrow/array/array_primitive.h" +#include "arrow/buffer.h" +#include "arrow/buffer_builder.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +namespace internal { + +template +class ArrayBuilderExtraOps { + public: + /// \brief Append a value from an optional or null if it has no value. + Status AppendOrNull(const std::optional& value) { + auto* self = static_cast(this); + return value.has_value() ? self->Append(*value) : self->AppendNull(); + } + + /// \brief Append a value from an optional or null if it has no value. 
+ /// + /// Unsafe methods don't check existing size. + void UnsafeAppendOrNull(const std::optional& value) { + auto* self = static_cast(this); + return value.has_value() ? self->UnsafeAppend(*value) : self->UnsafeAppendNull(); + } +}; + +} // namespace internal + +/// \defgroup numeric-builders Concrete builder subclasses for numeric types +/// @{ +/// @} + +/// \defgroup temporal-builders Concrete builder subclasses for temporal types +/// @{ +/// @} + +/// \defgroup binary-builders Concrete builder subclasses for binary types +/// @{ +/// @} + +/// \defgroup nested-builders Concrete builder subclasses for nested types +/// @{ +/// @} + +/// \defgroup dictionary-builders Concrete builder subclasses for dictionary types +/// @{ +/// @} + +/// \defgroup run-end-encoded-builders Concrete builder subclasses for run-end encoded +/// arrays +/// @{ +/// @} + +constexpr int64_t kMinBuilderCapacity = 1 << 5; +constexpr int64_t kListMaximumElements = std::numeric_limits::max() - 1; + +/// Base class for all data array builders. +/// +/// This class provides a facilities for incrementally building the null bitmap +/// (see Append methods) and as a side effect the current number of slots and +/// the null count. +/// +/// \note Users are expected to use builders as one of the concrete types below. +/// For example, ArrayBuilder* pointing to BinaryBuilder should be downcast before use. +class ARROW_EXPORT ArrayBuilder { + public: + explicit ArrayBuilder(MemoryPool* pool, int64_t alignment = kDefaultBufferAlignment) + : pool_(pool), alignment_(alignment), null_bitmap_builder_(pool, alignment) {} + + ARROW_DEFAULT_MOVE_AND_ASSIGN(ArrayBuilder); + + virtual ~ArrayBuilder() = default; + + /// For nested types. 
Since the objects are owned by this class instance, we + /// skip shared pointers and just return a raw pointer + ArrayBuilder* child(int i) { return children_[i].get(); } + + const std::shared_ptr& child_builder(int i) const { return children_[i]; } + + int num_children() const { return static_cast(children_.size()); } + + virtual int64_t length() const { return length_; } + int64_t null_count() const { return null_count_; } + int64_t capacity() const { return capacity_; } + + /// \brief Ensure that enough memory has been allocated to fit the indicated + /// number of total elements in the builder, including any that have already + /// been appended. Does not account for reallocations that may be due to + /// variable size data, like binary values. To make space for incremental + /// appends, use Reserve instead. + /// + /// \param[in] capacity the minimum number of total array values to + /// accommodate. Must be greater than the current capacity. + /// \return Status + virtual Status Resize(int64_t capacity); + + /// \brief Ensure that there is enough space allocated to append the indicated + /// number of elements without any further reallocation. Overallocation is + /// used in order to minimize the impact of incremental Reserve() calls. + /// Note that additional_capacity is relative to the current number of elements + /// rather than to the current capacity, so calls to Reserve() which are not + /// interspersed with addition of new elements may not increase the capacity. + /// + /// \param[in] additional_capacity the number of additional array values + /// \return Status + Status Reserve(int64_t additional_capacity) { + auto current_capacity = capacity(); + auto min_capacity = length() + additional_capacity; + if (min_capacity <= current_capacity) return Status::OK(); + + // leave growth factor up to BufferBuilder + auto new_capacity = BufferBuilder::GrowByFactor(current_capacity, min_capacity); + return Resize(new_capacity); + } + + /// Reset the builder. 
+ virtual void Reset(); + + /// \brief Append a null value to builder + virtual Status AppendNull() = 0; + /// \brief Append a number of null values to builder + virtual Status AppendNulls(int64_t length) = 0; + + /// \brief Append a non-null value to builder + /// + /// The appended value is an implementation detail, but the corresponding + /// memory slot is guaranteed to be initialized. + /// This method is useful when appending a null value to a parent nested type. + virtual Status AppendEmptyValue() = 0; + + /// \brief Append a number of non-null values to builder + /// + /// The appended values are an implementation detail, but the corresponding + /// memory slot is guaranteed to be initialized. + /// This method is useful when appending null values to a parent nested type. + virtual Status AppendEmptyValues(int64_t length) = 0; + + /// \brief Append a value from a scalar + Status AppendScalar(const Scalar& scalar) { return AppendScalar(scalar, 1); } + virtual Status AppendScalar(const Scalar& scalar, int64_t n_repeats); + virtual Status AppendScalars(const ScalarVector& scalars); + + /// \brief Append a range of values from an array. + /// + /// The given array must be the same type as the builder. + virtual Status AppendArraySlice(const ArraySpan& ARROW_ARG_UNUSED(array), + int64_t ARROW_ARG_UNUSED(offset), + int64_t ARROW_ARG_UNUSED(length)) { + return Status::NotImplemented("AppendArraySlice for builder for ", *type()); + } + + /// \brief Return result of builder as an internal generic ArrayData + /// object. Resets builder except for dictionary builder + /// + /// \param[out] out the finalized ArrayData object + /// \return Status + virtual Status FinishInternal(std::shared_ptr* out) = 0; + + /// \brief Return result of builder as an Array object. + /// + /// The builder is reset except for DictionaryBuilder. 
+ /// + /// \param[out] out the finalized Array object + /// \return Status + Status Finish(std::shared_ptr* out); + + /// \brief Return result of builder as an Array object. + /// + /// The builder is reset except for DictionaryBuilder. + /// + /// \return The finalized Array object + Result> Finish(); + + /// \brief Return the type of the built Array + virtual std::shared_ptr type() const = 0; + + protected: + /// Append to null bitmap + Status AppendToBitmap(bool is_valid); + + /// Vector append. Treat each zero byte as a null. If valid_bytes is null + /// assume all of length bits are valid. + Status AppendToBitmap(const uint8_t* valid_bytes, int64_t length); + + /// Uniform append. Append N times the same validity bit. + Status AppendToBitmap(int64_t num_bits, bool value); + + /// Set the next length bits to not null (i.e. valid). + Status SetNotNull(int64_t length); + + // Unsafe operations (don't check capacity/don't resize) + + void UnsafeAppendNull() { UnsafeAppendToBitmap(false); } + + // Append to null bitmap, update the length + void UnsafeAppendToBitmap(bool is_valid) { + null_bitmap_builder_.UnsafeAppend(is_valid); + ++length_; + if (!is_valid) ++null_count_; + } + + // Vector append. Treat each zero byte as a nullzero. If valid_bytes is null + // assume all of length bits are valid. + void UnsafeAppendToBitmap(const uint8_t* valid_bytes, int64_t length) { + if (valid_bytes == NULLPTR) { + return UnsafeSetNotNull(length); + } + null_bitmap_builder_.UnsafeAppend(valid_bytes, length); + length_ += length; + null_count_ = null_bitmap_builder_.false_count(); + } + + // Vector append. Copy from a given bitmap. If bitmap is null assume + // all of length bits are valid. 
+ void UnsafeAppendToBitmap(const uint8_t* bitmap, int64_t offset, int64_t length) { + if (bitmap == NULLPTR) { + return UnsafeSetNotNull(length); + } + null_bitmap_builder_.UnsafeAppend(bitmap, offset, length); + length_ += length; + null_count_ = null_bitmap_builder_.false_count(); + } + + // Append the same validity value a given number of times. + void UnsafeAppendToBitmap(const int64_t num_bits, bool value) { + if (value) { + UnsafeSetNotNull(num_bits); + } else { + UnsafeSetNull(num_bits); + } + } + + void UnsafeAppendToBitmap(const std::vector& is_valid); + + // Set the next validity bits to not null (i.e. valid). + void UnsafeSetNotNull(int64_t length); + + // Set the next validity bits to null (i.e. invalid). + void UnsafeSetNull(int64_t length); + + static Status TrimBuffer(const int64_t bytes_filled, ResizableBuffer* buffer); + + /// \brief Finish to an array of the specified ArrayType + template + Status FinishTyped(std::shared_ptr* out) { + std::shared_ptr out_untyped; + ARROW_RETURN_NOT_OK(Finish(&out_untyped)); + *out = std::static_pointer_cast(std::move(out_untyped)); + return Status::OK(); + } + + // Check the requested capacity for validity + Status CheckCapacity(int64_t new_capacity) { + if (ARROW_PREDICT_FALSE(new_capacity < 0)) { + return Status::Invalid( + "Resize capacity must be positive (requested: ", new_capacity, ")"); + } + + if (ARROW_PREDICT_FALSE(new_capacity < length_)) { + return Status::Invalid("Resize cannot downsize (requested: ", new_capacity, + ", current length: ", length_, ")"); + } + + return Status::OK(); + } + + // Check for array type + Status CheckArrayType(const std::shared_ptr& expected_type, + const Array& array, const char* message); + Status CheckArrayType(Type::type expected_type, const Array& array, + const char* message); + + MemoryPool* pool_; + int64_t alignment_; + + TypedBufferBuilder null_bitmap_builder_; + int64_t null_count_ = 0; + + // Array length, so far. 
Also, the index of the next element to be added + int64_t length_ = 0; + int64_t capacity_ = 0; + + // Child value array builders. These are owned by this class + std::vector> children_; + + private: + ARROW_DISALLOW_COPY_AND_ASSIGN(ArrayBuilder); +}; + +/// \brief Construct an empty ArrayBuilder corresponding to the data +/// type +/// \param[in] pool the MemoryPool to use for allocations +/// \param[in] type the data type to create the builder for +/// \param[out] out the created ArrayBuilder +ARROW_EXPORT +Status MakeBuilder(MemoryPool* pool, const std::shared_ptr& type, + std::unique_ptr* out); + +inline Result> MakeBuilder( + const std::shared_ptr& type, MemoryPool* pool = default_memory_pool()) { + std::unique_ptr out; + ARROW_RETURN_NOT_OK(MakeBuilder(pool, type, &out)); + return out; +} + +/// \brief Construct an empty ArrayBuilder corresponding to the data +/// type, where any top-level or nested dictionary builders return the +/// exact index type specified by the type. +ARROW_EXPORT +Status MakeBuilderExactIndex(MemoryPool* pool, const std::shared_ptr& type, + std::unique_ptr* out); + +inline Result> MakeBuilderExactIndex( + const std::shared_ptr& type, MemoryPool* pool = default_memory_pool()) { + std::unique_ptr out; + ARROW_RETURN_NOT_OK(MakeBuilderExactIndex(pool, type, &out)); + return out; +} + +/// \brief Construct an empty DictionaryBuilder initialized optionally +/// with a preexisting dictionary +/// \param[in] pool the MemoryPool to use for allocations +/// \param[in] type the dictionary type to create the builder for +/// \param[in] dictionary the initial dictionary, if any. 
May be nullptr +/// \param[out] out the created ArrayBuilder +ARROW_EXPORT +Status MakeDictionaryBuilder(MemoryPool* pool, const std::shared_ptr& type, + const std::shared_ptr& dictionary, + std::unique_ptr* out); + +inline Result> MakeDictionaryBuilder( + const std::shared_ptr& type, const std::shared_ptr& dictionary, + MemoryPool* pool = default_memory_pool()) { + std::unique_ptr out; + ARROW_RETURN_NOT_OK(MakeDictionaryBuilder(pool, type, dictionary, &out)); + return out; +} + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_binary.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_binary.h new file mode 100644 index 0000000000000000000000000000000000000000..442e4a26320a2eab2e10b57735827e738bf07344 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_binary.h @@ -0,0 +1,971 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/array/array_base.h" +#include "arrow/array/array_binary.h" +#include "arrow/array/builder_base.h" +#include "arrow/array/data.h" +#include "arrow/buffer.h" +#include "arrow/buffer_builder.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/util/binary_view_util.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \addtogroup binary-builders +/// +/// @{ + +// ---------------------------------------------------------------------- +// Binary and String + +template +class BaseBinaryBuilder + : public ArrayBuilder, + public internal::ArrayBuilderExtraOps, std::string_view> { + public: + using TypeClass = TYPE; + using offset_type = typename TypeClass::offset_type; + + explicit BaseBinaryBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + offsets_builder_(pool, alignment), + value_data_builder_(pool, alignment) {} + + BaseBinaryBuilder(const std::shared_ptr& type, MemoryPool* pool) + : BaseBinaryBuilder(pool) {} + + Status Append(const uint8_t* value, offset_type length) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppendNextOffset(); + // Safety check for UBSAN. + if (ARROW_PREDICT_TRUE(length > 0)) { + ARROW_RETURN_NOT_OK(ValidateOverflow(length)); + ARROW_RETURN_NOT_OK(value_data_builder_.Append(value, length)); + } + + UnsafeAppendToBitmap(true); + return Status::OK(); + } + + Status Append(const char* value, offset_type length) { + return Append(reinterpret_cast(value), length); + } + + Status Append(std::string_view value) { + return Append(value.data(), static_cast(value.size())); + } + + /// Extend the last appended value by appending more data at the end + /// + /// Unlike Append, this does not create a new offset. 
+ Status ExtendCurrent(const uint8_t* value, offset_type length) { + // Safety check for UBSAN. + if (ARROW_PREDICT_TRUE(length > 0)) { + ARROW_RETURN_NOT_OK(ValidateOverflow(length)); + ARROW_RETURN_NOT_OK(value_data_builder_.Append(value, length)); + } + return Status::OK(); + } + + Status ExtendCurrent(std::string_view value) { + return ExtendCurrent(reinterpret_cast(value.data()), + static_cast(value.size())); + } + + Status AppendNulls(int64_t length) final { + const int64_t num_bytes = value_data_builder_.length(); + ARROW_RETURN_NOT_OK(Reserve(length)); + for (int64_t i = 0; i < length; ++i) { + offsets_builder_.UnsafeAppend(static_cast(num_bytes)); + } + UnsafeAppendToBitmap(length, false); + return Status::OK(); + } + + Status AppendNull() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppendNextOffset(); + UnsafeAppendToBitmap(false); + return Status::OK(); + } + + Status AppendEmptyValue() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppendNextOffset(); + UnsafeAppendToBitmap(true); + return Status::OK(); + } + + Status AppendEmptyValues(int64_t length) final { + const int64_t num_bytes = value_data_builder_.length(); + ARROW_RETURN_NOT_OK(Reserve(length)); + for (int64_t i = 0; i < length; ++i) { + offsets_builder_.UnsafeAppend(static_cast(num_bytes)); + } + UnsafeAppendToBitmap(length, true); + return Status::OK(); + } + + /// \brief Append without checking capacity + /// + /// Offsets and data should have been presized using Reserve() and + /// ReserveData(), respectively. 
+ void UnsafeAppend(const uint8_t* value, offset_type length) { + UnsafeAppendNextOffset(); + value_data_builder_.UnsafeAppend(value, length); + UnsafeAppendToBitmap(true); + } + + void UnsafeAppend(const char* value, offset_type length) { + UnsafeAppend(reinterpret_cast(value), length); + } + + void UnsafeAppend(const std::string& value) { + UnsafeAppend(value.c_str(), static_cast(value.size())); + } + + void UnsafeAppend(std::string_view value) { + UnsafeAppend(value.data(), static_cast(value.size())); + } + + /// Like ExtendCurrent, but do not check capacity + void UnsafeExtendCurrent(const uint8_t* value, offset_type length) { + value_data_builder_.UnsafeAppend(value, length); + } + + void UnsafeExtendCurrent(std::string_view value) { + UnsafeExtendCurrent(reinterpret_cast(value.data()), + static_cast(value.size())); + } + + void UnsafeAppendNull() { + const int64_t num_bytes = value_data_builder_.length(); + offsets_builder_.UnsafeAppend(static_cast(num_bytes)); + UnsafeAppendToBitmap(false); + } + + void UnsafeAppendEmptyValue() { + const int64_t num_bytes = value_data_builder_.length(); + offsets_builder_.UnsafeAppend(static_cast(num_bytes)); + UnsafeAppendToBitmap(true); + } + + /// \brief Append a sequence of strings in one shot. 
+ /// + /// \param[in] values a vector of strings + /// \param[in] valid_bytes an optional sequence of bytes where non-zero + /// indicates a valid (non-null) value + /// \return Status + Status AppendValues(const std::vector& values, + const uint8_t* valid_bytes = NULLPTR) { + std::size_t total_length = std::accumulate( + values.begin(), values.end(), 0ULL, + [](uint64_t sum, const std::string& str) { return sum + str.size(); }); + ARROW_RETURN_NOT_OK(Reserve(values.size())); + ARROW_RETURN_NOT_OK(ReserveData(total_length)); + + if (valid_bytes != NULLPTR) { + for (std::size_t i = 0; i < values.size(); ++i) { + UnsafeAppendNextOffset(); + if (valid_bytes[i]) { + value_data_builder_.UnsafeAppend( + reinterpret_cast(values[i].data()), values[i].size()); + } + } + } else { + for (const auto& value : values) { + UnsafeAppendNextOffset(); + value_data_builder_.UnsafeAppend(reinterpret_cast(value.data()), + value.size()); + } + } + + UnsafeAppendToBitmap(valid_bytes, values.size()); + return Status::OK(); + } + + /// \brief Append a sequence of nul-terminated strings in one shot. + /// If one of the values is NULL, it is processed as a null + /// value even if the corresponding valid_bytes entry is 1. 
+ /// + /// \param[in] values a contiguous C array of nul-terminated char * + /// \param[in] length the number of values to append + /// \param[in] valid_bytes an optional sequence of bytes where non-zero + /// indicates a valid (non-null) value + /// \return Status + Status AppendValues(const char** values, int64_t length, + const uint8_t* valid_bytes = NULLPTR) { + std::size_t total_length = 0; + std::vector value_lengths(length); + bool have_null_value = false; + for (int64_t i = 0; i < length; ++i) { + if (values[i] != NULLPTR) { + auto value_length = strlen(values[i]); + value_lengths[i] = value_length; + total_length += value_length; + } else { + have_null_value = true; + } + } + ARROW_RETURN_NOT_OK(Reserve(length)); + ARROW_RETURN_NOT_OK(ReserveData(total_length)); + + if (valid_bytes) { + int64_t valid_bytes_offset = 0; + for (int64_t i = 0; i < length; ++i) { + UnsafeAppendNextOffset(); + if (valid_bytes[i]) { + if (values[i]) { + value_data_builder_.UnsafeAppend(reinterpret_cast(values[i]), + value_lengths[i]); + } else { + UnsafeAppendToBitmap(valid_bytes + valid_bytes_offset, + i - valid_bytes_offset); + UnsafeAppendToBitmap(false); + valid_bytes_offset = i + 1; + } + } + } + UnsafeAppendToBitmap(valid_bytes + valid_bytes_offset, length - valid_bytes_offset); + } else { + if (have_null_value) { + std::vector valid_vector(length, 0); + for (int64_t i = 0; i < length; ++i) { + UnsafeAppendNextOffset(); + if (values[i]) { + value_data_builder_.UnsafeAppend(reinterpret_cast(values[i]), + value_lengths[i]); + valid_vector[i] = 1; + } + } + UnsafeAppendToBitmap(valid_vector.data(), length); + } else { + for (int64_t i = 0; i < length; ++i) { + UnsafeAppendNextOffset(); + value_data_builder_.UnsafeAppend(reinterpret_cast(values[i]), + value_lengths[i]); + } + UnsafeAppendToBitmap(NULLPTR, length); + } + } + return Status::OK(); + } + + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override { + auto bitmap = 
array.GetValues(0, 0); + auto offsets = array.GetValues(1); + auto data = array.GetValues(2, 0); + auto total_length = offsets[offset + length] - offsets[offset]; + ARROW_RETURN_NOT_OK(Reserve(length)); + ARROW_RETURN_NOT_OK(ReserveData(total_length)); + for (int64_t i = 0; i < length; i++) { + if (!bitmap || bit_util::GetBit(bitmap, array.offset + offset + i)) { + const offset_type start = offsets[offset + i]; + const offset_type end = offsets[offset + i + 1]; + UnsafeAppend(data + start, end - start); + } else { + UnsafeAppendNull(); + } + } + return Status::OK(); + } + + void Reset() override { + ArrayBuilder::Reset(); + offsets_builder_.Reset(); + value_data_builder_.Reset(); + } + + Status ValidateOverflow(int64_t new_bytes) { + auto new_size = value_data_builder_.length() + new_bytes; + if (ARROW_PREDICT_FALSE(new_size > memory_limit())) { + return Status::CapacityError("array cannot contain more than ", memory_limit(), + " bytes, have ", new_size); + } else { + return Status::OK(); + } + } + + Status Resize(int64_t capacity) override { + ARROW_RETURN_NOT_OK(CheckCapacity(capacity)); + // One more than requested for offsets + ARROW_RETURN_NOT_OK(offsets_builder_.Resize(capacity + 1)); + return ArrayBuilder::Resize(capacity); + } + + /// \brief Ensures there is enough allocated capacity to append the indicated + /// number of bytes to the value data buffer without additional allocations + Status ReserveData(int64_t elements) { + ARROW_RETURN_NOT_OK(ValidateOverflow(elements)); + return value_data_builder_.Reserve(elements); + } + + Status FinishInternal(std::shared_ptr* out) override { + // Write final offset (values length) + ARROW_RETURN_NOT_OK(AppendNextOffset()); + + // These buffers' padding zeroed by BufferBuilder + std::shared_ptr offsets, value_data, null_bitmap; + ARROW_RETURN_NOT_OK(offsets_builder_.Finish(&offsets)); + ARROW_RETURN_NOT_OK(value_data_builder_.Finish(&value_data)); + ARROW_RETURN_NOT_OK(null_bitmap_builder_.Finish(&null_bitmap)); + + 
*out = ArrayData::Make(type(), length_, {null_bitmap, offsets, value_data}, + null_count_, 0); + Reset(); + return Status::OK(); + } + + /// \return data pointer of the value date builder + const uint8_t* value_data() const { return value_data_builder_.data(); } + /// \return size of values buffer so far + int64_t value_data_length() const { return value_data_builder_.length(); } + /// \return capacity of values buffer + int64_t value_data_capacity() const { return value_data_builder_.capacity(); } + + /// \return data pointer of the value date builder + const offset_type* offsets_data() const { return offsets_builder_.data(); } + + /// Temporary access to a value. + /// + /// This pointer becomes invalid on the next modifying operation. + const uint8_t* GetValue(int64_t i, offset_type* out_length) const { + const offset_type* offsets = offsets_builder_.data(); + const auto offset = offsets[i]; + if (i == (length_ - 1)) { + *out_length = static_cast(value_data_builder_.length()) - offset; + } else { + *out_length = offsets[i + 1] - offset; + } + return value_data_builder_.data() + offset; + } + + offset_type offset(int64_t i) const { return offsets_data()[i]; } + + /// Temporary access to a value. + /// + /// This view becomes invalid on the next modifying operation. 
+ std::string_view GetView(int64_t i) const { + offset_type value_length; + const uint8_t* value_data = GetValue(i, &value_length); + return std::string_view(reinterpret_cast(value_data), value_length); + } + + // Cannot make this a static attribute because of linking issues + static constexpr int64_t memory_limit() { + return std::numeric_limits::max() - 1; + } + + protected: + TypedBufferBuilder offsets_builder_; + TypedBufferBuilder value_data_builder_; + + Status AppendNextOffset() { + const int64_t num_bytes = value_data_builder_.length(); + return offsets_builder_.Append(static_cast(num_bytes)); + } + + void UnsafeAppendNextOffset() { + const int64_t num_bytes = value_data_builder_.length(); + offsets_builder_.UnsafeAppend(static_cast(num_bytes)); + } +}; + +/// \class BinaryBuilder +/// \brief Builder class for variable-length binary data +class ARROW_EXPORT BinaryBuilder : public BaseBinaryBuilder { + public: + using BaseBinaryBuilder::BaseBinaryBuilder; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + std::shared_ptr type() const override { return binary(); } +}; + +/// \class StringBuilder +/// \brief Builder class for UTF8 strings +class ARROW_EXPORT StringBuilder : public BinaryBuilder { + public: + using BinaryBuilder::BinaryBuilder; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + std::shared_ptr type() const override { return utf8(); } +}; + +/// \class LargeBinaryBuilder +/// \brief Builder class for large variable-length binary data +class ARROW_EXPORT LargeBinaryBuilder : public BaseBinaryBuilder { + public: + using BaseBinaryBuilder::BaseBinaryBuilder; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + std::shared_ptr type() const override { return large_binary(); } +}; + +/// \class 
LargeStringBuilder +/// \brief Builder class for large UTF8 strings +class ARROW_EXPORT LargeStringBuilder : public LargeBinaryBuilder { + public: + using LargeBinaryBuilder::LargeBinaryBuilder; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + std::shared_ptr type() const override { return large_utf8(); } +}; + +// ---------------------------------------------------------------------- +// BinaryViewBuilder, StringViewBuilder +// +// These builders do not support building raw pointer view arrays. + +namespace internal { + +// We allocate medium-sized memory chunks and accumulate data in those, which +// may result in some waste if there are many large-ish strings. If a string +// comes along that does not fit into a block, we allocate a new block and +// write into that. +// +// Later we can implement optimizations to continuing filling underfull blocks +// after encountering a large string that required allocating a new block. 
+class ARROW_EXPORT StringHeapBuilder { + public: + static constexpr int64_t kDefaultBlocksize = 32 << 10; // 32KB + + StringHeapBuilder(MemoryPool* pool, int64_t alignment) + : pool_(pool), alignment_(alignment) {} + + void SetBlockSize(int64_t blocksize) { blocksize_ = blocksize; } + + using c_type = BinaryViewType::c_type; + + template + std::conditional_t, c_type> Append(const uint8_t* value, + int64_t length) { + if (length <= BinaryViewType::kInlineSize) { + return util::ToInlineBinaryView(value, static_cast(length)); + } + + if constexpr (Safe) { + ARROW_RETURN_NOT_OK(Reserve(length)); + } + + auto v = util::ToNonInlineBinaryView(value, static_cast(length), + static_cast(blocks_.size() - 1), + current_offset_); + + memcpy(current_out_buffer_, value, static_cast(length)); + current_out_buffer_ += length; + current_remaining_bytes_ -= length; + current_offset_ += static_cast(length); + return v; + } + + static constexpr int64_t ValueSizeLimit() { + return std::numeric_limits::max(); + } + + /// \brief Ensure that the indicated number of bytes can be appended via + /// UnsafeAppend operations without the need to allocate more memory + Status Reserve(int64_t num_bytes) { + if (ARROW_PREDICT_FALSE(num_bytes > ValueSizeLimit())) { + return Status::CapacityError( + "BinaryView or StringView elements cannot reference " + "strings larger than 2GB"); + } + if (num_bytes > current_remaining_bytes_) { + ARROW_RETURN_NOT_OK(FinishLastBlock()); + current_remaining_bytes_ = num_bytes > blocksize_ ? 
num_bytes : blocksize_; + ARROW_ASSIGN_OR_RAISE( + std::shared_ptr new_block, + AllocateResizableBuffer(current_remaining_bytes_, alignment_, pool_)); + current_offset_ = 0; + current_out_buffer_ = new_block->mutable_data(); + blocks_.emplace_back(std::move(new_block)); + } + return Status::OK(); + } + + void Reset() { + current_offset_ = 0; + current_out_buffer_ = NULLPTR; + current_remaining_bytes_ = 0; + blocks_.clear(); + } + + int64_t current_remaining_bytes() const { return current_remaining_bytes_; } + + Result>> Finish() { + if (!blocks_.empty()) { + ARROW_RETURN_NOT_OK(FinishLastBlock()); + } + current_offset_ = 0; + current_out_buffer_ = NULLPTR; + current_remaining_bytes_ = 0; + return std::move(blocks_); + } + + private: + Status FinishLastBlock() { + if (current_remaining_bytes_ > 0) { + // Avoid leaking uninitialized bytes from the allocator + ARROW_RETURN_NOT_OK( + blocks_.back()->Resize(blocks_.back()->size() - current_remaining_bytes_, + /*shrink_to_fit=*/true)); + blocks_.back()->ZeroPadding(); + } + return Status::OK(); + } + + MemoryPool* pool_; + int64_t alignment_; + int64_t blocksize_ = kDefaultBlocksize; + std::vector> blocks_; + + int32_t current_offset_ = 0; + uint8_t* current_out_buffer_ = NULLPTR; + int64_t current_remaining_bytes_ = 0; +}; + +} // namespace internal + +class ARROW_EXPORT BinaryViewBuilder : public ArrayBuilder { + public: + using TypeClass = BinaryViewType; + + // this constructor provided for MakeBuilder compatibility + BinaryViewBuilder(const std::shared_ptr&, MemoryPool* pool); + + explicit BinaryViewBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + data_builder_(pool, alignment), + data_heap_builder_(pool, alignment) {} + + /// Set the size for future preallocated data buffers. + /// + /// The default size is 32KB, so after each 32KB of string data appended to the builder + /// a new data buffer will be allocated. 
Adjust this to a larger value to decrease the + /// frequency of allocation, or to a smaller value to lower the overhead of each + /// allocation. + void SetBlockSize(int64_t blocksize) { data_heap_builder_.SetBlockSize(blocksize); } + + /// The number of bytes which can be appended to this builder without allocating another + /// data buffer. + int64_t current_block_bytes_remaining() const { + return data_heap_builder_.current_remaining_bytes(); + } + + Status Append(const uint8_t* value, int64_t length) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppendToBitmap(true); + ARROW_ASSIGN_OR_RAISE(auto v, + data_heap_builder_.Append(value, length)); + data_builder_.UnsafeAppend(v); + return Status::OK(); + } + + Status Append(const char* value, int64_t length) { + return Append(reinterpret_cast(value), length); + } + + Status Append(std::string_view value) { + return Append(value.data(), static_cast(value.size())); + } + + /// \brief Append without checking capacity + /// + /// Builder should have been presized using Reserve() and ReserveData(), + /// respectively, and the value must not be larger than 2GB + void UnsafeAppend(const uint8_t* value, int64_t length) { + UnsafeAppendToBitmap(true); + auto v = data_heap_builder_.Append(value, length); + data_builder_.UnsafeAppend(v); + } + + void UnsafeAppend(const char* value, int64_t length) { + UnsafeAppend(reinterpret_cast(value), length); + } + + void UnsafeAppend(const std::string& value) { + UnsafeAppend(value.c_str(), static_cast(value.size())); + } + + void UnsafeAppend(std::string_view value) { + UnsafeAppend(value.data(), static_cast(value.size())); + } + + /// \brief Ensures there is enough allocated available capacity in the + /// out-of-line data heap to append the indicated number of bytes without + /// additional allocations + Status ReserveData(int64_t length); + + Status AppendNulls(int64_t length) final { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(length, 
BinaryViewType::c_type{}); + UnsafeSetNull(length); + return Status::OK(); + } + + /// \brief Append a single null element + Status AppendNull() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + data_builder_.UnsafeAppend(BinaryViewType::c_type{}); + UnsafeAppendToBitmap(false); + return Status::OK(); + } + + /// \brief Append a empty element (length-0 inline string) + Status AppendEmptyValue() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + data_builder_.UnsafeAppend(BinaryViewType::c_type{}); + UnsafeAppendToBitmap(true); + return Status::OK(); + } + + /// \brief Append several empty elements + Status AppendEmptyValues(int64_t length) final { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(length, BinaryViewType::c_type{}); + UnsafeSetNotNull(length); + return Status::OK(); + } + + void UnsafeAppendNull() { + data_builder_.UnsafeAppend(BinaryViewType::c_type{}); + UnsafeAppendToBitmap(false); + } + + void UnsafeAppendEmptyValue() { + data_builder_.UnsafeAppend(BinaryViewType::c_type{}); + UnsafeAppendToBitmap(true); + } + + /// \brief Append a slice of a BinaryViewArray passed as an ArraySpan. 
Copies + /// the underlying out-of-line string memory to avoid memory lifetime issues + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override; + + void Reset() override; + + Status Resize(int64_t capacity) override { + ARROW_RETURN_NOT_OK(CheckCapacity(capacity)); + capacity = std::max(capacity, kMinBuilderCapacity); + ARROW_RETURN_NOT_OK(data_builder_.Resize(capacity)); + return ArrayBuilder::Resize(capacity); + } + + Status FinishInternal(std::shared_ptr* out) override; + + std::shared_ptr type() const override { return binary_view(); } + + protected: + TypedBufferBuilder data_builder_; + + // Accumulates out-of-line data in fixed-size chunks which are then attached + // to the resulting ArrayData + internal::StringHeapBuilder data_heap_builder_; +}; + +class ARROW_EXPORT StringViewBuilder : public BinaryViewBuilder { + public: + using BinaryViewBuilder::BinaryViewBuilder; + std::shared_ptr type() const override { return utf8_view(); } +}; + +// ---------------------------------------------------------------------- +// FixedSizeBinaryBuilder + +class ARROW_EXPORT FixedSizeBinaryBuilder : public ArrayBuilder { + public: + using TypeClass = FixedSizeBinaryType; + + explicit FixedSizeBinaryBuilder(const std::shared_ptr& type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment); + + Status Append(const uint8_t* value) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend(value); + return Status::OK(); + } + + Status Append(const char* value) { + return Append(reinterpret_cast(value)); + } + + Status Append(std::string_view view) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend(view); + return Status::OK(); + } + + Status Append(const std::string& s) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend(s); + return Status::OK(); + } + + Status Append(const Buffer& s) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend(s); + return Status::OK(); + } + + Status Append(const std::shared_ptr& s) { 
return Append(*s); } + + template + Status Append(const std::array& value) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend( + std::string_view(reinterpret_cast(value.data()), value.size())); + return Status::OK(); + } + + Status AppendValues(const uint8_t* data, int64_t length, + const uint8_t* valid_bytes = NULLPTR); + + Status AppendValues(const uint8_t* data, int64_t length, const uint8_t* validity, + int64_t bitmap_offset); + + Status AppendNull() final; + Status AppendNulls(int64_t length) final; + + Status AppendEmptyValue() final; + Status AppendEmptyValues(int64_t length) final; + + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override { + return AppendValues( + array.GetValues(1, 0) + ((array.offset + offset) * byte_width_), length, + array.GetValues(0, 0), array.offset + offset); + } + + void UnsafeAppend(const uint8_t* value) { + UnsafeAppendToBitmap(true); + if (ARROW_PREDICT_TRUE(byte_width_ > 0)) { + byte_builder_.UnsafeAppend(value, byte_width_); + } + } + + void UnsafeAppend(const char* value) { + UnsafeAppend(reinterpret_cast(value)); + } + + void UnsafeAppend(std::string_view value) { +#ifndef NDEBUG + CheckValueSize(static_cast(value.size())); +#endif + UnsafeAppend(reinterpret_cast(value.data())); + } + + void UnsafeAppend(const Buffer& s) { UnsafeAppend(std::string_view{s}); } + + void UnsafeAppend(const std::shared_ptr& s) { UnsafeAppend(*s); } + + void UnsafeAppendNull() { + UnsafeAppendToBitmap(false); + byte_builder_.UnsafeAppend(/*num_copies=*/byte_width_, 0); + } + + Status ValidateOverflow(int64_t new_bytes) const { + auto new_size = byte_builder_.length() + new_bytes; + if (ARROW_PREDICT_FALSE(new_size > memory_limit())) { + return Status::CapacityError("array cannot contain more than ", memory_limit(), + " bytes, have ", new_size); + } else { + return Status::OK(); + } + } + + /// \brief Ensures there is enough allocated capacity to append the indicated + /// number of bytes to the value data 
buffer without additional allocations + Status ReserveData(int64_t elements) { + ARROW_RETURN_NOT_OK(ValidateOverflow(elements)); + return byte_builder_.Reserve(elements); + } + + void Reset() override; + Status Resize(int64_t capacity) override; + Status FinishInternal(std::shared_ptr* out) override; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + /// \return size of values buffer so far + int64_t value_data_length() const { return byte_builder_.length(); } + + int32_t byte_width() const { return byte_width_; } + + /// Temporary access to a value. + /// + /// This pointer becomes invalid on the next modifying operation. + const uint8_t* GetValue(int64_t i) const; + + /// Temporary access to a value. + /// + /// This view becomes invalid on the next modifying operation. + std::string_view GetView(int64_t i) const; + + static constexpr int64_t memory_limit() { + return std::numeric_limits::max() - 1; + } + + std::shared_ptr type() const override { + return fixed_size_binary(byte_width_); + } + + protected: + int32_t byte_width_; + BufferBuilder byte_builder_; + + /// Temporary access to a value. + /// + /// This pointer becomes invalid on the next modifying operation. 
+ uint8_t* GetMutableValue(int64_t i) { + uint8_t* data_ptr = byte_builder_.mutable_data(); + return data_ptr + i * byte_width_; + } + + void CheckValueSize(int64_t size); +}; + +/// @} + +// ---------------------------------------------------------------------- +// Chunked builders: build a sequence of BinaryArray or StringArray that are +// limited to a particular size (to the upper limit of 2GB) + +namespace internal { + +class ARROW_EXPORT ChunkedBinaryBuilder { + public: + explicit ChunkedBinaryBuilder(int32_t max_chunk_value_length, + MemoryPool* pool = default_memory_pool()); + + ChunkedBinaryBuilder(int32_t max_chunk_value_length, int32_t max_chunk_length, + MemoryPool* pool = default_memory_pool()); + + virtual ~ChunkedBinaryBuilder() = default; + + Status Append(const uint8_t* value, int32_t length) { + if (ARROW_PREDICT_FALSE(length + builder_->value_data_length() > + max_chunk_value_length_)) { + if (builder_->value_data_length() == 0) { + // The current item is larger than max_chunk_size_; + // this chunk will be oversize and hold *only* this item + ARROW_RETURN_NOT_OK(builder_->Append(value, length)); + return NextChunk(); + } + // The current item would cause builder_->value_data_length() to exceed + // max_chunk_size_, so finish this chunk and append the current item to the next + // chunk + ARROW_RETURN_NOT_OK(NextChunk()); + return Append(value, length); + } + + if (ARROW_PREDICT_FALSE(builder_->length() == max_chunk_length_)) { + // The current item would cause builder_->length() to exceed max_chunk_length_, so + // finish this chunk and append the current item to the next chunk + ARROW_RETURN_NOT_OK(NextChunk()); + } + + return builder_->Append(value, length); + } + + Status Append(std::string_view value) { + return Append(reinterpret_cast(value.data()), + static_cast(value.size())); + } + + Status AppendNull() { + if (ARROW_PREDICT_FALSE(builder_->length() == max_chunk_length_)) { + ARROW_RETURN_NOT_OK(NextChunk()); + } + return 
builder_->AppendNull(); + } + + Status Reserve(int64_t values); + + virtual Status Finish(ArrayVector* out); + + protected: + Status NextChunk(); + + // maximum total character data size per chunk + int64_t max_chunk_value_length_; + + // maximum elements allowed per chunk + int64_t max_chunk_length_ = kListMaximumElements; + + // when Reserve() would cause builder_ to exceed its max_chunk_length_, + // add to extra_capacity_ instead and wait to reserve until the next chunk + int64_t extra_capacity_ = 0; + + std::unique_ptr builder_; + std::vector> chunks_; +}; + +class ARROW_EXPORT ChunkedStringBuilder : public ChunkedBinaryBuilder { + public: + using ChunkedBinaryBuilder::ChunkedBinaryBuilder; + + Status Finish(ArrayVector* out) override; +}; + +} // namespace internal + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_decimal.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_decimal.h new file mode 100644 index 0000000000000000000000000000000000000000..a0bf0a04220842cceada0d0754ad6be4e41a3093 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_decimal.h @@ -0,0 +1,164 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/array/array_decimal.h" +#include "arrow/array/builder_base.h" +#include "arrow/array/builder_binary.h" +#include "arrow/array/data.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \addtogroup numeric-builders +/// +/// @{ + +class ARROW_EXPORT Decimal32Builder : public FixedSizeBinaryBuilder { + public: + using TypeClass = Decimal32Type; + using ValueType = Decimal32; + + explicit Decimal32Builder(const std::shared_ptr& type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment); + + using FixedSizeBinaryBuilder::Append; + using FixedSizeBinaryBuilder::AppendValues; + using FixedSizeBinaryBuilder::Reset; + + Status Append(Decimal32 val); + void UnsafeAppend(Decimal32 val); + void UnsafeAppend(std::string_view val); + + Status FinishInternal(std::shared_ptr* out) override; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + std::shared_ptr type() const override { return decimal_type_; } + + protected: + std::shared_ptr decimal_type_; +}; + +class ARROW_EXPORT Decimal64Builder : public FixedSizeBinaryBuilder { + public: + using TypeClass = Decimal64Type; + using ValueType = Decimal64; + + explicit Decimal64Builder(const std::shared_ptr& type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment); + + using FixedSizeBinaryBuilder::Append; + using FixedSizeBinaryBuilder::AppendValues; + using FixedSizeBinaryBuilder::Reset; + + Status Append(Decimal64 val); + void UnsafeAppend(Decimal64 val); + void UnsafeAppend(std::string_view val); + + Status FinishInternal(std::shared_ptr* out) override; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* 
out) { return FinishTyped(out); } + + std::shared_ptr type() const override { return decimal_type_; } + + protected: + std::shared_ptr decimal_type_; +}; + +class ARROW_EXPORT Decimal128Builder : public FixedSizeBinaryBuilder { + public: + using TypeClass = Decimal128Type; + using ValueType = Decimal128; + + explicit Decimal128Builder(const std::shared_ptr& type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment); + + using FixedSizeBinaryBuilder::Append; + using FixedSizeBinaryBuilder::AppendValues; + using FixedSizeBinaryBuilder::Reset; + + Status Append(Decimal128 val); + void UnsafeAppend(Decimal128 val); + void UnsafeAppend(std::string_view val); + + Status FinishInternal(std::shared_ptr* out) override; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + std::shared_ptr type() const override { return decimal_type_; } + + protected: + std::shared_ptr decimal_type_; +}; + +class ARROW_EXPORT Decimal256Builder : public FixedSizeBinaryBuilder { + public: + using TypeClass = Decimal256Type; + using ValueType = Decimal256; + + explicit Decimal256Builder(const std::shared_ptr& type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment); + + using FixedSizeBinaryBuilder::Append; + using FixedSizeBinaryBuilder::AppendValues; + using FixedSizeBinaryBuilder::Reset; + + Status Append(const Decimal256& val); + void UnsafeAppend(const Decimal256& val); + void UnsafeAppend(std::string_view val); + + Status FinishInternal(std::shared_ptr* out) override; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + std::shared_ptr type() const override { return decimal_type_; } + + protected: + std::shared_ptr decimal_type_; +}; + +using DecimalBuilder = Decimal128Builder; + +/// @} + +} // namespace arrow diff --git 
a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_dict.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_dict.h new file mode 100644 index 0000000000000000000000000000000000000000..116c82049eea9ea49a716452090297f57be4eb6b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_dict.h @@ -0,0 +1,728 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include + +#include "arrow/array/array_base.h" +#include "arrow/array/array_binary.h" +#include "arrow/array/builder_adaptive.h" // IWYU pragma: export +#include "arrow/array/builder_base.h" // IWYU pragma: export +#include "arrow/array/builder_primitive.h" // IWYU pragma: export +#include "arrow/array/data.h" +#include "arrow/array/util.h" +#include "arrow/scalar.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/type_traits.h" +#include "arrow/util/bit_block_counter.h" +#include "arrow/util/checked_cast.h" +#include "arrow/util/decimal.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +// ---------------------------------------------------------------------- +// Dictionary builder + +namespace internal { + +template +struct DictionaryValue { + using type = typename T::c_type; + using PhysicalType = T; +}; + +template +struct DictionaryValue> { + using type = std::string_view; + using PhysicalType = + typename std::conditional::value, + BinaryType, LargeBinaryType>::type; +}; + +template +struct DictionaryValue> { + using type = std::string_view; + using PhysicalType = BinaryViewType; +}; + +template +struct DictionaryValue> { + using type = std::string_view; + using PhysicalType = BinaryType; +}; + +class ARROW_EXPORT DictionaryMemoTable { + public: + DictionaryMemoTable(MemoryPool* pool, const std::shared_ptr& type); + DictionaryMemoTable(MemoryPool* pool, const std::shared_ptr& dictionary); + ~DictionaryMemoTable(); + + Status GetArrayData(int64_t start_offset, std::shared_ptr* out); + + /// \brief Insert new memo values + Status InsertValues(const Array& values); + + int32_t size() const; + + template + Status GetOrInsert(typename DictionaryValue::type value, int32_t* out) { + // We want to keep the DictionaryMemoTable implementation private, also we can't + // use extern template classes because of compiler issues (MinGW?). 
Instead, + // we expose explicit function overrides for each supported physical type. + const typename DictionaryValue::PhysicalType* physical_type = NULLPTR; + return GetOrInsert(physical_type, value, out); + } + + private: + Status GetOrInsert(const BooleanType*, bool value, int32_t* out); + Status GetOrInsert(const Int8Type*, int8_t value, int32_t* out); + Status GetOrInsert(const Int16Type*, int16_t value, int32_t* out); + Status GetOrInsert(const Int32Type*, int32_t value, int32_t* out); + Status GetOrInsert(const Int64Type*, int64_t value, int32_t* out); + Status GetOrInsert(const UInt8Type*, uint8_t value, int32_t* out); + Status GetOrInsert(const UInt16Type*, uint16_t value, int32_t* out); + Status GetOrInsert(const UInt32Type*, uint32_t value, int32_t* out); + Status GetOrInsert(const UInt64Type*, uint64_t value, int32_t* out); + Status GetOrInsert(const DurationType*, int64_t value, int32_t* out); + Status GetOrInsert(const TimestampType*, int64_t value, int32_t* out); + Status GetOrInsert(const Date32Type*, int32_t value, int32_t* out); + Status GetOrInsert(const Date64Type*, int64_t value, int32_t* out); + Status GetOrInsert(const Time32Type*, int32_t value, int32_t* out); + Status GetOrInsert(const Time64Type*, int64_t value, int32_t* out); + Status GetOrInsert(const MonthDayNanoIntervalType*, + MonthDayNanoIntervalType::MonthDayNanos value, int32_t* out); + Status GetOrInsert(const DayTimeIntervalType*, + DayTimeIntervalType::DayMilliseconds value, int32_t* out); + Status GetOrInsert(const MonthIntervalType*, int32_t value, int32_t* out); + Status GetOrInsert(const FloatType*, float value, int32_t* out); + Status GetOrInsert(const DoubleType*, double value, int32_t* out); + + Status GetOrInsert(const BinaryType*, std::string_view value, int32_t* out); + Status GetOrInsert(const LargeBinaryType*, std::string_view value, int32_t* out); + Status GetOrInsert(const BinaryViewType*, std::string_view value, int32_t* out); + + class DictionaryMemoTableImpl; + 
std::unique_ptr impl_; +}; + +} // namespace internal + +/// \addtogroup dictionary-builders +/// +/// @{ + +namespace internal { + +/// \brief Array builder for created encoded DictionaryArray from +/// dense array +/// +/// Unlike other builders, dictionary builder does not completely +/// reset the state on Finish calls. +template +class DictionaryBuilderBase : public ArrayBuilder { + public: + using TypeClass = DictionaryType; + using Value = typename DictionaryValue::type; + + // WARNING: the type given below is the value type, not the DictionaryType. + // The DictionaryType is instantiated on the Finish() call. + template + DictionaryBuilderBase(uint8_t start_int_size, + enable_if_t::value && + !is_fixed_size_binary_type::value, + const std::shared_ptr&> + value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(-1), + indices_builder_(start_int_size, pool, alignment), + value_type_(value_type) {} + + template + explicit DictionaryBuilderBase( + enable_if_t::value, const std::shared_ptr&> + value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(-1), + indices_builder_(pool, alignment), + value_type_(value_type) {} + + template + explicit DictionaryBuilderBase( + const std::shared_ptr& index_type, + enable_if_t::value, const std::shared_ptr&> + value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(-1), + indices_builder_(index_type, pool, alignment), + value_type_(value_type) {} + + template + 
DictionaryBuilderBase(uint8_t start_int_size, + enable_if_t::value && + is_fixed_size_binary_type::value, + const std::shared_ptr&> + value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(static_cast(*value_type).byte_width()), + indices_builder_(start_int_size, pool, alignment), + value_type_(value_type) {} + + template + explicit DictionaryBuilderBase( + enable_if_fixed_size_binary&> value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(static_cast(*value_type).byte_width()), + indices_builder_(pool, alignment), + value_type_(value_type) {} + + template + explicit DictionaryBuilderBase( + const std::shared_ptr& index_type, + enable_if_fixed_size_binary&> value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(static_cast(*value_type).byte_width()), + indices_builder_(index_type, pool, alignment), + value_type_(value_type) {} + + template + explicit DictionaryBuilderBase( + enable_if_parameter_free pool = default_memory_pool()) + : DictionaryBuilderBase(TypeTraits::type_singleton(), pool) {} + + // This constructor doesn't check for errors. Use InsertMemoValues instead. 
+ explicit DictionaryBuilderBase(const std::shared_ptr& dictionary, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, dictionary)), + delta_offset_(0), + byte_width_(-1), + indices_builder_(pool, alignment), + value_type_(dictionary->type()) {} + + ~DictionaryBuilderBase() override = default; + + /// \brief The current number of entries in the dictionary + int64_t dictionary_length() const { return memo_table_->size(); } + + /// \brief The value byte width (for FixedSizeBinaryType) + template + enable_if_fixed_size_binary byte_width() const { + return byte_width_; + } + + /// \brief Append a scalar value + Status Append(Value value) { + ARROW_RETURN_NOT_OK(Reserve(1)); + + int32_t memo_index; + ARROW_RETURN_NOT_OK(memo_table_->GetOrInsert(value, &memo_index)); + ARROW_RETURN_NOT_OK(indices_builder_.Append(memo_index)); + length_ += 1; + + return Status::OK(); + } + + /// \brief Append a fixed-width string (only for FixedSizeBinaryType) + template + enable_if_fixed_size_binary Append(const uint8_t* value) { + return Append(std::string_view(reinterpret_cast(value), byte_width_)); + } + + /// \brief Append a fixed-width string (only for FixedSizeBinaryType) + template + enable_if_fixed_size_binary Append(const char* value) { + return Append(std::string_view(value, byte_width_)); + } + + /// \brief Append a string (only for binary types) + template + enable_if_binary_like Append(const uint8_t* value, int32_t length) { + return Append(reinterpret_cast(value), length); + } + + /// \brief Append a string (only for binary types) + template + enable_if_binary_like Append(const char* value, int32_t length) { + return Append(std::string_view(value, length)); + } + + /// \brief Append a string (only for string types) + template + enable_if_string_like Append(const char* value, int32_t length) { + return Append(std::string_view(value, length)); + } + 
+ /// \brief Append a decimal (only for Decimal32/64/128/256 Type) + template ::CType> + enable_if_decimal Append(const CType& value) { + auto bytes = value.ToBytes(); + return Append(bytes.data(), static_cast(bytes.size())); + } + + /// \brief Append a scalar null value + Status AppendNull() final { + length_ += 1; + null_count_ += 1; + + return indices_builder_.AppendNull(); + } + + Status AppendNulls(int64_t length) final { + length_ += length; + null_count_ += length; + + return indices_builder_.AppendNulls(length); + } + + Status AppendEmptyValue() final { + length_ += 1; + + return indices_builder_.AppendEmptyValue(); + } + + Status AppendEmptyValues(int64_t length) final { + length_ += length; + + return indices_builder_.AppendEmptyValues(length); + } + + Status AppendScalar(const Scalar& scalar, int64_t n_repeats) override { + if (!scalar.is_valid) return AppendNulls(n_repeats); + + const auto& dict_ty = internal::checked_cast(*scalar.type); + const DictionaryScalar& dict_scalar = + internal::checked_cast(scalar); + const auto& dict = internal::checked_cast::ArrayType&>( + *dict_scalar.value.dictionary); + ARROW_RETURN_NOT_OK(Reserve(n_repeats)); + switch (dict_ty.index_type()->id()) { + case Type::UINT8: + return AppendScalarImpl(dict, *dict_scalar.value.index, n_repeats); + case Type::INT8: + return AppendScalarImpl(dict, *dict_scalar.value.index, n_repeats); + case Type::UINT16: + return AppendScalarImpl(dict, *dict_scalar.value.index, n_repeats); + case Type::INT16: + return AppendScalarImpl(dict, *dict_scalar.value.index, n_repeats); + case Type::UINT32: + return AppendScalarImpl(dict, *dict_scalar.value.index, n_repeats); + case Type::INT32: + return AppendScalarImpl(dict, *dict_scalar.value.index, n_repeats); + case Type::UINT64: + return AppendScalarImpl(dict, *dict_scalar.value.index, n_repeats); + case Type::INT64: + return AppendScalarImpl(dict, *dict_scalar.value.index, n_repeats); + default: + return Status::TypeError("Invalid index type: ", 
dict_ty); + } + return Status::OK(); + } + + Status AppendScalars(const ScalarVector& scalars) override { + for (const auto& scalar : scalars) { + ARROW_RETURN_NOT_OK(AppendScalar(*scalar, /*n_repeats=*/1)); + } + return Status::OK(); + } + + Status AppendArraySlice(const ArraySpan& array, int64_t offset, int64_t length) final { + // Visit the indices and insert the unpacked values. + const auto& dict_ty = internal::checked_cast(*array.type); + // See if possible to avoid using ToArrayData here + const typename TypeTraits::ArrayType dict(array.dictionary().ToArrayData()); + ARROW_RETURN_NOT_OK(Reserve(length)); + switch (dict_ty.index_type()->id()) { + case Type::UINT8: + return AppendArraySliceImpl(dict, array, offset, length); + case Type::INT8: + return AppendArraySliceImpl(dict, array, offset, length); + case Type::UINT16: + return AppendArraySliceImpl(dict, array, offset, length); + case Type::INT16: + return AppendArraySliceImpl(dict, array, offset, length); + case Type::UINT32: + return AppendArraySliceImpl(dict, array, offset, length); + case Type::INT32: + return AppendArraySliceImpl(dict, array, offset, length); + case Type::UINT64: + return AppendArraySliceImpl(dict, array, offset, length); + case Type::INT64: + return AppendArraySliceImpl(dict, array, offset, length); + default: + return Status::TypeError("Invalid index type: ", dict_ty); + } + return Status::OK(); + } + + /// \brief Insert values into the dictionary's memo, but do not append any + /// indices. Can be used to initialize a new builder with known dictionary + /// values + /// \param[in] values dictionary values to add to memo. 
Type must match + /// builder type + Status InsertMemoValues(const Array& values) { + return memo_table_->InsertValues(values); + } + + /// \brief Append a whole dense array to the builder + template + enable_if_t::value, Status> AppendArray( + const Array& array) { + using ArrayType = typename TypeTraits::ArrayType; + +#ifndef NDEBUG + ARROW_RETURN_NOT_OK(ArrayBuilder::CheckArrayType( + value_type_, array, "Wrong value type of array to be appended")); +#endif + + const auto& concrete_array = static_cast(array); + for (int64_t i = 0; i < array.length(); i++) { + if (array.IsNull(i)) { + ARROW_RETURN_NOT_OK(AppendNull()); + } else { + ARROW_RETURN_NOT_OK(Append(concrete_array.GetView(i))); + } + } + return Status::OK(); + } + + template + enable_if_fixed_size_binary AppendArray(const Array& array) { +#ifndef NDEBUG + ARROW_RETURN_NOT_OK(ArrayBuilder::CheckArrayType( + value_type_, array, "Wrong value type of array to be appended")); +#endif + + const auto& concrete_array = static_cast(array); + for (int64_t i = 0; i < array.length(); i++) { + if (array.IsNull(i)) { + ARROW_RETURN_NOT_OK(AppendNull()); + } else { + ARROW_RETURN_NOT_OK(Append(concrete_array.GetValue(i))); + } + } + return Status::OK(); + } + + void Reset() override { + // Perform a partial reset. 
Call ResetFull to also reset the accumulated + // dictionary values + ArrayBuilder::Reset(); + indices_builder_.Reset(); + } + + /// \brief Reset and also clear accumulated dictionary values in memo table + void ResetFull() { + Reset(); + memo_table_.reset(new internal::DictionaryMemoTable(pool_, value_type_)); + } + + Status Resize(int64_t capacity) override { + ARROW_RETURN_NOT_OK(CheckCapacity(capacity)); + capacity = std::max(capacity, kMinBuilderCapacity); + ARROW_RETURN_NOT_OK(indices_builder_.Resize(capacity)); + capacity_ = indices_builder_.capacity(); + return Status::OK(); + } + + /// \brief Return dictionary indices and a delta dictionary since the last + /// time that Finish or FinishDelta were called, and reset state of builder + /// (except the memo table) + Status FinishDelta(std::shared_ptr* out_indices, + std::shared_ptr* out_delta) { + std::shared_ptr indices_data; + std::shared_ptr delta_data; + ARROW_RETURN_NOT_OK(FinishWithDictOffset(delta_offset_, &indices_data, &delta_data)); + *out_indices = MakeArray(indices_data); + *out_delta = MakeArray(delta_data); + return Status::OK(); + } + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + std::shared_ptr type() const override { + return ::arrow::dictionary(indices_builder_.type(), value_type_); + } + + protected: + template + Status AppendArraySliceImpl(const typename TypeTraits::ArrayType& dict, + const ArraySpan& array, int64_t offset, int64_t length) { + const c_type* values = array.GetValues(1) + offset; + return VisitBitBlocks( + array.buffers[0].data, array.offset + offset, length, + [&](const int64_t position) { + const int64_t index = static_cast(values[position]); + if (dict.IsValid(index)) { + return Append(dict.GetView(index)); + } + return AppendNull(); + }, + [&]() { return AppendNull(); }); + } + + template + Status AppendScalarImpl(const typename TypeTraits::ArrayType& dict, + const Scalar& 
index_scalar, int64_t n_repeats) { + using ScalarType = typename TypeTraits::ScalarType; + const auto index = internal::checked_cast(index_scalar).value; + if (index_scalar.is_valid && dict.IsValid(index)) { + const auto& value = dict.GetView(index); + for (int64_t i = 0; i < n_repeats; i++) { + ARROW_RETURN_NOT_OK(Append(value)); + } + return Status::OK(); + } + return AppendNulls(n_repeats); + } + + Status FinishInternal(std::shared_ptr* out) override { + std::shared_ptr dictionary; + ARROW_RETURN_NOT_OK(FinishWithDictOffset(/*offset=*/0, out, &dictionary)); + + // Set type of array data to the right dictionary type + (*out)->type = type(); + (*out)->dictionary = dictionary; + return Status::OK(); + } + + Status FinishWithDictOffset(int64_t dict_offset, + std::shared_ptr* out_indices, + std::shared_ptr* out_dictionary) { + // Finalize indices array + ARROW_RETURN_NOT_OK(indices_builder_.FinishInternal(out_indices)); + + // Generate dictionary array from hash table contents + ARROW_RETURN_NOT_OK(memo_table_->GetArrayData(dict_offset, out_dictionary)); + delta_offset_ = memo_table_->size(); + + // Update internals for further uses of this DictionaryBuilder + ArrayBuilder::Reset(); + return Status::OK(); + } + + std::unique_ptr memo_table_; + + // The size of the dictionary memo at last invocation of Finish, to use in + // FinishDelta for computing dictionary deltas + int32_t delta_offset_; + + // Only used for FixedSizeBinaryType + int32_t byte_width_; + + BuilderType indices_builder_; + std::shared_ptr value_type_; +}; + +template +class DictionaryBuilderBase : public ArrayBuilder { + public: + template + DictionaryBuilderBase( + enable_if_t::value, uint8_t> + start_int_size, + const std::shared_ptr& value_type, + MemoryPool* pool = default_memory_pool()) + : ArrayBuilder(pool), indices_builder_(start_int_size, pool) {} + + explicit DictionaryBuilderBase(const std::shared_ptr& value_type, + MemoryPool* pool = default_memory_pool()) + : ArrayBuilder(pool), 
indices_builder_(pool) {} + + explicit DictionaryBuilderBase(const std::shared_ptr& index_type, + const std::shared_ptr& value_type, + MemoryPool* pool = default_memory_pool()) + : ArrayBuilder(pool), indices_builder_(index_type, pool) {} + + template + explicit DictionaryBuilderBase( + enable_if_t::value, uint8_t> + start_int_size, + MemoryPool* pool = default_memory_pool()) + : ArrayBuilder(pool), indices_builder_(start_int_size, pool) {} + + explicit DictionaryBuilderBase(MemoryPool* pool = default_memory_pool()) + : ArrayBuilder(pool), indices_builder_(pool) {} + + explicit DictionaryBuilderBase(const std::shared_ptr& dictionary, + MemoryPool* pool = default_memory_pool()) + : ArrayBuilder(pool), indices_builder_(pool) {} + + /// \brief Append a scalar null value + Status AppendNull() final { + length_ += 1; + null_count_ += 1; + + return indices_builder_.AppendNull(); + } + + Status AppendNulls(int64_t length) final { + length_ += length; + null_count_ += length; + + return indices_builder_.AppendNulls(length); + } + + Status AppendEmptyValue() final { + length_ += 1; + + return indices_builder_.AppendEmptyValue(); + } + + Status AppendEmptyValues(int64_t length) final { + length_ += length; + + return indices_builder_.AppendEmptyValues(length); + } + + /// \brief Append a whole dense array to the builder + Status AppendArray(const Array& array) { +#ifndef NDEBUG + ARROW_RETURN_NOT_OK(ArrayBuilder::CheckArrayType( + Type::NA, array, "Wrong value type of array to be appended")); +#endif + for (int64_t i = 0; i < array.length(); i++) { + ARROW_RETURN_NOT_OK(AppendNull()); + } + return Status::OK(); + } + + Status Resize(int64_t capacity) override { + ARROW_RETURN_NOT_OK(CheckCapacity(capacity)); + capacity = std::max(capacity, kMinBuilderCapacity); + + ARROW_RETURN_NOT_OK(indices_builder_.Resize(capacity)); + capacity_ = indices_builder_.capacity(); + return Status::OK(); + } + + Status FinishInternal(std::shared_ptr* out) override { + 
ARROW_RETURN_NOT_OK(indices_builder_.FinishInternal(out)); + (*out)->type = dictionary((*out)->type, null()); + (*out)->dictionary = NullArray(0).data(); + return Status::OK(); + } + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + std::shared_ptr type() const override { + return ::arrow::dictionary(indices_builder_.type(), null()); + } + + protected: + BuilderType indices_builder_; +}; + +} // namespace internal + +/// \brief A DictionaryArray builder that uses AdaptiveIntBuilder to return the +/// smallest index size that can accommodate the dictionary indices +template +class DictionaryBuilder : public internal::DictionaryBuilderBase { + public: + using BASE = internal::DictionaryBuilderBase; + using BASE::BASE; + + /// \brief Append dictionary indices directly without modifying memo + /// + /// NOTE: Experimental API + Status AppendIndices(const int64_t* values, int64_t length, + const uint8_t* valid_bytes = NULLPTR) { + int64_t null_count_before = this->indices_builder_.null_count(); + ARROW_RETURN_NOT_OK(this->indices_builder_.AppendValues(values, length, valid_bytes)); + this->capacity_ = this->indices_builder_.capacity(); + this->length_ += length; + this->null_count_ += this->indices_builder_.null_count() - null_count_before; + return Status::OK(); + } +}; + +/// \brief A DictionaryArray builder that always returns int32 dictionary +/// indices so that data cast to dictionary form will have a consistent index +/// type, e.g. 
for creating a ChunkedArray +template +class Dictionary32Builder : public internal::DictionaryBuilderBase { + public: + using BASE = internal::DictionaryBuilderBase; + using BASE::BASE; + + /// \brief Append dictionary indices directly without modifying memo + /// + /// NOTE: Experimental API + Status AppendIndices(const int32_t* values, int64_t length, + const uint8_t* valid_bytes = NULLPTR) { + int64_t null_count_before = this->indices_builder_.null_count(); + ARROW_RETURN_NOT_OK(this->indices_builder_.AppendValues(values, length, valid_bytes)); + this->capacity_ = this->indices_builder_.capacity(); + this->length_ += length; + this->null_count_ += this->indices_builder_.null_count() - null_count_before; + return Status::OK(); + } +}; + +// ---------------------------------------------------------------------- +// Binary / Unicode builders +// (compatibility aliases; those used to be derived classes with additional +// Append() overloads, but they have been folded into DictionaryBuilderBase) + +using BinaryDictionaryBuilder = DictionaryBuilder; +using StringDictionaryBuilder = DictionaryBuilder; +using BinaryDictionary32Builder = Dictionary32Builder; +using StringDictionary32Builder = Dictionary32Builder; + +/// @} + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_primitive.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_primitive.h new file mode 100644 index 0000000000000000000000000000000000000000..de7af1b46bdee2f7cecb5978bf84950bfac9b274 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_primitive.h @@ -0,0 +1,556 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/array/builder_base.h" +#include "arrow/array/data.h" +#include "arrow/result.h" +#include "arrow/type.h" +#include "arrow/type_traits.h" + +namespace arrow { + +class ARROW_EXPORT NullBuilder : public ArrayBuilder { + public: + explicit NullBuilder(MemoryPool* pool = default_memory_pool(), + int64_t ARROW_ARG_UNUSED(alignment) = kDefaultBufferAlignment) + : ArrayBuilder(pool) {} + + explicit NullBuilder(const std::shared_ptr& ARROW_ARG_UNUSED(type), + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : NullBuilder(pool, alignment) {} + + /// \brief Append the specified number of null elements + Status AppendNulls(int64_t length) final { + if (length < 0) return Status::Invalid("length must be positive"); + null_count_ += length; + length_ += length; + return Status::OK(); + } + + /// \brief Append a single null element + Status AppendNull() final { return AppendNulls(1); } + + Status AppendEmptyValues(int64_t length) final { return AppendNulls(length); } + + Status AppendEmptyValue() final { return AppendEmptyValues(1); } + + Status Append(std::nullptr_t) { return AppendNull(); } + + Status AppendArraySlice(const ArraySpan&, int64_t, int64_t length) override { + return AppendNulls(length); + } + + Status FinishInternal(std::shared_ptr* out) override; + + /// \cond FALSE + using 
ArrayBuilder::Finish; + /// \endcond + + std::shared_ptr type() const override { return null(); } + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } +}; + +/// \addtogroup numeric-builders +/// +/// @{ + +/// Base class for all Builders that emit an Array of a scalar numerical type. +template +class NumericBuilder + : public ArrayBuilder, + public internal::ArrayBuilderExtraOps, typename T::c_type> { + public: + using TypeClass = T; + using value_type = typename T::c_type; + using ArrayType = typename TypeTraits::ArrayType; + + template + explicit NumericBuilder( + enable_if_parameter_free pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + type_(TypeTraits::type_singleton()), + data_builder_(pool, alignment) {} + + NumericBuilder(const std::shared_ptr& type, MemoryPool* pool, + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), type_(type), data_builder_(pool, alignment) {} + + /// Append a single scalar and increase the size if necessary. 
+ Status Append(const value_type val) { + ARROW_RETURN_NOT_OK(ArrayBuilder::Reserve(1)); + UnsafeAppend(val); + return Status::OK(); + } + + /// Write nulls as uint8_t* (0 value indicates null) into pre-allocated memory + /// The memory at the corresponding data slot is set to 0 to prevent + /// uninitialized memory access + Status AppendNulls(int64_t length) final { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(length, value_type{}); // zero + UnsafeSetNull(length); + return Status::OK(); + } + + /// \brief Append a single null element + Status AppendNull() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + data_builder_.UnsafeAppend(value_type{}); // zero + UnsafeAppendToBitmap(false); + return Status::OK(); + } + + /// \brief Append a empty element + Status AppendEmptyValue() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + data_builder_.UnsafeAppend(value_type{}); // zero + UnsafeAppendToBitmap(true); + return Status::OK(); + } + + /// \brief Append several empty elements + Status AppendEmptyValues(int64_t length) final { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(length, value_type{}); // zero + UnsafeSetNotNull(length); + return Status::OK(); + } + + value_type GetValue(int64_t index) const { return data_builder_.data()[index]; } + + void Reset() override { + data_builder_.Reset(); + ArrayBuilder::Reset(); + } + + Status Resize(int64_t capacity) override { + ARROW_RETURN_NOT_OK(CheckCapacity(capacity)); + capacity = std::max(capacity, kMinBuilderCapacity); + ARROW_RETURN_NOT_OK(data_builder_.Resize(capacity)); + return ArrayBuilder::Resize(capacity); + } + + value_type operator[](int64_t index) const { return GetValue(index); } + + value_type& operator[](int64_t index) { + return reinterpret_cast(data_builder_.mutable_data())[index]; + } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a contiguous C array of values + /// \param[in] length the number of values to append + /// \param[in] 
valid_bytes an optional sequence of bytes where non-zero + /// indicates a valid (non-null) value + /// \return Status + Status AppendValues(const value_type* values, int64_t length, + const uint8_t* valid_bytes = NULLPTR) { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(values, length); + // length_ is update by these + ArrayBuilder::UnsafeAppendToBitmap(valid_bytes, length); + return Status::OK(); + } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a contiguous C array of values + /// \param[in] length the number of values to append + /// \param[in] bitmap a validity bitmap to copy (may be null) + /// \param[in] bitmap_offset an offset into the validity bitmap + /// \return Status + Status AppendValues(const value_type* values, int64_t length, const uint8_t* bitmap, + int64_t bitmap_offset) { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(values, length); + // length_ is update by these + ArrayBuilder::UnsafeAppendToBitmap(bitmap, bitmap_offset, length); + return Status::OK(); + } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a contiguous C array of values + /// \param[in] length the number of values to append + /// \param[in] is_valid an std::vector indicating valid (1) or null + /// (0). Equal in length to values + /// \return Status + Status AppendValues(const value_type* values, int64_t length, + const std::vector& is_valid) { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(values, length); + // length_ is update by these + ArrayBuilder::UnsafeAppendToBitmap(is_valid); + return Status::OK(); + } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a std::vector of values + /// \param[in] is_valid an std::vector indicating valid (1) or null + /// (0). 
Equal in length to values + /// \return Status + Status AppendValues(const std::vector& values, + const std::vector& is_valid) { + return AppendValues(values.data(), static_cast(values.size()), is_valid); + } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a std::vector of values + /// \return Status + Status AppendValues(const std::vector& values) { + return AppendValues(values.data(), static_cast(values.size())); + } + + Status FinishInternal(std::shared_ptr* out) override { + ARROW_ASSIGN_OR_RAISE(auto null_bitmap, + null_bitmap_builder_.FinishWithLength(length_)); + ARROW_ASSIGN_OR_RAISE(auto data, data_builder_.FinishWithLength(length_)); + *out = ArrayData::Make(type(), length_, {null_bitmap, data}, null_count_); + capacity_ = length_ = null_count_ = 0; + return Status::OK(); + } + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values_begin InputIterator to the beginning of the values + /// \param[in] values_end InputIterator pointing to the end of the values + /// \return Status + template + Status AppendValues(ValuesIter values_begin, ValuesIter values_end) { + int64_t length = static_cast(std::distance(values_begin, values_end)); + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(values_begin, values_end); + // this updates the length_ + UnsafeSetNotNull(length); + return Status::OK(); + } + + /// \brief Append a sequence of elements in one shot, with a specified nullmap + /// \param[in] values_begin InputIterator to the beginning of the values + /// \param[in] values_end InputIterator pointing to the end of the values + /// \param[in] valid_begin InputIterator with elements indication valid(1) + /// or null(0) values. 
+ /// \return Status + template + enable_if_t::value, Status> AppendValues( + ValuesIter values_begin, ValuesIter values_end, ValidIter valid_begin) { + static_assert(!internal::is_null_pointer::value, + "Don't pass a NULLPTR directly as valid_begin, use the 2-argument " + "version instead"); + int64_t length = static_cast(std::distance(values_begin, values_end)); + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(values_begin, values_end); + null_bitmap_builder_.UnsafeAppend( + length, [&valid_begin]() -> bool { return *valid_begin++; }); + length_ = null_bitmap_builder_.length(); + null_count_ = null_bitmap_builder_.false_count(); + return Status::OK(); + } + + // Same as above, with a pointer type ValidIter + template + enable_if_t::value, Status> AppendValues( + ValuesIter values_begin, ValuesIter values_end, ValidIter valid_begin) { + int64_t length = static_cast(std::distance(values_begin, values_end)); + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(values_begin, values_end); + // this updates the length_ + if (valid_begin == NULLPTR) { + UnsafeSetNotNull(length); + } else { + null_bitmap_builder_.UnsafeAppend( + length, [&valid_begin]() -> bool { return *valid_begin++; }); + length_ = null_bitmap_builder_.length(); + null_count_ = null_bitmap_builder_.false_count(); + } + + return Status::OK(); + } + + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override { + return AppendValues(array.GetValues(1) + offset, length, + array.GetValues(0, 0), array.offset + offset); + } + + /// Append a single scalar under the assumption that the underlying Buffer is + /// large enough. + /// + /// This method does not capacity-check; make sure to call Reserve + /// beforehand. 
+ void UnsafeAppend(const value_type val) { + ArrayBuilder::UnsafeAppendToBitmap(true); + data_builder_.UnsafeAppend(val); + } + + void UnsafeAppendNull() { + ArrayBuilder::UnsafeAppendToBitmap(false); + data_builder_.UnsafeAppend(value_type{}); // zero + } + + std::shared_ptr type() const override { return type_; } + + protected: + std::shared_ptr type_; + TypedBufferBuilder data_builder_; +}; + +// Builders + +using UInt8Builder = NumericBuilder; +using UInt16Builder = NumericBuilder; +using UInt32Builder = NumericBuilder; +using UInt64Builder = NumericBuilder; + +using Int8Builder = NumericBuilder; +using Int16Builder = NumericBuilder; +using Int32Builder = NumericBuilder; +using Int64Builder = NumericBuilder; + +using HalfFloatBuilder = NumericBuilder; +using FloatBuilder = NumericBuilder; +using DoubleBuilder = NumericBuilder; + +/// @} + +/// \addtogroup temporal-builders +/// +/// @{ + +using Date32Builder = NumericBuilder; +using Date64Builder = NumericBuilder; +using Time32Builder = NumericBuilder; +using Time64Builder = NumericBuilder; +using TimestampBuilder = NumericBuilder; +using MonthIntervalBuilder = NumericBuilder; +using DurationBuilder = NumericBuilder; + +/// @} + +class ARROW_EXPORT BooleanBuilder + : public ArrayBuilder, + public internal::ArrayBuilderExtraOps { + public: + using TypeClass = BooleanType; + using value_type = bool; + + explicit BooleanBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment); + + BooleanBuilder(const std::shared_ptr& type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment); + + /// Write nulls as uint8_t* (0 value indicates null) into pre-allocated memory + Status AppendNulls(int64_t length) final { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(length, false); + UnsafeSetNull(length); + return Status::OK(); + } + + Status AppendNull() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppendNull(); + return 
Status::OK(); + } + + Status AppendEmptyValue() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + data_builder_.UnsafeAppend(false); + UnsafeSetNotNull(1); + return Status::OK(); + } + + Status AppendEmptyValues(int64_t length) final { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(length, false); + UnsafeSetNotNull(length); + return Status::OK(); + } + + /// Scalar append + Status Append(const bool val) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend(val); + return Status::OK(); + } + + Status Append(const uint8_t val) { return Append(val != 0); } + + /// Scalar append, without checking for capacity + void UnsafeAppend(const bool val) { + data_builder_.UnsafeAppend(val); + UnsafeAppendToBitmap(true); + } + + void UnsafeAppendNull() { + data_builder_.UnsafeAppend(false); + UnsafeAppendToBitmap(false); + } + + void UnsafeAppend(const uint8_t val) { UnsafeAppend(val != 0); } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a contiguous array of bytes (non-zero is 1) + /// \param[in] length the number of values to append + /// \param[in] valid_bytes an optional sequence of bytes where non-zero + /// indicates a valid (non-null) value + /// \return Status + Status AppendValues(const uint8_t* values, int64_t length, + const uint8_t* valid_bytes = NULLPTR); + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a bitmap of values + /// \param[in] length the number of values to append + /// \param[in] validity a validity bitmap to copy (may be null) + /// \param[in] offset an offset into the values and validity bitmaps + /// \return Status + Status AppendValues(const uint8_t* values, int64_t length, const uint8_t* validity, + int64_t offset); + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a contiguous C array of values + /// \param[in] length the number of values to append + /// \param[in] is_valid an std::vector indicating valid (1) or null + /// (0). 
Equal in length to values + /// \return Status + Status AppendValues(const uint8_t* values, int64_t length, + const std::vector& is_valid); + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a std::vector of bytes + /// \param[in] is_valid an std::vector indicating valid (1) or null + /// (0). Equal in length to values + /// \return Status + Status AppendValues(const std::vector& values, + const std::vector& is_valid); + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a std::vector of bytes + /// \return Status + Status AppendValues(const std::vector& values); + + /// \brief Append a sequence of elements in one shot + /// \param[in] values an std::vector indicating true (1) or false + /// \param[in] is_valid an std::vector indicating valid (1) or null + /// (0). Equal in length to values + /// \return Status + Status AppendValues(const std::vector& values, const std::vector& is_valid); + + /// \brief Append a sequence of elements in one shot + /// \param[in] values an std::vector indicating true (1) or false + /// \return Status + Status AppendValues(const std::vector& values); + + /// \brief Append a sequence of elements in one shot + /// \param[in] values_begin InputIterator to the beginning of the values + /// \param[in] values_end InputIterator pointing to the end of the values + /// or null(0) values + /// \return Status + template + Status AppendValues(ValuesIter values_begin, ValuesIter values_end) { + int64_t length = static_cast(std::distance(values_begin, values_end)); + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend( + length, [&values_begin]() -> bool { return *values_begin++; }); + // this updates length_ + UnsafeSetNotNull(length); + return Status::OK(); + } + + /// \brief Append a sequence of elements in one shot, with a specified nullmap + /// \param[in] values_begin InputIterator to the beginning of the values + /// \param[in] values_end InputIterator pointing to the 
end of the values + /// \param[in] valid_begin InputIterator with elements indication valid(1) + /// or null(0) values + /// \return Status + template + enable_if_t::value, Status> AppendValues( + ValuesIter values_begin, ValuesIter values_end, ValidIter valid_begin) { + static_assert(!internal::is_null_pointer::value, + "Don't pass a NULLPTR directly as valid_begin, use the 2-argument " + "version instead"); + int64_t length = static_cast(std::distance(values_begin, values_end)); + ARROW_RETURN_NOT_OK(Reserve(length)); + + data_builder_.UnsafeAppend( + length, [&values_begin]() -> bool { return *values_begin++; }); + null_bitmap_builder_.UnsafeAppend( + length, [&valid_begin]() -> bool { return *valid_begin++; }); + length_ = null_bitmap_builder_.length(); + null_count_ = null_bitmap_builder_.false_count(); + return Status::OK(); + } + + // Same as above, for a pointer type ValidIter + template + enable_if_t::value, Status> AppendValues( + ValuesIter values_begin, ValuesIter values_end, ValidIter valid_begin) { + int64_t length = static_cast(std::distance(values_begin, values_end)); + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend( + length, [&values_begin]() -> bool { return *values_begin++; }); + + if (valid_begin == NULLPTR) { + UnsafeSetNotNull(length); + } else { + null_bitmap_builder_.UnsafeAppend( + length, [&valid_begin]() -> bool { return *valid_begin++; }); + } + length_ = null_bitmap_builder_.length(); + null_count_ = null_bitmap_builder_.false_count(); + return Status::OK(); + } + + Status AppendValues(int64_t length, bool value); + + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override { + return AppendValues(array.GetValues(1, 0), length, + array.GetValues(0, 0), array.offset + offset); + } + + Status FinishInternal(std::shared_ptr* out) override; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + void 
Reset() override; + Status Resize(int64_t capacity) override; + + std::shared_ptr type() const override { return boolean(); } + + protected: + TypedBufferBuilder data_builder_; +}; + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_run_end.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_run_end.h new file mode 100644 index 0000000000000000000000000000000000000000..ac92efbd0dbe6b470b8275219e75b41aa3f7ab3a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_run_end.h @@ -0,0 +1,303 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/array.h" +#include "arrow/array/builder_base.h" + +namespace arrow { + +/// \addtogroup run-end-encoded-builders +/// +/// @{ + +namespace internal { + +/// \brief An ArrayBuilder that deduplicates repeated values as they are +/// appended to the inner-ArrayBuilder and reports the length of the current run +/// of identical values. 
+/// +/// The following sequence of calls +/// +/// Append(2) +/// Append(2) +/// Append(2) +/// Append(7) +/// Append(7) +/// Append(2) +/// FinishInternal() +/// +/// will cause the inner-builder to receive only 3 Append calls +/// +/// Append(2) +/// Append(7) +/// Append(2) +/// FinishInternal() +/// +/// Note that values returned by length(), null_count() and capacity() are +/// related to the compressed array built by the inner-ArrayBuilder. +class RunCompressorBuilder : public ArrayBuilder { + public: + RunCompressorBuilder(MemoryPool* pool, std::shared_ptr inner_builder, + std::shared_ptr type); + + ~RunCompressorBuilder() override; + + ARROW_DISALLOW_COPY_AND_ASSIGN(RunCompressorBuilder); + + /// \brief Called right before a run is being closed + /// + /// Subclasses can override this function to perform an additional action when + /// a run is closed (i.e. run-length is known and value is appended to the + /// inner builder). + /// + /// \param value can be NULLPTR if closing a run of NULLs + /// \param length the greater than 0 length of the value run being closed + virtual Status WillCloseRun(const std::shared_ptr& value, + int64_t length) { + return Status::OK(); + } + + /// \brief Called right before a run of empty values is being closed + /// + /// Subclasses can override this function to perform an additional action when + /// a run of empty values is appended (i.e. run-length is known and a single + /// empty value is appended to the inner builder). + /// + /// \param length the greater than 0 length of the value run being closed + virtual Status WillCloseRunOfEmptyValues(int64_t length) { return Status::OK(); } + + /// \brief Allocate enough memory for a given number of array elements. + /// + /// NOTE: Conservatively resizing a run-length compressed array for a given + /// number of logical elements is not possible, since the physical length will + /// vary depending on the values to be appended in the future. 
But we can + /// pessimistically assume that each run will contain a single value and + /// allocate that number of runs. + Status Resize(int64_t capacity) override { return ResizePhysical(capacity); } + + /// \brief Allocate enough memory for a given number of runs. + /// + /// Like Resize on non-encoded builders, it does not account for variable size + /// data. + Status ResizePhysical(int64_t capacity); + + Status ReservePhysical(int64_t additional_capacity) { + return Reserve(additional_capacity); + } + + void Reset() override; + + Status AppendNull() final { return AppendNulls(1); } + Status AppendNulls(int64_t length) override; + + Status AppendEmptyValue() final { return AppendEmptyValues(1); } + Status AppendEmptyValues(int64_t length) override; + + Status AppendScalar(const Scalar& scalar, int64_t n_repeats) override; + Status AppendScalars(const ScalarVector& scalars) override; + + // AppendArraySlice() is not implemented. + + /// \brief Append a slice of an array containing values from already + /// compressed runs. + /// + /// NOTE: WillCloseRun() is not called as the length of each run cannot be + /// determined at this point. Caller should ensure that !has_open_run() by + /// calling FinishCurrentRun() before calling this. + /// + /// Pre-condition: !has_open_run() + Status AppendRunCompressedArraySlice(const ArraySpan& array, int64_t offset, + int64_t length); + + /// \brief Forces the closing of the current run if one is currently open. + /// + /// This can be called when one wants to ensure the current run will not be + /// extended. This may cause identical values to appear close to each other in + /// the underlying array (i.e. two runs that could be a single run) if more + /// values are appended after this is called. + /// + /// Finish() and FinishInternal() call this automatically. 
+ virtual Status FinishCurrentRun(); + + Status FinishInternal(std::shared_ptr* out) override; + + ArrayBuilder& inner_builder() const { return *inner_builder_; } + + std::shared_ptr type() const override { return inner_builder_->type(); } + + bool has_open_run() const { return current_run_length_ > 0; } + int64_t open_run_length() const { return current_run_length_; } + + private: + inline void UpdateDimensions() { + capacity_ = inner_builder_->capacity(); + length_ = inner_builder_->length(); + null_count_ = inner_builder_->null_count(); + } + + private: + std::shared_ptr inner_builder_; + std::shared_ptr current_value_ = NULLPTR; + int64_t current_run_length_ = 0; +}; + +} // namespace internal + +// ---------------------------------------------------------------------- +// RunEndEncoded builder + +/// \brief Run-end encoded array builder. +/// +/// NOTE: the value returned by and capacity() is related to the +/// compressed array (physical) and not the decoded array (logical) that is +/// run-end encoded. null_count() always returns 0. length(), on the other hand, +/// returns the logical length of the run-end encoded array. +class ARROW_EXPORT RunEndEncodedBuilder : public ArrayBuilder { + private: + // An internal::RunCompressorBuilder that produces a run-end in the + // RunEndEncodedBuilder every time a value-run is closed. 
+ class ValueRunBuilder : public internal::RunCompressorBuilder { + public: + ValueRunBuilder(MemoryPool* pool, const std::shared_ptr& value_builder, + const std::shared_ptr& value_type, + RunEndEncodedBuilder& ree_builder); + + ~ValueRunBuilder() override = default; + + Status WillCloseRun(const std::shared_ptr&, int64_t length) override { + return ree_builder_.CloseRun(length); + } + + Status WillCloseRunOfEmptyValues(int64_t length) override { + return ree_builder_.CloseRun(length); + } + + private: + RunEndEncodedBuilder& ree_builder_; + }; + + public: + RunEndEncodedBuilder(MemoryPool* pool, + const std::shared_ptr& run_end_builder, + const std::shared_ptr& value_builder, + std::shared_ptr type); + + /// \brief Allocate enough memory for a given number of array elements. + /// + /// NOTE: Conservatively resizing an REE for a given number of logical + /// elements is not possible, since the physical length will vary depending on + /// the values to be appended in the future. But we can pessimistically assume + /// that each run will contain a single value and allocate that number of + /// runs. + Status Resize(int64_t capacity) override { return ResizePhysical(capacity); } + + /// \brief Allocate enough memory for a given number of runs. + Status ResizePhysical(int64_t capacity); + + /// \brief Ensure that there is enough space allocated to append the indicated + /// number of run without any further reallocation. Overallocation is + /// used in order to minimize the impact of incremental ReservePhysical() calls. + /// Note that additional_capacity is relative to the current number of elements + /// rather than to the current capacity, so calls to Reserve() which are not + /// interspersed with addition of new elements may not increase the capacity. 
+ /// + /// \param[in] additional_capacity the number of additional runs + /// \return Status + Status ReservePhysical(int64_t additional_capacity) { + return Reserve(additional_capacity); + } + + void Reset() override; + + Status AppendNull() final { return AppendNulls(1); } + Status AppendNulls(int64_t length) override; + + Status AppendEmptyValue() final { return AppendEmptyValues(1); } + Status AppendEmptyValues(int64_t length) override; + Status AppendScalar(const Scalar& scalar, int64_t n_repeats) override; + Status AppendScalars(const ScalarVector& scalars) override; + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override; + Status FinishInternal(std::shared_ptr* out) override; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + /// \brief Forces the closing of the current run if one is currently open. + /// + /// This can be called when one wants to ensure the current run will not be + /// extended. This may cause identical values to appear close to each other in + /// the values array (i.e. two runs that could be a single run) if more + /// values are appended after this is called. 
+ Status FinishCurrentRun(); + + std::shared_ptr type() const override; + + private: + /// \brief Update physical capacity and logical length + /// + /// \param committed_logical_length number of logical values that have been + /// committed to the values array + /// \param open_run_length number of logical values in the currently open run if any + inline void UpdateDimensions(int64_t committed_logical_length, + int64_t open_run_length) { + capacity_ = run_end_builder().capacity(); + length_ = committed_logical_length + open_run_length; + committed_logical_length_ = committed_logical_length; + } + + // Pre-condition: !value_run_builder_.has_open_run() + template + Status DoAppendArraySlice(const ArraySpan& array, int64_t offset, int64_t length); + + template + Status DoAppendRunEnd(int64_t run_end); + + /// \brief Cast run_end to the appropriate type and appends it to the run_ends + /// array. + Status AppendRunEnd(int64_t run_end); + + /// \brief Close a run by appending a value to the run_ends array and updating + /// length_ to reflect the new run. + /// + /// Pre-condition: run_length > 0. + [[nodiscard]] Status CloseRun(int64_t run_length); + + ArrayBuilder& run_end_builder(); + ArrayBuilder& value_builder(); + + private: + std::shared_ptr type_; + ValueRunBuilder* value_run_builder_; + // The length not counting the current open run in the value_run_builder_ + int64_t committed_logical_length_ = 0; +}; + +/// @} + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_time.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_time.h new file mode 100644 index 0000000000000000000000000000000000000000..da29ae3124b5d3da32605503b29edf6920cdf6d6 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_time.h @@ -0,0 +1,66 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Contains declarations of time related Arrow builder types. + +#pragma once + +#include + +#include "arrow/array/builder_base.h" +#include "arrow/array/builder_primitive.h" + +namespace arrow { + +/// \addtogroup temporal-builders +/// +/// @{ + +// TODO(ARROW-7938): this class is untested + +class ARROW_EXPORT DayTimeIntervalBuilder : public NumericBuilder { + public: + using DayMilliseconds = DayTimeIntervalType::DayMilliseconds; + + explicit DayTimeIntervalBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : DayTimeIntervalBuilder(day_time_interval(), pool, alignment) {} + + explicit DayTimeIntervalBuilder(std::shared_ptr type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : NumericBuilder(type, pool, alignment) {} +}; + +class ARROW_EXPORT MonthDayNanoIntervalBuilder + : public NumericBuilder { + public: + using MonthDayNanos = MonthDayNanoIntervalType::MonthDayNanos; + + explicit MonthDayNanoIntervalBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : MonthDayNanoIntervalBuilder(month_day_nano_interval(), pool, alignment) {} + + explicit MonthDayNanoIntervalBuilder(std::shared_ptr type, + MemoryPool* pool = 
default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : NumericBuilder(type, pool, alignment) {} +}; + +/// @} + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/concatenate.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/concatenate.h new file mode 100644 index 0000000000000000000000000000000000000000..aada5624d63a3052edddf0182799c474bee0c528 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/concatenate.h @@ -0,0 +1,53 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +/// \brief Concatenate arrays +/// +/// \param[in] arrays a vector of arrays to be concatenated +/// \param[in] pool memory to store the result will be allocated from this memory pool +/// \param[out] out_suggested_cast if a non-OK Result is returned, the function might set +/// out_suggested_cast to a cast suggestion that would allow concatenating the arrays +/// without overflow of offsets (e.g. 
string to large_string) +/// +/// \return the concatenated array +ARROW_EXPORT +Result> Concatenate(const ArrayVector& arrays, MemoryPool* pool, + std::shared_ptr* out_suggested_cast); + +} // namespace internal + +/// \brief Concatenate arrays +/// +/// \param[in] arrays a vector of arrays to be concatenated +/// \param[in] pool memory to store the result will be allocated from this memory pool +/// \return the concatenated array +ARROW_EXPORT +Result> Concatenate(const ArrayVector& arrays, + MemoryPool* pool = default_memory_pool()); + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/data.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/data.h new file mode 100644 index 0000000000000000000000000000000000000000..1e6ee9a1d32ff25e0530e6b89ee321cb9a438119 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/data.h @@ -0,0 +1,676 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include // IWYU pragma: export +#include +#include +#include +#include +#include + +#include "arrow/array/statistics.h" +#include "arrow/buffer.h" +#include "arrow/result.h" +#include "arrow/type.h" +#include "arrow/type_fwd.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/macros.h" +#include "arrow/util/span.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +namespace internal { +// ---------------------------------------------------------------------- +// Null handling for types without a validity bitmap and the dictionary type + +ARROW_EXPORT bool IsNullSparseUnion(const ArrayData& data, int64_t i); +ARROW_EXPORT bool IsNullDenseUnion(const ArrayData& data, int64_t i); +ARROW_EXPORT bool IsNullRunEndEncoded(const ArrayData& data, int64_t i); + +ARROW_EXPORT bool UnionMayHaveLogicalNulls(const ArrayData& data); +ARROW_EXPORT bool RunEndEncodedMayHaveLogicalNulls(const ArrayData& data); +ARROW_EXPORT bool DictionaryMayHaveLogicalNulls(const ArrayData& data); + +} // namespace internal + +// When slicing, we do not know the null count of the sliced range without +// doing some computation. To avoid doing this eagerly, we set the null count +// to -1 (any negative number will do). When Array::null_count is called the +// first time, the null count will be computed. See ARROW-33 +constexpr int64_t kUnknownNullCount = -1; + +// ---------------------------------------------------------------------- +// Generic array data container + +/// \class ArrayData +/// \brief Mutable container for generic Arrow array data +/// +/// This data structure is a self-contained representation of the memory and +/// metadata inside an Arrow array data structure (called vectors in Java). The +/// classes arrow::Array and its subclasses provide strongly-typed accessors +/// with support for the visitor pattern and other affordances. 
+/// +/// This class is designed for easy internal data manipulation, analytical data +/// processing, and data transport to and from IPC messages. For example, we +/// could cast from int64 to float64 like so: +/// +/// Int64Array arr = GetMyData(); +/// auto new_data = arr.data()->Copy(); +/// new_data->type = arrow::float64(); +/// DoubleArray double_arr(new_data); +/// +/// This object is also useful in an analytics setting where memory may be +/// reused. For example, if we had a group of operations all returning doubles, +/// say: +/// +/// Log(Sqrt(Expr(arr))) +/// +/// Then the low-level implementations of each of these functions could have +/// the signatures +/// +/// void Log(const ArrayData& values, ArrayData* out); +/// +/// As another example a function may consume one or more memory buffers in an +/// input array and replace them with newly-allocated data, changing the output +/// data type as well. +struct ARROW_EXPORT ArrayData { + ArrayData() = default; + + ArrayData(std::shared_ptr type, int64_t length, + int64_t null_count = kUnknownNullCount, int64_t offset = 0) + : type(std::move(type)), length(length), null_count(null_count), offset(offset) {} + + ArrayData(std::shared_ptr type, int64_t length, + std::vector> buffers, + int64_t null_count = kUnknownNullCount, int64_t offset = 0) + : ArrayData(std::move(type), length, null_count, offset) { + this->buffers = std::move(buffers); +#ifndef NDEBUG + // in debug mode, call the `device_type` function to trigger + // the DCHECKs that validate all the buffers are on the same device + ARROW_UNUSED(this->device_type()); +#endif + } + + ArrayData(std::shared_ptr type, int64_t length, + std::vector> buffers, + std::vector> child_data, + int64_t null_count = kUnknownNullCount, int64_t offset = 0) + : ArrayData(std::move(type), length, null_count, offset) { + this->buffers = std::move(buffers); + this->child_data = std::move(child_data); +#ifndef NDEBUG + // in debug mode, call the `device_type` function to 
trigger + // the DCHECKs that validate all the buffers (including children) + // are on the same device + ARROW_UNUSED(this->device_type()); +#endif + } + + static std::shared_ptr Make(std::shared_ptr type, int64_t length, + std::vector> buffers, + int64_t null_count = kUnknownNullCount, + int64_t offset = 0); + + static std::shared_ptr Make( + std::shared_ptr type, int64_t length, + std::vector> buffers, + std::vector> child_data, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + static std::shared_ptr Make( + std::shared_ptr type, int64_t length, + std::vector> buffers, + std::vector> child_data, + std::shared_ptr dictionary, int64_t null_count = kUnknownNullCount, + int64_t offset = 0); + + static std::shared_ptr Make(std::shared_ptr type, int64_t length, + int64_t null_count = kUnknownNullCount, + int64_t offset = 0); + + // Move constructor + ArrayData(ArrayData&& other) noexcept + : type(std::move(other.type)), + length(other.length), + offset(other.offset), + buffers(std::move(other.buffers)), + child_data(std::move(other.child_data)), + dictionary(std::move(other.dictionary)), + statistics(std::move(other.statistics)) { + SetNullCount(other.null_count); + } + + // Copy constructor + ArrayData(const ArrayData& other) noexcept + : type(other.type), + length(other.length), + offset(other.offset), + buffers(other.buffers), + child_data(other.child_data), + dictionary(other.dictionary), + statistics(other.statistics) { + SetNullCount(other.null_count); + } + + // Move assignment + ArrayData& operator=(ArrayData&& other) { + type = std::move(other.type); + length = other.length; + SetNullCount(other.null_count); + offset = other.offset; + buffers = std::move(other.buffers); + child_data = std::move(other.child_data); + dictionary = std::move(other.dictionary); + statistics = std::move(other.statistics); + return *this; + } + + // Copy assignment + ArrayData& operator=(const ArrayData& other) { + type = other.type; + length = other.length; + 
SetNullCount(other.null_count); + offset = other.offset; + buffers = other.buffers; + child_data = other.child_data; + dictionary = other.dictionary; + statistics = other.statistics; + return *this; + } + + std::shared_ptr Copy() const { return std::make_shared(*this); } + + /// \brief Copy all buffers and children recursively to destination MemoryManager + /// + /// This utilizes MemoryManager::CopyBuffer to create a new ArrayData object + /// recursively copying the buffers and all child buffers to the destination + /// memory manager. This includes dictionaries if applicable. + Result> CopyTo( + const std::shared_ptr& to) const; + /// \brief View or Copy this ArrayData to destination memory manager. + /// + /// Tries to view the buffer contents on the given memory manager's device + /// if possible (to avoid a copy) but falls back to copying if a no-copy view + /// isn't supported. + Result> ViewOrCopyTo( + const std::shared_ptr& to) const; + + bool IsNull(int64_t i) const { return !IsValid(i); } + + bool IsValid(int64_t i) const { + if (buffers[0] != NULLPTR) { + return bit_util::GetBit(buffers[0]->data(), i + offset); + } + const auto type = this->type->id(); + if (type == Type::SPARSE_UNION) { + return !internal::IsNullSparseUnion(*this, i); + } + if (type == Type::DENSE_UNION) { + return !internal::IsNullDenseUnion(*this, i); + } + if (type == Type::RUN_END_ENCODED) { + return !internal::IsNullRunEndEncoded(*this, i); + } + return null_count.load() != length; + } + + // Access a buffer's data as a typed C pointer + template + inline const T* GetValues(int i, int64_t absolute_offset) const { + if (buffers[i]) { + return reinterpret_cast(buffers[i]->data()) + absolute_offset; + } else { + return NULLPTR; + } + } + + template + inline const T* GetValues(int i) const { + return GetValues(i, offset); + } + + // Like GetValues, but returns NULLPTR instead of aborting if the underlying + // buffer is not a CPU buffer. 
+ template + inline const T* GetValuesSafe(int i, int64_t absolute_offset) const { + if (buffers[i] && buffers[i]->is_cpu()) { + return reinterpret_cast(buffers[i]->data()) + absolute_offset; + } else { + return NULLPTR; + } + } + + template + inline const T* GetValuesSafe(int i) const { + return GetValuesSafe(i, offset); + } + + // Access a buffer's data as a typed C pointer + template + inline T* GetMutableValues(int i, int64_t absolute_offset) { + if (buffers[i]) { + return reinterpret_cast(buffers[i]->mutable_data()) + absolute_offset; + } else { + return NULLPTR; + } + } + + template + inline T* GetMutableValues(int i) { + return GetMutableValues(i, offset); + } + + /// \brief Construct a zero-copy slice of the data with the given offset and length + /// + /// The associated `ArrayStatistics` is always discarded in a sliced + /// `ArrayData`. Because `ArrayStatistics` in the original + /// `ArrayData` may be invalid in a sliced `ArrayData`. If you want + /// to reuse statistics in the original `ArrayData`, you need to do + /// it by yourself. + /// + /// If the specified slice range has the same range as the original + /// `ArrayData`, we can reuse statistics in the original + /// `ArrayData`. Because it has the same data as the original + /// `ArrayData`. But the associated `ArrayStatistics` is discarded + /// in this case too. Use `Copy()` instead for the case. + std::shared_ptr Slice(int64_t offset, int64_t length) const; + + /// \brief Input-checking variant of Slice + /// + /// An Invalid Status is returned if the requested slice falls out of bounds. + /// Note that unlike Slice, `length` isn't clamped to the available buffer size. 
+ Result> SliceSafe(int64_t offset, int64_t length) const; + + void SetNullCount(int64_t v) { null_count.store(v); } + + /// \brief Return physical null count, or compute and set it if it's not known + int64_t GetNullCount() const; + + /// \brief Return true if the data has a validity bitmap and the physical null + /// count is known to be non-zero or not yet known. + /// + /// Note that this is not the same as MayHaveLogicalNulls, which also checks + /// for the presence of nulls in child data for types like unions and run-end + /// encoded types. + /// + /// \see HasValidityBitmap + /// \see MayHaveLogicalNulls + bool MayHaveNulls() const { + // If an ArrayData is slightly malformed it may have kUnknownNullCount set + // but no buffer + return null_count.load() != 0 && buffers[0] != NULLPTR; + } + + /// \brief Return true if the data has a validity bitmap + bool HasValidityBitmap() const { return buffers[0] != NULLPTR; } + + /// \brief Return true if the validity bitmap may have 0's in it, or if the + /// child arrays (in the case of types without a validity bitmap) may have + /// nulls, or if the dictionary of dictionay array may have nulls. + /// + /// This is not a drop-in replacement for MayHaveNulls, as historically + /// MayHaveNulls() has been used to check for the presence of a validity + /// bitmap that needs to be checked. + /// + /// Code that previously used MayHaveNulls() and then dealt with the validity + /// bitmap directly can be fixed to handle all types correctly without + /// performance degradation when handling most types by adopting + /// HasValidityBitmap and MayHaveLogicalNulls. + /// + /// Before: + /// + /// uint8_t* validity = array.MayHaveNulls() ? array.buffers[0].data : NULLPTR; + /// for (int64_t i = 0; i < array.length; ++i) { + /// if (validity && !bit_util::GetBit(validity, i)) { + /// continue; // skip a NULL + /// } + /// ... 
+ /// } + /// + /// After: + /// + /// bool all_valid = !array.MayHaveLogicalNulls(); + /// uint8_t* validity = array.HasValidityBitmap() ? array.buffers[0].data : NULLPTR; + /// for (int64_t i = 0; i < array.length; ++i) { + /// bool is_valid = all_valid || + /// (validity && bit_util::GetBit(validity, i)) || + /// array.IsValid(i); + /// if (!is_valid) { + /// continue; // skip a NULL + /// } + /// ... + /// } + bool MayHaveLogicalNulls() const { + if (buffers[0] != NULLPTR) { + return null_count.load() != 0; + } + const auto t = type->id(); + if (t == Type::SPARSE_UNION || t == Type::DENSE_UNION) { + return internal::UnionMayHaveLogicalNulls(*this); + } + if (t == Type::RUN_END_ENCODED) { + return internal::RunEndEncodedMayHaveLogicalNulls(*this); + } + if (t == Type::DICTIONARY) { + return internal::DictionaryMayHaveLogicalNulls(*this); + } + return null_count.load() != 0; + } + + /// \brief Computes the logical null count for arrays of all types including + /// those that do not have a validity bitmap like union and run-end encoded + /// arrays + /// + /// If the array has a validity bitmap, this function behaves the same as + /// GetNullCount. For types that have no validity bitmap, this function will + /// recompute the null count every time it is called. + /// + /// \see GetNullCount + int64_t ComputeLogicalNullCount() const; + + /// \brief Return the device_type of the underlying buffers and children + /// + /// If there are no buffers in this ArrayData object, it just returns + /// DeviceAllocationType::kCPU as a default. We also assume that all buffers + /// should be allocated on the same device type and perform DCHECKs to confirm + /// this in debug mode. + /// + /// \return DeviceAllocationType + DeviceAllocationType device_type() const; + + std::shared_ptr type; + int64_t length = 0; + mutable std::atomic null_count{0}; + // The logical start point into the physical buffers (in values, not bytes). 
+ // Note that, for child data, this must be *added* to the child data's own offset. + int64_t offset = 0; + std::vector> buffers; + std::vector> child_data; + + // The dictionary for this Array, if any. Only used for dictionary type + std::shared_ptr dictionary; + + // The statistics for this Array. + std::shared_ptr statistics; +}; + +/// \brief A non-owning Buffer reference +struct ARROW_EXPORT BufferSpan { + // It is the user of this class's responsibility to ensure that + // buffers that were const originally are not written to + // accidentally. + uint8_t* data = NULLPTR; + int64_t size = 0; + // Pointer back to buffer that owns this memory + const std::shared_ptr* owner = NULLPTR; + + template + const T* data_as() const { + return reinterpret_cast(data); + } + template + T* mutable_data_as() { + return reinterpret_cast(data); + } +}; + +/// \brief EXPERIMENTAL: A non-owning ArrayData reference that is cheaply +/// copyable and does not contain any shared_ptr objects. Do not use in public +/// APIs aside from compute kernels for now +struct ARROW_EXPORT ArraySpan { + const DataType* type = NULLPTR; + int64_t length = 0; + mutable int64_t null_count = kUnknownNullCount; + int64_t offset = 0; + BufferSpan buffers[3]; + + ArraySpan() = default; + + explicit ArraySpan(const DataType* type, int64_t length) : type(type), length(length) {} + + ArraySpan(const ArrayData& data) { // NOLINT implicit conversion + SetMembers(data); + } + explicit ArraySpan(const Scalar& data) { FillFromScalar(data); } + + /// If dictionary-encoded, put dictionary in the first entry + std::vector child_data; + + /// \brief Populate ArraySpan to look like an array of length 1 pointing at + /// the data members of a Scalar value + void FillFromScalar(const Scalar& value); + + void SetMembers(const ArrayData& data); + + void SetBuffer(int index, const std::shared_ptr& buffer) { + this->buffers[index].data = const_cast(buffer->data()); + this->buffers[index].size = buffer->size(); + 
this->buffers[index].owner = &buffer; + } + + const ArraySpan& dictionary() const { return child_data[0]; } + + /// \brief Return the number of buffers (out of 3) that are used to + /// constitute this array + int num_buffers() const; + + // Access a buffer's data as a typed C pointer + template + inline T* GetValues(int i, int64_t absolute_offset) { + return reinterpret_cast(buffers[i].data) + absolute_offset; + } + + template + inline T* GetValues(int i) { + return GetValues(i, this->offset); + } + + // Access a buffer's data as a typed C pointer + template + inline const T* GetValues(int i, int64_t absolute_offset) const { + return reinterpret_cast(buffers[i].data) + absolute_offset; + } + + template + inline const T* GetValues(int i) const { + return GetValues(i, this->offset); + } + + /// \brief Access a buffer's data as a span + /// + /// \param i The buffer index + /// \param length The required length (in number of typed values) of the requested span + /// \pre i > 0 + /// \pre length <= the length of the buffer (in number of values) that's expected for + /// this array type + /// \return A span of the requested length + template + util::span GetSpan(int i, int64_t length) const { + const int64_t buffer_length = buffers[i].size / static_cast(sizeof(T)); + assert(i > 0 && length + offset <= buffer_length); + ARROW_UNUSED(buffer_length); + return util::span(buffers[i].data_as() + this->offset, length); + } + + /// \brief Access a buffer's data as a span + /// + /// \param i The buffer index + /// \param length The required length (in number of typed values) of the requested span + /// \pre i > 0 + /// \pre length <= the length of the buffer (in number of values) that's expected for + /// this array type + /// \return A span of the requested length + template + util::span GetSpan(int i, int64_t length) { + const int64_t buffer_length = buffers[i].size / static_cast(sizeof(T)); + assert(i > 0 && length + offset <= buffer_length); + ARROW_UNUSED(buffer_length); 
+ return util::span(buffers[i].mutable_data_as() + this->offset, length); + } + + inline bool IsNull(int64_t i) const { return !IsValid(i); } + + inline bool IsValid(int64_t i) const { + if (this->buffers[0].data != NULLPTR) { + return bit_util::GetBit(this->buffers[0].data, i + this->offset); + } else { + const auto type = this->type->id(); + if (type == Type::SPARSE_UNION) { + return !IsNullSparseUnion(i); + } + if (type == Type::DENSE_UNION) { + return !IsNullDenseUnion(i); + } + if (type == Type::RUN_END_ENCODED) { + return !IsNullRunEndEncoded(i); + } + return this->null_count != this->length; + } + } + + std::shared_ptr ToArrayData() const; + + std::shared_ptr ToArray() const; + + std::shared_ptr GetBuffer(int index) const { + const BufferSpan& buf = this->buffers[index]; + if (buf.owner) { + return *buf.owner; + } else if (buf.data != NULLPTR) { + // Buffer points to some memory without an owning buffer + return std::make_shared(buf.data, buf.size); + } else { + return NULLPTR; + } + } + + void SetSlice(int64_t offset, int64_t length) { + this->offset = offset; + this->length = length; + if (this->type->id() == Type::NA) { + this->null_count = this->length; + } else if (this->MayHaveNulls()) { + this->null_count = kUnknownNullCount; + } else { + this->null_count = 0; + } + } + + /// \brief Return physical null count, or compute and set it if it's not known + int64_t GetNullCount() const; + + /// \brief Return true if the array has a validity bitmap and the physical null + /// count is known to be non-zero or not yet known + /// + /// Note that this is not the same as MayHaveLogicalNulls, which also checks + /// for the presence of nulls in child data for types like unions and run-end + /// encoded types. 
+ /// + /// \see HasValidityBitmap + /// \see MayHaveLogicalNulls + bool MayHaveNulls() const { + // If an ArrayData is slightly malformed it may have kUnknownNullCount set + // but no buffer + return null_count != 0 && buffers[0].data != NULLPTR; + } + + /// \brief Return true if the array has a validity bitmap + bool HasValidityBitmap() const { return buffers[0].data != NULLPTR; } + + /// \brief Return true if the validity bitmap may have 0's in it, or if the + /// child arrays (in the case of types without a validity bitmap) may have + /// nulls, or if the dictionary of dictionay array may have nulls. + /// + /// \see ArrayData::MayHaveLogicalNulls + bool MayHaveLogicalNulls() const { + if (buffers[0].data != NULLPTR) { + return null_count != 0; + } + const auto t = type->id(); + if (t == Type::SPARSE_UNION || t == Type::DENSE_UNION) { + return UnionMayHaveLogicalNulls(); + } + if (t == Type::RUN_END_ENCODED) { + return RunEndEncodedMayHaveLogicalNulls(); + } + if (t == Type::DICTIONARY) { + return DictionaryMayHaveLogicalNulls(); + } + return null_count != 0; + } + + /// \brief Compute the logical null count for arrays of all types including + /// those that do not have a validity bitmap like union and run-end encoded + /// arrays + /// + /// If the array has a validity bitmap, this function behaves the same as + /// GetNullCount. For types that have no validity bitmap, this function will + /// recompute the logical null count every time it is called. + /// + /// \see GetNullCount + int64_t ComputeLogicalNullCount() const; + + /// Some DataTypes (StringView, BinaryView) may have an arbitrary number of variadic + /// buffers. Since ArraySpan only has 3 buffers, we pack the variadic buffers into + /// buffers[2]; IE buffers[2].data points to the first shared_ptr of the + /// variadic set and buffers[2].size is the number of variadic buffers times + /// sizeof(shared_ptr). 
+ /// + /// \see HasVariadicBuffers + util::span> GetVariadicBuffers() const; + bool HasVariadicBuffers() const; + + private: + ARROW_FRIEND_EXPORT friend bool internal::IsNullRunEndEncoded(const ArrayData& span, + int64_t i); + + bool IsNullSparseUnion(int64_t i) const; + bool IsNullDenseUnion(int64_t i) const; + + /// \brief Return true if the value at logical index i is null + /// + /// This function uses binary-search, so it has a O(log N) cost. + /// Iterating over the whole array and calling IsNull is O(N log N), so + /// for better performance it is recommended to use a + /// ree_util::RunEndEncodedArraySpan to iterate run by run instead. + bool IsNullRunEndEncoded(int64_t i) const; + + bool UnionMayHaveLogicalNulls() const; + bool RunEndEncodedMayHaveLogicalNulls() const; + bool DictionaryMayHaveLogicalNulls() const; +}; + +namespace internal { + +void FillZeroLengthArray(const DataType* type, ArraySpan* span); + +/// Construct a zero-copy view of this ArrayData with the given type. +/// +/// This method checks if the types are layout-compatible. +/// Nested types are traversed in depth-first order. Data buffers must have +/// the same item sizes, even though the logical types may be different. +/// An error is returned if the types are not layout-compatible. +ARROW_EXPORT +Result> GetArrayView(const std::shared_ptr& data, + const std::shared_ptr& type); + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/diff.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/diff.h new file mode 100644 index 0000000000000000000000000000000000000000..a405164b333f3b21a17e8414ef59a8a628c28579 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/diff.h @@ -0,0 +1,76 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/array/array_base.h" +#include "arrow/array/array_nested.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \brief Compare two arrays, returning an edit script which expresses the difference +/// between them +/// +/// An edit script is an array of struct(insert: bool, run_length: int64_t). +/// Each element of "insert" determines whether an element was inserted into (true) +/// or deleted from (false) base. Each insertion or deletion is followed by a run of +/// elements which are unchanged from base to target; the length of this run is stored +/// in "run_length". (Note that the edit script begins and ends with a run of shared +/// elements but both fields of the struct must have the same length. To accommodate this +/// the first element of "insert" should be ignored.) 
+/// +/// For example for base "hlloo" and target "hello", the edit script would be +/// [ +/// {"insert": false, "run_length": 1}, // leading run of length 1 ("h") +/// {"insert": true, "run_length": 3}, // insert("e") then a run of length 3 ("llo") +/// {"insert": false, "run_length": 0} // delete("o") then an empty run +/// ] +/// +/// Diffing arrays containing nulls is not currently supported. +/// +/// \param[in] base baseline for comparison +/// \param[in] target an array of identical type to base whose elements differ from base's +/// \param[in] pool memory to store the result will be allocated from this memory pool +/// \return an edit script array which can be applied to base to produce target +ARROW_EXPORT +Result> Diff(const Array& base, const Array& target, + MemoryPool* pool = default_memory_pool()); + +/// \brief visitor interface for easy traversal of an edit script +/// +/// visitor will be called for each hunk of insertions and deletions. +ARROW_EXPORT Status VisitEditScript( + const Array& edits, + const std::function& visitor); + +/// \brief return a function which will format an edit script in unified +/// diff format to os, given base and target arrays of type +ARROW_EXPORT Result< + std::function> +MakeUnifiedDiffFormatter(const DataType& type, std::ostream* os); + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/statistics.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/statistics.h new file mode 100644 index 0000000000000000000000000000000000000000..523f877bbe429c39bd9b6265a58c5c313abaeb42 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/statistics.h @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \brief Statistics for an Array +/// +/// Apache Arrow format doesn't have statistics but data source such +/// as Apache Parquet may have statistics. Statistics associated with +/// data source can be read unified API via this class. +struct ARROW_EXPORT ArrayStatistics { + using ValueType = std::variant; + + /// \brief The number of null values, may not be set + std::optional null_count = std::nullopt; + + /// \brief The number of distinct values, may not be set + std::optional distinct_count = std::nullopt; + + /// \brief The minimum value, may not be set + std::optional min = std::nullopt; + + /// \brief Whether the minimum value is exact or not + bool is_min_exact = false; + + /// \brief The maximum value, may not be set + std::optional max = std::nullopt; + + /// \brief Whether the maximum value is exact or not + bool is_max_exact = false; + + /// \brief Check two statistics for equality + bool Equals(const ArrayStatistics& other) const { + return null_count == other.null_count && distinct_count == other.distinct_count && + min == other.min && is_min_exact == other.is_min_exact && max == other.max && + is_max_exact == other.is_max_exact; + } + + /// \brief Check two statistics for equality + bool operator==(const ArrayStatistics& other) const { return 
Equals(other); } + + /// \brief Check two statistics for not equality + bool operator!=(const ArrayStatistics& other) const { return !Equals(other); } +}; + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/util.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/util.h new file mode 100644 index 0000000000000000000000000000000000000000..fd8e75ddb86405c523a8083f559dab0e72364e24 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/util.h @@ -0,0 +1,96 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include + +#include "arrow/array/data.h" +#include "arrow/compare.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \defgroup array-factories Array factory functions +/// +/// @{ + +/// \brief Create a strongly-typed Array instance from generic ArrayData +/// \param[in] data the array contents +/// \return the resulting Array instance +ARROW_EXPORT +std::shared_ptr MakeArray(const std::shared_ptr& data); + +/// \brief Create a strongly-typed Array instance with all elements null +/// \param[in] type the array type +/// \param[in] length the array length +/// \param[in] pool the memory pool to allocate memory from +ARROW_EXPORT +Result> MakeArrayOfNull(const std::shared_ptr& type, + int64_t length, + MemoryPool* pool = default_memory_pool()); + +/// \brief Create an Array instance whose slots are the given scalar +/// \param[in] scalar the value with which to fill the array +/// \param[in] length the array length +/// \param[in] pool the memory pool to allocate memory from +ARROW_EXPORT +Result> MakeArrayFromScalar( + const Scalar& scalar, int64_t length, MemoryPool* pool = default_memory_pool()); + +/// \brief Create an empty Array of a given type +/// +/// The output Array will be of the given type. +/// +/// \param[in] type the data type of the empty Array +/// \param[in] pool the memory pool to allocate memory from +/// \return the resulting Array +ARROW_EXPORT +Result> MakeEmptyArray(std::shared_ptr type, + MemoryPool* pool = default_memory_pool()); + +/// @} + +namespace internal { + +/// \brief Swap endian of each element in a generic ArrayData +/// +/// As dictionaries are often shared between different arrays, dictionaries +/// are not swapped by this function and should be handled separately. 
+/// +/// \param[in] data the array contents +/// \param[in] pool the memory pool to allocate memory from +/// \return the resulting ArrayData whose elements were swapped +ARROW_EXPORT +Result> SwapEndianArrayData( + const std::shared_ptr& data, MemoryPool* pool = default_memory_pool()); + +/// Given a number of ArrayVectors, treat each ArrayVector as the +/// chunks of a chunked array. Then rechunk each ArrayVector such that +/// all ArrayVectors are chunked identically. It is mandatory that +/// all ArrayVectors contain the same total number of elements. +ARROW_EXPORT +std::vector RechunkArraysConsistently(const std::vector&); + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/validate.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/validate.h new file mode 100644 index 0000000000000000000000000000000000000000..3ebfa0a51edce21ca585862b1dbb074b6cf8d9c8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/array/validate.h @@ -0,0 +1,56 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +// Internal functions implementing Array::Validate() and friends. + +// O(1) array metadata validation + +ARROW_EXPORT +Status ValidateArray(const Array& array); + +ARROW_EXPORT +Status ValidateArray(const ArrayData& data); + +// O(N) array data validation. +// Note that, starting from 7.0.0, "full" routines also validate metadata. +// Before, ValidateArray() needed to be called before ValidateArrayFull() +// to ensure metadata correctness, otherwise invalid memory accesses +// may occur. + +ARROW_EXPORT +Status ValidateArrayFull(const Array& array); + +ARROW_EXPORT +Status ValidateArrayFull(const ArrayData& data); + +ARROW_EXPORT +Status ValidateUTF8(const Array& array); + +ARROW_EXPORT +Status ValidateUTF8(const ArrayData& data); + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/api.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/api.h new file mode 100644 index 0000000000000000000000000000000000000000..b5690aed8da9dfafc4af84e0a713b0c2028ed28e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/api.h @@ -0,0 +1,25 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/ipc/dictionary.h" +#include "arrow/ipc/feather.h" +#include "arrow/ipc/json_simple.h" +#include "arrow/ipc/message.h" +#include "arrow/ipc/reader.h" +#include "arrow/ipc/writer.h" diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/dictionary.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/dictionary.h new file mode 100644 index 0000000000000000000000000000000000000000..e4287cb19747fa60f5d728b6afb2bcab30443bfd --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/dictionary.h @@ -0,0 +1,177 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Tools for dictionaries in IPC context + +#pragma once + +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace ipc { + +namespace internal { + +class FieldPosition { + public: + FieldPosition() : parent_(NULLPTR), index_(-1), depth_(0) {} + + FieldPosition child(int index) const { return {this, index}; } + + std::vector path() const { + std::vector path(depth_); + const FieldPosition* cur = this; + for (int i = depth_ - 1; i >= 0; --i) { + path[i] = cur->index_; + cur = cur->parent_; + } + return path; + } + + protected: + FieldPosition(const FieldPosition* parent, int index) + : parent_(parent), index_(index), depth_(parent->depth_ + 1) {} + + const FieldPosition* parent_; + int index_; + int depth_; +}; + +} // namespace internal + +/// \brief Map fields in a schema to dictionary ids +/// +/// The mapping is structural, i.e. the field path (as a vector of indices) +/// is associated to the dictionary id. A dictionary id may be associated +/// to multiple fields. +class ARROW_EXPORT DictionaryFieldMapper { + public: + DictionaryFieldMapper(); + explicit DictionaryFieldMapper(const Schema& schema); + ~DictionaryFieldMapper(); + + Status AddSchemaFields(const Schema& schema); + Status AddField(int64_t id, std::vector field_path); + + Result GetFieldId(std::vector field_path) const; + + int num_fields() const; + + /// \brief Returns number of unique dictionaries, taking into + /// account that different fields can share the same dictionary. 
+ int num_dicts() const; + + private: + struct Impl; + std::unique_ptr impl_; +}; + +using DictionaryVector = std::vector>>; + +/// \brief Memoization data structure for reading dictionaries from IPC streams +/// +/// This structure tracks the following associations: +/// - field position (structural) -> dictionary id +/// - dictionary id -> value type +/// - dictionary id -> dictionary (value) data +/// +/// Together, they allow resolving dictionary data when reading an IPC stream, +/// using metadata recorded in the schema message and data recorded in the +/// dictionary batch messages (see ResolveDictionaries). +/// +/// This structure isn't useful for writing an IPC stream, where only +/// DictionaryFieldMapper is necessary. +class ARROW_EXPORT DictionaryMemo { + public: + DictionaryMemo(); + ~DictionaryMemo(); + + DictionaryFieldMapper& fields(); + const DictionaryFieldMapper& fields() const; + + /// \brief Return current dictionary corresponding to a particular + /// id. Returns KeyError if id not found + Result> GetDictionary(int64_t id, MemoryPool* pool) const; + + /// \brief Return dictionary value type corresponding to a + /// particular dictionary id. + Result> GetDictionaryType(int64_t id) const; + + /// \brief Return true if we have a dictionary for the input id + bool HasDictionary(int64_t id) const; + + /// \brief Add a dictionary value type to the memo with a particular id. + /// Returns KeyError if a different type is already registered with the same id. + Status AddDictionaryType(int64_t id, const std::shared_ptr& type); + + /// \brief Add a dictionary to the memo with a particular id. Returns + /// KeyError if that dictionary already exists + Status AddDictionary(int64_t id, const std::shared_ptr& dictionary); + + /// \brief Append a dictionary delta to the memo with a particular id. 
Returns + /// KeyError if that dictionary does not exists + Status AddDictionaryDelta(int64_t id, const std::shared_ptr& dictionary); + + /// \brief Add a dictionary to the memo if it does not have one with the id, + /// otherwise, replace the dictionary with the new one. + /// + /// Return true if the dictionary was added, false if replaced. + Result AddOrReplaceDictionary(int64_t id, + const std::shared_ptr& dictionary); + + private: + struct Impl; + std::unique_ptr impl_; +}; + +// For writing: collect dictionary entries to write to the IPC stream, in order +// (i.e. inner dictionaries before dependent outer dictionaries). +ARROW_EXPORT +Result CollectDictionaries(const RecordBatch& batch, + const DictionaryFieldMapper& mapper); + +// For reading: resolve all dictionaries in columns, according to the field +// mapping and dictionary arrays stored in memo. +// Columns may be sparse, i.e. some entries may be left null +// (e.g. if an inclusion mask was used). +ARROW_EXPORT +Status ResolveDictionaries(const ArrayDataVector& columns, const DictionaryMemo& memo, + MemoryPool* pool); + +namespace internal { + +// Like CollectDictionaries above, but uses the memo's DictionaryFieldMapper +// and all collected dictionaries are added to the memo using AddDictionary. +// +// This is used as a shortcut in some roundtripping tests (to avoid emitting +// any actual dictionary batches). 
+ARROW_EXPORT +Status CollectDictionaries(const RecordBatch& batch, DictionaryMemo* memo); + +} // namespace internal + +} // namespace ipc +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/feather.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/feather.h new file mode 100644 index 0000000000000000000000000000000000000000..da88ee22f8291f81da3046e3c6e5844a5021be4d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/feather.h @@ -0,0 +1,150 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Public API for the "Feather" file format, originally created at +// http://github.com/wesm/feather + +#pragma once + +#include +#include +#include +#include + +#include "arrow/ipc/options.h" +#include "arrow/type_fwd.h" +#include "arrow/util/compression.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Schema; +class Status; +class Table; + +namespace io { + +class OutputStream; +class RandomAccessFile; + +} // namespace io + +namespace ipc { +namespace feather { + +static constexpr const int kFeatherV1Version = 2; +static constexpr const int kFeatherV2Version = 3; + +// ---------------------------------------------------------------------- +// Metadata accessor classes + +/// \class Reader +/// \brief An interface for reading columns from Feather files +class ARROW_EXPORT Reader { + public: + virtual ~Reader() = default; + + /// \brief Open a Feather file from a RandomAccessFile interface + /// + /// \param[in] source a RandomAccessFile instance + /// \return the table reader + static Result> Open( + const std::shared_ptr& source); + + /// \brief Open a Feather file from a RandomAccessFile interface + /// with IPC Read options + /// + /// \param[in] source a RandomAccessFile instance + /// \param[in] options IPC Read options + /// \return the table reader + static Result> Open( + const std::shared_ptr& source, const IpcReadOptions& options); + + /// \brief Return the version number of the Feather file + virtual int version() const = 0; + + virtual std::shared_ptr schema() const = 0; + + /// \brief Read all columns from the file as an arrow::Table. + /// + /// \param[out] out the returned table + /// \return Status + /// + /// This function is zero-copy if the file source supports zero-copy reads + virtual Status Read(std::shared_ptr* out) = 0; + + /// \brief Read only the specified columns from the file as an arrow::Table. 
+ /// + /// \param[in] indices the column indices to read + /// \param[out] out the returned table + /// \return Status + /// + /// This function is zero-copy if the file source supports zero-copy reads + virtual Status Read(const std::vector& indices, std::shared_ptr
* out) = 0; + + /// \brief Read only the specified columns from the file as an arrow::Table. + /// + /// \param[in] names the column names to read + /// \param[out] out the returned table + /// \return Status + /// + /// This function is zero-copy if the file source supports zero-copy reads + virtual Status Read(const std::vector& names, + std::shared_ptr
* out) = 0; +}; + +struct ARROW_EXPORT WriteProperties { + static WriteProperties Defaults(); + + static WriteProperties DefaultsV1() { + WriteProperties props = Defaults(); + props.version = kFeatherV1Version; + return props; + } + + /// Feather file version number + /// + /// version 2: "Feather V1" Apache Arrow <= 0.16.0 + /// version 3: "Feather V2" Apache Arrow > 0.16.0 + int version = kFeatherV2Version; + + // Parameters for Feather V2 only + + /// Number of rows per intra-file chunk. Use smaller chunksize when you need + /// faster random row access + int64_t chunksize = 1LL << 16; + + /// Compression type to use. Only UNCOMPRESSED, LZ4_FRAME, and ZSTD are + /// supported. The default compression returned by Defaults() is LZ4 if the + /// project is built with support for it, otherwise + /// UNCOMPRESSED. UNCOMPRESSED is set as the object default here so that if + /// WriteProperties::Defaults() is not used, the default constructor for + /// WriteProperties will work regardless of the options used to build the C++ + /// project. + Compression::type compression = Compression::UNCOMPRESSED; + + /// Compressor-specific compression level + int compression_level = ::arrow::util::kUseDefaultCompressionLevel; +}; + +ARROW_EXPORT +Status WriteTable(const Table& table, io::OutputStream* dst, + const WriteProperties& properties = WriteProperties::Defaults()); + +} // namespace feather +} // namespace ipc +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/json_simple.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/json_simple.h new file mode 100644 index 0000000000000000000000000000000000000000..3a730ee6a3f1963e2f7a486f8fac3ab4472ddf74 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/json_simple.h @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Implement a simple JSON representation format for arrays + +#pragma once + +#include +#include +#include + +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Array; +class DataType; + +namespace ipc { +namespace internal { +namespace json { + +ARROW_EXPORT +Result> ArrayFromJSON(const std::shared_ptr&, + const std::string& json); + +ARROW_EXPORT +Result> ArrayFromJSON(const std::shared_ptr&, + std::string_view json); + +ARROW_EXPORT +Result> ArrayFromJSON(const std::shared_ptr&, + const char* json); + +ARROW_EXPORT +Status ChunkedArrayFromJSON(const std::shared_ptr& type, + const std::vector& json_strings, + std::shared_ptr* out); + +ARROW_EXPORT +Status DictArrayFromJSON(const std::shared_ptr&, std::string_view indices_json, + std::string_view dictionary_json, std::shared_ptr* out); + +ARROW_EXPORT +Status ScalarFromJSON(const std::shared_ptr&, std::string_view json, + std::shared_ptr* out); + +ARROW_EXPORT +Status DictScalarFromJSON(const std::shared_ptr&, std::string_view index_json, + std::string_view dictionary_json, std::shared_ptr* out); + +} // namespace json +} // namespace internal +} // namespace ipc +} // namespace arrow diff --git 
a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/message.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/message.h new file mode 100644 index 0000000000000000000000000000000000000000..1cd72ce993ed28ddfd1f894af35eeefbbdce6050 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/message.h @@ -0,0 +1,565 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// C++ object model and user API for interprocess schema messaging + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/io/type_fwd.h" +#include "arrow/ipc/type_fwd.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace ipc { + +struct IpcWriteOptions; + +// Read interface classes. 
We do not fully deserialize the flatbuffers so that +// individual fields metadata can be retrieved from very large schema without +// + +/// \class Message +/// \brief An IPC message including metadata and body +class ARROW_EXPORT Message { + public: + /// \brief Construct message, but do not validate + /// + /// Use at your own risk; Message::Open has more metadata validation + Message(std::shared_ptr metadata, std::shared_ptr body); + + ~Message(); + + /// \brief Create and validate a Message instance from two buffers + /// + /// \param[in] metadata a buffer containing the Flatbuffer metadata + /// \param[in] body a buffer containing the message body, which may be null + /// \return the created message + static Result> Open(std::shared_ptr metadata, + std::shared_ptr body); + + /// \brief Read message body and create Message given Flatbuffer metadata + /// \param[in] metadata containing a serialized Message flatbuffer + /// \param[in] stream an InputStream + /// \return the created Message + /// + /// \note If stream supports zero-copy, this is zero-copy + static Result> ReadFrom(std::shared_ptr metadata, + io::InputStream* stream); + + /// \brief Read message body from position in file, and create Message given + /// the Flatbuffer metadata + /// \param[in] offset the position in the file where the message body starts. 
+ /// \param[in] metadata containing a serialized Message flatbuffer + /// \param[in] file the seekable file interface to read from + /// \return the created Message + /// + /// \note If file supports zero-copy, this is zero-copy + static Result> ReadFrom(const int64_t offset, + std::shared_ptr metadata, + io::RandomAccessFile* file); + + /// \brief Return true if message type and contents are equal + /// + /// \param other another message + /// \return true if contents equal + bool Equals(const Message& other) const; + + /// \brief the Message metadata + /// + /// \return buffer + std::shared_ptr metadata() const; + + /// \brief Custom metadata serialized in metadata Flatbuffer. Returns nullptr + /// when none set + const std::shared_ptr& custom_metadata() const; + + /// \brief the Message body, if any + /// + /// \return buffer is null if no body + std::shared_ptr body() const; + + /// \brief The expected body length according to the metadata, for + /// verification purposes + int64_t body_length() const; + + /// \brief The Message type + MessageType type() const; + + /// \brief The Message metadata version + MetadataVersion metadata_version() const; + + const void* header() const; + + /// \brief Write length-prefixed metadata and body to output stream + /// + /// \param[in] file output stream to write to + /// \param[in] options IPC writing options including alignment + /// \param[out] output_length the number of bytes written + /// \return Status + Status SerializeTo(io::OutputStream* file, const IpcWriteOptions& options, + int64_t* output_length) const; + + /// \brief Return true if the Message metadata passes Flatbuffer validation + bool Verify() const; + + /// \brief Whether a given message type needs a body. 
+ static bool HasBody(MessageType type) { + return type != MessageType::NONE && type != MessageType::SCHEMA; + } + + private: + // Hide serialization details from user API + class MessageImpl; + std::unique_ptr impl_; + + ARROW_DISALLOW_COPY_AND_ASSIGN(Message); +}; + +ARROW_EXPORT std::string FormatMessageType(MessageType type); + +/// \class MessageDecoderListener +/// \brief An abstract class to listen events from MessageDecoder. +/// +/// This API is EXPERIMENTAL. +/// +/// \since 0.17.0 +class ARROW_EXPORT MessageDecoderListener { + public: + virtual ~MessageDecoderListener() = default; + + /// \brief Called when a message is decoded. + /// + /// MessageDecoder calls this method when it decodes a message. This + /// method is called multiple times when the target stream has + /// multiple messages. + /// + /// \param[in] message a decoded message + /// \return Status + virtual Status OnMessageDecoded(std::unique_ptr message) = 0; + + /// \brief Called when the decoder state is changed to + /// MessageDecoder::State::INITIAL. + /// + /// The default implementation just returns arrow::Status::OK(). + /// + /// \return Status + virtual Status OnInitial(); + + /// \brief Called when the decoder state is changed to + /// MessageDecoder::State::METADATA_LENGTH. + /// + /// The default implementation just returns arrow::Status::OK(). + /// + /// \return Status + virtual Status OnMetadataLength(); + + /// \brief Called when the decoder state is changed to + /// MessageDecoder::State::METADATA. + /// + /// The default implementation just returns arrow::Status::OK(). + /// + /// \return Status + virtual Status OnMetadata(); + + /// \brief Called when the decoder state is changed to + /// MessageDecoder::State::BODY. + /// + /// The default implementation just returns arrow::Status::OK(). + /// + /// \return Status + virtual Status OnBody(); + + /// \brief Called when the decoder state is changed to + /// MessageDecoder::State::EOS. 
+ /// + /// The default implementation just returns arrow::Status::OK(). + /// + /// \return Status + virtual Status OnEOS(); +}; + +/// \class AssignMessageDecoderListener +/// \brief Assign a message decoded by MessageDecoder. +/// +/// This API is EXPERIMENTAL. +/// +/// \since 0.17.0 +class ARROW_EXPORT AssignMessageDecoderListener : public MessageDecoderListener { + public: + /// \brief Construct a listener that assigns a decoded message to the + /// specified location. + /// + /// \param[in] message a location to store the received message + explicit AssignMessageDecoderListener(std::unique_ptr* message) + : message_(message) {} + + virtual ~AssignMessageDecoderListener() = default; + + Status OnMessageDecoded(std::unique_ptr message) override { + *message_ = std::move(message); + return Status::OK(); + } + + private: + std::unique_ptr* message_; + + ARROW_DISALLOW_COPY_AND_ASSIGN(AssignMessageDecoderListener); +}; + +/// \class MessageDecoder +/// \brief Push style message decoder that receives data from user. +/// +/// This API is EXPERIMENTAL. +/// +/// \since 0.17.0 +class ARROW_EXPORT MessageDecoder { + public: + /// \brief State for reading a message + enum State { + /// The initial state. It requires one of the followings as the next data: + /// + /// * int32_t continuation token + /// * int32_t end-of-stream mark (== 0) + /// * int32_t metadata length (backward compatibility for + /// reading old IPC messages produced prior to version 0.15.0 + INITIAL, + + /// It requires int32_t metadata length. + METADATA_LENGTH, + + /// It requires metadata. + METADATA, + + /// It requires message body. + BODY, + + /// The end-of-stream state. No more data is processed. + EOS, + }; + + /// \brief Construct a message decoder. 
+ /// + /// \param[in] listener a MessageDecoderListener that responds events from + /// the decoder + /// \param[in] pool an optional MemoryPool to copy metadata on the + /// \param[in] skip_body if true the body will be skipped even if the message has a body + /// CPU, if required + explicit MessageDecoder(std::shared_ptr listener, + MemoryPool* pool = default_memory_pool(), + bool skip_body = false); + + /// \brief Construct a message decoder with the specified state. + /// + /// This is a construct for advanced users that know how to decode + /// Message. + /// + /// \param[in] listener a MessageDecoderListener that responds events from + /// the decoder + /// \param[in] initial_state an initial state of the decode + /// \param[in] initial_next_required_size the number of bytes needed + /// to run the next action + /// \param[in] pool an optional MemoryPool to copy metadata on the + /// CPU, if required + /// \param[in] skip_body if true the body will be skipped even if the message has a body + MessageDecoder(std::shared_ptr listener, State initial_state, + int64_t initial_next_required_size, + MemoryPool* pool = default_memory_pool(), bool skip_body = false); + + virtual ~MessageDecoder(); + + /// \brief Feed data to the decoder as a raw data. + /// + /// If the decoder can decode one or more messages by the data, the + /// decoder calls listener->OnMessageDecoded() with a decoded + /// message multiple times. + /// + /// If the state of the decoder is changed, corresponding callbacks + /// on listener is called: + /// + /// * MessageDecoder::State::INITIAL: listener->OnInitial() + /// * MessageDecoder::State::METADATA_LENGTH: listener->OnMetadataLength() + /// * MessageDecoder::State::METADATA: listener->OnMetadata() + /// * MessageDecoder::State::BODY: listener->OnBody() + /// * MessageDecoder::State::EOS: listener->OnEOS() + /// + /// \param[in] data a raw data to be processed. This data isn't + /// copied. 
The passed memory must be kept alive through message + /// processing. + /// \param[in] size raw data size. + /// \return Status + Status Consume(const uint8_t* data, int64_t size); + + /// \brief Feed data to the decoder as a Buffer. + /// + /// If the decoder can decode one or more messages by the Buffer, + /// the decoder calls listener->OnMessageDecoded() with a decoded + /// message multiple times. + /// + /// \param[in] buffer a Buffer to be processed. + /// \return Status + Status Consume(std::shared_ptr buffer); + + /// \brief Return the number of bytes needed to advance the state of + /// the decoder. + /// + /// This method is provided for users who want to optimize performance. + /// Normal users don't need to use this method. + /// + /// Here is an example usage for normal users: + /// + /// ~~~{.cpp} + /// decoder.Consume(buffer1); + /// decoder.Consume(buffer2); + /// decoder.Consume(buffer3); + /// ~~~ + /// + /// Decoder has internal buffer. If consumed data isn't enough to + /// advance the state of the decoder, consumed data is buffered to + /// the internal buffer. It causes performance overhead. + /// + /// If you pass next_required_size() size data to each Consume() + /// call, the decoder doesn't use its internal buffer. It improves + /// performance. + /// + /// Here is an example usage to avoid using internal buffer: + /// + /// ~~~{.cpp} + /// buffer1 = get_data(decoder.next_required_size()); + /// decoder.Consume(buffer1); + /// buffer2 = get_data(decoder.next_required_size()); + /// decoder.Consume(buffer2); + /// ~~~ + /// + /// Users can use this method to avoid creating small + /// chunks. Message body must be contiguous data. If users pass + /// small chunks to the decoder, the decoder needs concatenate small + /// chunks internally. It causes performance overhead. 
+ /// + /// Here is an example usage to reduce small chunks: + /// + /// ~~~{.cpp} + /// buffer = AllocateResizableBuffer(); + /// while ((small_chunk = get_data(&small_chunk_size))) { + /// auto current_buffer_size = buffer->size(); + /// buffer->Resize(current_buffer_size + small_chunk_size); + /// memcpy(buffer->mutable_data() + current_buffer_size, + /// small_chunk, + /// small_chunk_size); + /// if (buffer->size() < decoder.next_required_size()) { + /// continue; + /// } + /// std::shared_ptr chunk(buffer.release()); + /// decoder.Consume(chunk); + /// buffer = AllocateResizableBuffer(); + /// } + /// if (buffer->size() > 0) { + /// std::shared_ptr chunk(buffer.release()); + /// decoder.Consume(chunk); + /// } + /// ~~~ + /// + /// \return the number of bytes needed to advance the state of the + /// decoder + int64_t next_required_size() const; + + /// \brief Return the current state of the decoder. + /// + /// This method is provided for users who want to optimize performance. + /// Normal users don't need to use this method. + /// + /// Decoder doesn't need Buffer to process data on the + /// MessageDecoder::State::INITIAL state and the + /// MessageDecoder::State::METADATA_LENGTH. Creating Buffer has + /// performance overhead. 
Advanced users can avoid creating Buffer + /// by checking the current state of the decoder: + /// + /// ~~~{.cpp} + /// switch (decoder.state()) { + /// MessageDecoder::State::INITIAL: + /// MessageDecoder::State::METADATA_LENGTH: + /// { + /// uint8_t data[sizeof(int32_t)]; + /// auto data_size = input->Read(decoder.next_required_size(), data); + /// decoder.Consume(data, data_size); + /// } + /// break; + /// default: + /// { + /// auto buffer = input->Read(decoder.next_required_size()); + /// decoder.Consume(buffer); + /// } + /// break; + /// } + /// ~~~ + /// + /// \return the current state + State state() const; + + private: + class MessageDecoderImpl; + std::unique_ptr impl_; + + ARROW_DISALLOW_COPY_AND_ASSIGN(MessageDecoder); +}; + +/// \brief Abstract interface for a sequence of messages +/// \since 0.5.0 +class ARROW_EXPORT MessageReader { + public: + virtual ~MessageReader() = default; + + /// \brief Create MessageReader that reads from InputStream + static std::unique_ptr Open(io::InputStream* stream); + + /// \brief Create MessageReader that reads from owned InputStream + static std::unique_ptr Open( + const std::shared_ptr& owned_stream); + + /// \brief Read next Message from the interface + /// + /// \return an arrow::ipc::Message instance + virtual Result> ReadNextMessage() = 0; +}; + +// the first parameter of the function should be a pointer to metadata (aka. +// org::apache::arrow::flatbuf::RecordBatch*) +using FieldsLoaderFunction = std::function; + +/// \brief Read encapsulated RPC message from position in file +/// +/// Read a length-prefixed message flatbuffer starting at the indicated file +/// offset. If the message has a body with non-zero length, it will also be +/// read +/// +/// The metadata_length includes at least the length prefix and the flatbuffer +/// +/// \param[in] offset the position in the file where the message starts. 
The +/// first 4 bytes after the offset are the message length +/// \param[in] metadata_length the total number of bytes to read from file +/// \param[in] file the seekable file interface to read from +/// \param[in] fields_loader the function for loading subset of fields from the given file +/// \return the message read + +ARROW_EXPORT +Result> ReadMessage( + const int64_t offset, const int32_t metadata_length, io::RandomAccessFile* file, + const FieldsLoaderFunction& fields_loader = {}); + +/// \brief Read encapsulated RPC message from cached buffers +/// +/// The buffers should contain an entire message. Partial reads are not handled. +/// +/// This method can be used to read just the metadata by passing in a nullptr for the +/// body. The body will then be skipped and the body size will not be validated. +/// +/// If the body buffer is provided then it must be the complete body buffer +/// +/// This is similar to Message::Open but performs slightly more validation (e.g. checks +/// to see that the metadata length is correct and that the body is the size the metadata +/// expected) +/// +/// \param metadata The bytes for the metadata +/// \param body The bytes for the body +/// \return The message represented by the buffers +ARROW_EXPORT Result> ReadMessage( + std::shared_ptr metadata, std::shared_ptr body); + +ARROW_EXPORT +Future> ReadMessageAsync( + const int64_t offset, const int32_t metadata_length, const int64_t body_length, + io::RandomAccessFile* file, const io::IOContext& context = io::default_io_context()); + +/// \brief Advance stream to an 8-byte offset if its position is not a multiple +/// of 8 already +/// \param[in] stream an input stream +/// \param[in] alignment the byte multiple for the metadata prefix, usually 8 +/// or 64, to ensure the body starts on a multiple of that alignment +/// \return Status +ARROW_EXPORT +Status AlignStream(io::InputStream* stream, int32_t alignment = 8); + +/// \brief Advance stream to an 8-byte offset if its 
position is not a multiple +/// of 8 already +/// \param[in] stream an output stream +/// \param[in] alignment the byte multiple for the metadata prefix, usually 8 +/// or 64, to ensure the body starts on a multiple of that alignment +/// \return Status +ARROW_EXPORT +Status AlignStream(io::OutputStream* stream, int32_t alignment = 8); + +/// \brief Return error Status if file position is not a multiple of the +/// indicated alignment +ARROW_EXPORT +Status CheckAligned(io::FileInterface* stream, int32_t alignment = 8); + +/// \brief Read encapsulated IPC message (metadata and body) from InputStream +/// +/// Returns null if there are not enough bytes available or the +/// message length is 0 (e.g. EOS in a stream) +/// +/// \param[in] stream an input stream +/// \param[in] pool an optional MemoryPool to copy metadata on the CPU, if required +/// \return Message +ARROW_EXPORT +Result> ReadMessage(io::InputStream* stream, + MemoryPool* pool = default_memory_pool()); + +/// \brief Feed data from InputStream to MessageDecoder to decode an +/// encapsulated IPC message (metadata and body) +/// +/// This API is EXPERIMENTAL. +/// +/// \param[in] decoder a decoder +/// \param[in] stream an input stream +/// \return Status +/// +/// \since 0.17.0 +ARROW_EXPORT +Status DecodeMessage(MessageDecoder* decoder, io::InputStream* stream); + +/// Write encapsulated IPC message Does not make assumptions about +/// whether the stream is aligned already. 
Can write legacy (pre +/// version 0.15.0) IPC message if option set +/// +/// continuation: 0xFFFFFFFF +/// message_size: int32 +/// message: const void* +/// padding +/// +/// +/// \param[in] message a buffer containing the metadata to write +/// \param[in] options IPC writing options, including alignment and +/// legacy message support +/// \param[in,out] file the OutputStream to write to +/// \param[out] message_length the total size of the payload written including +/// padding +/// \return Status +Status WriteMessage(const Buffer& message, const IpcWriteOptions& options, + io::OutputStream* file, int32_t* message_length); + +} // namespace ipc +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/options.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/options.h new file mode 100644 index 0000000000000000000000000000000000000000..48b6758212bd5370aa2ff48f095080c92f60b086 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/options.h @@ -0,0 +1,178 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include + +#include "arrow/io/caching.h" +#include "arrow/ipc/type_fwd.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/compression.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class MemoryPool; + +namespace ipc { + +// ARROW-109: We set this number arbitrarily to help catch user mistakes. For +// deeply nested schemas, it is expected the user will indicate explicitly the +// maximum allowed recursion depth +constexpr int kMaxNestingDepth = 64; + +/// \brief Options for writing Arrow IPC messages +struct ARROW_EXPORT IpcWriteOptions { + /// \brief If true, allow field lengths that don't fit in a signed 32-bit int. + /// + /// Some implementations may not be able to parse streams created with this option. + bool allow_64bit = false; + + /// \brief The maximum permitted schema nesting depth. + int max_recursion_depth = kMaxNestingDepth; + + /// \brief Write padding after memory buffers up to this multiple of bytes. + int32_t alignment = 8; + + /// \brief Write the pre-0.15.0 IPC message format + /// + /// This legacy format consists of a 4-byte prefix instead of 8-byte. + bool write_legacy_ipc_format = false; + + /// \brief The memory pool to use for allocations made during IPC writing + /// + /// While Arrow IPC is predominantly zero-copy, it may have to allocate + /// memory in some cases (for example if compression is enabled). + MemoryPool* memory_pool = default_memory_pool(); + + /// \brief Compression codec to use for record batch body buffers + /// + /// May only be UNCOMPRESSED, LZ4_FRAME and ZSTD. + std::shared_ptr codec; + + /// \brief Minimum space savings percentage required for compression to be applied + /// + /// Space savings is calculated as (1.0 - compressed_size / uncompressed_size). + /// + /// For example, if min_space_savings = 0.1, a 100-byte body buffer won't undergo + /// compression if its expected compressed size exceeds 90 bytes. 
If this option is + /// unset, compression will be used indiscriminately. If no codec was supplied, this + /// option is ignored. + /// + /// Values outside of the range [0,1] are handled as errors. + /// + /// Note that enabling this option may result in unreadable data for Arrow C++ versions + /// prior to 12.0.0. + std::optional min_space_savings; + + /// \brief Use global CPU thread pool to parallelize any computational tasks + /// like compression + bool use_threads = true; + + /// \brief Whether to emit dictionary deltas + /// + /// If false, a changed dictionary for a given field will emit a full + /// dictionary replacement. + /// If true, a changed dictionary will be compared against the previous + /// version. If possible, a dictionary delta will be emitted, otherwise + /// a full dictionary replacement. + /// + /// Default is false to maximize stream compatibility. + /// + /// Also, note that if a changed dictionary is a nested dictionary, + /// then a delta is never emitted, for compatibility with the read path. + bool emit_dictionary_deltas = false; + + /// \brief Whether to unify dictionaries for the IPC file format + /// + /// The IPC file format doesn't support dictionary replacements. + /// Therefore, chunks of a column with a dictionary type must have the same + /// dictionary in each record batch (or an extended dictionary + delta). + /// + /// If this option is true, RecordBatchWriter::WriteTable will attempt + /// to unify dictionaries across each table column. If this option is + /// false, incompatible dictionaries across a table column will simply + /// raise an error. + /// + /// Note that enabling this option has a runtime cost. Also, not all types + /// currently support dictionary unification. + /// + /// This option is ignored for IPC streams, which support dictionary replacement + /// and deltas. + bool unify_dictionaries = false; + + /// \brief Format version to use for IPC messages and their metadata. 
+ /// + /// Presently using V5 version (readable by 1.0.0 and later). + /// V4 is also available (readable by 0.8.0 and later). + MetadataVersion metadata_version = MetadataVersion::V5; + + static IpcWriteOptions Defaults(); +}; + +/// \brief Options for reading Arrow IPC messages +struct ARROW_EXPORT IpcReadOptions { + /// \brief The maximum permitted schema nesting depth. + int max_recursion_depth = kMaxNestingDepth; + + /// \brief The memory pool to use for allocations made during IPC reading + /// + /// While Arrow IPC is predominantly zero-copy, it may have to allocate + /// memory in some cases (for example if compression is enabled). + MemoryPool* memory_pool = default_memory_pool(); + + /// \brief Top-level schema fields to include when deserializing RecordBatch. + /// + /// If empty (the default), return all deserialized fields. + /// If non-empty, the values are the indices of fields in the top-level schema. + std::vector included_fields; + + /// \brief Use global CPU thread pool to parallelize any computational tasks + /// like decompression + bool use_threads = true; + + /// \brief Whether to convert incoming data to platform-native endianness + /// + /// If the endianness of the received schema is not equal to platform-native + /// endianness, then all buffers with endian-sensitive data will be byte-swapped. + /// This includes the value buffers of numeric types, temporal types, decimal + /// types, as well as the offset buffers of variable-sized binary and list-like + /// types. + /// + /// Endianness conversion is achieved by the RecordBatchFileReader, + /// RecordBatchStreamReader and StreamDecoder classes. 
+ bool ensure_native_endian = true; + + /// \brief Options to control caching behavior when pre-buffering is requested + /// + /// The lazy property will always be reset to true to deliver the expected behavior + io::CacheOptions pre_buffer_cache_options = io::CacheOptions::LazyDefaults(); + + static IpcReadOptions Defaults(); +}; + +namespace internal { + +Status CheckCompressionSupported(Compression::type codec); + +} // namespace internal +} // namespace ipc +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/reader.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/reader.h new file mode 100644 index 0000000000000000000000000000000000000000..888f59a627771b4591d2eb030483b70a49630999 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/reader.h @@ -0,0 +1,638 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Read Arrow files and streams + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/io/caching.h" +#include "arrow/io/type_fwd.h" +#include "arrow/ipc/message.h" +#include "arrow/ipc/options.h" +#include "arrow/record_batch.h" +#include "arrow/result.h" +#include "arrow/type_fwd.h" +#include "arrow/util/async_generator.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace ipc { + +class DictionaryMemo; +struct IpcPayload; + +using RecordBatchReader = ::arrow::RecordBatchReader; + +struct ReadStats { + /// Number of IPC messages read. + int64_t num_messages = 0; + /// Number of record batches read. + int64_t num_record_batches = 0; + /// Number of dictionary batches read. + /// + /// Note: num_dictionary_batches >= num_dictionary_deltas + num_replaced_dictionaries + int64_t num_dictionary_batches = 0; + + /// Number of dictionary deltas read. + int64_t num_dictionary_deltas = 0; + /// Number of replaced dictionaries (i.e. where a dictionary batch replaces + /// an existing dictionary with an unrelated new dictionary). + int64_t num_replaced_dictionaries = 0; +}; + +/// \brief Synchronous batch stream reader that reads from io::InputStream +/// +/// This class reads the schema (plus any dictionaries) as the first messages +/// in the stream, followed by record batches. For more granular zero-copy +/// reads see the ReadRecordBatch functions +class ARROW_EXPORT RecordBatchStreamReader : public RecordBatchReader { + public: + /// Create batch reader from generic MessageReader. + /// This will take ownership of the given MessageReader. 
+ /// + /// \param[in] message_reader a MessageReader implementation + /// \param[in] options any IPC reading options (optional) + /// \return the created batch reader + static Result> Open( + std::unique_ptr message_reader, + const IpcReadOptions& options = IpcReadOptions::Defaults()); + + /// \brief Record batch stream reader from InputStream + /// + /// \param[in] stream an input stream instance. Must stay alive throughout + /// lifetime of stream reader + /// \param[in] options any IPC reading options (optional) + /// \return the created batch reader + static Result> Open( + io::InputStream* stream, + const IpcReadOptions& options = IpcReadOptions::Defaults()); + + /// \brief Open stream and retain ownership of stream object + /// \param[in] stream the input stream + /// \param[in] options any IPC reading options (optional) + /// \return the created batch reader + static Result> Open( + const std::shared_ptr& stream, + const IpcReadOptions& options = IpcReadOptions::Defaults()); + + /// \brief Return current read statistics + virtual ReadStats stats() const = 0; +}; + +/// \brief Reads the record batch file format +class ARROW_EXPORT RecordBatchFileReader + : public std::enable_shared_from_this { + public: + virtual ~RecordBatchFileReader() = default; + + /// \brief Open a RecordBatchFileReader + /// + /// Open a file-like object that is assumed to be self-contained; i.e., the + /// end of the file interface is the end of the Arrow file. Note that there + /// can be any amount of data preceding the Arrow-formatted data, because we + /// need only locate the end of the Arrow file stream to discover the metadata + /// and then proceed to read the data into memory. 
+ static Result> Open( + io::RandomAccessFile* file, + const IpcReadOptions& options = IpcReadOptions::Defaults()); + + /// \brief Open a RecordBatchFileReader + /// If the file is embedded within some larger file or memory region, you can + /// pass the absolute memory offset to the end of the file (which contains the + /// metadata footer). The metadata must have been written with memory offsets + /// relative to the start of the containing file + /// + /// \param[in] file the data source + /// \param[in] footer_offset the position of the end of the Arrow file + /// \param[in] options options for IPC reading + /// \return the returned reader + static Result> Open( + io::RandomAccessFile* file, int64_t footer_offset, + const IpcReadOptions& options = IpcReadOptions::Defaults()); + + /// \brief Version of Open that retains ownership of file + /// + /// \param[in] file the data source + /// \param[in] options options for IPC reading + /// \return the returned reader + static Result> Open( + const std::shared_ptr& file, + const IpcReadOptions& options = IpcReadOptions::Defaults()); + + /// \brief Version of Open that retains ownership of file + /// + /// \param[in] file the data source + /// \param[in] footer_offset the position of the end of the Arrow file + /// \param[in] options options for IPC reading + /// \return the returned reader + static Result> Open( + const std::shared_ptr& file, int64_t footer_offset, + const IpcReadOptions& options = IpcReadOptions::Defaults()); + + /// \brief Open a file asynchronously (owns the file). + static Future> OpenAsync( + const std::shared_ptr& file, + const IpcReadOptions& options = IpcReadOptions::Defaults()); + + /// \brief Open a file asynchronously (borrows the file). + static Future> OpenAsync( + io::RandomAccessFile* file, + const IpcReadOptions& options = IpcReadOptions::Defaults()); + + /// \brief Open a file asynchronously (owns the file). 
+ static Future> OpenAsync( + const std::shared_ptr& file, int64_t footer_offset, + const IpcReadOptions& options = IpcReadOptions::Defaults()); + + /// \brief Open a file asynchronously (borrows the file). + static Future> OpenAsync( + io::RandomAccessFile* file, int64_t footer_offset, + const IpcReadOptions& options = IpcReadOptions::Defaults()); + + /// \brief The schema read from the file + virtual std::shared_ptr schema() const = 0; + + /// \brief Returns the number of record batches in the file + virtual int num_record_batches() const = 0; + + /// \brief Return the metadata version from the file metadata + virtual MetadataVersion version() const = 0; + + /// \brief Return the contents of the custom_metadata field from the file's + /// Footer + virtual std::shared_ptr metadata() const = 0; + + /// \brief Read a particular record batch from the file. Does not copy memory + /// if the input source supports zero-copy. + /// + /// \param[in] i the index of the record batch to return + /// \return the read batch + virtual Result> ReadRecordBatch(int i) = 0; + + /// \brief Read a particular record batch along with its custom metadata from the file. + /// Does not copy memory if the input source supports zero-copy. + /// + /// \param[in] i the index of the record batch to return + /// \return a struct containing the read batch and its custom metadata + virtual Result ReadRecordBatchWithCustomMetadata(int i) = 0; + + /// \brief Return current read statistics + virtual ReadStats stats() const = 0; + + /// \brief Computes the total number of rows in the file. + virtual Result CountRows() = 0; + + /// \brief Begin loading metadata for the desired batches into memory. + /// + /// This method will also begin loading all dictionaries messages into memory. + /// + /// For a regular file this will immediately begin disk I/O in the background on a + /// thread on the IOContext's thread pool. 
If the file is memory mapped this will + /// ensure the memory needed for the metadata is paged from disk into memory + /// + /// \param indices Indices of the batches to prefetch + /// If empty then all batches will be prefetched. + virtual Status PreBufferMetadata(const std::vector& indices) = 0; + + /// \brief Get a reentrant generator of record batches. + /// + /// \param[in] coalesce If true, enable I/O coalescing. + /// \param[in] io_context The IOContext to use (controls which thread pool + /// is used for I/O). + /// \param[in] cache_options Options for coalescing (if enabled). + /// \param[in] executor Optionally, an executor to use for decoding record + /// batches. This is generally only a benefit for very wide and/or + /// compressed batches. + virtual Result>> GetRecordBatchGenerator( + const bool coalesce = false, + const io::IOContext& io_context = io::default_io_context(), + const io::CacheOptions cache_options = io::CacheOptions::LazyDefaults(), + arrow::internal::Executor* executor = NULLPTR) = 0; + + /// \brief Collect all batches as a vector of record batches + Result ToRecordBatches(); + + /// \brief Collect all batches and concatenate as arrow::Table + Result> ToTable(); +}; + +/// \brief A general listener class to receive events. +/// +/// You must implement callback methods for interested events. +/// +/// This API is EXPERIMENTAL. +/// +/// \since 0.17.0 +class ARROW_EXPORT Listener { + public: + virtual ~Listener() = default; + + /// \brief Called when end-of-stream is received. + /// + /// The default implementation just returns arrow::Status::OK(). + /// + /// \return Status + /// + /// \see StreamDecoder + virtual Status OnEOS(); + + /// \brief Called when a record batch is decoded and + /// OnRecordBatchWithMetadataDecoded() isn't overridden. + /// + /// The default implementation just returns + /// arrow::Status::NotImplemented(). 
+ /// + /// \param[in] record_batch a record batch decoded + /// \return Status + /// + /// \see StreamDecoder + virtual Status OnRecordBatchDecoded(std::shared_ptr record_batch); + + /// \brief Called when a record batch with custom metadata is decoded. + /// + /// The default implementation just calls OnRecordBatchDecoded() + /// without custom metadata. + /// + /// \param[in] record_batch_with_metadata a record batch with custom + /// metadata decoded + /// \return Status + /// + /// \see StreamDecoder + /// + /// \since 13.0.0 + virtual Status OnRecordBatchWithMetadataDecoded( + RecordBatchWithMetadata record_batch_with_metadata); + + /// \brief Called when a schema is decoded. + /// + /// The default implementation just returns arrow::Status::OK(). + /// + /// \param[in] schema a schema decoded + /// \return Status + /// + /// \see StreamDecoder + virtual Status OnSchemaDecoded(std::shared_ptr schema); + + /// \brief Called when a schema is decoded. + /// + /// The default implementation just calls OnSchemaDecoded(schema) + /// (without filtered_schema) to keep backward compatibility. + /// + /// \param[in] schema a schema decoded + /// \param[in] filtered_schema a filtered schema that only has read fields + /// \return Status + /// + /// \see StreamDecoder + /// + /// \since 13.0.0 + virtual Status OnSchemaDecoded(std::shared_ptr schema, + std::shared_ptr filtered_schema); +}; + +/// \brief Collect schema and record batches decoded by StreamDecoder. +/// +/// This API is EXPERIMENTAL. 
+/// +/// \since 0.17.0 +class ARROW_EXPORT CollectListener : public Listener { + public: + CollectListener() : schema_(), filtered_schema_(), record_batches_(), metadatas_() {} + virtual ~CollectListener() = default; + + Status OnSchemaDecoded(std::shared_ptr schema, + std::shared_ptr filtered_schema) override { + schema_ = std::move(schema); + filtered_schema_ = std::move(filtered_schema); + return Status::OK(); + } + + Status OnRecordBatchWithMetadataDecoded( + RecordBatchWithMetadata record_batch_with_metadata) override { + record_batches_.push_back(std::move(record_batch_with_metadata.batch)); + metadatas_.push_back(std::move(record_batch_with_metadata.custom_metadata)); + return Status::OK(); + } + + /// \return the decoded schema + std::shared_ptr schema() const { return schema_; } + + /// \return the filtered schema + std::shared_ptr filtered_schema() const { return filtered_schema_; } + + /// \return the all decoded record batches + const std::vector>& record_batches() const { + return record_batches_; + } + + /// \return the all decoded metadatas + const std::vector>& metadatas() const { + return metadatas_; + } + + /// \return the number of collected record batches + int64_t num_record_batches() const { return record_batches_.size(); } + + /// \return the last decoded record batch and remove it from + /// record_batches + std::shared_ptr PopRecordBatch() { + auto record_batch_with_metadata = PopRecordBatchWithMetadata(); + return std::move(record_batch_with_metadata.batch); + } + + /// \return the last decoded record batch with custom metadata and + /// remove it from record_batches + RecordBatchWithMetadata PopRecordBatchWithMetadata() { + RecordBatchWithMetadata record_batch_with_metadata; + if (record_batches_.empty()) { + return record_batch_with_metadata; + } + record_batch_with_metadata.batch = std::move(record_batches_.back()); + record_batch_with_metadata.custom_metadata = std::move(metadatas_.back()); + record_batches_.pop_back(); + 
metadatas_.pop_back(); + return record_batch_with_metadata; + } + + private: + std::shared_ptr schema_; + std::shared_ptr filtered_schema_; + std::vector> record_batches_; + std::vector> metadatas_; +}; + +/// \brief Push style stream decoder that receives data from user. +/// +/// This class decodes the Apache Arrow IPC streaming format data. +/// +/// This API is EXPERIMENTAL. +/// +/// \see https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format +/// +/// \since 0.17.0 +class ARROW_EXPORT StreamDecoder { + public: + /// \brief Construct a stream decoder. + /// + /// \param[in] listener a Listener that must implement + /// Listener::OnRecordBatchDecoded() to receive decoded record batches + /// \param[in] options any IPC reading options (optional) + StreamDecoder(std::shared_ptr listener, + IpcReadOptions options = IpcReadOptions::Defaults()); + + virtual ~StreamDecoder(); + + /// \brief Feed data to the decoder as a raw data. + /// + /// If the decoder can read one or more record batches by the data, + /// the decoder calls listener->OnRecordBatchDecoded() with a + /// decoded record batch multiple times. + /// + /// \param[in] data a raw data to be processed. This data isn't + /// copied. The passed memory must be kept alive through record + /// batch processing. + /// \param[in] size raw data size. + /// \return Status + Status Consume(const uint8_t* data, int64_t size); + + /// \brief Feed data to the decoder as a Buffer. + /// + /// If the decoder can read one or more record batches by the + /// Buffer, the decoder calls listener->RecordBatchReceived() with a + /// decoded record batch multiple times. + /// + /// \param[in] buffer a Buffer to be processed. + /// \return Status + Status Consume(std::shared_ptr buffer); + + /// \brief Reset the internal status. + /// + /// You can reuse this decoder for new stream after calling + /// this. 
+ /// + /// \return Status + Status Reset(); + + /// \return the shared schema of the record batches in the stream + std::shared_ptr schema() const; + + /// \brief Return the number of bytes needed to advance the state of + /// the decoder. + /// + /// This method is provided for users who want to optimize performance. + /// Normal users don't need to use this method. + /// + /// Here is an example usage for normal users: + /// + /// ~~~{.cpp} + /// decoder.Consume(buffer1); + /// decoder.Consume(buffer2); + /// decoder.Consume(buffer3); + /// ~~~ + /// + /// Decoder has internal buffer. If consumed data isn't enough to + /// advance the state of the decoder, consumed data is buffered to + /// the internal buffer. It causes performance overhead. + /// + /// If you pass next_required_size() size data to each Consume() + /// call, the decoder doesn't use its internal buffer. It improves + /// performance. + /// + /// Here is an example usage to avoid using internal buffer: + /// + /// ~~~{.cpp} + /// buffer1 = get_data(decoder.next_required_size()); + /// decoder.Consume(buffer1); + /// buffer2 = get_data(decoder.next_required_size()); + /// decoder.Consume(buffer2); + /// ~~~ + /// + /// Users can use this method to avoid creating small chunks. Record + /// batch data must be contiguous data. If users pass small chunks + /// to the decoder, the decoder needs concatenate small chunks + /// internally. It causes performance overhead. 
+ /// + /// Here is an example usage to reduce small chunks: + /// + /// ~~~{.cpp} + /// buffer = AllocateResizableBuffer(); + /// while ((small_chunk = get_data(&small_chunk_size))) { + /// auto current_buffer_size = buffer->size(); + /// buffer->Resize(current_buffer_size + small_chunk_size); + /// memcpy(buffer->mutable_data() + current_buffer_size, + /// small_chunk, + /// small_chunk_size); + /// if (buffer->size() < decoder.next_required_size()) { + /// continue; + /// } + /// std::shared_ptr chunk(buffer.release()); + /// decoder.Consume(chunk); + /// buffer = AllocateResizableBuffer(); + /// } + /// if (buffer->size() > 0) { + /// std::shared_ptr chunk(buffer.release()); + /// decoder.Consume(chunk); + /// } + /// ~~~ + /// + /// \return the number of bytes needed to advance the state of the + /// decoder + int64_t next_required_size() const; + + /// \brief Return current read statistics + ReadStats stats() const; + + private: + class StreamDecoderImpl; + std::unique_ptr impl_; + + ARROW_DISALLOW_COPY_AND_ASSIGN(StreamDecoder); +}; + +// Generic read functions; does not copy data if the input supports zero copy reads + +/// \brief Read Schema from stream serialized as a single IPC message +/// and populate any dictionary-encoded fields into a DictionaryMemo +/// +/// \param[in] stream an InputStream +/// \param[in] dictionary_memo for recording dictionary-encoded fields +/// \return the output Schema +/// +/// If record batches follow the schema, it is better to use +/// RecordBatchStreamReader +ARROW_EXPORT +Result> ReadSchema(io::InputStream* stream, + DictionaryMemo* dictionary_memo); + +/// \brief Read Schema from encapsulated Message +/// +/// \param[in] message the message containing the Schema IPC metadata +/// \param[in] dictionary_memo DictionaryMemo for recording dictionary-encoded +/// fields. 
Can be nullptr if you are sure there are no +/// dictionary-encoded fields +/// \return the resulting Schema +ARROW_EXPORT +Result> ReadSchema(const Message& message, + DictionaryMemo* dictionary_memo); + +/// Read record batch as encapsulated IPC message with metadata size prefix and +/// header +/// +/// \param[in] schema the record batch schema +/// \param[in] dictionary_memo DictionaryMemo which has any +/// dictionaries. Can be nullptr if you are sure there are no +/// dictionary-encoded fields +/// \param[in] options IPC options for reading +/// \param[in] stream the file where the batch is located +/// \return the read record batch +ARROW_EXPORT +Result> ReadRecordBatch( + const std::shared_ptr& schema, const DictionaryMemo* dictionary_memo, + const IpcReadOptions& options, io::InputStream* stream); + +/// \brief Read record batch from message +/// +/// \param[in] message a Message containing the record batch metadata +/// \param[in] schema the record batch schema +/// \param[in] dictionary_memo DictionaryMemo which has any +/// dictionaries. Can be nullptr if you are sure there are no +/// dictionary-encoded fields +/// \param[in] options IPC options for reading +/// \return the read record batch +ARROW_EXPORT +Result> ReadRecordBatch( + const Message& message, const std::shared_ptr& schema, + const DictionaryMemo* dictionary_memo, const IpcReadOptions& options); + +/// Read record batch from file given metadata and schema +/// +/// \param[in] metadata a Message containing the record batch metadata +/// \param[in] schema the record batch schema +/// \param[in] dictionary_memo DictionaryMemo which has any +/// dictionaries. 
Can be nullptr if you are sure there are no +/// dictionary-encoded fields +/// \param[in] file a random access file +/// \param[in] options options for deserialization +/// \return the read record batch +ARROW_EXPORT +Result> ReadRecordBatch( + const Buffer& metadata, const std::shared_ptr& schema, + const DictionaryMemo* dictionary_memo, const IpcReadOptions& options, + io::RandomAccessFile* file); + +/// \brief Read arrow::Tensor as encapsulated IPC message in file +/// +/// \param[in] file an InputStream pointed at the start of the message +/// \return the read tensor +ARROW_EXPORT +Result> ReadTensor(io::InputStream* file); + +/// \brief EXPERIMENTAL: Read arrow::Tensor from IPC message +/// +/// \param[in] message a Message containing the tensor metadata and body +/// \return the read tensor +ARROW_EXPORT +Result> ReadTensor(const Message& message); + +/// \brief EXPERIMENTAL: Read arrow::SparseTensor as encapsulated IPC message in file +/// +/// \param[in] file an InputStream pointed at the start of the message +/// \return the read sparse tensor +ARROW_EXPORT +Result> ReadSparseTensor(io::InputStream* file); + +/// \brief EXPERIMENTAL: Read arrow::SparseTensor from IPC message +/// +/// \param[in] message a Message containing the tensor metadata and body +/// \return the read sparse tensor +ARROW_EXPORT +Result> ReadSparseTensor(const Message& message); + +namespace internal { + +// These internal APIs may change without warning or deprecation + +/// \brief EXPERIMENTAL: Read arrow::SparseTensorFormat::type from a metadata +/// \param[in] metadata a Buffer containing the sparse tensor metadata +/// \return the count of the body buffers +ARROW_EXPORT +Result ReadSparseTensorBodyBufferCount(const Buffer& metadata); + +/// \brief EXPERIMENTAL: Read arrow::SparseTensor from an IpcPayload +/// \param[in] payload a IpcPayload contains a serialized SparseTensor +/// \return the read sparse tensor +ARROW_EXPORT +Result> ReadSparseTensorPayload(const IpcPayload& 
payload); + +// For fuzzing targets +ARROW_EXPORT +Status FuzzIpcStream(const uint8_t* data, int64_t size); +ARROW_EXPORT +Status FuzzIpcTensorStream(const uint8_t* data, int64_t size); +ARROW_EXPORT +Status FuzzIpcFile(const uint8_t* data, int64_t size); + +} // namespace internal + +} // namespace ipc +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/test_common.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/test_common.h new file mode 100644 index 0000000000000000000000000000000000000000..189de288795c00a826ce0a57785a8e395dd32e6e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/test_common.h @@ -0,0 +1,192 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include + +#include "arrow/array.h" +#include "arrow/record_batch.h" +#include "arrow/status.h" +#include "arrow/testing/visibility.h" +#include "arrow/type.h" + +namespace arrow { +namespace ipc { +namespace test { + +// A typedef used for test parameterization +typedef Status MakeRecordBatch(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +void CompareArraysDetailed(int index, const Array& result, const Array& expected); + +ARROW_TESTING_EXPORT +void CompareBatchColumnsDetailed(const RecordBatch& result, const RecordBatch& expected); + +ARROW_TESTING_EXPORT +Status MakeRandomInt32Array(int64_t length, bool include_nulls, MemoryPool* pool, + std::shared_ptr* out, uint32_t seed = 0, + int32_t min = 0, int32_t max = 1000); + +ARROW_TESTING_EXPORT +Status MakeRandomInt64Array(int64_t length, bool include_nulls, MemoryPool* pool, + std::shared_ptr* out, uint32_t seed = 0); + +ARROW_TESTING_EXPORT +Status MakeRandomListArray(const std::shared_ptr& child_array, int num_lists, + bool include_nulls, MemoryPool* pool, + std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeRandomLargeListArray(const std::shared_ptr& child_array, int num_lists, + bool include_nulls, MemoryPool* pool, + std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeRandomBooleanArray(const int length, bool include_nulls, + std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeBooleanBatchSized(const int length, std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeBooleanBatch(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeIntBatchSized(int length, std::shared_ptr* out, + uint32_t seed = 0); + +ARROW_TESTING_EXPORT +Status MakeIntRecordBatch(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeFloat3264BatchSized(int length, std::shared_ptr* out, + uint32_t seed = 0); + +ARROW_TESTING_EXPORT +Status MakeFloat3264Batch(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeFloatBatchSized(int length, std::shared_ptr* out, 
+ uint32_t seed = 0); + +ARROW_TESTING_EXPORT +Status MakeFloatBatch(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeRandomStringArray(int64_t length, bool include_nulls, MemoryPool* pool, + std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeStringTypesRecordBatch(std::shared_ptr* out, + bool with_nulls = true, bool with_view_types = true); + +ARROW_TESTING_EXPORT +Status MakeStringTypesRecordBatchWithNulls(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeNullRecordBatch(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeListRecordBatch(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeListViewRecordBatch(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeFixedSizeListRecordBatch(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeZeroLengthRecordBatch(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeNonNullRecordBatch(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeDeeplyNestedList(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeDeeplyNestedListView(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeStruct(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeRunEndEncoded(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeUnion(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeDictionary(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeDictionaryFlat(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeNestedDictionary(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeMap(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeMapOfDictionary(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeDates(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeTimestamps(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeIntervals(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeTimes(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeFWBinary(std::shared_ptr* out); + 
+ARROW_TESTING_EXPORT +Status MakeDecimal(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeNull(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeUuid(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeComplex128(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeDictExtension(std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Status MakeRandomTensor(const std::shared_ptr& type, + const std::vector& shape, bool row_major_p, + std::shared_ptr* out, uint32_t seed = 0); + +ARROW_TESTING_EXPORT Status RoundtripBatch(const std::shared_ptr& batch, + std::shared_ptr* out); + +} // namespace test +} // namespace ipc +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/type_fwd.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..b0d3afa922f789f4f9a8a0b2b435b3ebe0456d42 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/type_fwd.h @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +namespace arrow { +namespace ipc { + +enum class MetadataVersion : char { + /// 0.1.0 + V1, + + /// 0.2.0 + V2, + + /// 0.3.0 to 0.7.1 + V3, + + /// 0.8.0 to 0.17.0 + V4, + + /// >= 1.0.0 + V5 +}; + +class Message; +enum class MessageType { + NONE, + SCHEMA, + DICTIONARY_BATCH, + RECORD_BATCH, + TENSOR, + SPARSE_TENSOR +}; + +struct IpcReadOptions; +struct IpcWriteOptions; + +class MessageReader; + +class RecordBatchStreamReader; +class RecordBatchFileReader; +class RecordBatchWriter; + +class DictionaryFieldMapper; +class DictionaryMemo; + +namespace feather { + +class Reader; + +} // namespace feather +} // namespace ipc +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/util.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/util.h new file mode 100644 index 0000000000000000000000000000000000000000..709fedbf31b0b31585c81b36d5a81db0e5c92754 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/util.h @@ -0,0 +1,41 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include + +namespace arrow { +namespace ipc { + +// Buffers are padded to 64-byte boundaries (for SIMD) +static constexpr int32_t kArrowAlignment = 64; + +// Tensors are padded to 64-byte boundaries +static constexpr int32_t kTensorAlignment = 64; + +// Align on 8-byte boundaries in IPC +static constexpr int32_t kArrowIpcAlignment = 8; + +static constexpr uint8_t kPaddingBytes[kArrowAlignment] = {0}; + +static inline int64_t PaddedLength(int64_t nbytes, int32_t alignment = kArrowAlignment) { + return ((nbytes + alignment - 1) / alignment) * alignment; +} + +} // namespace ipc +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/writer.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/writer.h new file mode 100644 index 0000000000000000000000000000000000000000..aefb59f3136e4c98419799eb31faf9700fc6efd2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/writer.h @@ -0,0 +1,475 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Implement Arrow streaming binary format + +#pragma once + +#include +#include +#include + +#include "arrow/ipc/dictionary.h" // IWYU pragma: export +#include "arrow/ipc/message.h" +#include "arrow/ipc/options.h" +#include "arrow/result.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Array; +class Buffer; +class MemoryManager; +class MemoryPool; +class RecordBatch; +class Schema; +class Status; +class Table; +class Tensor; +class SparseTensor; + +namespace io { + +class OutputStream; + +} // namespace io + +namespace ipc { + +/// \brief Intermediate data structure with metadata header, and zero +/// or more buffers for the message body. +struct IpcPayload { + MessageType type = MessageType::NONE; + std::shared_ptr metadata; + std::vector> body_buffers; + std::vector variadic_buffer_counts; + int64_t body_length = 0; // serialized body length (padded, maybe compressed) + int64_t raw_body_length = 0; // initial uncompressed body length +}; + +struct WriteStats { + /// Number of IPC messages written. + int64_t num_messages = 0; + /// Number of record batches written. + int64_t num_record_batches = 0; + /// Number of dictionary batches written. + /// + /// Note: num_dictionary_batches >= num_dictionary_deltas + num_replaced_dictionaries + int64_t num_dictionary_batches = 0; + + /// Number of dictionary deltas written. + int64_t num_dictionary_deltas = 0; + /// Number of replaced dictionaries (i.e. where a dictionary batch replaces + /// an existing dictionary with an unrelated new dictionary). + int64_t num_replaced_dictionaries = 0; + + /// Total size in bytes of record batches emitted. + /// The "raw" size counts the original buffer sizes, while the "serialized" size + /// includes padding and (optionally) compression. 
+ int64_t total_raw_body_size = 0; + int64_t total_serialized_body_size = 0; +}; + +/// \class RecordBatchWriter +/// \brief Abstract interface for writing a stream of record batches +class ARROW_EXPORT RecordBatchWriter { + public: + virtual ~RecordBatchWriter(); + + /// \brief Write a record batch to the stream + /// + /// \param[in] batch the record batch to write to the stream + /// \return Status + virtual Status WriteRecordBatch(const RecordBatch& batch) = 0; + + /// \brief Write a record batch with custom metadata to the stream + /// + /// \param[in] batch the record batch to write to the stream + /// \param[in] custom_metadata the record batch's custom metadata to write to the stream + /// \return Status + virtual Status WriteRecordBatch( + const RecordBatch& batch, + const std::shared_ptr& custom_metadata); + + /// \brief Write possibly-chunked table by creating sequence of record batches + /// \param[in] table table to write + /// \return Status + Status WriteTable(const Table& table); + + /// \brief Write Table with a particular chunksize + /// \param[in] table table to write + /// \param[in] max_chunksize maximum number of rows for table chunks. To + /// indicate that no maximum should be enforced, pass -1. + /// \return Status + virtual Status WriteTable(const Table& table, int64_t max_chunksize); + + /// \brief Perform any logic necessary to finish the stream + /// + /// \return Status + virtual Status Close() = 0; + + /// \brief Return current write statistics + virtual WriteStats stats() const = 0; +}; + +/// \defgroup record-batch-writer-factories Functions for creating RecordBatchWriter +/// instances +/// +/// @{ + +/// Create a new IPC stream writer from stream sink and schema. User is +/// responsible for closing the actual OutputStream. 
+/// +/// \param[in] sink output stream to write to +/// \param[in] schema the schema of the record batches to be written +/// \param[in] options options for serialization +/// \return Result> +ARROW_EXPORT +Result> MakeStreamWriter( + io::OutputStream* sink, const std::shared_ptr& schema, + const IpcWriteOptions& options = IpcWriteOptions::Defaults()); + +/// Create a new IPC stream writer from stream sink and schema. User is +/// responsible for closing the actual OutputStream. +/// +/// \param[in] sink output stream to write to +/// \param[in] schema the schema of the record batches to be written +/// \param[in] options options for serialization +/// \return Result> +ARROW_EXPORT +Result> MakeStreamWriter( + std::shared_ptr sink, const std::shared_ptr& schema, + const IpcWriteOptions& options = IpcWriteOptions::Defaults()); + +/// Create a new IPC file writer from stream sink and schema +/// +/// \param[in] sink output stream to write to +/// \param[in] schema the schema of the record batches to be written +/// \param[in] options options for serialization, optional +/// \param[in] metadata custom metadata for File Footer, optional +/// \return Result> +ARROW_EXPORT +Result> MakeFileWriter( + io::OutputStream* sink, const std::shared_ptr& schema, + const IpcWriteOptions& options = IpcWriteOptions::Defaults(), + const std::shared_ptr& metadata = NULLPTR); + +/// Create a new IPC file writer from stream sink and schema +/// +/// \param[in] sink output stream to write to +/// \param[in] schema the schema of the record batches to be written +/// \param[in] options options for serialization, optional +/// \param[in] metadata custom metadata for File Footer, optional +/// \return Result> +ARROW_EXPORT +Result> MakeFileWriter( + std::shared_ptr sink, const std::shared_ptr& schema, + const IpcWriteOptions& options = IpcWriteOptions::Defaults(), + const std::shared_ptr& metadata = NULLPTR); + +/// @} + +/// \brief Low-level API for writing a record batch (without schema) 
+/// to an OutputStream as encapsulated IPC message. See Arrow format +/// documentation for more detail. +/// +/// \param[in] batch the record batch to write +/// \param[in] buffer_start_offset the start offset to use in the buffer metadata, +/// generally should be 0 +/// \param[in] dst an OutputStream +/// \param[out] metadata_length the size of the length-prefixed flatbuffer +/// including padding to a 64-byte boundary +/// \param[out] body_length the size of the contiguous buffer block plus +/// \param[in] options options for serialization +/// \return Status +ARROW_EXPORT +Status WriteRecordBatch(const RecordBatch& batch, int64_t buffer_start_offset, + io::OutputStream* dst, int32_t* metadata_length, + int64_t* body_length, const IpcWriteOptions& options); + +/// \brief Serialize record batch as encapsulated IPC message in a new buffer +/// +/// \param[in] batch the record batch +/// \param[in] options the IpcWriteOptions to use for serialization +/// \return the serialized message +ARROW_EXPORT +Result> SerializeRecordBatch(const RecordBatch& batch, + const IpcWriteOptions& options); + +/// \brief Serialize record batch as encapsulated IPC message in a new buffer +/// +/// \param[in] batch the record batch +/// \param[in] mm a MemoryManager to allocate memory from +/// \return the serialized message +ARROW_EXPORT +Result> SerializeRecordBatch(const RecordBatch& batch, + std::shared_ptr mm); + +/// \brief Write record batch to OutputStream +/// +/// \param[in] batch the record batch to write +/// \param[in] options the IpcWriteOptions to use for serialization +/// \param[in] out the OutputStream to write the output to +/// \return Status +/// +/// If writing to pre-allocated memory, you can use +/// arrow::ipc::GetRecordBatchSize to compute how much space is required +ARROW_EXPORT +Status SerializeRecordBatch(const RecordBatch& batch, const IpcWriteOptions& options, + io::OutputStream* out); + +/// \brief Serialize schema as encapsulated IPC message +/// +/// 
\param[in] schema the schema to write +/// \param[in] pool a MemoryPool to allocate memory from +/// \return the serialized schema +ARROW_EXPORT +Result> SerializeSchema(const Schema& schema, + MemoryPool* pool = default_memory_pool()); + +/// \brief Write multiple record batches to OutputStream, including schema +/// \param[in] batches a vector of batches. Must all have same schema +/// \param[in] options options for serialization +/// \param[out] dst an OutputStream +/// \return Status +ARROW_EXPORT +Status WriteRecordBatchStream(const std::vector>& batches, + const IpcWriteOptions& options, io::OutputStream* dst); + +/// \brief Compute the number of bytes needed to write an IPC payload +/// including metadata +/// +/// \param[in] payload the IPC payload to write +/// \param[in] options write options +/// \return the size of the complete encapsulated message +ARROW_EXPORT +int64_t GetPayloadSize(const IpcPayload& payload, + const IpcWriteOptions& options = IpcWriteOptions::Defaults()); + +/// \brief Compute the number of bytes needed to write a record batch including metadata +/// +/// \param[in] batch the record batch to write +/// \param[out] size the size of the complete encapsulated message +/// \return Status +ARROW_EXPORT +Status GetRecordBatchSize(const RecordBatch& batch, int64_t* size); + +/// \brief Compute the number of bytes needed to write a record batch including metadata +/// +/// \param[in] batch the record batch to write +/// \param[in] options options for serialization +/// \param[out] size the size of the complete encapsulated message +/// \return Status +ARROW_EXPORT +Status GetRecordBatchSize(const RecordBatch& batch, const IpcWriteOptions& options, + int64_t* size); + +/// \brief Compute the number of bytes needed to write a tensor including metadata +/// +/// \param[in] tensor the tensor to write +/// \param[out] size the size of the complete encapsulated message +/// \return Status +ARROW_EXPORT +Status GetTensorSize(const Tensor& tensor, 
int64_t* size); + +/// \brief EXPERIMENTAL: Convert arrow::Tensor to a Message with minimal memory +/// allocation +/// +/// \param[in] tensor the Tensor to write +/// \param[in] pool MemoryPool to allocate space for metadata +/// \return the resulting Message +ARROW_EXPORT +Result> GetTensorMessage(const Tensor& tensor, MemoryPool* pool); + +/// \brief Write arrow::Tensor as a contiguous message. +/// +/// The metadata and body are written assuming 64-byte alignment. It is the +/// user's responsibility to ensure that the OutputStream has been aligned +/// to a 64-byte multiple before writing the message. +/// +/// The message is written out as followed: +/// \code +/// +/// \endcode +/// +/// \param[in] tensor the Tensor to write +/// \param[in] dst the OutputStream to write to +/// \param[out] metadata_length the actual metadata length, including padding +/// \param[out] body_length the actual message body length +/// \return Status +ARROW_EXPORT +Status WriteTensor(const Tensor& tensor, io::OutputStream* dst, int32_t* metadata_length, + int64_t* body_length); + +/// \brief EXPERIMENTAL: Convert arrow::SparseTensor to a Message with minimal memory +/// allocation +/// +/// The message is written out as followed: +/// \code +/// +/// \endcode +/// +/// \param[in] sparse_tensor the SparseTensor to write +/// \param[in] pool MemoryPool to allocate space for metadata +/// \return the resulting Message +ARROW_EXPORT +Result> GetSparseTensorMessage(const SparseTensor& sparse_tensor, + MemoryPool* pool); + +/// \brief EXPERIMENTAL: Write arrow::SparseTensor as a contiguous message. The metadata, +/// sparse index, and body are written assuming 64-byte alignment. It is the +/// user's responsibility to ensure that the OutputStream has been aligned +/// to a 64-byte multiple before writing the message. 
+/// +/// \param[in] sparse_tensor the SparseTensor to write +/// \param[in] dst the OutputStream to write to +/// \param[out] metadata_length the actual metadata length, including padding +/// \param[out] body_length the actual message body length +/// \return Status +ARROW_EXPORT +Status WriteSparseTensor(const SparseTensor& sparse_tensor, io::OutputStream* dst, + int32_t* metadata_length, int64_t* body_length); + +/// \brief Compute IpcPayload for the given schema +/// \param[in] schema the Schema that is being serialized +/// \param[in] options options for serialization +/// \param[in] mapper object mapping dictionary fields to dictionary ids +/// \param[out] out the returned vector of IpcPayloads +/// \return Status +ARROW_EXPORT +Status GetSchemaPayload(const Schema& schema, const IpcWriteOptions& options, + const DictionaryFieldMapper& mapper, IpcPayload* out); + +/// \brief Compute IpcPayload for a dictionary +/// \param[in] id the dictionary id +/// \param[in] dictionary the dictionary values +/// \param[in] options options for serialization +/// \param[out] payload the output IpcPayload +/// \return Status +ARROW_EXPORT +Status GetDictionaryPayload(int64_t id, const std::shared_ptr& dictionary, + const IpcWriteOptions& options, IpcPayload* payload); + +/// \brief Compute IpcPayload for a dictionary +/// \param[in] id the dictionary id +/// \param[in] is_delta whether the dictionary is a delta dictionary +/// \param[in] dictionary the dictionary values +/// \param[in] options options for serialization +/// \param[out] payload the output IpcPayload +/// \return Status +ARROW_EXPORT +Status GetDictionaryPayload(int64_t id, bool is_delta, + const std::shared_ptr& dictionary, + const IpcWriteOptions& options, IpcPayload* payload); + +/// \brief Compute IpcPayload for the given record batch +/// \param[in] batch the RecordBatch that is being serialized +/// \param[in] options options for serialization +/// \param[out] out the returned IpcPayload +/// \return 
Status +ARROW_EXPORT +Status GetRecordBatchPayload(const RecordBatch& batch, const IpcWriteOptions& options, + IpcPayload* out); + +/// \brief Compute IpcPayload for the given record batch and custom metadata +/// \param[in] batch the RecordBatch that is being serialized +/// \param[in] custom_metadata the custom metadata to be serialized with the record batch +/// \param[in] options options for serialization +/// \param[out] out the returned IpcPayload +/// \return Status +ARROW_EXPORT +Status GetRecordBatchPayload( + const RecordBatch& batch, + const std::shared_ptr& custom_metadata, + const IpcWriteOptions& options, IpcPayload* out); + +/// \brief Write an IPC payload to the given stream. +/// \param[in] payload the payload to write +/// \param[in] options options for serialization +/// \param[in] dst The stream to write the payload to. +/// \param[out] metadata_length the length of the serialized metadata +/// \return Status +ARROW_EXPORT +Status WriteIpcPayload(const IpcPayload& payload, const IpcWriteOptions& options, + io::OutputStream* dst, int32_t* metadata_length); + +/// \brief Compute IpcPayload for the given sparse tensor +/// \param[in] sparse_tensor the SparseTensor that is being serialized +/// \param[in,out] pool for any required temporary memory allocations +/// \param[out] out the returned IpcPayload +/// \return Status +ARROW_EXPORT +Status GetSparseTensorPayload(const SparseTensor& sparse_tensor, MemoryPool* pool, + IpcPayload* out); + +namespace internal { + +// These internal APIs may change without warning or deprecation + +class ARROW_EXPORT IpcPayloadWriter { + public: + virtual ~IpcPayloadWriter(); + + // Default implementation is a no-op + virtual Status Start(); + + virtual Status WritePayload(const IpcPayload& payload) = 0; + + virtual Status Close() = 0; +}; + +/// Create a new IPC payload stream writer from stream sink. User is +/// responsible for closing the actual OutputStream. 
+/// +/// \param[in] sink output stream to write to +/// \param[in] options options for serialization +/// \return Result> +ARROW_EXPORT +Result> MakePayloadStreamWriter( + io::OutputStream* sink, const IpcWriteOptions& options = IpcWriteOptions::Defaults()); + +/// Create a new IPC payload file writer from stream sink. +/// +/// \param[in] sink output stream to write to +/// \param[in] schema the schema of the record batches to be written +/// \param[in] options options for serialization, optional +/// \param[in] metadata custom metadata for File Footer, optional +/// \return Status +ARROW_EXPORT +Result> MakePayloadFileWriter( + io::OutputStream* sink, const std::shared_ptr& schema, + const IpcWriteOptions& options = IpcWriteOptions::Defaults(), + const std::shared_ptr& metadata = NULLPTR); + +/// Create a new RecordBatchWriter from IpcPayloadWriter and schema. +/// +/// The format is implicitly the IPC stream format (allowing dictionary +/// replacement and deltas). +/// +/// \param[in] sink the IpcPayloadWriter to write to +/// \param[in] schema the schema of the record batches to be written +/// \param[in] options options for serialization +/// \return Result> +ARROW_EXPORT +Result> OpenRecordBatchWriter( + std::unique_ptr sink, const std::shared_ptr& schema, + const IpcWriteOptions& options = IpcWriteOptions::Defaults()); + +} // namespace internal +} // namespace ipc +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/aligned_storage.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/aligned_storage.h new file mode 100644 index 0000000000000000000000000000000000000000..01e3ced2d1f61b8eb3719208c13a5dc4e111e771 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/aligned_storage.h @@ -0,0 +1,145 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/util/launder.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace internal { + +template +class AlignedStorage { + public: + static constexpr bool can_memcpy = std::is_trivial::value; + + constexpr T* get() noexcept { + return arrow::internal::launder(reinterpret_cast(&data_)); + } + + constexpr const T* get() const noexcept { + // Use fully qualified name to avoid ambiguities with MSVC (ARROW-14800) + return arrow::internal::launder(reinterpret_cast(&data_)); + } + + void destroy() noexcept { + if (!std::is_trivially_destructible::value) { + get()->~T(); + } + } + + template + void construct(A&&... 
args) noexcept { + new (&data_) T(std::forward(args)...); + } + + template + void assign(V&& v) noexcept { + *get() = std::forward(v); + } + + void move_construct(AlignedStorage* other) noexcept { + new (&data_) T(std::move(*other->get())); + } + + void move_assign(AlignedStorage* other) noexcept { *get() = std::move(*other->get()); } + + template + static typename std::enable_if::type move_construct_several( + AlignedStorage* ARROW_RESTRICT src, AlignedStorage* ARROW_RESTRICT dest, size_t n, + size_t memcpy_length) noexcept { + memcpy(dest->get(), src->get(), memcpy_length * sizeof(T)); + } + + template + static typename std::enable_if::type + move_construct_several_and_destroy_source(AlignedStorage* ARROW_RESTRICT src, + AlignedStorage* ARROW_RESTRICT dest, size_t n, + size_t memcpy_length) noexcept { + memcpy(dest->get(), src->get(), memcpy_length * sizeof(T)); + } + + template + static typename std::enable_if::type move_construct_several( + AlignedStorage* ARROW_RESTRICT src, AlignedStorage* ARROW_RESTRICT dest, size_t n, + size_t memcpy_length) noexcept { + for (size_t i = 0; i < n; ++i) { + new (dest[i].get()) T(std::move(*src[i].get())); + } + } + + template + static typename std::enable_if::type + move_construct_several_and_destroy_source(AlignedStorage* ARROW_RESTRICT src, + AlignedStorage* ARROW_RESTRICT dest, size_t n, + size_t memcpy_length) noexcept { + for (size_t i = 0; i < n; ++i) { + new (dest[i].get()) T(std::move(*src[i].get())); + src[i].destroy(); + } + } + + static void move_construct_several(AlignedStorage* ARROW_RESTRICT src, + AlignedStorage* ARROW_RESTRICT dest, + size_t n) noexcept { + move_construct_several(src, dest, n, n); + } + + static void move_construct_several_and_destroy_source( + AlignedStorage* ARROW_RESTRICT src, AlignedStorage* ARROW_RESTRICT dest, + size_t n) noexcept { + move_construct_several_and_destroy_source(src, dest, n, n); + } + + static void destroy_several(AlignedStorage* p, size_t n) noexcept { + if 
(!std::is_trivially_destructible::value) { + for (size_t i = 0; i < n; ++i) { + p[i].destroy(); + } + } + } + + private: +#if !defined(__clang__) && defined(__GNUC__) && defined(__i386__) + // Workaround for GCC bug on i386: + // alignof(int64 | float64) can give different results depending on the + // compilation context, leading to internal ABI mismatch manifesting + // in incorrect propagation of Result between + // compilation units. + // (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88115) + static constexpr size_t alignment() { + if (std::is_integral_v && sizeof(T) == 8) { + return 4; + } else if (std::is_floating_point_v && sizeof(T) == 8) { + return 4; + } + return alignof(T); + } + + typename std::aligned_storage::type data_; +#else + typename std::aligned_storage::type data_; +#endif +}; + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_writer.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_writer.h new file mode 100644 index 0000000000000000000000000000000000000000..c9ce8012f3eb5a65ec91b1321b687bc0d77f7557 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_writer.h @@ -0,0 +1,286 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/util/bit_util.h" +#include "arrow/util/endian.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace internal { + +class BitmapWriter { + // A sequential bitwise writer that preserves surrounding bit values. + + public: + BitmapWriter(uint8_t* bitmap, int64_t start_offset, int64_t length) + : bitmap_(bitmap), position_(0), length_(length) { + byte_offset_ = start_offset / 8; + bit_mask_ = bit_util::kBitmask[start_offset % 8]; + if (length > 0) { + current_byte_ = bitmap[byte_offset_]; + } else { + current_byte_ = 0; + } + } + + void Set() { current_byte_ |= bit_mask_; } + + void Clear() { current_byte_ &= bit_mask_ ^ 0xFF; } + + void Next() { + bit_mask_ = static_cast(bit_mask_ << 1); + ++position_; + if (bit_mask_ == 0) { + // Finished this byte, need advancing + bit_mask_ = 0x01; + bitmap_[byte_offset_++] = current_byte_; + if (ARROW_PREDICT_TRUE(position_ < length_)) { + current_byte_ = bitmap_[byte_offset_]; + } + } + } + + void Finish() { + // Store current byte if we didn't went past bitmap storage + if (length_ > 0 && (bit_mask_ != 0x01 || position_ < length_)) { + bitmap_[byte_offset_] = current_byte_; + } + } + + int64_t position() const { return position_; } + + private: + uint8_t* bitmap_; + int64_t position_; + int64_t length_; + + uint8_t current_byte_; + uint8_t bit_mask_; + int64_t byte_offset_; +}; + +class FirstTimeBitmapWriter { + // Like BitmapWriter, but any bit values *following* the bits written + // might be clobbered. It is hence faster than BitmapWriter, and can + // also avoid false positives with Valgrind. 
+ + public: + FirstTimeBitmapWriter(uint8_t* bitmap, int64_t start_offset, int64_t length) + : bitmap_(bitmap), position_(0), length_(length) { + current_byte_ = 0; + byte_offset_ = start_offset / 8; + bit_mask_ = bit_util::kBitmask[start_offset % 8]; + if (length > 0) { + current_byte_ = + bitmap[byte_offset_] & bit_util::kPrecedingBitmask[start_offset % 8]; + } else { + current_byte_ = 0; + } + } + + /// Appends number_of_bits from word to valid_bits and valid_bits_offset. + /// + /// \param[in] word The LSB bitmap to append. Any bits past number_of_bits are assumed + /// to be unset (i.e. 0). + /// \param[in] number_of_bits The number of bits to append from word. + void AppendWord(uint64_t word, int64_t number_of_bits) { + if (ARROW_PREDICT_FALSE(number_of_bits == 0)) { + return; + } + + // Location that the first byte needs to be written to. + uint8_t* append_position = bitmap_ + byte_offset_; + + // Update state variables except for current_byte_ here. + position_ += number_of_bits; + int64_t bit_offset = bit_util::CountTrailingZeros(static_cast(bit_mask_)); + bit_mask_ = bit_util::kBitmask[(bit_offset + number_of_bits) % 8]; + byte_offset_ += (bit_offset + number_of_bits) / 8; + + if (bit_offset != 0) { + // We are in the middle of the byte. This code updates the byte and shifts + // bits appropriately within word so it can be memcpy'd below. + int64_t bits_to_carry = 8 - bit_offset; + // Carry over bits from word to current_byte_. We assume any extra bits in word + // unset so no additional accounting is needed for when number_of_bits < + // bits_to_carry. + current_byte_ |= (word & bit_util::kPrecedingBitmask[bits_to_carry]) << bit_offset; + // Check if everything is transferred into current_byte_. + if (ARROW_PREDICT_FALSE(number_of_bits < bits_to_carry)) { + return; + } + *append_position = current_byte_; + append_position++; + // Move the carry bits off of word. 
+ word = word >> bits_to_carry; + number_of_bits -= bits_to_carry; + } + word = bit_util::ToLittleEndian(word); + int64_t bytes_for_word = ::arrow::bit_util::BytesForBits(number_of_bits); + std::memcpy(append_position, &word, bytes_for_word); + // At this point, the previous current_byte_ has been written to bitmap_. + // The new current_byte_ is either the last relevant byte in 'word' + // or cleared if the new position is byte aligned (i.e. a fresh byte). + if (bit_mask_ == 0x1) { + current_byte_ = 0; + } else { + current_byte_ = *(append_position + bytes_for_word - 1); + } + } + + void Set() { current_byte_ |= bit_mask_; } + + void Clear() {} + + void Next() { + bit_mask_ = static_cast(bit_mask_ << 1); + ++position_; + if (bit_mask_ == 0) { + // Finished this byte, need advancing + bit_mask_ = 0x01; + bitmap_[byte_offset_++] = current_byte_; + current_byte_ = 0; + } + } + + void Finish() { + // Store current byte if we didn't went go bitmap storage + if (length_ > 0 && (bit_mask_ != 0x01 || position_ < length_)) { + bitmap_[byte_offset_] = current_byte_; + } + } + + int64_t position() const { return position_; } + + private: + uint8_t* bitmap_; + int64_t position_; + int64_t length_; + + uint8_t current_byte_; + uint8_t bit_mask_; + int64_t byte_offset_; +}; + +template +class BitmapWordWriter { + public: + BitmapWordWriter() = default; + BitmapWordWriter(uint8_t* bitmap, int64_t offset, int64_t length) + : offset_(static_cast(may_have_byte_offset) * (offset % 8)), + bitmap_(bitmap + offset / 8), + bitmap_end_(bitmap_ + bit_util::BytesForBits(offset_ + length)), + mask_((1U << offset_) - 1) { + if (offset_) { + if (length >= static_cast(sizeof(Word) * 8)) { + current_data.word_ = load(bitmap_); + } else if (length > 0) { + current_data.epi.byte_ = load(bitmap_); + } + } + } + + void PutNextWord(Word word) { + if (may_have_byte_offset && offset_) { + // split one word into two adjacent words, don't touch unused bits + // |<------ word ----->| + // 
+-----+-------------+ + // | A | B | + // +-----+-------------+ + // | | + // v v offset + // +-------------+-----+-------------+-----+ + // | --- | A | B | --- | + // +-------------+-----+-------------+-----+ + // |<------ next ----->|<---- current ---->| + word = (word << offset_) | (word >> (sizeof(Word) * 8 - offset_)); + Word next_word = load(bitmap_ + sizeof(Word)); + current_data.word_ = (current_data.word_ & mask_) | (word & ~mask_); + next_word = (next_word & ~mask_) | (word & mask_); + store(bitmap_, current_data.word_); + store(bitmap_ + sizeof(Word), next_word); + current_data.word_ = next_word; + } else { + store(bitmap_, word); + } + bitmap_ += sizeof(Word); + } + + void PutNextTrailingByte(uint8_t byte, int valid_bits) { + if (valid_bits == 8) { + if (may_have_byte_offset && offset_) { + byte = (byte << offset_) | (byte >> (8 - offset_)); + uint8_t next_byte = load(bitmap_ + 1); + current_data.epi.byte_ = (current_data.epi.byte_ & mask_) | (byte & ~mask_); + next_byte = (next_byte & ~mask_) | (byte & mask_); + store(bitmap_, current_data.epi.byte_); + store(bitmap_ + 1, next_byte); + current_data.epi.byte_ = next_byte; + } else { + store(bitmap_, byte); + } + ++bitmap_; + } else { + assert(valid_bits > 0); + assert(valid_bits < 8); + assert(bitmap_ + bit_util::BytesForBits(offset_ + valid_bits) <= bitmap_end_); + internal::BitmapWriter writer(bitmap_, offset_, valid_bits); + for (int i = 0; i < valid_bits; ++i) { + (byte & 0x01) ? 
writer.Set() : writer.Clear(); + writer.Next(); + byte >>= 1; + } + writer.Finish(); + } + } + + private: + int64_t offset_; + uint8_t* bitmap_; + + const uint8_t* bitmap_end_; + uint64_t mask_; + union { + Word word_; + struct { +#if ARROW_LITTLE_ENDIAN == 0 + uint8_t padding_bytes_[sizeof(Word) - 1]; +#endif + uint8_t byte_; + } epi; + } current_data; + + template + DType load(const uint8_t* bitmap) { + assert(bitmap + sizeof(DType) <= bitmap_end_); + return bit_util::ToLittleEndian(util::SafeLoadAs(bitmap)); + } + + template + void store(uint8_t* bitmap, DType data) { + assert(bitmap + sizeof(DType) <= bitmap_end_); + util::SafeStore(bitmap, bit_util::FromLittleEndian(data)); + } +}; + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_default.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_default.h new file mode 100644 index 0000000000000000000000000000000000000000..4c661dcce3798c737c1d20bce525dcaa88c83078 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_default.h @@ -0,0 +1,4251 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// This file was modified from its original version for inclusion in parquet-cpp. +// Original source: +// https://github.com/lemire/FrameOfReference/blob/6ccaf9e97160f9a3b299e23a8ef739e711ef0c71/src/bpacking.cpp +// The original copyright notice follows. + +// This code is released under the +// Apache License Version 2.0 http://www.apache.org/licenses/. +// (c) Daniel Lemire 2013 + +#pragma once + +#include "arrow/util/bit_util.h" +#include "arrow/util/ubsan.h" + +namespace arrow { +namespace internal { + +inline const uint32_t* unpack1_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) & 1; + out++; + *out = (inl >> 1) & 1; + out++; + *out = (inl >> 2) & 1; + out++; + *out = (inl >> 3) & 1; + out++; + *out = (inl >> 4) & 1; + out++; + *out = (inl >> 5) & 1; + out++; + *out = (inl >> 6) & 1; + out++; + *out = (inl >> 7) & 1; + out++; + *out = (inl >> 8) & 1; + out++; + *out = (inl >> 9) & 1; + out++; + *out = (inl >> 10) & 1; + out++; + *out = (inl >> 11) & 1; + out++; + *out = (inl >> 12) & 1; + out++; + *out = (inl >> 13) & 1; + out++; + *out = (inl >> 14) & 1; + out++; + *out = (inl >> 15) & 1; + out++; + *out = (inl >> 16) & 1; + out++; + *out = (inl >> 17) & 1; + out++; + *out = (inl >> 18) & 1; + out++; + *out = (inl >> 19) & 1; + out++; + *out = (inl >> 20) & 1; + out++; + *out = (inl >> 21) & 1; + out++; + *out = (inl >> 22) & 1; + out++; + *out = (inl >> 23) & 1; + out++; + *out = (inl >> 24) & 1; + out++; + *out = (inl >> 25) & 1; + out++; + *out = (inl >> 26) & 1; + out++; + *out = (inl >> 27) & 1; + out++; + *out = (inl >> 28) & 1; + out++; + *out = (inl >> 29) & 1; + out++; + *out = (inl >> 30) & 1; + out++; + *out = (inl >> 31); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack2_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 2); + 
out++; + *out = (inl >> 2) % (1U << 2); + out++; + *out = (inl >> 4) % (1U << 2); + out++; + *out = (inl >> 6) % (1U << 2); + out++; + *out = (inl >> 8) % (1U << 2); + out++; + *out = (inl >> 10) % (1U << 2); + out++; + *out = (inl >> 12) % (1U << 2); + out++; + *out = (inl >> 14) % (1U << 2); + out++; + *out = (inl >> 16) % (1U << 2); + out++; + *out = (inl >> 18) % (1U << 2); + out++; + *out = (inl >> 20) % (1U << 2); + out++; + *out = (inl >> 22) % (1U << 2); + out++; + *out = (inl >> 24) % (1U << 2); + out++; + *out = (inl >> 26) % (1U << 2); + out++; + *out = (inl >> 28) % (1U << 2); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 2); + out++; + *out = (inl >> 2) % (1U << 2); + out++; + *out = (inl >> 4) % (1U << 2); + out++; + *out = (inl >> 6) % (1U << 2); + out++; + *out = (inl >> 8) % (1U << 2); + out++; + *out = (inl >> 10) % (1U << 2); + out++; + *out = (inl >> 12) % (1U << 2); + out++; + *out = (inl >> 14) % (1U << 2); + out++; + *out = (inl >> 16) % (1U << 2); + out++; + *out = (inl >> 18) % (1U << 2); + out++; + *out = (inl >> 20) % (1U << 2); + out++; + *out = (inl >> 22) % (1U << 2); + out++; + *out = (inl >> 24) % (1U << 2); + out++; + *out = (inl >> 26) % (1U << 2); + out++; + *out = (inl >> 28) % (1U << 2); + out++; + *out = (inl >> 30); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack3_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 3); + out++; + *out = (inl >> 3) % (1U << 3); + out++; + *out = (inl >> 6) % (1U << 3); + out++; + *out = (inl >> 9) % (1U << 3); + out++; + *out = (inl >> 12) % (1U << 3); + out++; + *out = (inl >> 15) % (1U << 3); + out++; + *out = (inl >> 18) % (1U << 3); + out++; + *out = (inl >> 21) % (1U << 3); + out++; + *out = (inl >> 24) % (1U << 3); + out++; + *out = (inl >> 27) % (1U << 3); + 
out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (3 - 1); + out++; + *out = (inl >> 1) % (1U << 3); + out++; + *out = (inl >> 4) % (1U << 3); + out++; + *out = (inl >> 7) % (1U << 3); + out++; + *out = (inl >> 10) % (1U << 3); + out++; + *out = (inl >> 13) % (1U << 3); + out++; + *out = (inl >> 16) % (1U << 3); + out++; + *out = (inl >> 19) % (1U << 3); + out++; + *out = (inl >> 22) % (1U << 3); + out++; + *out = (inl >> 25) % (1U << 3); + out++; + *out = (inl >> 28) % (1U << 3); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (3 - 2); + out++; + *out = (inl >> 2) % (1U << 3); + out++; + *out = (inl >> 5) % (1U << 3); + out++; + *out = (inl >> 8) % (1U << 3); + out++; + *out = (inl >> 11) % (1U << 3); + out++; + *out = (inl >> 14) % (1U << 3); + out++; + *out = (inl >> 17) % (1U << 3); + out++; + *out = (inl >> 20) % (1U << 3); + out++; + *out = (inl >> 23) % (1U << 3); + out++; + *out = (inl >> 26) % (1U << 3); + out++; + *out = (inl >> 29); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack4_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 4); + out++; + *out = (inl >> 4) % (1U << 4); + out++; + *out = (inl >> 8) % (1U << 4); + out++; + *out = (inl >> 12) % (1U << 4); + out++; + *out = (inl >> 16) % (1U << 4); + out++; + *out = (inl >> 20) % (1U << 4); + out++; + *out = (inl >> 24) % (1U << 4); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 4); + out++; + *out = (inl >> 4) % (1U << 4); + out++; + *out = (inl >> 8) % (1U << 4); + out++; + *out = (inl >> 12) % (1U << 4); + out++; + *out = (inl >> 16) % (1U << 4); + out++; + *out = (inl >> 20) % (1U << 
4); + out++; + *out = (inl >> 24) % (1U << 4); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 4); + out++; + *out = (inl >> 4) % (1U << 4); + out++; + *out = (inl >> 8) % (1U << 4); + out++; + *out = (inl >> 12) % (1U << 4); + out++; + *out = (inl >> 16) % (1U << 4); + out++; + *out = (inl >> 20) % (1U << 4); + out++; + *out = (inl >> 24) % (1U << 4); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 4); + out++; + *out = (inl >> 4) % (1U << 4); + out++; + *out = (inl >> 8) % (1U << 4); + out++; + *out = (inl >> 12) % (1U << 4); + out++; + *out = (inl >> 16) % (1U << 4); + out++; + *out = (inl >> 20) % (1U << 4); + out++; + *out = (inl >> 24) % (1U << 4); + out++; + *out = (inl >> 28); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack5_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 5); + out++; + *out = (inl >> 5) % (1U << 5); + out++; + *out = (inl >> 10) % (1U << 5); + out++; + *out = (inl >> 15) % (1U << 5); + out++; + *out = (inl >> 20) % (1U << 5); + out++; + *out = (inl >> 25) % (1U << 5); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (5 - 3); + out++; + *out = (inl >> 3) % (1U << 5); + out++; + *out = (inl >> 8) % (1U << 5); + out++; + *out = (inl >> 13) % (1U << 5); + out++; + *out = (inl >> 18) % (1U << 5); + out++; + *out = (inl >> 23) % (1U << 5); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (5 - 1); + out++; + *out = (inl >> 1) % (1U << 5); + out++; + *out = (inl >> 6) % (1U << 5); + out++; + *out = (inl >> 11) % (1U << 5); + 
out++; + *out = (inl >> 16) % (1U << 5); + out++; + *out = (inl >> 21) % (1U << 5); + out++; + *out = (inl >> 26) % (1U << 5); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (5 - 4); + out++; + *out = (inl >> 4) % (1U << 5); + out++; + *out = (inl >> 9) % (1U << 5); + out++; + *out = (inl >> 14) % (1U << 5); + out++; + *out = (inl >> 19) % (1U << 5); + out++; + *out = (inl >> 24) % (1U << 5); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (5 - 2); + out++; + *out = (inl >> 2) % (1U << 5); + out++; + *out = (inl >> 7) % (1U << 5); + out++; + *out = (inl >> 12) % (1U << 5); + out++; + *out = (inl >> 17) % (1U << 5); + out++; + *out = (inl >> 22) % (1U << 5); + out++; + *out = (inl >> 27); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack6_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 6); + out++; + *out = (inl >> 6) % (1U << 6); + out++; + *out = (inl >> 12) % (1U << 6); + out++; + *out = (inl >> 18) % (1U << 6); + out++; + *out = (inl >> 24) % (1U << 6); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (6 - 4); + out++; + *out = (inl >> 4) % (1U << 6); + out++; + *out = (inl >> 10) % (1U << 6); + out++; + *out = (inl >> 16) % (1U << 6); + out++; + *out = (inl >> 22) % (1U << 6); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (6 - 2); + out++; + *out = (inl >> 2) % (1U << 6); + out++; + *out = (inl >> 8) % (1U << 6); + out++; + *out = (inl >> 14) % (1U << 6); + out++; + *out = (inl >> 20) % (1U << 6); + out++; + *out = (inl >> 26); + ++in; + inl = 
util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 6); + out++; + *out = (inl >> 6) % (1U << 6); + out++; + *out = (inl >> 12) % (1U << 6); + out++; + *out = (inl >> 18) % (1U << 6); + out++; + *out = (inl >> 24) % (1U << 6); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (6 - 4); + out++; + *out = (inl >> 4) % (1U << 6); + out++; + *out = (inl >> 10) % (1U << 6); + out++; + *out = (inl >> 16) % (1U << 6); + out++; + *out = (inl >> 22) % (1U << 6); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (6 - 2); + out++; + *out = (inl >> 2) % (1U << 6); + out++; + *out = (inl >> 8) % (1U << 6); + out++; + *out = (inl >> 14) % (1U << 6); + out++; + *out = (inl >> 20) % (1U << 6); + out++; + *out = (inl >> 26); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack7_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 7); + out++; + *out = (inl >> 7) % (1U << 7); + out++; + *out = (inl >> 14) % (1U << 7); + out++; + *out = (inl >> 21) % (1U << 7); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (7 - 3); + out++; + *out = (inl >> 3) % (1U << 7); + out++; + *out = (inl >> 10) % (1U << 7); + out++; + *out = (inl >> 17) % (1U << 7); + out++; + *out = (inl >> 24) % (1U << 7); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (7 - 6); + out++; + *out = (inl >> 6) % (1U << 7); + out++; + *out = (inl >> 13) % (1U << 7); + out++; + *out = (inl >> 20) % (1U << 7); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl 
= arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (7 - 2); + out++; + *out = (inl >> 2) % (1U << 7); + out++; + *out = (inl >> 9) % (1U << 7); + out++; + *out = (inl >> 16) % (1U << 7); + out++; + *out = (inl >> 23) % (1U << 7); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (7 - 5); + out++; + *out = (inl >> 5) % (1U << 7); + out++; + *out = (inl >> 12) % (1U << 7); + out++; + *out = (inl >> 19) % (1U << 7); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (7 - 1); + out++; + *out = (inl >> 1) % (1U << 7); + out++; + *out = (inl >> 8) % (1U << 7); + out++; + *out = (inl >> 15) % (1U << 7); + out++; + *out = (inl >> 22) % (1U << 7); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (7 - 4); + out++; + *out = (inl >> 4) % (1U << 7); + out++; + *out = (inl >> 11) % (1U << 7); + out++; + *out = (inl >> 18) % (1U << 7); + out++; + *out = (inl >> 25); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack8_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 
24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 24); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack9_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 9); + out++; + *out = (inl >> 9) % (1U << 9); + out++; + *out = (inl >> 18) % (1U << 9); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (9 - 4); + out++; + *out = (inl >> 4) % (1U << 9); + out++; + *out = (inl >> 13) % (1U << 9); + out++; + *out = (inl >> 22) % (1U << 9); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (9 - 8); + out++; + *out = (inl >> 8) % (1U 
<< 9); + out++; + *out = (inl >> 17) % (1U << 9); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (9 - 3); + out++; + *out = (inl >> 3) % (1U << 9); + out++; + *out = (inl >> 12) % (1U << 9); + out++; + *out = (inl >> 21) % (1U << 9); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (9 - 7); + out++; + *out = (inl >> 7) % (1U << 9); + out++; + *out = (inl >> 16) % (1U << 9); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (9 - 2); + out++; + *out = (inl >> 2) % (1U << 9); + out++; + *out = (inl >> 11) % (1U << 9); + out++; + *out = (inl >> 20) % (1U << 9); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (9 - 6); + out++; + *out = (inl >> 6) % (1U << 9); + out++; + *out = (inl >> 15) % (1U << 9); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (9 - 1); + out++; + *out = (inl >> 1) % (1U << 9); + out++; + *out = (inl >> 10) % (1U << 9); + out++; + *out = (inl >> 19) % (1U << 9); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (9 - 5); + out++; + *out = (inl >> 5) % (1U << 9); + out++; + *out = (inl >> 14) % (1U << 9); + out++; + *out = (inl >> 23); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack10_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 10); + out++; + *out = (inl >> 10) % (1U << 10); + out++; + *out = (inl >> 20) % (1U << 10); + out++; + *out = (inl >> 30); + 
++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (10 - 8); + out++; + *out = (inl >> 8) % (1U << 10); + out++; + *out = (inl >> 18) % (1U << 10); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (10 - 6); + out++; + *out = (inl >> 6) % (1U << 10); + out++; + *out = (inl >> 16) % (1U << 10); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (10 - 4); + out++; + *out = (inl >> 4) % (1U << 10); + out++; + *out = (inl >> 14) % (1U << 10); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (10 - 2); + out++; + *out = (inl >> 2) % (1U << 10); + out++; + *out = (inl >> 12) % (1U << 10); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 10); + out++; + *out = (inl >> 10) % (1U << 10); + out++; + *out = (inl >> 20) % (1U << 10); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (10 - 8); + out++; + *out = (inl >> 8) % (1U << 10); + out++; + *out = (inl >> 18) % (1U << 10); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (10 - 6); + out++; + *out = (inl >> 6) % (1U << 10); + out++; + *out = (inl >> 16) % (1U << 10); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (10 - 4); + out++; + *out = (inl >> 4) % (1U << 10); + out++; + *out = (inl >> 14) % (1U << 10); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = 
arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (10 - 2); + out++; + *out = (inl >> 2) % (1U << 10); + out++; + *out = (inl >> 12) % (1U << 10); + out++; + *out = (inl >> 22); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack11_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 11); + out++; + *out = (inl >> 11) % (1U << 11); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (11 - 1); + out++; + *out = (inl >> 1) % (1U << 11); + out++; + *out = (inl >> 12) % (1U << 11); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (11 - 2); + out++; + *out = (inl >> 2) % (1U << 11); + out++; + *out = (inl >> 13) % (1U << 11); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (11 - 3); + out++; + *out = (inl >> 3) % (1U << 11); + out++; + *out = (inl >> 14) % (1U << 11); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (11 - 4); + out++; + *out = (inl >> 4) % (1U << 11); + out++; + *out = (inl >> 15) % (1U << 11); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (11 - 5); + out++; + *out = (inl >> 5) % (1U << 11); + out++; + *out = (inl >> 16) % (1U << 11); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (11 - 6); + out++; + *out = (inl >> 6) % (1U << 11); + out++; + *out = (inl >> 17) % (1U << 11); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + 
inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (11 - 7); + out++; + *out = (inl >> 7) % (1U << 11); + out++; + *out = (inl >> 18) % (1U << 11); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (11 - 8); + out++; + *out = (inl >> 8) % (1U << 11); + out++; + *out = (inl >> 19) % (1U << 11); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (11 - 9); + out++; + *out = (inl >> 9) % (1U << 11); + out++; + *out = (inl >> 20) % (1U << 11); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (11 - 10); + out++; + *out = (inl >> 10) % (1U << 11); + out++; + *out = (inl >> 21); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack12_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 12); + out++; + *out = (inl >> 12) % (1U << 12); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (12 - 4); + out++; + *out = (inl >> 4) % (1U << 12); + out++; + *out = (inl >> 16) % (1U << 12); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (12 - 8); + out++; + *out = (inl >> 8) % (1U << 12); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 12); + out++; + *out = (inl >> 12) % (1U << 12); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (12 - 4); + out++; + *out = (inl >> 4) % 
(1U << 12); + out++; + *out = (inl >> 16) % (1U << 12); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (12 - 8); + out++; + *out = (inl >> 8) % (1U << 12); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 12); + out++; + *out = (inl >> 12) % (1U << 12); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (12 - 4); + out++; + *out = (inl >> 4) % (1U << 12); + out++; + *out = (inl >> 16) % (1U << 12); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (12 - 8); + out++; + *out = (inl >> 8) % (1U << 12); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 12); + out++; + *out = (inl >> 12) % (1U << 12); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (12 - 4); + out++; + *out = (inl >> 4) % (1U << 12); + out++; + *out = (inl >> 16) % (1U << 12); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (12 - 8); + out++; + *out = (inl >> 8) % (1U << 12); + out++; + *out = (inl >> 20); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack13_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 13); + out++; + *out = (inl >> 13) % (1U << 13); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (13 - 
7); + out++; + *out = (inl >> 7) % (1U << 13); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (13 - 1); + out++; + *out = (inl >> 1) % (1U << 13); + out++; + *out = (inl >> 14) % (1U << 13); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (13 - 8); + out++; + *out = (inl >> 8) % (1U << 13); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (13 - 2); + out++; + *out = (inl >> 2) % (1U << 13); + out++; + *out = (inl >> 15) % (1U << 13); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (13 - 9); + out++; + *out = (inl >> 9) % (1U << 13); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (13 - 3); + out++; + *out = (inl >> 3) % (1U << 13); + out++; + *out = (inl >> 16) % (1U << 13); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (13 - 10); + out++; + *out = (inl >> 10) % (1U << 13); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (13 - 4); + out++; + *out = (inl >> 4) % (1U << 13); + out++; + *out = (inl >> 17) % (1U << 13); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (13 - 11); + out++; + *out = (inl >> 11) % (1U << 13); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (13 - 5); + out++; + *out = (inl 
>> 5) % (1U << 13); + out++; + *out = (inl >> 18) % (1U << 13); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (13 - 12); + out++; + *out = (inl >> 12) % (1U << 13); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (13 - 6); + out++; + *out = (inl >> 6) % (1U << 13); + out++; + *out = (inl >> 19); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack14_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 14); + out++; + *out = (inl >> 14) % (1U << 14); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (14 - 10); + out++; + *out = (inl >> 10) % (1U << 14); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (14 - 6); + out++; + *out = (inl >> 6) % (1U << 14); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (14 - 2); + out++; + *out = (inl >> 2) % (1U << 14); + out++; + *out = (inl >> 16) % (1U << 14); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (14 - 12); + out++; + *out = (inl >> 12) % (1U << 14); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (14 - 8); + out++; + *out = (inl >> 8) % (1U << 14); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (14 - 4); + out++; + *out = (inl >> 4) 
% (1U << 14); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 14); + out++; + *out = (inl >> 14) % (1U << 14); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (14 - 10); + out++; + *out = (inl >> 10) % (1U << 14); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (14 - 6); + out++; + *out = (inl >> 6) % (1U << 14); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (14 - 2); + out++; + *out = (inl >> 2) % (1U << 14); + out++; + *out = (inl >> 16) % (1U << 14); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (14 - 12); + out++; + *out = (inl >> 12) % (1U << 14); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (14 - 8); + out++; + *out = (inl >> 8) % (1U << 14); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (14 - 4); + out++; + *out = (inl >> 4) % (1U << 14); + out++; + *out = (inl >> 18); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack15_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 15); + out++; + *out = (inl >> 15) % (1U << 15); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (15 - 13); + out++; + *out = (inl >> 13) % (1U << 15); + out++; + *out = (inl >> 28); + 
++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (15 - 11); + out++; + *out = (inl >> 11) % (1U << 15); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (15 - 9); + out++; + *out = (inl >> 9) % (1U << 15); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (15 - 7); + out++; + *out = (inl >> 7) % (1U << 15); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (15 - 5); + out++; + *out = (inl >> 5) % (1U << 15); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (15 - 3); + out++; + *out = (inl >> 3) % (1U << 15); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (15 - 1); + out++; + *out = (inl >> 1) % (1U << 15); + out++; + *out = (inl >> 16) % (1U << 15); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (15 - 14); + out++; + *out = (inl >> 14) % (1U << 15); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (15 - 12); + out++; + *out = (inl >> 12) % (1U << 15); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (15 - 10); + out++; + *out = (inl >> 10) % (1U << 15); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (15 - 8); + out++; + *out = (inl >> 8) 
% (1U << 15); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (15 - 6); + out++; + *out = (inl >> 6) % (1U << 15); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (15 - 4); + out++; + *out = (inl >> 4) % (1U << 15); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (15 - 2); + out++; + *out = (inl >> 2) % (1U << 15); + out++; + *out = (inl >> 17); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack16_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl 
>> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack17_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (17 - 2); + out++; + *out = (inl >> 2) % (1U << 17); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (17 - 4); + out++; + *out = (inl >> 4) % (1U << 17); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = 
arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (17 - 6); + out++; + *out = (inl >> 6) % (1U << 17); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (17 - 8); + out++; + *out = (inl >> 8) % (1U << 17); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (17 - 10); + out++; + *out = (inl >> 10) % (1U << 17); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (17 - 12); + out++; + *out = (inl >> 12) % (1U << 17); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (17 - 14); + out++; + *out = (inl >> 14) % (1U << 17); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (17 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (17 - 1); + out++; + *out = (inl >> 1) % (1U << 17); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (17 - 3); + out++; + *out = (inl >> 3) % (1U << 17); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (17 - 5); + out++; + *out = (inl >> 5) % (1U << 17); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (17 - 7); + out++; + *out = (inl >> 7) % (1U << 17); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + 
*out |= (inl % (1U << 9)) << (17 - 9); + out++; + *out = (inl >> 9) % (1U << 17); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (17 - 11); + out++; + *out = (inl >> 11) % (1U << 17); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (17 - 13); + out++; + *out = (inl >> 13) % (1U << 17); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << (17 - 15); + out++; + *out = (inl >> 15); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack18_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (18 - 4); + out++; + *out = (inl >> 4) % (1U << 18); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (18 - 8); + out++; + *out = (inl >> 8) % (1U << 18); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (18 - 12); + out++; + *out = (inl >> 12) % (1U << 18); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (18 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (18 - 2); + out++; + *out = (inl >> 2) % (1U << 18); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 
6)) << (18 - 6); + out++; + *out = (inl >> 6) % (1U << 18); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (18 - 10); + out++; + *out = (inl >> 10) % (1U << 18); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (18 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (18 - 4); + out++; + *out = (inl >> 4) % (1U << 18); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (18 - 8); + out++; + *out = (inl >> 8) % (1U << 18); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (18 - 12); + out++; + *out = (inl >> 12) % (1U << 18); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (18 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (18 - 2); + out++; + *out = (inl >> 2) % (1U << 18); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (18 - 6); + out++; + *out = (inl >> 6) % (1U << 18); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (18 - 10); + out++; + *out = (inl >> 10) % (1U << 18); + out++; + *out = (inl >> 28); + ++in; + inl = 
util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (18 - 14); + out++; + *out = (inl >> 14); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack19_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 19); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (19 - 6); + out++; + *out = (inl >> 6) % (1U << 19); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (19 - 12); + out++; + *out = (inl >> 12) % (1U << 19); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (19 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (19 - 5); + out++; + *out = (inl >> 5) % (1U << 19); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (19 - 11); + out++; + *out = (inl >> 11) % (1U << 19); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 17)) << (19 - 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (19 - 4); + out++; + *out = (inl >> 4) % (1U << 19); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (19 - 10); + out++; + *out = (inl >> 10) % (1U << 19); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = 
arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (19 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (19 - 3); + out++; + *out = (inl >> 3) % (1U << 19); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (19 - 9); + out++; + *out = (inl >> 9) % (1U << 19); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << (19 - 15); + out++; + *out = (inl >> 15); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (19 - 2); + out++; + *out = (inl >> 2) % (1U << 19); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (19 - 8); + out++; + *out = (inl >> 8) % (1U << 19); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (19 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (19 - 1); + out++; + *out = (inl >> 1) % (1U << 19); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (19 - 7); + out++; + *out = (inl >> 7) % (1U << 19); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (19 - 13); + out++; + *out = (inl >> 13); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack20_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % 
(1U << 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (20 - 8); + out++; + *out = (inl >> 8) % (1U << 20); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (20 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (20 - 4); + out++; + *out = (inl >> 4) % (1U << 20); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (20 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (20 - 8); + out++; + *out = (inl >> 8) % (1U << 20); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (20 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (20 - 4); + out++; + *out = (inl >> 4) % (1U << 20); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (20 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (20 - 8); + out++; + *out = (inl >> 8) % (1U << 20); + out++; + *out = (inl >> 28); + ++in; + inl 
= util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (20 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (20 - 4); + out++; + *out = (inl >> 4) % (1U << 20); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (20 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (20 - 8); + out++; + *out = (inl >> 8) % (1U << 20); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (20 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (20 - 4); + out++; + *out = (inl >> 4) % (1U << 20); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (20 - 12); + out++; + *out = (inl >> 12); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack21_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 21); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (21 - 10); + out++; + *out = (inl >> 10) % (1U << 21); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (21 - 20); + out++; + *out = (inl >> 20); + 
++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (21 - 9); + out++; + *out = (inl >> 9) % (1U << 21); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 19)) << (21 - 19); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (21 - 8); + out++; + *out = (inl >> 8) % (1U << 21); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (21 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (21 - 7); + out++; + *out = (inl >> 7) % (1U << 21); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 17)) << (21 - 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (21 - 6); + out++; + *out = (inl >> 6) % (1U << 21); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (21 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (21 - 5); + out++; + *out = (inl >> 5) % (1U << 21); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << (21 - 15); + out++; + *out = (inl >> 15); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (21 - 4); + out++; + *out = (inl >> 4) % (1U << 21); + out++; + *out = (inl >> 25); + ++in; + inl = 
util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (21 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (21 - 3); + out++; + *out = (inl >> 3) % (1U << 21); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (21 - 13); + out++; + *out = (inl >> 13); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (21 - 2); + out++; + *out = (inl >> 2) % (1U << 21); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (21 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (21 - 1); + out++; + *out = (inl >> 1) % (1U << 21); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (21 - 11); + out++; + *out = (inl >> 11); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack22_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (22 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (22 - 2); + out++; + *out = (inl >> 2) % (1U << 22); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (22 - 14); + out++; + *out = (inl >> 14); + ++in; + 
inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (22 - 4); + out++; + *out = (inl >> 4) % (1U << 22); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (22 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (22 - 6); + out++; + *out = (inl >> 6) % (1U << 22); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (22 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (22 - 8); + out++; + *out = (inl >> 8) % (1U << 22); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (22 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (22 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (22 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (22 - 2); + out++; + *out = (inl >> 2) % (1U << 22); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (22 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 
4)) << (22 - 4); + out++; + *out = (inl >> 4) % (1U << 22); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (22 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (22 - 6); + out++; + *out = (inl >> 6) % (1U << 22); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (22 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (22 - 8); + out++; + *out = (inl >> 8) % (1U << 22); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (22 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (22 - 10); + out++; + *out = (inl >> 10); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack23_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 23); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (23 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (23 - 5); + out++; + *out = (inl >> 5) % (1U << 23); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 19)) << (23 - 19); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % 
(1U << 10)) << (23 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (23 - 1); + out++; + *out = (inl >> 1) % (1U << 23); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << (23 - 15); + out++; + *out = (inl >> 15); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (23 - 6); + out++; + *out = (inl >> 6) % (1U << 23); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (23 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (23 - 11); + out++; + *out = (inl >> 11); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (23 - 2); + out++; + *out = (inl >> 2) % (1U << 23); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (23 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (23 - 7); + out++; + *out = (inl >> 7) % (1U << 23); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 21)) << (23 - 21); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (23 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (23 - 3); + out++; + *out = (inl >> 3) % (1U << 23); + out++; + *out = (inl >> 26); 
+ ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 17)) << (23 - 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (23 - 8); + out++; + *out = (inl >> 8) % (1U << 23); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (23 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (23 - 13); + out++; + *out = (inl >> 13); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (23 - 4); + out++; + *out = (inl >> 4) % (1U << 23); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (23 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (23 - 9); + out++; + *out = (inl >> 9); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack24_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); 
+ *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl 
= arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack25_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 25); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (25 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (25 - 11); + out++; + *out = (inl >> 11); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (25 - 4); + out++; + *out = (inl >> 4) % (1U << 25); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (25 - 22); + out++; + *out = (inl >> 22); + ++in; + 
inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << (25 - 15); + out++; + *out = (inl >> 15); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (25 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (25 - 1); + out++; + *out = (inl >> 1) % (1U << 25); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 19)) << (25 - 19); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (25 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (25 - 5); + out++; + *out = (inl >> 5) % (1U << 25); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 23)) << (25 - 23); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (25 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (25 - 9); + out++; + *out = (inl >> 9); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (25 - 2); + out++; + *out = (inl >> 2) % (1U << 25); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (25 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (25 - 13); + out++; + *out = (inl >> 
13); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (25 - 6); + out++; + *out = (inl >> 6) % (1U << 25); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (25 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 17)) << (25 - 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (25 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (25 - 3); + out++; + *out = (inl >> 3) % (1U << 25); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 21)) << (25 - 21); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (25 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (25 - 7); + out++; + *out = (inl >> 7); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack26_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 26); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (26 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (26 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = 
arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (26 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (26 - 2); + out++; + *out = (inl >> 2) % (1U << 26); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (26 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (26 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (26 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (26 - 4); + out++; + *out = (inl >> 4) % (1U << 26); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (26 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (26 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (26 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (26 - 6); + out++; + *out = (inl >> 6); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 26); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (26 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = 
arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (26 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (26 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (26 - 2); + out++; + *out = (inl >> 2) % (1U << 26); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (26 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (26 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (26 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (26 - 4); + out++; + *out = (inl >> 4) % (1U << 26); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (26 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (26 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (26 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (26 - 6); + out++; + *out = (inl >> 6); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack27_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 27); 
+ out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (27 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 17)) << (27 - 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (27 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (27 - 7); + out++; + *out = (inl >> 7); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (27 - 2); + out++; + *out = (inl >> 2) % (1U << 27); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (27 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 19)) << (27 - 19); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (27 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (27 - 9); + out++; + *out = (inl >> 9); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (27 - 4); + out++; + *out = (inl >> 4) % (1U << 27); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 26)) << (27 - 26); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 21)) << (27 - 21); + out++; + *out = (inl >> 
21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (27 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (27 - 11); + out++; + *out = (inl >> 11); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (27 - 6); + out++; + *out = (inl >> 6); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (27 - 1); + out++; + *out = (inl >> 1) % (1U << 27); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 23)) << (27 - 23); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (27 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (27 - 13); + out++; + *out = (inl >> 13); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (27 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (27 - 3); + out++; + *out = (inl >> 3) % (1U << 27); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 25)) << (27 - 25); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (27 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << (27 - 15); + out++; + *out = (inl >> 15); + ++in; + inl = 
util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (27 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (27 - 5); + out++; + *out = (inl >> 5); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack28_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 28); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (28 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (28 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (28 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (28 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (28 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (28 - 4); + out++; + *out = (inl >> 4); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 28); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (28 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (28 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = 
util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (28 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (28 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (28 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (28 - 4); + out++; + *out = (inl >> 4); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 28); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (28 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (28 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (28 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (28 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (28 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (28 - 4); + out++; + *out = (inl >> 4); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 28); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (28 - 24); + 
out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (28 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (28 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (28 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (28 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (28 - 4); + out++; + *out = (inl >> 4); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack29_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 29); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 26)) << (29 - 26); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 23)) << (29 - 23); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (29 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 17)) << (29 - 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (29 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (29 - 11); + out++; 
+ *out = (inl >> 11); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (29 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (29 - 5); + out++; + *out = (inl >> 5); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (29 - 2); + out++; + *out = (inl >> 2) % (1U << 29); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 28)) << (29 - 28); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 25)) << (29 - 25); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (29 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 19)) << (29 - 19); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (29 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (29 - 13); + out++; + *out = (inl >> 13); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (29 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (29 - 7); + out++; + *out = (inl >> 7); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (29 - 4); + out++; + *out = (inl >> 4); + ++in; + inl = util::SafeLoad(in); + inl = 
arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (29 - 1); + out++; + *out = (inl >> 1) % (1U << 29); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 27)) << (29 - 27); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (29 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 21)) << (29 - 21); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (29 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << (29 - 15); + out++; + *out = (inl >> 15); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (29 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (29 - 9); + out++; + *out = (inl >> 9); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (29 - 6); + out++; + *out = (inl >> 6); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (29 - 3); + out++; + *out = (inl >> 3); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack30_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 30); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 28)) << (30 - 28); + out++; + *out = (inl >> 28); + ++in; + inl 
= util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 26)) << (30 - 26); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (30 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (30 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (30 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (30 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (30 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (30 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (30 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (30 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (30 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (30 - 6); + out++; + *out = (inl >> 6); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (30 - 4); + out++; + *out = (inl >> 4); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (30 - 2); + 
out++; + *out = (inl >> 2); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 30); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 28)) << (30 - 28); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 26)) << (30 - 26); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (30 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (30 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (30 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (30 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (30 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (30 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (30 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (30 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (30 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = 
arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (30 - 6); + out++; + *out = (inl >> 6); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (30 - 4); + out++; + *out = (inl >> 4); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (30 - 2); + out++; + *out = (inl >> 2); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack31_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 31); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 30)) << (31 - 30); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 29)) << (31 - 29); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 28)) << (31 - 28); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 27)) << (31 - 27); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 26)) << (31 - 26); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 25)) << (31 - 25); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (31 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 23)) << (31 - 23); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = 
arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (31 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 21)) << (31 - 21); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (31 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 19)) << (31 - 19); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (31 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 17)) << (31 - 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (31 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << (31 - 15); + out++; + *out = (inl >> 15); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (31 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (31 - 13); + out++; + *out = (inl >> 13); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (31 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (31 - 11); + out++; + *out = (inl >> 11); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (31 - 10); + out++; + *out = 
(inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (31 - 9); + out++; + *out = (inl >> 9); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (31 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (31 - 7); + out++; + *out = (inl >> 7); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (31 - 6); + out++; + *out = (inl >> 6); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (31 - 5); + out++; + *out = (inl >> 5); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (31 - 4); + out++; + *out = (inl >> 4); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (31 - 3); + out++; + *out = (inl >> 3); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (31 - 2); + out++; + *out = (inl >> 2); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (31 - 1); + out++; + *out = (inl >> 1); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack32_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = 
util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = 
arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + out++; + + return in; +} + +inline const uint32_t* nullunpacker32(const uint32_t* in, uint32_t* out) { + for (int k = 0; k < 32; ++k) { + out[k] = 0; + } + return in; +} + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/checked_cast.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/checked_cast.h new file mode 100644 index 0000000000000000000000000000000000000000..97f6b61a1f8cebd297a5f4a8fe4401b6073de45f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/checked_cast.h @@ -0,0 +1,61 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +namespace arrow { +namespace internal { + +template +inline OutputType checked_cast(InputType&& value) { + static_assert(std::is_class::type>::type>::value, + "checked_cast input type must be a class"); + static_assert(std::is_class::type>::type>::value, + "checked_cast output type must be a class"); +#ifdef NDEBUG + return static_cast(value); +#else + return dynamic_cast(value); +#endif +} + +template +std::shared_ptr checked_pointer_cast(std::shared_ptr r) noexcept { +#ifdef NDEBUG + return std::static_pointer_cast(std::move(r)); +#else + return std::dynamic_pointer_cast(std::move(r)); +#endif +} + +template +std::unique_ptr checked_pointer_cast(std::unique_ptr r) noexcept { +#ifdef NDEBUG + return std::unique_ptr(static_cast(r.release())); +#else + return std::unique_ptr(dynamic_cast(r.release())); +#endif +} + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/concurrent_map.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/concurrent_map.h new file mode 100644 index 0000000000000000000000000000000000000000..ff1584552a8ffc77fa518002bd285795ec0d1408 --- /dev/null +++ 
b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/concurrent_map.h @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/util/mutex.h" + +namespace arrow { +namespace util { + +template +class ConcurrentMap { + public: + void Insert(const K& key, const V& value) { + auto lock = mutex_.Lock(); + map_.insert({key, value}); + } + + template + V GetOrInsert(const K& key, ValueFunc&& compute_value_func) { + auto lock = mutex_.Lock(); + auto it = map_.find(key); + if (it == map_.end()) { + auto pair = map_.emplace(key, compute_value_func()); + it = pair.first; + } + return it->second; + } + + void Erase(const K& key) { + auto lock = mutex_.Lock(); + map_.erase(key); + } + + void Clear() { + auto lock = mutex_.Lock(); + map_.clear(); + } + + size_t size() const { + auto lock = mutex_.Lock(); + return map_.size(); + } + + private: + std::unordered_map map_; + mutable arrow::util::Mutex mutex_; +}; + +} // namespace util +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/config.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/config.h new file mode 100644 index 
0000000000000000000000000000000000000000..b451252bdd942a8388098ce25cb07b3b7e76e904 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/config.h @@ -0,0 +1,70 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#define ARROW_VERSION_MAJOR 18 +#define ARROW_VERSION_MINOR 1 +#define ARROW_VERSION_PATCH 0 +#define ARROW_VERSION ((ARROW_VERSION_MAJOR * 1000) + ARROW_VERSION_MINOR) * 1000 + ARROW_VERSION_PATCH + +#define ARROW_VERSION_STRING "18.1.0" + +#define ARROW_SO_VERSION "1801" +#define ARROW_FULL_SO_VERSION "1801.0.0" + +#define ARROW_CXX_COMPILER_ID "GNU" +#define ARROW_CXX_COMPILER_VERSION "12.2.1" +#define ARROW_CXX_COMPILER_FLAGS " -Wno-noexcept-type -Wno-subobject-linkage -fdiagnostics-color=always -Wall -fno-semantic-interposition -msse4.2 " + +#define ARROW_BUILD_TYPE "RELEASE" + +#define ARROW_PACKAGE_KIND "python-wheel-manylinux228" + +#define ARROW_COMPUTE +#define ARROW_CSV +/* #undef ARROW_CUDA */ +#define ARROW_DATASET +#define ARROW_FILESYSTEM +#define ARROW_FLIGHT +/* #undef ARROW_FLIGHT_SQL */ +#define ARROW_IPC +#define ARROW_JEMALLOC +#define ARROW_JEMALLOC_VENDORED +#define ARROW_JSON +#define ARROW_MIMALLOC +#define ARROW_ORC +#define ARROW_PARQUET +#define 
ARROW_SUBSTRAIT + +#define ARROW_AZURE +#define ARROW_ENABLE_THREADING +#define ARROW_GCS +#define ARROW_HDFS +#define ARROW_S3 +/* #undef ARROW_USE_GLOG */ +#define ARROW_USE_NATIVE_INT128 +#define ARROW_WITH_BROTLI +#define ARROW_WITH_BZ2 +#define ARROW_WITH_LZ4 +/* #undef ARROW_WITH_MUSL */ +/* #undef ARROW_WITH_OPENTELEMETRY */ +#define ARROW_WITH_RE2 +#define ARROW_WITH_SNAPPY +/* #undef ARROW_WITH_UCX */ +#define ARROW_WITH_UTF8PROC +#define ARROW_WITH_ZLIB +#define ARROW_WITH_ZSTD +#define PARQUET_REQUIRE_ENCRYPTION diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/crc32.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/crc32.h new file mode 100644 index 0000000000000000000000000000000000000000..155cf7cfae1061feda9ae436a5f966b90cbabc6a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/crc32.h @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#include +#include + +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +/// \brief Compute the CRC32 checksum of the given data +/// +/// This function computes CRC32 with the polynomial 0x04C11DB7, +/// as used in zlib and others (note this is different from CRC32C). +/// To compute a running CRC32, pass the previous value in `prev`, +/// otherwise `prev` should be 0. +ARROW_EXPORT +uint32_t crc32(uint32_t prev, const void* data, size_t length); + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/debug.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/debug.h new file mode 100644 index 0000000000000000000000000000000000000000..ed38a4dcf7ab87aad4db906dd8b6abc058387f8e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/debug.h @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +ARROW_EXPORT +void DebugTrap(); + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/delimiting.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/delimiting.h new file mode 100644 index 0000000000000000000000000000000000000000..161ad0bfddfc5a52040256a9cb39b5af96b876db --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/delimiting.h @@ -0,0 +1,181 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/status.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Buffer; + +class ARROW_EXPORT BoundaryFinder { + public: + BoundaryFinder() = default; + + virtual ~BoundaryFinder(); + + /// \brief Find the position of the first delimiter inside block + /// + /// `partial` is taken to be the beginning of the block, and `block` + /// its continuation. Also, `partial` doesn't contain a delimiter. 
+ /// + /// The returned `out_pos` is relative to `block`'s start and should point + /// to the first character after the first delimiter. + /// `out_pos` will be -1 if no delimiter is found. + virtual Status FindFirst(std::string_view partial, std::string_view block, + int64_t* out_pos) = 0; + + /// \brief Find the position of the last delimiter inside block + /// + /// The returned `out_pos` is relative to `block`'s start and should point + /// to the first character after the last delimiter. + /// `out_pos` will be -1 if no delimiter is found. + virtual Status FindLast(std::string_view block, int64_t* out_pos) = 0; + + /// \brief Find the position of the Nth delimiter inside the block + /// + /// `partial` is taken to be the beginning of the block, and `block` + /// its continuation. Also, `partial` doesn't contain a delimiter. + /// + /// The returned `out_pos` is relative to `block`'s start and should point + /// to the first character after the first delimiter. + /// `out_pos` will be -1 if no delimiter is found. + /// + /// The returned `num_found` is the number of delimiters actually found + virtual Status FindNth(std::string_view partial, std::string_view block, int64_t count, + int64_t* out_pos, int64_t* num_found) = 0; + + static constexpr int64_t kNoDelimiterFound = -1; + + protected: + ARROW_DISALLOW_COPY_AND_ASSIGN(BoundaryFinder); +}; + +ARROW_EXPORT +std::shared_ptr MakeNewlineBoundaryFinder(); + +/// \brief A reusable block-based chunker for delimited data +/// +/// The chunker takes a block of delimited data and helps carve a sub-block +/// which begins and ends on delimiters (suitable for consumption by parsers +/// which can only parse whole objects). +class ARROW_EXPORT Chunker { + public: + explicit Chunker(std::shared_ptr delimiter); + ~Chunker(); + + /// \brief Carve up a chunk in a block of data to contain only whole objects + /// + /// Pre-conditions: + /// - `block` is the start of a valid block of delimited data + /// (i.e. 
starts just after a delimiter) + /// + /// Post-conditions: + /// - block == whole + partial + /// - `whole` is a valid block of delimited data + /// (i.e. starts just after a delimiter and ends with a delimiter) + /// - `partial` doesn't contain an entire delimited object + /// (IOW: `partial` is generally small) + /// + /// This method will look for the last delimiter in `block` and may + /// therefore be costly. + /// + /// \param[in] block data to be chunked + /// \param[out] whole subrange of block containing whole delimited objects + /// \param[out] partial subrange of block starting with a partial delimited object + Status Process(std::shared_ptr block, std::shared_ptr* whole, + std::shared_ptr* partial); + + /// \brief Carve the completion of a partial object out of a block + /// + /// Pre-conditions: + /// - `partial` is the start of a valid block of delimited data + /// (i.e. starts just after a delimiter) + /// - `block` follows `partial` in file order + /// + /// Post-conditions: + /// - block == completion + rest + /// - `partial + completion` is a valid block of delimited data + /// (i.e. starts just after a delimiter and ends with a delimiter) + /// - `completion` doesn't contain an entire delimited object + /// (IOW: `completion` is generally small) + /// + /// This method will look for the first delimiter in `block` and should + /// therefore be reasonably cheap. 
+ /// + /// \param[in] partial incomplete delimited data + /// \param[in] block delimited data following partial + /// \param[out] completion subrange of block containing the completion of partial + /// \param[out] rest subrange of block containing what completion does not cover + Status ProcessWithPartial(std::shared_ptr partial, + std::shared_ptr block, + std::shared_ptr* completion, + std::shared_ptr* rest); + + /// \brief Like ProcessWithPartial, but for the last block of a file + /// + /// This method allows for a final delimited object without a trailing delimiter + /// (ProcessWithPartial would return an error in that case). + /// + /// Pre-conditions: + /// - `partial` is the start of a valid block of delimited data + /// - `block` follows `partial` in file order and is the last data block + /// + /// Post-conditions: + /// - block == completion + rest + /// - `partial + completion` is a valid block of delimited data + /// - `completion` doesn't contain an entire delimited object + /// (IOW: `completion` is generally small) + /// + Status ProcessFinal(std::shared_ptr partial, std::shared_ptr block, + std::shared_ptr* completion, std::shared_ptr* rest); + + /// \brief Skip count number of rows + /// Pre-conditions: + /// - `partial` is the start of a valid block of delimited data + /// (i.e. 
starts just after a delimiter) + /// - `block` follows `partial` in file order + /// + /// Post-conditions: + /// - `count` is updated to indicate the number of rows that still need to be skipped + /// - If `count` is > 0 then `rest` is an incomplete block that should be a future + /// `partial` + /// - Else `rest` could be one or more valid blocks of delimited data which need to be + /// parsed + /// + /// \param[in] partial incomplete delimited data + /// \param[in] block delimited data following partial + /// \param[in] final whether this is the final chunk + /// \param[in,out] count number of rows that need to be skipped + /// \param[out] rest subrange of block containing what was not skipped + Status ProcessSkip(std::shared_ptr partial, std::shared_ptr block, + bool final, int64_t* count, std::shared_ptr* rest); + + protected: + ARROW_DISALLOW_COPY_AND_ASSIGN(Chunker); + + std::shared_ptr boundary_finder_; +}; + +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/dict_util.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/dict_util.h new file mode 100644 index 0000000000000000000000000000000000000000..a92733ae0f63d589e8dbb381c020e009c453ab4e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/dict_util.h @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/array/data.h" + +namespace arrow { +namespace dict_util { + +int64_t LogicalNullCount(const ArraySpan& span); + +} // namespace dict_util +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/formatting.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/formatting.h new file mode 100644 index 0000000000000000000000000000000000000000..f2e3622ce60d529ddee20f049c98246f04b97ff9 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/formatting.h @@ -0,0 +1,668 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// This is a private header for number-to-string formatting utilities + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/type_traits.h" +#include "arrow/util/double_conversion.h" +#include "arrow/util/macros.h" +#include "arrow/util/string.h" +#include "arrow/util/time.h" +#include "arrow/util/visibility.h" +#include "arrow/vendored/datetime.h" + +namespace arrow { +namespace internal { + +/// \brief The entry point for conversion to strings. +template +class StringFormatter; + +template +struct is_formattable { + template ::value_type> + static std::true_type Test(U*); + + template + static std::false_type Test(...); + + static constexpr bool value = decltype(Test(NULLPTR))::value; +}; + +template +using enable_if_formattable = enable_if_t::value, R>; + +template +using Return = decltype(std::declval()(std::string_view{})); + +///////////////////////////////////////////////////////////////////////// +// Boolean formatting + +template <> +class StringFormatter { + public: + explicit StringFormatter(const DataType* = NULLPTR) {} + + using value_type = bool; + + template + Return operator()(bool value, Appender&& append) { + if (value) { + const char string[] = "true"; + return append(std::string_view(string)); + } else { + const char string[] = "false"; + return append(std::string_view(string)); + } + } +}; + +///////////////////////////////////////////////////////////////////////// +// Decimals formatting + +template +class DecimalToStringFormatterMixin { + public: + explicit DecimalToStringFormatterMixin(const DataType* type) + : scale_(static_cast(type)->scale()) {} + + using value_type = typename TypeTraits::CType; + + template + Return operator()(const value_type& value, Appender&& append) { + return append(value.ToString(scale_)); + } + + private: + int32_t scale_; +}; + +template <> +class StringFormatter + : public 
DecimalToStringFormatterMixin { + using DecimalToStringFormatterMixin::DecimalToStringFormatterMixin; +}; + +template <> +class StringFormatter + : public DecimalToStringFormatterMixin { + using DecimalToStringFormatterMixin::DecimalToStringFormatterMixin; +}; + +template <> +class StringFormatter + : public DecimalToStringFormatterMixin { + using DecimalToStringFormatterMixin::DecimalToStringFormatterMixin; +}; + +template <> +class StringFormatter + : public DecimalToStringFormatterMixin { + using DecimalToStringFormatterMixin::DecimalToStringFormatterMixin; +}; + +///////////////////////////////////////////////////////////////////////// +// Integer formatting + +namespace detail { + +// A 2x100 direct table mapping integers in [0..99] to their decimal representations. +ARROW_EXPORT extern const char digit_pairs[]; + +// Based on fmtlib's format_int class: +// Write digits from right to left into a stack allocated buffer. +// \pre *cursor points to the byte after the one that will be written. +// \post *cursor points to the byte that was written. 
+inline void FormatOneChar(char c, char** cursor) { *(--(*cursor)) = c; } + +template +void FormatOneDigit(Int value, char** cursor) { + assert(value >= 0 && value <= 9); + FormatOneChar(static_cast('0' + value), cursor); +} + +// GH-35662: I don't know why but the following combination causes SEGV: +// * template implementation without inline +// * MinGW +// * Release build +template +inline void FormatTwoDigits(Int value, char** cursor) { + assert(value >= 0 && value <= 99); + auto digit_pair = &digit_pairs[value * 2]; + FormatOneChar(digit_pair[1], cursor); + FormatOneChar(digit_pair[0], cursor); +} + +template +void FormatAllDigits(Int value, char** cursor) { + assert(value >= 0); + while (value >= 100) { + FormatTwoDigits(value % 100, cursor); + value /= 100; + } + + if (value >= 10) { + FormatTwoDigits(value, cursor); + } else { + FormatOneDigit(value, cursor); + } +} + +template +void FormatAllDigitsLeftPadded(Int value, size_t pad, char pad_char, char** cursor) { + auto end = *cursor - pad; + FormatAllDigits(value, cursor); + while (*cursor > end) { + FormatOneChar(pad_char, cursor); + } +} + +template +std::string_view ViewDigitBuffer(const std::array& buffer, + char* cursor) { + auto buffer_end = buffer.data() + BUFFER_SIZE; + return {cursor, static_cast(buffer_end - cursor)}; +} + +template ::type> +constexpr UInt Abs(Int value) { + return value < 0 ? ~static_cast(value) + 1 : static_cast(value); +} + +template +constexpr size_t Digits10(Int value) { + return value <= 9 ? 
1 : Digits10(value / 10) + 1; +} + +} // namespace detail + +template +class IntToStringFormatterMixin { + public: + explicit IntToStringFormatterMixin(const DataType* = NULLPTR) {} + + using value_type = typename ARROW_TYPE::c_type; + + template + Return operator()(value_type value, Appender&& append) { + constexpr size_t buffer_size = + detail::Digits10(std::numeric_limits::max()) + 1; + + std::array buffer; + char* cursor = buffer.data() + buffer_size; + detail::FormatAllDigits(detail::Abs(value), &cursor); + if (value < 0) { + detail::FormatOneChar('-', &cursor); + } + return append(detail::ViewDigitBuffer(buffer, cursor)); + } +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +///////////////////////////////////////////////////////////////////////// +// Floating-point formatting + +class ARROW_EXPORT FloatToStringFormatter { + public: + FloatToStringFormatter(); + FloatToStringFormatter(int flags, 
const char* inf_symbol, const char* nan_symbol, + char exp_character, int decimal_in_shortest_low, + int decimal_in_shortest_high, + int max_leading_padding_zeroes_in_precision_mode, + int max_trailing_padding_zeroes_in_precision_mode); + ~FloatToStringFormatter(); + + // Returns the number of characters written + int FormatFloat(float v, char* out_buffer, int out_size); + int FormatFloat(double v, char* out_buffer, int out_size); + int FormatFloat(uint16_t v, char* out_buffer, int out_size); + + protected: + struct Impl; + std::unique_ptr impl_; +}; + +template +class FloatToStringFormatterMixin : public FloatToStringFormatter { + public: + using value_type = typename ARROW_TYPE::c_type; + + static constexpr int buffer_size = 50; + + explicit FloatToStringFormatterMixin(const DataType* = NULLPTR) {} + + FloatToStringFormatterMixin(int flags, const char* inf_symbol, const char* nan_symbol, + char exp_character, int decimal_in_shortest_low, + int decimal_in_shortest_high, + int max_leading_padding_zeroes_in_precision_mode, + int max_trailing_padding_zeroes_in_precision_mode) + : FloatToStringFormatter(flags, inf_symbol, nan_symbol, exp_character, + decimal_in_shortest_low, decimal_in_shortest_high, + max_leading_padding_zeroes_in_precision_mode, + max_trailing_padding_zeroes_in_precision_mode) {} + + template + Return operator()(value_type value, Appender&& append) { + char buffer[buffer_size]; + int size = FormatFloat(value, buffer, buffer_size); + return append(std::string_view(buffer, size)); + } +}; + +template <> +class StringFormatter : public FloatToStringFormatterMixin { + public: + using FloatToStringFormatterMixin::FloatToStringFormatterMixin; +}; + +template <> +class StringFormatter : public FloatToStringFormatterMixin { + public: + using FloatToStringFormatterMixin::FloatToStringFormatterMixin; +}; + +template <> +class StringFormatter : public FloatToStringFormatterMixin { + public: + using FloatToStringFormatterMixin::FloatToStringFormatterMixin; +}; 
+ +///////////////////////////////////////////////////////////////////////// +// Temporal formatting + +namespace detail { + +constexpr size_t BufferSizeYYYY_MM_DD() { + // "-"? "99999-12-31" + return 1 + detail::Digits10(99999) + 1 + detail::Digits10(12) + 1 + + detail::Digits10(31); +} + +inline void FormatYYYY_MM_DD(arrow_vendored::date::year_month_day ymd, char** cursor) { + FormatTwoDigits(static_cast(ymd.day()), cursor); + FormatOneChar('-', cursor); + FormatTwoDigits(static_cast(ymd.month()), cursor); + FormatOneChar('-', cursor); + auto year = static_cast(ymd.year()); + const auto is_neg_year = year < 0; + year = std::abs(year); + assert(year <= 99999); + FormatTwoDigits(year % 100, cursor); + year /= 100; + FormatTwoDigits(year % 100, cursor); + if (year >= 100) { + FormatOneDigit(year / 100, cursor); + } + if (is_neg_year) { + FormatOneChar('-', cursor); + } +} + +template +constexpr size_t BufferSizeHH_MM_SS() { + // "23:59:59" ("." "9"+)? + return detail::Digits10(23) + 1 + detail::Digits10(59) + 1 + detail::Digits10(59) + 1 + + detail::Digits10(Duration::period::den) - 1; +} + +template +void FormatHH_MM_SS(arrow_vendored::date::hh_mm_ss hms, char** cursor) { + constexpr size_t subsecond_digits = Digits10(Duration::period::den) - 1; + if (subsecond_digits != 0) { + FormatAllDigitsLeftPadded(hms.subseconds().count(), subsecond_digits, '0', cursor); + FormatOneChar('.', cursor); + } + FormatTwoDigits(hms.seconds().count(), cursor); + FormatOneChar(':', cursor); + FormatTwoDigits(hms.minutes().count(), cursor); + FormatOneChar(':', cursor); + FormatTwoDigits(hms.hours().count(), cursor); +} + +// Some out-of-bound datetime values would result in erroneous printing +// because of silent integer wraparound in the `arrow_vendored::date` library. +// +// To avoid such misprinting, we must therefore check the bounds explicitly. 
+// The bounds correspond to start of year -32767 and end of year 32767, +// respectively (-32768 is an invalid year value in `arrow_vendored::date`). +// +// Note these values are the same as documented for C++20: +// https://en.cppreference.com/w/cpp/chrono/year_month_day/operator_days +template +bool IsDateTimeInRange(Unit duration) { + constexpr Unit kMinIncl = + std::chrono::duration_cast(arrow_vendored::date::days{-12687428}); + constexpr Unit kMaxExcl = + std::chrono::duration_cast(arrow_vendored::date::days{11248738}); + return duration >= kMinIncl && duration < kMaxExcl; +} + +// IsDateTimeInRange() specialization for nanoseconds: a 64-bit number of +// nanoseconds cannot represent years outside of the [-32767, 32767] +// range, and the {kMinIncl, kMaxExcl} constants above would overflow. +constexpr bool IsDateTimeInRange(std::chrono::nanoseconds duration) { return true; } + +template +bool IsTimeInRange(Unit duration) { + constexpr Unit kMinIncl = std::chrono::duration_cast(std::chrono::seconds{0}); + constexpr Unit kMaxExcl = std::chrono::duration_cast(std::chrono::seconds{86400}); + return duration >= kMinIncl && duration < kMaxExcl; +} + +template +Return FormatOutOfRange(RawValue&& raw_value, Appender&& append) { + // XXX locale-sensitive but good enough for now + std::string formatted = ""; + return append(std::move(formatted)); +} + +const auto kEpoch = arrow_vendored::date::sys_days{arrow_vendored::date::jan / 1 / 1970}; + +} // namespace detail + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +class DateToStringFormatterMixin { + public: + explicit DateToStringFormatterMixin(const DataType* = NULLPTR) {} + + protected: + template + Return FormatDays(arrow_vendored::date::days since_epoch, Appender&& append) { + arrow_vendored::date::sys_days timepoint_days{since_epoch}; + + constexpr size_t buffer_size = detail::BufferSizeYYYY_MM_DD(); + + std::array 
buffer; + char* cursor = buffer.data() + buffer_size; + + detail::FormatYYYY_MM_DD(arrow_vendored::date::year_month_day{timepoint_days}, + &cursor); + return append(detail::ViewDigitBuffer(buffer, cursor)); + } +}; + +template <> +class StringFormatter : public DateToStringFormatterMixin { + public: + using value_type = typename Date32Type::c_type; + + using DateToStringFormatterMixin::DateToStringFormatterMixin; + + template + Return operator()(value_type value, Appender&& append) { + const auto since_epoch = arrow_vendored::date::days{value}; + if (!ARROW_PREDICT_TRUE(detail::IsDateTimeInRange(since_epoch))) { + return detail::FormatOutOfRange(value, append); + } + return FormatDays(since_epoch, std::forward(append)); + } +}; + +template <> +class StringFormatter : public DateToStringFormatterMixin { + public: + using value_type = typename Date64Type::c_type; + + using DateToStringFormatterMixin::DateToStringFormatterMixin; + + template + Return operator()(value_type value, Appender&& append) { + const auto since_epoch = std::chrono::milliseconds{value}; + if (!ARROW_PREDICT_TRUE(detail::IsDateTimeInRange(since_epoch))) { + return detail::FormatOutOfRange(value, append); + } + return FormatDays(std::chrono::duration_cast(since_epoch), + std::forward(append)); + } +}; + +template <> +class StringFormatter { + public: + using value_type = int64_t; + + explicit StringFormatter(const DataType* type) + : unit_(checked_cast(*type).unit()), + timezone_(checked_cast(*type).timezone()) {} + + template + Return operator()(Duration, value_type value, Appender&& append) { + using arrow_vendored::date::days; + + const Duration since_epoch{value}; + if (!ARROW_PREDICT_TRUE(detail::IsDateTimeInRange(since_epoch))) { + return detail::FormatOutOfRange(value, append); + } + + const auto timepoint = detail::kEpoch + since_epoch; + // Round days towards zero + // (the naive approach of using arrow_vendored::date::floor() would + // result in UB for very large negative timestamps, 
similarly as + // https://github.com/HowardHinnant/date/issues/696) + auto timepoint_days = std::chrono::time_point_cast(timepoint); + Duration since_midnight; + if (timepoint_days <= timepoint) { + // Year >= 1970 + since_midnight = timepoint - timepoint_days; + } else { + // Year < 1970 + since_midnight = days(1) - (timepoint_days - timepoint); + timepoint_days -= days(1); + } + + // YYYY_MM_DD " " HH_MM_SS "Z"? + constexpr size_t buffer_size = + detail::BufferSizeYYYY_MM_DD() + 1 + detail::BufferSizeHH_MM_SS() + 1; + + std::array buffer; + char* cursor = buffer.data() + buffer_size; + + if (timezone_.size() > 0) { + detail::FormatOneChar('Z', &cursor); + } + detail::FormatHH_MM_SS(arrow_vendored::date::make_time(since_midnight), &cursor); + detail::FormatOneChar(' ', &cursor); + detail::FormatYYYY_MM_DD(timepoint_days, &cursor); + return append(detail::ViewDigitBuffer(buffer, cursor)); + } + + template + Return operator()(value_type value, Appender&& append) { + return util::VisitDuration(unit_, *this, value, std::forward(append)); + } + + private: + TimeUnit::type unit_; + std::string timezone_; +}; + +template +class StringFormatter> { + public: + using value_type = typename T::c_type; + + explicit StringFormatter(const DataType* type) + : unit_(checked_cast(*type).unit()) {} + + template + Return operator()(Duration, value_type count, Appender&& append) { + const Duration since_midnight{count}; + if (!ARROW_PREDICT_TRUE(detail::IsTimeInRange(since_midnight))) { + return detail::FormatOutOfRange(count, append); + } + + constexpr size_t buffer_size = detail::BufferSizeHH_MM_SS(); + + std::array buffer; + char* cursor = buffer.data() + buffer_size; + + detail::FormatHH_MM_SS(arrow_vendored::date::make_time(since_midnight), &cursor); + return append(detail::ViewDigitBuffer(buffer, cursor)); + } + + template + Return operator()(value_type value, Appender&& append) { + return util::VisitDuration(unit_, *this, value, std::forward(append)); + } + + private: + 
TimeUnit::type unit_; +}; + +template <> +class StringFormatter { + public: + using value_type = MonthIntervalType::c_type; + + explicit StringFormatter(const DataType*) {} + + template + Return operator()(value_type interval, Appender&& append) { + constexpr size_t buffer_size = + /*'m'*/ 3 + /*negative signs*/ 1 + + /*months*/ detail::Digits10(std::numeric_limits::max()); + std::array buffer; + char* cursor = buffer.data() + buffer_size; + + detail::FormatOneChar('M', &cursor); + detail::FormatAllDigits(detail::Abs(interval), &cursor); + if (interval < 0) detail::FormatOneChar('-', &cursor); + + return append(detail::ViewDigitBuffer(buffer, cursor)); + } +}; + +template <> +class StringFormatter { + public: + using value_type = DayTimeIntervalType::DayMilliseconds; + + explicit StringFormatter(const DataType*) {} + + template + Return operator()(value_type interval, Appender&& append) { + constexpr size_t buffer_size = + /*d, ms*/ 3 + /*negative signs*/ 2 + + /*days/milliseconds*/ 2 * detail::Digits10(std::numeric_limits::max()); + std::array buffer; + char* cursor = buffer.data() + buffer_size; + + detail::FormatOneChar('s', &cursor); + detail::FormatOneChar('m', &cursor); + detail::FormatAllDigits(detail::Abs(interval.milliseconds), &cursor); + if (interval.milliseconds < 0) detail::FormatOneChar('-', &cursor); + + detail::FormatOneChar('d', &cursor); + detail::FormatAllDigits(detail::Abs(interval.days), &cursor); + if (interval.days < 0) detail::FormatOneChar('-', &cursor); + + return append(detail::ViewDigitBuffer(buffer, cursor)); + } +}; + +template <> +class StringFormatter { + public: + using value_type = MonthDayNanoIntervalType::MonthDayNanos; + + explicit StringFormatter(const DataType*) {} + + template + Return operator()(value_type interval, Appender&& append) { + constexpr size_t buffer_size = + /*m, d, ns*/ 4 + /*negative signs*/ 3 + + /*months/days*/ 2 * detail::Digits10(std::numeric_limits::max()) + + /*nanoseconds*/ 
detail::Digits10(std::numeric_limits::max()); + std::array buffer; + char* cursor = buffer.data() + buffer_size; + + detail::FormatOneChar('s', &cursor); + detail::FormatOneChar('n', &cursor); + detail::FormatAllDigits(detail::Abs(interval.nanoseconds), &cursor); + if (interval.nanoseconds < 0) detail::FormatOneChar('-', &cursor); + + detail::FormatOneChar('d', &cursor); + detail::FormatAllDigits(detail::Abs(interval.days), &cursor); + if (interval.days < 0) detail::FormatOneChar('-', &cursor); + + detail::FormatOneChar('M', &cursor); + detail::FormatAllDigits(detail::Abs(interval.months), &cursor); + if (interval.months < 0) detail::FormatOneChar('-', &cursor); + + return append(detail::ViewDigitBuffer(buffer, cursor)); + } +}; + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util_overflow.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util_overflow.h new file mode 100644 index 0000000000000000000000000000000000000000..ffe78be2470ddb846b5816be632e9921c041a23e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util_overflow.h @@ -0,0 +1,118 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/status.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +// "safe-math.h" includes from the Windows headers. +#include "arrow/util/windows_compatibility.h" +#include "arrow/vendored/portable-snippets/safe-math.h" +// clang-format off (avoid include reordering) +#include "arrow/util/windows_fixup.h" +// clang-format on + +namespace arrow { +namespace internal { + +// Define functions AddWithOverflow, SubtractWithOverflow, MultiplyWithOverflow +// with the signature `bool(T u, T v, T* out)` where T is an integer type. +// On overflow, these functions return true. Otherwise, false is returned +// and `out` is updated with the result of the operation. + +#define OP_WITH_OVERFLOW(_func_name, _psnip_op, _type, _psnip_type) \ + [[nodiscard]] static inline bool _func_name(_type u, _type v, _type* out) { \ + return !psnip_safe_##_psnip_type##_##_psnip_op(out, u, v); \ + } + +#define OPS_WITH_OVERFLOW(_func_name, _psnip_op) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, int8_t, int8) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, int16_t, int16) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, int32_t, int32) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, int64_t, int64) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, uint8_t, uint8) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, uint16_t, uint16) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, uint32_t, uint32) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, uint64_t, uint64) + +OPS_WITH_OVERFLOW(AddWithOverflow, add) +OPS_WITH_OVERFLOW(SubtractWithOverflow, sub) +OPS_WITH_OVERFLOW(MultiplyWithOverflow, mul) +OPS_WITH_OVERFLOW(DivideWithOverflow, div) + +#undef OP_WITH_OVERFLOW +#undef OPS_WITH_OVERFLOW + +// Define function NegateWithOverflow with the signature `bool(T u, T* out)` +// where T is a signed integer type. 
On overflow, these functions return true. +// Otherwise, false is returned and `out` is updated with the result of the +// operation. + +#define UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, _type, _psnip_type) \ + [[nodiscard]] static inline bool _func_name(_type u, _type* out) { \ + return !psnip_safe_##_psnip_type##_##_psnip_op(out, u); \ + } + +#define SIGNED_UNARY_OPS_WITH_OVERFLOW(_func_name, _psnip_op) \ + UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int8_t, int8) \ + UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int16_t, int16) \ + UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int32_t, int32) \ + UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int64_t, int64) + +SIGNED_UNARY_OPS_WITH_OVERFLOW(NegateWithOverflow, neg) + +#undef UNARY_OP_WITH_OVERFLOW +#undef SIGNED_UNARY_OPS_WITH_OVERFLOW + +/// Signed addition with well-defined behaviour on overflow (as unsigned) +template +SignedInt SafeSignedAdd(SignedInt u, SignedInt v) { + using UnsignedInt = typename std::make_unsigned::type; + return static_cast(static_cast(u) + + static_cast(v)); +} + +/// Signed subtraction with well-defined behaviour on overflow (as unsigned) +template +SignedInt SafeSignedSubtract(SignedInt u, SignedInt v) { + using UnsignedInt = typename std::make_unsigned::type; + return static_cast(static_cast(u) - + static_cast(v)); +} + +/// Signed negation with well-defined behaviour on overflow (as unsigned) +template +SignedInt SafeSignedNegate(SignedInt u) { + using UnsignedInt = typename std::make_unsigned::type; + return static_cast(~static_cast(u) + 1); +} + +/// Signed left shift with well-defined behaviour on negative numbers or overflow +template +SignedInt SafeLeftShift(SignedInt u, Shift shift) { + using UnsignedInt = typename std::make_unsigned::type; + return static_cast(static_cast(u) << shift); +} + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/launder.h 
b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/launder.h new file mode 100644 index 0000000000000000000000000000000000000000..9e4533c4b4760a416b0aca4b91c32ffd324d7f08 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/launder.h @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +namespace arrow { +namespace internal { + +#if __cpp_lib_launder +using std::launder; +#else +template +constexpr T* launder(T* p) noexcept { + return p; +} +#endif + +} // namespace internal +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/logging.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/logging.h new file mode 100644 index 0000000000000000000000000000000000000000..04c6bc21cac73669c0a4e14c1525c1a69f354ad0 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/logging.h @@ -0,0 +1,277 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#ifdef GANDIVA_IR + +// The LLVM IR code doesn't have an NDEBUG mode. And, it shouldn't include references to +// streams or stdc++. So, making the DCHECK calls void in that case. + +# define ARROW_IGNORE_EXPR(expr) ((void)(expr)) + +# define DCHECK(condition) ARROW_IGNORE_EXPR(condition) +# define DCHECK_OK(status) ARROW_IGNORE_EXPR(status) +# define DCHECK_EQ(val1, val2) ARROW_IGNORE_EXPR(val1) +# define DCHECK_NE(val1, val2) ARROW_IGNORE_EXPR(val1) +# define DCHECK_LE(val1, val2) ARROW_IGNORE_EXPR(val1) +# define DCHECK_LT(val1, val2) ARROW_IGNORE_EXPR(val1) +# define DCHECK_GE(val1, val2) ARROW_IGNORE_EXPR(val1) +# define DCHECK_GT(val1, val2) ARROW_IGNORE_EXPR(val1) + +#else // !GANDIVA_IR + +# include +# include +# include + +# include "arrow/util/macros.h" +# include "arrow/util/visibility.h" + +namespace arrow { +namespace util { + +enum class ArrowLogLevel : int { + ARROW_TRACE = -2, + ARROW_DEBUG = -1, + ARROW_INFO = 0, + ARROW_WARNING = 1, + ARROW_ERROR = 2, + ARROW_FATAL = 3 +}; + +# define ARROW_LOG_INTERNAL(level) ::arrow::util::ArrowLog(__FILE__, __LINE__, level) +# define ARROW_LOG(level) ARROW_LOG_INTERNAL(::arrow::util::ArrowLogLevel::ARROW_##level) + +# define ARROW_IGNORE_EXPR(expr) ((void)(expr)) + +# define ARROW_CHECK_OR_LOG(condition, level) \ + ARROW_PREDICT_TRUE(condition) \ + ? 
ARROW_IGNORE_EXPR(0) \ + : ::arrow::util::Voidify() & ARROW_LOG(level) << " Check failed: " #condition " " + +# define ARROW_CHECK(condition) ARROW_CHECK_OR_LOG(condition, FATAL) + +// If 'to_call' returns a bad status, CHECK immediately with a logged message +// of 'msg' followed by the status. +# define ARROW_CHECK_OK_PREPEND(to_call, msg, level) \ + do { \ + ::arrow::Status _s = (to_call); \ + ARROW_CHECK_OR_LOG(_s.ok(), level) \ + << "Operation failed: " << ARROW_STRINGIFY(to_call) << "\n" \ + << (msg) << ": " << _s.ToString(); \ + } while (false) + +// If the status is bad, CHECK immediately, appending the status to the +// logged message. +# define ARROW_CHECK_OK(s) ARROW_CHECK_OK_PREPEND(s, "Bad status", FATAL) + +# define ARROW_CHECK_EQ(val1, val2) ARROW_CHECK((val1) == (val2)) +# define ARROW_CHECK_NE(val1, val2) ARROW_CHECK((val1) != (val2)) +# define ARROW_CHECK_LE(val1, val2) ARROW_CHECK((val1) <= (val2)) +# define ARROW_CHECK_LT(val1, val2) ARROW_CHECK((val1) < (val2)) +# define ARROW_CHECK_GE(val1, val2) ARROW_CHECK((val1) >= (val2)) +# define ARROW_CHECK_GT(val1, val2) ARROW_CHECK((val1) > (val2)) + +# ifdef NDEBUG +# define ARROW_DFATAL ::arrow::util::ArrowLogLevel::ARROW_WARNING + +// CAUTION: DCHECK_OK() always evaluates its argument, but other DCHECK*() macros +// only do so in debug mode. 
+ +# define ARROW_DCHECK(condition) \ + while (false) ARROW_IGNORE_EXPR(condition); \ + while (false) ::arrow::util::detail::NullLog() +# define ARROW_DCHECK_OK(s) \ + ARROW_IGNORE_EXPR(s); \ + while (false) ::arrow::util::detail::NullLog() +# define ARROW_DCHECK_EQ(val1, val2) \ + while (false) ARROW_IGNORE_EXPR(val1); \ + while (false) ARROW_IGNORE_EXPR(val2); \ + while (false) ::arrow::util::detail::NullLog() +# define ARROW_DCHECK_NE(val1, val2) \ + while (false) ARROW_IGNORE_EXPR(val1); \ + while (false) ARROW_IGNORE_EXPR(val2); \ + while (false) ::arrow::util::detail::NullLog() +# define ARROW_DCHECK_LE(val1, val2) \ + while (false) ARROW_IGNORE_EXPR(val1); \ + while (false) ARROW_IGNORE_EXPR(val2); \ + while (false) ::arrow::util::detail::NullLog() +# define ARROW_DCHECK_LT(val1, val2) \ + while (false) ARROW_IGNORE_EXPR(val1); \ + while (false) ARROW_IGNORE_EXPR(val2); \ + while (false) ::arrow::util::detail::NullLog() +# define ARROW_DCHECK_GE(val1, val2) \ + while (false) ARROW_IGNORE_EXPR(val1); \ + while (false) ARROW_IGNORE_EXPR(val2); \ + while (false) ::arrow::util::detail::NullLog() +# define ARROW_DCHECK_GT(val1, val2) \ + while (false) ARROW_IGNORE_EXPR(val1); \ + while (false) ARROW_IGNORE_EXPR(val2); \ + while (false) ::arrow::util::detail::NullLog() + +# else +# define ARROW_DFATAL ::arrow::util::ArrowLogLevel::ARROW_FATAL + +# define ARROW_DCHECK ARROW_CHECK +# define ARROW_DCHECK_OK ARROW_CHECK_OK +# define ARROW_DCHECK_EQ ARROW_CHECK_EQ +# define ARROW_DCHECK_NE ARROW_CHECK_NE +# define ARROW_DCHECK_LE ARROW_CHECK_LE +# define ARROW_DCHECK_LT ARROW_CHECK_LT +# define ARROW_DCHECK_GE ARROW_CHECK_GE +# define ARROW_DCHECK_GT ARROW_CHECK_GT + +# endif // NDEBUG + +// These are internal-use macros and should not be used in public headers. 
+# ifndef DCHECK +# define DCHECK ARROW_DCHECK +# endif +# ifndef DCHECK_OK +# define DCHECK_OK ARROW_DCHECK_OK +# endif +# ifndef DCHECK_EQ +# define DCHECK_EQ ARROW_DCHECK_EQ +# endif +# ifndef DCHECK_NE +# define DCHECK_NE ARROW_DCHECK_NE +# endif +# ifndef DCHECK_LE +# define DCHECK_LE ARROW_DCHECK_LE +# endif +# ifndef DCHECK_LT +# define DCHECK_LT ARROW_DCHECK_LT +# endif +# ifndef DCHECK_GE +# define DCHECK_GE ARROW_DCHECK_GE +# endif +# ifndef DCHECK_GT +# define DCHECK_GT ARROW_DCHECK_GT +# endif + +// This code is adapted from +// https://github.com/ray-project/ray/blob/master/src/ray/util/logging.h. + +// To make the logging lib pluggable with other logging libs and make +// the implementation unawared by the user, ArrowLog is only a declaration +// which hide the implementation into logging.cc file. +// In logging.cc, we can choose different log libs using different macros. + +// This is also a null log which does not output anything. +class ARROW_EXPORT ArrowLogBase { + public: + virtual ~ArrowLogBase() {} + + virtual bool IsEnabled() const { return false; } + + template + ArrowLogBase& operator<<(const T& t) { + if (IsEnabled()) { + Stream() << t; + } + return *this; + } + + protected: + virtual std::ostream& Stream() = 0; +}; + +class ARROW_EXPORT ArrowLog : public ArrowLogBase { + public: + ArrowLog(const char* file_name, int line_number, ArrowLogLevel severity); + ~ArrowLog() override; + + /// Return whether or not current logging instance is enabled. + /// + /// \return True if logging is enabled and false otherwise. + bool IsEnabled() const override; + + /// The init function of arrow log for a program which should be called only once. + /// + /// \param appName The app name which starts the log. + /// \param severity_threshold Logging threshold for the program. + /// \param logDir Logging output file name. If empty, the log won't output to file. 
+ static void StartArrowLog(const std::string& appName, + ArrowLogLevel severity_threshold = ArrowLogLevel::ARROW_INFO, + const std::string& logDir = ""); + + /// The shutdown function of arrow log, it should be used with StartArrowLog as a pair. + static void ShutDownArrowLog(); + + /// Install the failure signal handler to output call stack when crash. + /// If glog is not installed, this function won't do anything. + static void InstallFailureSignalHandler(); + + /// Uninstall the signal actions installed by InstallFailureSignalHandler. + static void UninstallSignalAction(); + + /// Return whether or not the log level is enabled in current setting. + /// + /// \param log_level The input log level to test. + /// \return True if input log level is not lower than the threshold. + static bool IsLevelEnabled(ArrowLogLevel log_level); + + private: + ARROW_DISALLOW_COPY_AND_ASSIGN(ArrowLog); + + // Hide the implementation of log provider by void *. + // Otherwise, lib user may define the same macro to use the correct header file. + void* logging_provider_; + /// True if log messages should be logged and false if they should be ignored. + bool is_enabled_; + + static ArrowLogLevel severity_threshold_; + + protected: + std::ostream& Stream() override; +}; + +// This class make ARROW_CHECK compilation pass to change the << operator to void. +// This class is copied from glog. +class ARROW_EXPORT Voidify { + public: + Voidify() {} + // This has to be an operator with a precedence lower than << but + // higher than ?: + void operator&(ArrowLogBase&) {} +}; + +namespace detail { + +/// @brief A helper for the nil log sink. +/// +/// Using this helper is analogous to sending log messages to /dev/null: +/// nothing gets logged. +class NullLog { + public: + /// The no-op output operator. + /// + /// @param [in] t + /// The object to send into the nil sink. + /// @return Reference to the updated object. 
+ template + NullLog& operator<<(const T& t) { + return *this; + } +}; + +} // namespace detail +} // namespace util +} // namespace arrow + +#endif // GANDIVA_IR diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/rows_to_batches.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/rows_to_batches.h new file mode 100644 index 0000000000000000000000000000000000000000..8ad254df200efc08c5c9a4956e0e781b496b2b07 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/rows_to_batches.h @@ -0,0 +1,163 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/record_batch.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/table_builder.h" +#include "arrow/util/iterator.h" + +#include + +namespace arrow::util { + +namespace detail { + +// Default identity function row accessor. Used to for the common case where the value +// of each row iterated over is it's self also directly iterable. +[[nodiscard]] constexpr inline auto MakeDefaultRowAccessor() { + return [](auto& x) -> Result { return std::ref(x); }; +} + +// Meta-function to check if a type `T` is a range (iterable using `std::begin()` / +// `std::end()`). 
`is_range::value` will be false if `T` is not a valid range. +template +struct is_range : std::false_type {}; + +template +struct is_range())), + decltype(std::end(std::declval()))>> : std::true_type { +}; + +} // namespace detail + +/// Delete overload for `const Range&& rows` because the data's lifetime must exceed +/// the lifetime of the function call. `data` will be read when client uses the +/// `RecordBatchReader` +template +[[nodiscard]] typename std::enable_if_t::value, + Result>> +/* Result>> */ RowsToBatches( + const std::shared_ptr& schema, const Range&& rows, + DataPointConvertor&& data_point_convertor, + RowAccessor&& row_accessor = detail::MakeDefaultRowAccessor(), + MemoryPool* pool = default_memory_pool(), + const std::size_t batch_size = 1024) = delete; + +/// \brief Utility function for converting any row-based structure into an +/// `arrow::RecordBatchReader` (this can be easily converted to an `arrow::Table` using +/// `arrow::RecordBatchReader::ToTable()`). +/// +/// Examples of supported types: +/// - `std::vector>>` +/// - `std::vector` + +/// If `rows` (client’s row-based structure) is not a valid C++ range, the client will +/// need to either make it iterable, or make an adapter/wrapper that is a valid C++ +/// range. + +/// The client must provide a `DataPointConvertor` callable type that will convert the +/// structure’s data points into the corresponding arrow types. + +/// Complex nested rows can be supported by providing a custom `row_accessor` instead +/// of the default. 
+ +/// Example usage: +/// \code{.cpp} +/// auto IntConvertor = [](ArrayBuilder& array_builder, int value) { +/// return static_cast(array_builder).Append(value); +/// }; +/// std::vector> data = {{1, 2, 4}, {5, 6, 7}}; +/// auto batches = RowsToBatches(kTestSchema, data, IntConvertor); +/// \endcode + +/// \param[in] schema - The schema to be used in the `RecordBatchReader` + +/// \param[in] rows - Iterable row-based structure that will be converted to arrow +/// batches + +/// \param[in] data_point_convertor - Client provided callable type that will convert +/// the structure’s data points into the corresponding arrow types. The convertor must +/// return an error `Status` if an error happens during conversion. + +/// \param[in] row_accessor - In the common case where the value of each row iterated +/// over is it's self also directly iterable, the client can just use the default. +/// The provided callable must take the values of the `rows` range and return a +/// `std::reference_wrapper` to the data points in a given row. The data points +/// must be in order of their corresponding fields in the schema. +/// see: /ref `MakeDefaultRowAccessor` + +/// \param[in] pool - The MemoryPool to use for allocations. + +/// \param[in] batch_size - Number of rows to insert into each RecordBatch. + +/// \return `Result>>` result will be a +/// `std::shared_ptr>` if not errors occurred, else an error status. 
+template +[[nodiscard]] typename std::enable_if_t::value, + Result>> +/* Result>> */ RowsToBatches( + const std::shared_ptr& schema, const Range& rows, + DataPointConvertor&& data_point_convertor, + RowAccessor&& row_accessor = detail::MakeDefaultRowAccessor(), + MemoryPool* pool = default_memory_pool(), const std::size_t batch_size = 1024) { + auto make_next_batch = + [pool = pool, batch_size = batch_size, rows_ittr = std::begin(rows), + rows_ittr_end = std::end(rows), schema = schema, + row_accessor = std::forward(row_accessor), + data_point_convertor = std::forward( + data_point_convertor)]() mutable -> Result> { + if (rows_ittr == rows_ittr_end) return NULLPTR; + + ARROW_ASSIGN_OR_RAISE(auto record_batch_builder, + RecordBatchBuilder::Make(schema, pool, batch_size)); + + for (size_t i = 0; i < batch_size && (rows_ittr != rows_ittr_end); + i++, std::advance(rows_ittr, 1)) { + int col_index = 0; + ARROW_ASSIGN_OR_RAISE(const auto row, row_accessor(*rows_ittr)); + + // If the accessor returns a `std::reference_wrapper` unwrap if + const auto& row_unwrapped = [&]() { + if constexpr (detail::is_range::value) + return row; + else + return row.get(); + }(); + + for (auto& data_point : row_unwrapped) { + ArrayBuilder* array_builder = record_batch_builder->GetField(col_index); + ARROW_RETURN_IF(array_builder == NULLPTR, + Status::Invalid("array_builder == NULLPTR")); + + ARROW_RETURN_NOT_OK(data_point_convertor(*array_builder, data_point)); + col_index++; + } + } + + ARROW_ASSIGN_OR_RAISE(auto result, record_batch_builder->Flush()); + return result; + }; + return RecordBatchReader::MakeFromIterator(MakeFunctionIterator(make_next_batch), + schema); +} + +} // namespace arrow::util diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/spaced.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/spaced.h new file mode 100644 index 0000000000000000000000000000000000000000..8265e1d22ae0e78d7343b2fce6a0de4bc669ccc8 --- /dev/null +++ 
b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/spaced.h @@ -0,0 +1,98 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/util/bit_run_reader.h" + +namespace arrow { +namespace util { +namespace internal { + +/// \brief Compress the buffer to spaced, excluding the null entries. +/// +/// \param[in] src the source buffer +/// \param[in] num_values the size of source buffer +/// \param[in] valid_bits bitmap data indicating position of valid slots +/// \param[in] valid_bits_offset offset into valid_bits +/// \param[out] output the output buffer spaced +/// \return The size of spaced buffer. 
+template +inline int SpacedCompress(const T* src, int num_values, const uint8_t* valid_bits, + int64_t valid_bits_offset, T* output) { + int num_valid_values = 0; + + arrow::internal::SetBitRunReader reader(valid_bits, valid_bits_offset, num_values); + while (true) { + const auto run = reader.NextRun(); + if (run.length == 0) { + break; + } + std::memcpy(output + num_valid_values, src + run.position, run.length * sizeof(T)); + num_valid_values += static_cast(run.length); + } + + return num_valid_values; +} + +/// \brief Relocate values in buffer into positions of non-null values as indicated by +/// a validity bitmap. +/// +/// \param[in, out] buffer the in-place buffer +/// \param[in] num_values total size of buffer including null slots +/// \param[in] null_count number of null slots +/// \param[in] valid_bits bitmap data indicating position of valid slots +/// \param[in] valid_bits_offset offset into valid_bits +/// \return The number of values expanded, including nulls. +template +inline int SpacedExpand(T* buffer, int num_values, int null_count, + const uint8_t* valid_bits, int64_t valid_bits_offset) { + // Point to end as we add the spacing from the back. 
+ int idx_decode = num_values - null_count; + + // Depending on the number of nulls, some of the value slots in buffer may + // be uninitialized, and this will cause valgrind warnings / potentially UB + std::memset(static_cast(buffer + idx_decode), 0, null_count * sizeof(T)); + if (idx_decode == 0) { + // All nulls, nothing more to do + return num_values; + } + + arrow::internal::ReverseSetBitRunReader reader(valid_bits, valid_bits_offset, + num_values); + while (true) { + const auto run = reader.NextRun(); + if (run.length == 0) { + break; + } + idx_decode -= static_cast(run.length); + assert(idx_decode >= 0); + std::memmove(buffer + run.position, buffer + idx_decode, run.length * sizeof(T)); + } + + // Otherwise caller gave an incorrect null_count + assert(idx_decode == 0); + return num_values; +} + +} // namespace internal +} // namespace util +} // namespace arrow diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/span.h b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/span.h new file mode 100644 index 0000000000000000000000000000000000000000..71cf9ed44890a78675e4187e03b4c01bff60ae54 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/span.h @@ -0,0 +1,156 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
// See the License for the
// specific language governing permissions and limitations
// under the License.

// NOTE(review): the original chunk was mangled by HTML-tag stripping (empty
// #include lines, `span` with no template arguments, gutted enable_if/void_t
// expressions). Reconstructed to match upstream Apache Arrow
// arrow/util/span.h — verify against the vendored pyarrow version.

#pragma once

#include <array>
#include <cstddef>
#include <cstring>
#include <iterator>
#include <type_traits>

namespace arrow::util {

template <typename T>
class span;

// This trait is used to check if a type R can be used to construct a span.
// Specifically, it checks if std::data(R) and std::size(R) are valid expressions
// that may be passed to the span(T*, size_t) constructor. The reason this trait
// is needed rather than expressing this directly in the relevant span constructor
// is that this check requires instantiating span<T>, which would violate the
// C++ standard if written directly in the constructor's enable_if clause
// because span<T> is an incomplete type at that point. By defining this trait
// instead, we add an extra level of indirection that lets us delay the
// evaluation of the template until the first time the associated constructor
// is actually called, at which point span<T> is a complete type.
//
// Note that most compilers do support the noncompliant construct, but nvcc
// does not. See https://github.com/apache/arrow/issues/40252
template <typename Span, typename R, typename Enable = void>
struct ConstructibleFromDataAndSize : std::false_type {};

template <typename T, typename R>
struct ConstructibleFromDataAndSize<
    span<T>, R,
    std::void_t<decltype(span<T>{std::data(std::declval<R>()),
                                 std::size(std::declval<R>())})>> : std::true_type {};

/// std::span polyfill.
///
/// Does not support static extents.
template <typename T>
class span {
  static_assert(sizeof(T),
                R"(
std::span allows contiguous_iterators instead of just pointers, the enforcement
of which requires T to be a complete type. arrow::util::span does not support
contiguous_iterators, but T is still required to be a complete type to prevent
writing code which would break when it is replaced by std::span.)");

 public:
  using element_type = T;
  using value_type = std::remove_cv_t<T>;
  using iterator = T*;
  using const_iterator = T const*;

  span() = default;
  span(const span&) = default;
  span& operator=(const span&) = default;

  // Implicit conversion span<M> -> span<M const> (e.g. span<int> -> span<const int>).
  template <typename M, typename = std::enable_if_t<std::is_same_v<T, M const>>>
  // NOLINTNEXTLINE runtime/explicit
  constexpr span(span<M> mut) : span{mut.data(), mut.size()} {}

  constexpr span(T* data, size_t count) : data_{data}, size_{count} {}

  constexpr span(T* begin, T* end)
      : data_{begin}, size_{static_cast<size_t>(end - begin)} {}

  // Construct from any contiguous range with a compatible element type.
  template <
      typename R,
      std::enable_if_t<ConstructibleFromDataAndSize<span<T>, R>::value, bool> = true,
      typename DisableUnlessSimilarTypes = std::enable_if_t<std::is_same_v<
          std::decay_t<std::remove_pointer_t<decltype(std::data(std::declval<R>()))>>,
          std::decay_t<T>>>>
  // NOLINTNEXTLINE runtime/explicit, non-const reference
  constexpr span(R&& range) : span{std::data(range), std::size(range)} {}

  constexpr T* begin() const { return data_; }
  constexpr T* end() const { return data_ + size_; }
  constexpr T* data() const { return data_; }

  constexpr size_t size() const { return size_; }
  constexpr size_t size_bytes() const { return size_ * sizeof(T); }
  constexpr bool empty() const { return size_ == 0; }

  constexpr T& operator[](size_t i) { return data_[i]; }
  constexpr const T& operator[](size_t i) const { return data_[i]; }

  // An out-of-range offset yields an empty span rather than UB.
  constexpr span subspan(size_t offset) const {
    if (offset > size_) return {data_, data_};
    return {data_ + offset, size_ - offset};
  }

  constexpr span subspan(size_t offset, size_t count) const {
    auto out = subspan(offset);
    if (count < out.size_) {
      out.size_ = count;
    }
    return out;
  }

  constexpr bool operator==(span const& other) const {
    if (size_ != other.size_) return false;

    if constexpr (std::is_integral_v<T>) {
      if (size_ == 0) {
        return true;  // memcmp does not handle null pointers, even if size_ == 0
      }
      return std::memcmp(data_, other.data_, size_bytes()) == 0;
    } else {
      T* ptr = data_;
      for (T const& e : other) {
        if (*ptr++ != e) return false;
      }
      return true;
    }
  }
  constexpr bool operator!=(span const& other) const { return !(*this == other); }

 private:
  T* data_{};
  size_t size_{};
};

template <typename R>
span(R& range) -> span<std::remove_pointer_t<decltype(std::data(range))>>;

template <typename T>
span(T*, size_t) -> span<T>;

template <typename T>
constexpr span<const std::byte> as_bytes(span<T> s) {
  return {reinterpret_cast<const std::byte*>(s.data()), s.size_bytes()};
}

template <typename T>
constexpr span<std::byte> as_writable_bytes(span<T> s) {
  return {reinterpret_cast<std::byte*>(s.data()), s.size_bytes()};
}

}  // namespace arrow::util

// --- extraction residue: diff metadata for the next vendored file ---
// diff --git a/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_fwd.h
//          b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_fwd.h
// new file mode 100644
// index 0000000000000000000000000000000000000000..3174881f4d018c6193ff5c12a7d308e39ed75561
// --- /dev/null
// +++ b/vllm/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_fwd.h

// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
// NOTE(review): reconstructed from the HTML-stripped chunk. The bare
// `template` before WeakFuture lost its parameter list; restored as
// `template <typename T>` per upstream Arrow type_fwd.h — verify against the
// vendored pyarrow version (upstream also forward-declares Future<T>, which
// this stripped chunk does not show).

#pragma once

namespace arrow {

namespace internal {
struct Empty;
}  // namespace internal

template <typename T>
class WeakFuture;
class FutureWaiter;

class TimestampParser;

namespace internal {

class Executor;
class TaskGroup;
class ThreadPool;
class CpuInfo;

namespace tracing {

struct Scope;

}  // namespace tracing
}  // namespace internal

struct Compression {
  /// \brief Compression algorithm
  enum type {
    UNCOMPRESSED,
    SNAPPY,
    GZIP,
    BROTLI,
    ZSTD,
    LZ4,
    LZ4_FRAME,
    LZO,
    BZ2,
    LZ4_HADOOP
  };
};

namespace util {
class AsyncTaskScheduler;
class Compressor;
class Decompressor;
class Codec;
class Uri;
}  // namespace util

}  // namespace arrow