paredeyes commited on
Commit
32d2a01
·
verified ·
1 Parent(s): 1739e64

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. miniCUDA124/lib/x64/nvfatbin_static.lib +3 -0
  3. miniMSVC/VC/Tools/MSVC/14.42.34433/include/__msvc_all_public_headers.hpp +226 -0
  4. miniMSVC/VC/Tools/MSVC/14.42.34433/include/__msvc_bit_utils.hpp +426 -0
  5. miniMSVC/VC/Tools/MSVC/14.42.34433/include/__msvc_chrono.hpp +801 -0
  6. miniMSVC/VC/Tools/MSVC/14.42.34433/include/__msvc_cxx_stdatomic.hpp +138 -0
  7. miniMSVC/VC/Tools/MSVC/14.42.34433/include/__msvc_filebuf.hpp +790 -0
  8. miniMSVC/VC/Tools/MSVC/14.42.34433/include/__msvc_format_ucd_tables.hpp +551 -0
  9. miniMSVC/VC/Tools/MSVC/14.42.34433/include/__msvc_formatter.hpp +373 -0
  10. miniMSVC/VC/Tools/MSVC/14.42.34433/include/system_error +738 -0
  11. miniMSVC/VC/Tools/MSVC/14.42.34433/include/thread +446 -0
  12. miniMSVC/VC/Tools/MSVC/14.42.34433/include/threads.h +146 -0
  13. miniMSVC/VC/Tools/MSVC/14.42.34433/include/tmmintrin.h +147 -0
  14. miniMSVC/VC/Tools/MSVC/14.42.34433/include/tuple +1177 -0
  15. miniMSVC/VC/Tools/MSVC/14.42.34433/include/type_traits +0 -0
  16. miniMSVC/VC/Tools/MSVC/14.42.34433/include/typeindex +95 -0
  17. miniMSVC/VC/Tools/MSVC/14.42.34433/include/typeinfo +73 -0
  18. miniMSVC/VC/Tools/MSVC/14.42.34433/include/unordered_map +944 -0
  19. miniMSVC/VC/Tools/MSVC/14.42.34433/include/unordered_set +771 -0
  20. miniMSVC/VC/Tools/MSVC/14.42.34433/include/use_ansi.h +57 -0
  21. miniMSVC/VC/Tools/MSVC/14.42.34433/include/utility +988 -0
  22. miniMSVC/VC/Tools/MSVC/14.42.34433/include/vadefs.h +208 -0
  23. miniMSVC/VC/Tools/MSVC/14.42.34433/include/valarray +2120 -0
  24. miniMSVC/VC/Tools/MSVC/14.42.34433/include/varargs.h +49 -0
  25. miniMSVC/VC/Tools/MSVC/14.42.34433/include/variant +1718 -0
  26. miniMSVC/VC/Tools/MSVC/14.42.34433/include/vcclr.h +53 -0
  27. miniMSVC/VC/Tools/MSVC/14.42.34433/include/vccorlib.h +0 -0
  28. miniMSVC/VC/Tools/MSVC/14.42.34433/include/vcruntime.h +404 -0
  29. miniMSVC/VC/Tools/MSVC/14.42.34433/include/vcruntime_c11_atomic_support.h +1127 -0
  30. miniMSVC/VC/Tools/MSVC/14.42.34433/include/vcruntime_c11_stdatomic.h +128 -0
  31. miniMSVC/VC/Tools/MSVC/14.42.34433/include/vcruntime_exception.h +155 -0
  32. miniMSVC/VC/Tools/MSVC/14.42.34433/include/vcruntime_new.h +198 -0
  33. miniMSVC/VC/Tools/MSVC/14.42.34433/include/vcruntime_new_debug.h +64 -0
  34. miniMSVC/VC/Tools/MSVC/14.42.34433/include/vcruntime_startup.h +56 -0
  35. miniMSVC/VC/Tools/MSVC/14.42.34433/include/vcruntime_string.h +113 -0
  36. miniMSVC/VC/Tools/MSVC/14.42.34433/include/vcruntime_typeinfo.h +221 -0
  37. miniMSVC/VC/Tools/MSVC/14.42.34433/include/vector +0 -0
  38. miniMSVC/VC/Tools/MSVC/14.42.34433/include/version +9 -0
  39. miniMSVC/VC/Tools/MSVC/14.42.34433/include/wmmintrin.h +91 -0
  40. miniMSVC/VC/Tools/MSVC/14.42.34433/include/xatomic.h +132 -0
  41. miniMSVC/VC/Tools/MSVC/14.42.34433/include/xatomic_wait.h +70 -0
  42. miniMSVC/VC/Tools/MSVC/14.42.34433/include/xbit_ops.h +97 -0
  43. miniMSVC/VC/Tools/MSVC/14.42.34433/include/xcall_once.h +117 -0
  44. miniMSVC/VC/Tools/MSVC/14.42.34433/include/xcharconv.h +52 -0
  45. miniMSVC/VC/Tools/MSVC/14.42.34433/include/xcharconv_ryu.h +0 -0
  46. miniMSVC/VC/Tools/MSVC/14.42.34433/include/xcharconv_ryu_tables.h +157 -0
  47. miniMSVC/VC/Tools/MSVC/14.42.34433/include/xcharconv_tables.h +113 -0
  48. miniMSVC/VC/Tools/MSVC/14.42.34433/include/xerrc.h +108 -0
  49. miniMSVC/VC/Tools/MSVC/14.42.34433/include/xfacet +45 -0
  50. miniMSVC/VC/Tools/MSVC/14.42.34433/include/xfilesystem_abi.h +399 -0
.gitattributes CHANGED
@@ -105,3 +105,4 @@ ComfyUI-YoloWorld-EfficientSAM/efficient_sam_s_gpu.jit filter=lfs diff=lfs merge
105
  miniCUDA124/lib/x64/cudart_static.lib filter=lfs diff=lfs merge=lfs -text
106
  miniCUDA124/lib/x64/nppist.lib filter=lfs diff=lfs merge=lfs -text
107
  miniCUDA124/bin/nvlink.exe filter=lfs diff=lfs merge=lfs -text
 
 
105
  miniCUDA124/lib/x64/cudart_static.lib filter=lfs diff=lfs merge=lfs -text
106
  miniCUDA124/lib/x64/nppist.lib filter=lfs diff=lfs merge=lfs -text
107
  miniCUDA124/bin/nvlink.exe filter=lfs diff=lfs merge=lfs -text
108
+ miniCUDA124/lib/x64/nvfatbin_static.lib filter=lfs diff=lfs merge=lfs -text
miniCUDA124/lib/x64/nvfatbin_static.lib ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4b229fd6f7c3ccdd918b5f9a9e58d2974d67ff81c27d6f3729e463ea088d8ac6
3
+ size 4004556
miniMSVC/VC/Tools/MSVC/14.42.34433/include/__msvc_all_public_headers.hpp ADDED
@@ -0,0 +1,226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
3
+
4
+ // This file is intended as a test resource for tools that want to verify that they can parse all MSVC standard
5
+ // library headers without warnings. This file disables deprecations, so it should not be included in programs directly.
6
+ //
7
+ // This file may be changed, renamed, or removed at any time.
8
+
9
+ #ifndef __MSVC_ALL_PUBLIC_HEADERS_HPP
10
+ #define __MSVC_ALL_PUBLIC_HEADERS_HPP
11
+
12
+ #pragma warning(push)
13
+ #pragma warning(1 : 4668) // 'MEOW' is not defined as a preprocessor macro, replacing with '0' for '#if/#elif'
14
+
15
+ // All STL headers should protect themselves from macroized new.
16
+ #if !(defined(__CUDACC__) && defined(__clang__))
17
+ #pragma push_macro("new")
18
+ #undef new
19
+ #define new WILL NOT COMPILE
20
+ #endif // !(defined(__CUDACC__) && defined(__clang__))
21
+
22
+ // VSO-768746: mbctype.h macroizes _MS, _MP, _M1, and _M2. Include it first for test coverage.
23
+ #ifndef _MSVC_TESTING_NVCC
24
+ #include <mbctype.h>
25
+ #endif // !defined(_MSVC_TESTING_NVCC)
26
+
27
+ #define _SILENCE_CXX17_C_HEADER_DEPRECATION_WARNING
28
+ #define _SILENCE_CXX20_CISO646_REMOVED_WARNING
29
+ #define _SILENCE_EXPERIMENTAL_FILESYSTEM_DEPRECATION_WARNING
30
+ #define _SILENCE_STDEXT_HASH_DEPRECATION_WARNINGS
31
+
32
+ // Core STL Headers
33
+ #include <bit>
34
+ #include <compare>
35
+ #include <concepts>
36
+ #include <coroutine>
37
+ #include <initializer_list>
38
+ #include <limits>
39
+ #include <numbers>
40
+ #include <ratio>
41
+ #include <source_location>
42
+ #include <stdfloat>
43
+ #include <tuple>
44
+ #include <type_traits>
45
+ #include <utility>
46
+ #include <version>
47
+
48
+ // Core C Wrapper Headers
49
+ #include <cassert>
50
+ #include <cctype>
51
+ #include <cerrno>
52
+ #include <cfenv>
53
+ #include <cfloat>
54
+ #include <cinttypes>
55
+ #include <climits>
56
+ #include <clocale>
57
+ #include <csetjmp>
58
+ #include <csignal>
59
+ #include <cstdarg>
60
+ #include <cstddef>
61
+ #include <cstdint>
62
+ #include <cstdio>
63
+ #include <cstdlib>
64
+ #include <cstring>
65
+ #include <ctime>
66
+ #include <cuchar>
67
+ #include <cwchar>
68
+ #include <cwctype>
69
+
70
+ #ifndef _CORE_HEADERS_ONLY
71
+
72
+ // Non-Core STL Headers
73
+ #include <algorithm>
74
+ #include <any>
75
+ #include <array>
76
+ #include <bitset>
77
+ #include <charconv>
78
+ #include <chrono>
79
+ #include <codecvt>
80
+ #include <complex>
81
+ #include <deque>
82
+ #include <exception>
83
+ #include <expected>
84
+ #include <filesystem>
85
+ #include <format>
86
+ #include <forward_list>
87
+ #include <fstream>
88
+ #include <functional>
89
+ #include <hash_map>
90
+ #include <hash_set>
91
+ #include <iomanip>
92
+ #include <ios>
93
+ #include <iosfwd>
94
+ #include <iostream>
95
+ #include <istream>
96
+ #include <iterator>
97
+ #include <list>
98
+ #include <locale>
99
+ #include <map>
100
+ #include <mdspan>
101
+ #include <memory>
102
+ #include <memory_resource>
103
+ #include <new>
104
+ #include <numeric>
105
+ #include <optional>
106
+ #include <ostream>
107
+ #include <print>
108
+ #include <queue>
109
+ #include <random>
110
+ #include <ranges>
111
+ #include <regex>
112
+ #include <scoped_allocator>
113
+ #include <set>
114
+ #include <span>
115
+ #include <spanstream>
116
+ #include <sstream>
117
+ #include <stack>
118
+ #include <stacktrace>
119
+ #include <stdexcept>
120
+ #include <streambuf>
121
+ #include <string>
122
+ #include <string_view>
123
+ #include <strstream>
124
+ #include <syncstream>
125
+ #include <system_error>
126
+ #include <typeindex>
127
+ #include <typeinfo>
128
+ #include <unordered_map>
129
+ #include <unordered_set>
130
+ #include <valarray>
131
+ #include <variant>
132
+ #include <vector>
133
+
134
+ #ifndef _M_CEE_PURE
135
+ #include <__msvc_cxx_stdatomic.hpp>
136
+ #include <atomic>
137
+ #include <barrier>
138
+ #include <condition_variable>
139
+ #include <execution>
140
+ #include <future>
141
+ #include <latch>
142
+ #include <mutex>
143
+ #include <semaphore>
144
+ #include <shared_mutex>
145
+ #include <stop_token>
146
+ #include <thread>
147
+ #endif // !defined(_M_CEE_PURE)
148
+
149
+ // Non-Core C Wrapper Headers
150
+ #include <ccomplex>
151
+ #include <ciso646>
152
+ #include <cmath>
153
+ #include <cstdalign>
154
+ #include <cstdbool>
155
+ #include <ctgmath>
156
+
157
+ // Non-Core Experimental Headers
158
+ #include <experimental/filesystem>
159
+
160
+ #endif // !defined(_CORE_HEADERS_ONLY)
161
+
162
+ #ifndef _MSVC_TESTING_NVCC
163
+ #include <assert.h>
164
+ #include <conio.h>
165
+ #include <crtdbg.h>
166
+ #include <ctype.h>
167
+ #include <direct.h>
168
+ #include <dos.h>
169
+ #include <errno.h>
170
+ #include <excpt.h>
171
+ #include <fcntl.h>
172
+ #include <fenv.h>
173
+ #include <float.h>
174
+ #include <intrin.h>
175
+ #include <inttypes.h>
176
+ #include <io.h>
177
+ #include <iso646.h>
178
+ #include <limits.h>
179
+ #include <locale.h>
180
+ #include <malloc.h>
181
+ #include <math.h>
182
+ #include <mbstring.h>
183
+ #include <memory.h>
184
+ #include <minmax.h>
185
+ #include <process.h>
186
+ #include <safeint.h>
187
+ #include <sal.h>
188
+ #include <search.h>
189
+ #include <setjmp.h>
190
+ #include <share.h>
191
+ #include <signal.h>
192
+ #include <stdarg.h>
193
+ #include <stdbool.h>
194
+ #include <stddef.h>
195
+ #include <stdint.h>
196
+ #include <stdio.h>
197
+ #include <stdlib.h>
198
+ #include <string.h>
199
+ #include <sys/locking.h>
200
+ #include <sys/stat.h>
201
+ #include <sys/timeb.h>
202
+ #include <sys/types.h>
203
+ #include <sys/utime.h>
204
+ #include <tchar.h>
205
+ #include <time.h>
206
+ #include <uchar.h>
207
+ #include <wchar.h>
208
+ #include <wctype.h>
209
+
210
+ #ifndef _CORE_HEADERS_ONLY
211
+ #include <complex.h>
212
+ #include <new.h>
213
+ #endif // !defined(_CORE_HEADERS_ONLY)
214
+
215
+ #ifndef _M_CEE_PURE
216
+ #include <fpieee.h>
217
+ #endif // !defined(_M_CEE_PURE)
218
+ #endif // !defined(_MSVC_TESTING_NVCC)
219
+
220
+ #if !(defined(__CUDACC__) && defined(__clang__))
221
+ #pragma pop_macro("new")
222
+ #endif // !(defined(__CUDACC__) && defined(__clang__))
223
+
224
+ #pragma warning(pop)
225
+
226
+ #endif // __MSVC_ALL_PUBLIC_HEADERS_HPP
miniMSVC/VC/Tools/MSVC/14.42.34433/include/__msvc_bit_utils.hpp ADDED
@@ -0,0 +1,426 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // __msvc_bit_utils.hpp internal header (core)
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ #ifndef __MSVC_BIT_UTILS_HPP
7
+ #define __MSVC_BIT_UTILS_HPP
8
+ #include <yvals_core.h>
9
+ #if _STL_COMPILER_PREPROCESSOR
10
+
11
+ #include <climits>
12
+ #include <xtr1common>
13
+
14
+ #include _STL_INTRIN_HEADER
15
+
16
+ #pragma pack(push, _CRT_PACKING)
17
+ #pragma warning(push, _STL_WARNING_LEVEL)
18
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
19
+ _STL_DISABLE_CLANG_WARNINGS
20
+ #pragma push_macro("new")
21
+ #undef new
22
+
23
+ _STD_BEGIN
24
+ extern "C" {
25
+ extern int __isa_available;
26
+ }
27
+
28
+ _INLINE_VAR constexpr int _Stl_isa_available_sse42 = 2; // equal to __ISA_AVAILABLE_SSE42
29
+ _INLINE_VAR constexpr int _Stl_isa_available_avx2 = 5; // equal to __ISA_AVAILABLE_AVX2
30
+
31
+ template <class _UInt>
32
+ constexpr int _Unsigned_integer_digits = sizeof(_UInt) * CHAR_BIT;
33
+
34
+ // Implementation of countl_zero without using specialized CPU instructions.
35
+ // Used at compile time and when said instructions are not supported.
36
+ // see "Hacker's Delight" section 5-3
37
+ template <class _Ty>
38
+ _NODISCARD constexpr int _Countl_zero_fallback(_Ty _Val) noexcept {
39
+ _Ty _Yx = 0;
40
+
41
+ unsigned int _Nx = _Unsigned_integer_digits<_Ty>;
42
+ unsigned int _Cx = _Unsigned_integer_digits<_Ty> / 2;
43
+ do {
44
+ _Yx = static_cast<_Ty>(_Val >> _Cx);
45
+ if (_Yx != 0) {
46
+ _Nx -= _Cx;
47
+ _Val = _Yx;
48
+ }
49
+ _Cx >>= 1;
50
+ } while (_Cx != 0);
51
+ return static_cast<int>(_Nx) - static_cast<int>(_Val);
52
+ }
53
+
54
+ #if !defined(_M_CEE_PURE) && !defined(__CUDACC__) && !defined(__INTEL_COMPILER)
55
+ #define _HAS_COUNTL_ZERO_INTRINSICS 1
56
+ #else // ^^^ intrinsics available / intrinsics unavailable vvv
57
+ #define _HAS_COUNTL_ZERO_INTRINSICS 0
58
+ #endif // ^^^ intrinsics unavailable ^^^
59
+
60
+ #if _HAS_COUNTL_ZERO_INTRINSICS
61
+ #if (defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)) || (defined(_M_X64) && !defined(_M_ARM64EC))
62
+ template <class _Ty>
63
+ _NODISCARD int _Countl_zero_lzcnt(const _Ty _Val) noexcept {
64
+ constexpr int _Digits = _Unsigned_integer_digits<_Ty>;
65
+
66
+ if constexpr (_Digits <= 16) {
67
+ return static_cast<int>(__lzcnt16(_Val) - (16 - _Digits));
68
+ } else if constexpr (_Digits == 32) {
69
+ return static_cast<int>(__lzcnt(_Val));
70
+ } else {
71
+ #ifdef _M_IX86
72
+ const unsigned int _High = _Val >> 32;
73
+ const auto _Low = static_cast<unsigned int>(_Val);
74
+ if (_High == 0) {
75
+ return 32 + _Countl_zero_lzcnt(_Low);
76
+ } else {
77
+ return _Countl_zero_lzcnt(_High);
78
+ }
79
+ #else // ^^^ defined(_M_IX86) / !defined(_M_IX86) vvv
80
+ return static_cast<int>(__lzcnt64(_Val));
81
+ #endif // ^^^ !defined(_M_IX86) ^^^
82
+ }
83
+ }
84
+
85
+ template <class _Ty>
86
+ _NODISCARD int _Countl_zero_bsr(const _Ty _Val) noexcept {
87
+ constexpr int _Digits = _Unsigned_integer_digits<_Ty>;
88
+
89
+ unsigned long _Result;
90
+ if constexpr (_Digits <= 32) {
91
+ if (!_BitScanReverse(&_Result, _Val)) {
92
+ return _Digits;
93
+ }
94
+ } else {
95
+ #ifdef _M_IX86
96
+ const unsigned int _High = _Val >> 32;
97
+ if (_BitScanReverse(&_Result, _High)) {
98
+ return static_cast<int>(31 - _Result);
99
+ }
100
+
101
+ const auto _Low = static_cast<unsigned int>(_Val);
102
+ if (!_BitScanReverse(&_Result, _Low)) {
103
+ return _Digits;
104
+ }
105
+ #else // ^^^ defined(_M_IX86) / !defined(_M_IX86) vvv
106
+ if (!_BitScanReverse64(&_Result, _Val)) {
107
+ return _Digits;
108
+ }
109
+ #endif // ^^^ !defined(_M_IX86) ^^^
110
+ }
111
+ return static_cast<int>(_Digits - 1 - _Result);
112
+ }
113
+
114
+ template <class _Ty>
115
+ _NODISCARD int _Checked_x86_x64_countl_zero(const _Ty _Val) noexcept {
116
+ #ifdef __AVX2__
117
+ return _Countl_zero_lzcnt(_Val);
118
+ #else // ^^^ defined(__AVX2__) / !defined(__AVX2__) vvv
119
+ const bool _Definitely_have_lzcnt = __isa_available >= _Stl_isa_available_avx2;
120
+ if (_Definitely_have_lzcnt) {
121
+ return _Countl_zero_lzcnt(_Val);
122
+ } else {
123
+ return _Countl_zero_bsr(_Val);
124
+ }
125
+ #endif // ^^^ !defined(__AVX2__) ^^^
126
+ }
127
+ #endif // (defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)) || (defined(_M_X64) && !defined(_M_ARM64EC))
128
+
129
+ #if defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
130
+ #ifdef __clang__ // TRANSITION, GH-1586
131
+ _NODISCARD constexpr int _Clang_arm_arm64_countl_zero(const unsigned short _Val) {
132
+ return __builtin_clzs(_Val);
133
+ }
134
+
135
+ _NODISCARD constexpr int _Clang_arm_arm64_countl_zero(const unsigned int _Val) {
136
+ return __builtin_clz(_Val);
137
+ }
138
+
139
+ _NODISCARD constexpr int _Clang_arm_arm64_countl_zero(const unsigned long _Val) {
140
+ return __builtin_clzl(_Val);
141
+ }
142
+
143
+ _NODISCARD constexpr int _Clang_arm_arm64_countl_zero(const unsigned long long _Val) {
144
+ return __builtin_clzll(_Val);
145
+ }
146
+ #endif // TRANSITION, GH-1586
147
+
148
+ template <class _Ty>
149
+ _NODISCARD int _Checked_arm_arm64_countl_zero(const _Ty _Val) noexcept {
150
+ constexpr int _Digits = _Unsigned_integer_digits<_Ty>;
151
+ if (_Val == 0) {
152
+ return _Digits;
153
+ }
154
+
155
+ #ifdef __clang__ // TRANSITION, GH-1586
156
+ if constexpr (is_same_v<remove_cv_t<_Ty>, unsigned char>) {
157
+ return _Clang_arm_arm64_countl_zero(static_cast<unsigned short>(_Val))
158
+ - (_Unsigned_integer_digits<unsigned short> - _Digits);
159
+ } else {
160
+ return _Clang_arm_arm64_countl_zero(_Val);
161
+ }
162
+ #else // ^^^ workaround / no workaround vvv
163
+ if constexpr (_Digits <= 32) {
164
+ return static_cast<int>(_CountLeadingZeros(_Val)) - (_Unsigned_integer_digits<unsigned long> - _Digits);
165
+ } else {
166
+ return static_cast<int>(_CountLeadingZeros64(_Val));
167
+ }
168
+ #endif // ^^^ no workaround ^^^
169
+ }
170
+ #endif // defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
171
+ #endif // _HAS_COUNTL_ZERO_INTRINSICS
172
+
173
+ // Implementation of countr_zero without using specialized CPU instructions.
174
+ // Used at compile time and when said instructions are not supported.
175
+ // see "Hacker's Delight" section 5-4
176
+ template <class _Ty>
177
+ _NODISCARD constexpr int _Countr_zero_fallback(const _Ty _Val) noexcept {
178
+ constexpr int _Digits = _Unsigned_integer_digits<_Ty>;
179
+ return _Digits - _Countl_zero_fallback(static_cast<_Ty>(static_cast<_Ty>(~_Val) & static_cast<_Ty>(_Val - 1)));
180
+ }
181
+
182
+ // Implementation of popcount without using specialized CPU instructions.
183
+ // Used at compile time and when said instructions are not supported.
184
+ template <class _Ty>
185
+ _NODISCARD constexpr int _Popcount_fallback(_Ty _Val) noexcept {
186
+ constexpr int _Digits = _Unsigned_integer_digits<_Ty>;
187
+ #if (defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)) || defined(_M_ARM)
188
+ if constexpr (_Digits == 64) {
189
+ // 64-bit bit operations on architectures without 64-bit registers are less efficient,
190
+ // hence we split the value so that it fits in 32-bit registers
191
+ return _Popcount_fallback(static_cast<unsigned long>(_Val))
192
+ + _Popcount_fallback(static_cast<unsigned long>(_Val >> 32));
193
+ } else
194
+ #endif // (defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)) || defined(_M_ARM)
195
+ {
196
+ // we static_cast these bit patterns in order to truncate them to the correct size
197
+ _Val = static_cast<_Ty>(_Val - ((_Val >> 1) & static_cast<_Ty>(0x5555'5555'5555'5555ull)));
198
+ _Val = static_cast<_Ty>((_Val & static_cast<_Ty>(0x3333'3333'3333'3333ull))
199
+ + ((_Val >> 2) & static_cast<_Ty>(0x3333'3333'3333'3333ull)));
200
+ _Val = static_cast<_Ty>((_Val + (_Val >> 4)) & static_cast<_Ty>(0x0F0F'0F0F'0F0F'0F0Full));
201
+ // Multiply by one in each byte, so that it will have the sum of all source bytes in the highest byte
202
+ _Val = static_cast<_Ty>(_Val * static_cast<_Ty>(0x0101'0101'0101'0101ull));
203
+ // Extract highest byte
204
+ return static_cast<int>(_Val >> (_Digits - 8));
205
+ }
206
+ }
207
+
208
+ #if ((defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)) || (defined(_M_X64) && !defined(_M_ARM64EC))) \
209
+ && !defined(_M_CEE_PURE) && !defined(__CUDACC__) && !defined(__INTEL_COMPILER)
210
+ #define _HAS_TZCNT_BSF_INTRINSICS 1
211
+ #else // ^^^ intrinsics available / intrinsics unavailable vvv
212
+ #define _HAS_TZCNT_BSF_INTRINSICS 0
213
+ #endif // ^^^ intrinsics unavailable ^^^
214
+
215
+ #if _HAS_TZCNT_BSF_INTRINSICS
216
+ #ifdef __clang__
217
+ #define _TZCNT_U32 __builtin_ia32_tzcnt_u32
218
+ #define _TZCNT_U64 __builtin_ia32_tzcnt_u64
219
+ #else // ^^^ __clang__ / !__clang__ vvv
220
+ #define _TZCNT_U32 _tzcnt_u32
221
+ #define _TZCNT_U64 _tzcnt_u64
222
+ #endif // __clang__
223
+
224
+ template <class _Ty>
225
+ _NODISCARD int _Countr_zero_tzcnt(const _Ty _Val) noexcept {
226
+ constexpr int _Digits = _Unsigned_integer_digits<_Ty>;
227
+ constexpr _Ty _Max = static_cast<_Ty>(-1); // equal to (numeric_limits<_Ty>::max)()
228
+
229
+ if constexpr (_Digits <= 32) {
230
+ // Intended widening to int. This operation means that a narrow 0 will widen
231
+ // to 0xFFFF....FFFF0... instead of 0. We need this to avoid counting all the zeros
232
+ // of the wider type.
233
+ return static_cast<int>(_TZCNT_U32(static_cast<unsigned int>(~_Max | _Val)));
234
+ } else {
235
+ #ifdef _M_IX86
236
+ const auto _Low = static_cast<unsigned int>(_Val);
237
+ if (_Low == 0) {
238
+ const unsigned int _High = _Val >> 32;
239
+ return static_cast<int>(32 + _TZCNT_U32(_High));
240
+ } else {
241
+ return static_cast<int>(_TZCNT_U32(_Low));
242
+ }
243
+ #else // ^^^ defined(_M_IX86) / !defined(_M_IX86) vvv
244
+ return static_cast<int>(_TZCNT_U64(_Val));
245
+ #endif // ^^^ !defined(_M_IX86) ^^^
246
+ }
247
+ }
248
+
249
+ #undef _TZCNT_U32
250
+ #undef _TZCNT_U64
251
+
252
+ template <class _Ty>
253
+ _NODISCARD int _Countr_zero_bsf(const _Ty _Val) noexcept {
254
+ constexpr int _Digits = _Unsigned_integer_digits<_Ty>;
255
+ constexpr _Ty _Max = static_cast<_Ty>(-1); // equal to (numeric_limits<_Ty>::max)()
256
+
257
+ unsigned long _Result;
258
+ if constexpr (_Digits <= 32) {
259
+ // Intended widening to int. This operation means that a narrow 0 will widen
260
+ // to 0xFFFF....FFFF0... instead of 0. We need this to avoid counting all the zeros
261
+ // of the wider type.
262
+ if (!_BitScanForward(&_Result, static_cast<unsigned int>(~_Max | _Val))) {
263
+ return _Digits;
264
+ }
265
+ } else {
266
+ #ifdef _M_IX86
267
+ const auto _Low = static_cast<unsigned int>(_Val);
268
+ if (_BitScanForward(&_Result, _Low)) {
269
+ return static_cast<int>(_Result);
270
+ }
271
+
272
+ const unsigned int _High = _Val >> 32;
273
+ if (!_BitScanForward(&_Result, _High)) {
274
+ return _Digits;
275
+ } else {
276
+ return static_cast<int>(_Result + 32);
277
+ }
278
+ #else // ^^^ defined(_M_IX86) / !defined(_M_IX86) vvv
279
+ if (!_BitScanForward64(&_Result, _Val)) {
280
+ return _Digits;
281
+ }
282
+ #endif // ^^^ !defined(_M_IX86) ^^^
283
+ }
284
+ return static_cast<int>(_Result);
285
+ }
286
+
287
+ template <class _Ty>
288
+ _NODISCARD int _Checked_x86_x64_countr_zero(const _Ty _Val) noexcept {
289
+ #ifdef __AVX2__
290
+ return _Countr_zero_tzcnt(_Val);
291
+ #else // ^^^ defined(__AVX2__) / !defined(__AVX2__) vvv
292
+ const bool _Definitely_have_tzcnt = __isa_available >= _Stl_isa_available_avx2;
293
+ if (_Definitely_have_tzcnt) {
294
+ return _Countr_zero_tzcnt(_Val);
295
+ } else {
296
+ return _Countr_zero_bsf(_Val);
297
+ }
298
+ #endif // ^^^ !defined(__AVX2__) ^^^
299
+ }
300
+
301
+ #endif // _HAS_TZCNT_BSF_INTRINSICS
302
+
303
+ #if (defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64)) && !defined(_M_CEE_PURE) && !defined(__CUDACC__) \
304
+ && !defined(__INTEL_COMPILER)
305
+ #define _HAS_POPCNT_INTRINSICS 1
306
+ #if defined(__AVX__) || defined(_M_ARM64) || defined(_M_ARM64EC)
307
+ #define _POPCNT_INTRINSICS_ALWAYS_AVAILABLE 1
308
+ #else // ^^^ intrinsics always available / intrinsics not always available vvv
309
+ #define _POPCNT_INTRINSICS_ALWAYS_AVAILABLE 0
310
+ #endif // ^^^ intrinsics not always available ^^^
311
+ #else // ^^^ intrinsics available / intrinsics unavailable vvv
312
+ #define _HAS_POPCNT_INTRINSICS 0
313
+ #endif // ^^^ intrinsics unavailable ^^^
314
+
315
+ #if _HAS_POPCNT_INTRINSICS
316
+ template <class _Ty>
317
+ _NODISCARD int _Unchecked_popcount(const _Ty _Val) noexcept {
318
+ constexpr int _Digits = _Unsigned_integer_digits<_Ty>;
319
+ if constexpr (_Digits <= 16) {
320
+ return static_cast<int>(__popcnt16(_Val));
321
+ } else if constexpr (_Digits == 32) {
322
+ return static_cast<int>(__popcnt(_Val));
323
+ } else {
324
+ #ifdef _M_IX86
325
+ return static_cast<int>(__popcnt(_Val >> 32) + __popcnt(static_cast<unsigned int>(_Val)));
326
+ #else // ^^^ defined(_M_IX86) / !defined(_M_IX86) vvv
327
+ return static_cast<int>(__popcnt64(_Val));
328
+ #endif // ^^^ !defined(_M_IX86) ^^^
329
+ }
330
+ }
331
+
332
+ template <class _Ty>
333
+ _NODISCARD int _Checked_popcount(const _Ty _Val) noexcept {
334
+ #if !_POPCNT_INTRINSICS_ALWAYS_AVAILABLE
335
+ const bool _Definitely_have_popcnt = __isa_available >= _Stl_isa_available_sse42;
336
+ if (!_Definitely_have_popcnt) {
337
+ return _Popcount_fallback(_Val);
338
+ }
339
+ #endif // ^^^ !_POPCNT_INTRINSICS_ALWAYS_AVAILABLE ^^^
340
+ return _Unchecked_popcount(_Val);
341
+ }
342
+ #endif // ^^^ _HAS_POPCNT_INTRINSICS ^^^
343
+
344
+ template <class _Ty>
345
+ constexpr bool _Is_standard_unsigned_integer =
346
+ _Is_any_of_v<remove_cv_t<_Ty>, unsigned char, unsigned short, unsigned int, unsigned long, unsigned long long>;
347
+
348
+ template <class _Ty, enable_if_t<_Is_standard_unsigned_integer<_Ty>, int> = 0>
349
+ _NODISCARD _CONSTEXPR20 int _Countr_zero(const _Ty _Val) noexcept {
350
+ #if _HAS_TZCNT_BSF_INTRINSICS
351
+ #if _HAS_CXX20
352
+ if (!_STD is_constant_evaluated())
353
+ #endif // _HAS_CXX20
354
+ {
355
+ return _Checked_x86_x64_countr_zero(_Val);
356
+ }
357
+ #endif // _HAS_TZCNT_BSF_INTRINSICS
358
+ return _Countr_zero_fallback(_Val);
359
+ }
360
+
361
+ template <class _Ty, class _Fn>
362
+ constexpr decltype(auto) _Select_countr_zero_impl(_Fn _Callback) {
363
+ // TRANSITION, DevCom-1527995: Lambdas in this function ensure inlining
364
+ #if _HAS_TZCNT_BSF_INTRINSICS && _HAS_CXX20
365
+ if (!_STD is_constant_evaluated()) {
366
+ #ifdef __AVX2__
367
+ return _Callback([](_Ty _Val) _STATIC_CALL_OPERATOR { return _Countr_zero_tzcnt(_Val); });
368
+ #else // ^^^ AVX2 / not AVX2 vvv
369
+ const bool _Definitely_have_tzcnt = __isa_available >= _Stl_isa_available_avx2;
370
+ if (_Definitely_have_tzcnt) {
371
+ return _Callback([](_Ty _Val) _STATIC_CALL_OPERATOR { return _Countr_zero_tzcnt(_Val); });
372
+ } else {
373
+ return _Callback([](_Ty _Val) _STATIC_CALL_OPERATOR { return _Countr_zero_bsf(_Val); });
374
+ }
375
+ #endif // ^^^ not AVX2 ^^^
376
+ }
377
+ #endif // ^^^ _HAS_TZCNT_BSF_INTRINSICS && _HAS_CXX20 ^^^
378
+ // C++17 constexpr gcd() calls this function, so it should be constexpr unless we detect runtime evaluation.
379
+ return _Callback([](_Ty _Val) _STATIC_CALL_OPERATOR { return _Countr_zero_fallback(_Val); });
380
+ }
381
+
382
+ template <class _Ty, enable_if_t<_Is_standard_unsigned_integer<_Ty>, int> = 0>
383
+ _NODISCARD _CONSTEXPR20 int _Popcount(const _Ty _Val) noexcept {
384
+ #if _HAS_POPCNT_INTRINSICS
385
+ #if _HAS_CXX20
386
+ if (!_STD is_constant_evaluated())
387
+ #endif // _HAS_CXX20
388
+ {
389
+ return _Checked_popcount(_Val);
390
+ }
391
+ #endif // ^^^ _HAS_POPCNT_INTRINSICS ^^^
392
+ return _Popcount_fallback(_Val);
393
+ }
394
+
395
+ template <class _Ty, class _Fn>
396
+ _CONSTEXPR20 decltype(auto) _Select_popcount_impl(_Fn _Callback) {
397
+ // TRANSITION, DevCom-1527995: Lambdas in this function ensure inlining
398
+ #if _HAS_POPCNT_INTRINSICS
399
+ #if _HAS_CXX20
400
+ if (!_STD is_constant_evaluated())
401
+ #endif // _HAS_CXX20
402
+ {
403
+ #if !_POPCNT_INTRINSICS_ALWAYS_AVAILABLE
404
+ const bool _Definitely_have_popcnt = __isa_available >= _Stl_isa_available_sse42;
405
+ if (!_Definitely_have_popcnt) {
406
+ return _Callback([](_Ty _Val) _STATIC_CALL_OPERATOR { return _Popcount_fallback(_Val); });
407
+ }
408
+ #endif // ^^^ !_POPCNT_INTRINSICS_ALWAYS_AVAILABLE ^^^
409
+ return _Callback([](_Ty _Val) _STATIC_CALL_OPERATOR { return _Unchecked_popcount(_Val); });
410
+ }
411
+ #endif // ^^^ _HAS_POPCNT_INTRINSICS ^^^
412
+ return _Callback([](_Ty _Val) _STATIC_CALL_OPERATOR { return _Popcount_fallback(_Val); });
413
+ }
414
+
415
+ #undef _HAS_POPCNT_INTRINSICS
416
+ #undef _HAS_TZCNT_BSF_INTRINSICS
417
+ #undef _POPCNT_INTRINSICS_ALWAYS_AVAILABLE
418
+
419
+ _STD_END
420
+
421
+ #pragma pop_macro("new")
422
+ _STL_RESTORE_CLANG_WARNINGS
423
+ #pragma warning(pop)
424
+ #pragma pack(pop)
425
+ #endif // _STL_COMPILER_PREPROCESSOR
426
+ #endif // __MSVC_BIT_UTILS_HPP
miniMSVC/VC/Tools/MSVC/14.42.34433/include/__msvc_chrono.hpp ADDED
@@ -0,0 +1,801 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // __msvc_chrono.hpp internal header
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ #ifndef __MSVC_CHRONO_HPP
7
+ #define __MSVC_CHRONO_HPP
8
+ #include <yvals.h>
9
+ #if _STL_COMPILER_PREPROCESSOR
10
+ #include <ctime>
11
+ #include <limits>
12
+ #include <ratio>
13
+ #include <type_traits>
14
+ #include <utility>
15
+ #include <xtimec.h>
16
+
17
+ #if _HAS_CXX20
18
+ #include <compare>
19
+ #endif
20
+
21
+ #pragma pack(push, _CRT_PACKING)
22
+ #pragma warning(push, _STL_WARNING_LEVEL)
23
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
24
+ _STL_DISABLE_CLANG_WARNINGS
25
+ #pragma push_macro("new")
26
+ #undef new
27
+
28
+ _STD_BEGIN
29
+ namespace chrono {
30
// treat_as_floating_point: trait telling duration whether _Rep behaves like a
// floating-point tick count (and may therefore absorb lossy period conversions).
_EXPORT_STD template <class _Rep>
struct treat_as_floating_point : is_floating_point<_Rep> {}; // tests for floating-point type

// Convenience variable template mirroring treat_as_floating_point<_Rep>::value.
_EXPORT_STD template <class _Rep>
constexpr bool treat_as_floating_point_v = treat_as_floating_point<_Rep>::value;
35
+
36
// duration_values: customization point supplying the zero / min / max tick
// values that duration::zero(), duration::(min)(), and duration::(max)()
// forward to.
_EXPORT_STD template <class _Rep>
struct duration_values { // gets arithmetic properties of a type
    _NODISCARD static constexpr _Rep zero() noexcept {
        // get zero value
        return _Rep(0);
    }

    // (min)/(max) are parenthesized to defeat any function-like min/max macros
    // (e.g. <windows.h> without NOMINMAX).
    _NODISCARD static constexpr _Rep(min)() noexcept {
        // get smallest value
        return numeric_limits<_Rep>::lowest();
    }

    _NODISCARD static constexpr _Rep(max)() noexcept {
        // get largest value
        return (numeric_limits<_Rep>::max)();
    }
};
53
+
54
#if _HAS_CXX20
// is_clock_v: satisfied when _Clock exposes the Cpp17Clock surface
// (rep/period/duration/time_point member types, is_steady, and a callable now()).
_EXPORT_STD template <class _Clock>
constexpr bool is_clock_v = requires {
    typename _Clock::rep;
    typename _Clock::period;
    typename _Clock::duration;
    typename _Clock::time_point;
    _Clock::is_steady;
    _Clock::now();
};
_EXPORT_STD template <class _Clock>
struct is_clock : bool_constant<is_clock_v<_Clock>> {};

// Internal alias so the rest of the STL can use one spelling in both modes.
template <class _Clock>
constexpr bool _Is_clock_v = is_clock_v<_Clock>;
#else // ^^^ _HAS_CXX20 / !_HAS_CXX20 vvv
// Pre-C++20 fallback: detect the same members via void_t-based SFINAE.
template <class _Clock, class = void>
constexpr bool _Is_clock_v = false;

template <class _Clock>
constexpr bool
    _Is_clock_v<_Clock, void_t<typename _Clock::rep, typename _Clock::period, typename _Clock::duration,
                    typename _Clock::time_point, decltype(_Clock::is_steady), decltype(_Clock::now())>> =
        true;
#endif // ^^^ !_HAS_CXX20 ^^^
79
+
80
_EXPORT_STD template <class _Rep, class _Period = ratio<1>>
class duration;

// _Is_duration_v: true iff _Ty is a specialization of chrono::duration.
template <class _Ty>
constexpr bool _Is_duration_v = _Is_specialization_v<_Ty, duration>;

// Forward declaration (with the SFINAE default argument) so duration's
// converting constructor can call it; the definition appears later in this
// header with the default spelled /* = 0 */.
_EXPORT_STD template <class _To, class _Rep, class _Period, enable_if_t<_Is_duration_v<_To>, int> = 0>
constexpr _To duration_cast(const duration<_Rep, _Period>&) noexcept(
    is_arithmetic_v<_Rep> && is_arithmetic_v<typename _To::rep>); // strengthened
89
+
90
// duration: a tick count (_MyRep) whose unit is _Period seconds per tick.
// All arithmetic happens on the rep; conversions between periods go through
// duration_cast. The noexcept specs are "strengthened": stricter than the
// standard requires, valid whenever the rep is a built-in arithmetic type.
_EXPORT_STD template <class _Rep, class _Period>
class duration { // represents a time duration
public:
    using rep = _Rep;
    using period = typename _Period::type; // reduced-to-lowest-terms ratio

    static_assert(!_Is_duration_v<_Rep>, "duration can't have duration as first template argument");
    static_assert(_Is_ratio_v<_Period>, "period not an instance of std::ratio");
    static_assert(0 < _Period::num, "period negative or zero");

    constexpr duration() = default;

    // Construct from a raw tick count. Only non-lossy rep conversions are
    // allowed: anything -> floating rep is fine, floating -> integral is not.
    template <class _Rep2,
        enable_if_t<is_convertible_v<const _Rep2&, _Rep>
                        && (treat_as_floating_point_v<_Rep> || !treat_as_floating_point_v<_Rep2>),
            int> = 0>
    constexpr explicit duration(const _Rep2& _Val) noexcept(
        is_arithmetic_v<_Rep> && is_arithmetic_v<_Rep2>) // strengthened
        : _MyRep(static_cast<_Rep>(_Val)) {}

    // Converting constructor: participates only when the conversion is exact
    // (source period divides ours with an integral rep) or our rep is
    // floating-point, so implicit conversions never truncate.
    template <class _Rep2, class _Period2,
        enable_if_t<treat_as_floating_point_v<_Rep>
                        || (_Ratio_divide_sfinae<_Period2, _Period>::den == 1 && !treat_as_floating_point_v<_Rep2>),
            int> = 0>
    constexpr duration(const duration<_Rep2, _Period2>& _Dur) noexcept(
        is_arithmetic_v<_Rep> && is_arithmetic_v<_Rep2>) // strengthened
        : _MyRep(_CHRONO duration_cast<duration>(_Dur).count()) {}

    _NODISCARD constexpr _Rep count() const noexcept(is_arithmetic_v<_Rep>) /* strengthened */ {
        return _MyRep;
    }

    // Unary +/- return common_type_t<duration>, i.e. the same duration with a
    // reduced period, per [time.duration.arithmetic].
    _NODISCARD constexpr common_type_t<duration> operator+() const
        noexcept(is_arithmetic_v<_Rep>) /* strengthened */ {
        return common_type_t<duration>(*this);
    }

    _NODISCARD constexpr common_type_t<duration> operator-() const
        noexcept(is_arithmetic_v<_Rep>) /* strengthened */ {
        return common_type_t<duration>(-_MyRep);
    }

    // Increment/decrement and compound assignment operate directly on the rep.
    _CONSTEXPR17 duration& operator++() noexcept(is_arithmetic_v<_Rep>) /* strengthened */ {
        ++_MyRep;
        return *this;
    }

    _CONSTEXPR17 duration operator++(int) noexcept(is_arithmetic_v<_Rep>) /* strengthened */ {
        return duration(_MyRep++);
    }

    _CONSTEXPR17 duration& operator--() noexcept(is_arithmetic_v<_Rep>) /* strengthened */ {
        --_MyRep;
        return *this;
    }

    _CONSTEXPR17 duration operator--(int) noexcept(is_arithmetic_v<_Rep>) /* strengthened */ {
        return duration(_MyRep--);
    }

    _CONSTEXPR17 duration& operator+=(const duration& _Right) noexcept(is_arithmetic_v<_Rep>) /* strengthened */ {
        _MyRep += _Right._MyRep;
        return *this;
    }

    _CONSTEXPR17 duration& operator-=(const duration& _Right) noexcept(is_arithmetic_v<_Rep>) /* strengthened */ {
        _MyRep -= _Right._MyRep;
        return *this;
    }

    _CONSTEXPR17 duration& operator*=(const _Rep& _Right) noexcept(is_arithmetic_v<_Rep>) /* strengthened */ {
        _MyRep *= _Right;
        return *this;
    }

    _CONSTEXPR17 duration& operator/=(const _Rep& _Right) noexcept(is_arithmetic_v<_Rep>) /* strengthened */ {
        _MyRep /= _Right;
        return *this;
    }

    _CONSTEXPR17 duration& operator%=(const _Rep& _Right) noexcept(is_arithmetic_v<_Rep>) /* strengthened */ {
        _MyRep %= _Right;
        return *this;
    }

    _CONSTEXPR17 duration& operator%=(const duration& _Right) noexcept(is_arithmetic_v<_Rep>) /* strengthened */ {
        _MyRep %= _Right.count();
        return *this;
    }

    // Static named limits forward to duration_values so user reps can customize.
    _NODISCARD static constexpr duration zero() noexcept {
        // get zero value
        return duration(duration_values<_Rep>::zero());
    }

    _NODISCARD static constexpr duration(min)() noexcept {
        // get minimum value
        return duration((duration_values<_Rep>::min)());
    }

    _NODISCARD static constexpr duration(max)() noexcept {
        // get maximum value
        return duration((duration_values<_Rep>::max)());
    }

private:
    _Rep _MyRep; // the stored rep (tick count); intentionally not zero-initialized by default
};
198
+
199
// time_point: a duration (_MyDur) measured from _Clock's epoch.
// Default construction yields the epoch itself (_MyDur is zero-initialized).
_EXPORT_STD template <class _Clock, class _Duration>
class time_point { // represents a point in time
public:
    using clock = _Clock;
    using duration = _Duration;
    using rep = typename _Duration::rep;
    using period = typename _Duration::period;

    static_assert(_Is_duration_v<_Duration>,
        "N4950 [time.point.general]/1 mandates Duration to be a specialization of chrono::duration.");

    constexpr time_point() = default;

    constexpr explicit time_point(const _Duration& _Other) noexcept(is_arithmetic_v<rep>) // strengthened
        : _MyDur(_Other) {}

    // Converting constructor from a time_point on the SAME clock whose
    // duration implicitly converts to ours.
    template <class _Duration2, enable_if_t<is_convertible_v<_Duration2, _Duration>, int> = 0>
    constexpr time_point(const time_point<_Clock, _Duration2>& _Tp) noexcept(
        is_arithmetic_v<rep> && is_arithmetic_v<typename _Duration2::rep>) // strengthened
        : _MyDur(_Tp.time_since_epoch()) {}

    _NODISCARD constexpr _Duration time_since_epoch() const noexcept(is_arithmetic_v<rep>) /* strengthened */ {
        return _MyDur;
    }

#if _HAS_CXX20
    // C++20 added single-tick increment/decrement to time_point.
    constexpr time_point& operator++() noexcept(is_arithmetic_v<rep>) /* strengthened */ {
        ++_MyDur;
        return *this;
    }
    constexpr time_point operator++(int) noexcept(is_arithmetic_v<rep>) /* strengthened */ {
        return time_point{_MyDur++};
    }
    constexpr time_point& operator--() noexcept(is_arithmetic_v<rep>) /* strengthened */ {
        --_MyDur;
        return *this;
    }
    constexpr time_point operator--(int) noexcept(is_arithmetic_v<rep>) /* strengthened */ {
        return time_point{_MyDur--};
    }
#endif // _HAS_CXX20

    _CONSTEXPR17 time_point& operator+=(const _Duration& _Dur) noexcept(is_arithmetic_v<rep>) /* strengthened */ {
        _MyDur += _Dur;
        return *this;
    }

    _CONSTEXPR17 time_point& operator-=(const _Duration& _Dur) noexcept(is_arithmetic_v<rep>) /* strengthened */ {
        _MyDur -= _Dur;
        return *this;
    }

    // Parenthesized to defeat function-like min/max macros.
    _NODISCARD static constexpr time_point(min)() noexcept {
        return time_point((_Duration::min)());
    }

    _NODISCARD static constexpr time_point(max)() noexcept {
        return time_point((_Duration::max)());
    }

private:
    _Duration _MyDur{duration::zero()}; // duration since the epoch
};
262
+ } // namespace chrono
263
+
264
// durations/time_points are trivially swappable exactly when their stored
// representation is, enabling memberwise/memcpy swap optimizations elsewhere
// in the STL.
template <class _Rep, class _Period>
constexpr bool _Is_trivially_swappable_v<chrono::duration<_Rep, _Period>> = _Is_trivially_swappable_v<_Rep>;

template <class _Clock, class _Duration>
constexpr bool _Is_trivially_swappable_v<chrono::time_point<_Clock, _Duration>> = _Is_trivially_swappable_v<_Duration>;
269
+
270
+ _NODISCARD constexpr intmax_t _Lcm(const intmax_t _Ax, const intmax_t _Bx) noexcept {
271
+ return (_Ax / _Gcd(_Ax, _Bx)) * _Bx;
272
+ }
273
+
274
// common_type of two durations: the common rep with the greatest period that
// exactly divides both (gcd of the numerators over lcm of the denominators).
template <class _Rep1, class _Period1, class _Rep2, class _Period2>
struct common_type<_CHRONO duration<_Rep1, _Period1>, _CHRONO duration<_Rep2, _Period2>> {
    using type = _CHRONO duration<common_type_t<_Rep1, _Rep2>,
        ratio<_Gcd(_Period1::num, _Period2::num), _Lcm(_Period1::den, _Period2::den)>>;
};

// common_type of two time points: same clock, common duration.
template <class _Clock, class _Duration1, class _Duration2>
struct common_type<_CHRONO time_point<_Clock, _Duration1>,
    _CHRONO time_point<_Clock, _Duration2>> { // common type of two time points
    using type = _CHRONO time_point<_Clock, common_type_t<_Duration1, _Duration2>>;
};
285
+
286
+ namespace chrono {
287
// Heterogeneous duration addition/subtraction: both operands are first
// converted to their common type, so the result is exact for integral reps.
_EXPORT_STD template <class _Rep1, class _Period1, class _Rep2, class _Period2>
_NODISCARD constexpr common_type_t<duration<_Rep1, _Period1>, duration<_Rep2, _Period2>>
    operator+(const duration<_Rep1, _Period1>& _Left, const duration<_Rep2, _Period2>& _Right) noexcept(
        is_arithmetic_v<_Rep1> && is_arithmetic_v<_Rep2>) /* strengthened */ {
    using _CD = common_type_t<duration<_Rep1, _Period1>, duration<_Rep2, _Period2>>;
    return _CD(_CD(_Left).count() + _CD(_Right).count());
}

_EXPORT_STD template <class _Rep1, class _Period1, class _Rep2, class _Period2>
_NODISCARD constexpr common_type_t<duration<_Rep1, _Period1>, duration<_Rep2, _Period2>>
    operator-(const duration<_Rep1, _Period1>& _Left, const duration<_Rep2, _Period2>& _Right) noexcept(
        is_arithmetic_v<_Rep1> && is_arithmetic_v<_Rep2>) /* strengthened */ {
    using _CD = common_type_t<duration<_Rep1, _Period1>, duration<_Rep2, _Period2>>;
    return _CD(_CD(_Left).count() - _CD(_Right).count());
}
302
+
303
// duration * scalar; constrained so _Rep2 must convert to the common rep
// (which also rejects duration * duration).
_EXPORT_STD template <class _Rep1, class _Period1, class _Rep2,
    enable_if_t<is_convertible_v<const _Rep2&, common_type_t<_Rep1, _Rep2>>, int> = 0>
_NODISCARD constexpr duration<common_type_t<_Rep1, _Rep2>, _Period1> operator*(
    const duration<_Rep1, _Period1>& _Left,
    const _Rep2& _Right) noexcept(is_arithmetic_v<_Rep1> && is_arithmetic_v<_Rep2>) /* strengthened */ {
    using _CR = common_type_t<_Rep1, _Rep2>;
    using _CD = duration<_CR, _Period1>;
    return _CD(_CD(_Left).count() * _Right);
}

// scalar * duration: implemented in terms of the overload above.
_EXPORT_STD template <class _Rep1, class _Rep2, class _Period2,
    enable_if_t<is_convertible_v<const _Rep1&, common_type_t<_Rep1, _Rep2>>, int> = 0>
_NODISCARD constexpr duration<common_type_t<_Rep1, _Rep2>, _Period2> operator*(const _Rep1& _Left,
    const duration<_Rep2, _Period2>& _Right) noexcept(is_arithmetic_v<_Rep1>
    && is_arithmetic_v<_Rep2>) /* strengthened */ {
    return _Right * _Left;
}
320
+
321
// _Duration_div_mod(1): compute the return type of duration / rep and
// duration % rep. The nested `type` vanishes (SFINAE) when _Rep2 is itself a
// duration or is not convertible to the common rep, removing those overloads
// from consideration.
template <class _CR, class _Period1, class _Rep2, bool = is_convertible_v<const _Rep2&, _CR>>
struct _Duration_div_mod1 { // return type for duration / rep and duration % rep
    using type = duration<_CR, _Period1>;
};

template <class _CR, class _Period1, class _Rep2>
struct _Duration_div_mod1<_CR, _Period1, _Rep2, false> {}; // no return type

template <class _CR, class _Period1, class _Rep2, bool = _Is_duration_v<_Rep2>>
struct _Duration_div_mod {}; // no return type (when _Rep2 is a duration)

template <class _CR, class _Period1, class _Rep2>
struct _Duration_div_mod<_CR, _Period1, _Rep2, false> : _Duration_div_mod1<_CR, _Period1, _Rep2> {
    // return type for duration / rep and duration % rep
};
336
+
337
// duration / scalar -> duration in the common rep (SFINAE'd via
// _Duration_div_mod so duration / duration doesn't match this overload).
_EXPORT_STD template <class _Rep1, class _Period1, class _Rep2>
_NODISCARD constexpr typename _Duration_div_mod<common_type_t<_Rep1, _Rep2>, _Period1, _Rep2>::type operator/(
    const duration<_Rep1, _Period1>& _Left,
    const _Rep2& _Right) noexcept(is_arithmetic_v<_Rep1> && is_arithmetic_v<_Rep2>) /* strengthened */ {
    using _CR = common_type_t<_Rep1, _Rep2>;
    using _CD = duration<_CR, _Period1>;
    return _CD(_CD(_Left).count() / _Right);
}

// duration / duration -> dimensionless ratio in the common rep.
_EXPORT_STD template <class _Rep1, class _Period1, class _Rep2, class _Period2>
_NODISCARD constexpr common_type_t<_Rep1, _Rep2> operator/(const duration<_Rep1, _Period1>& _Left,
    const duration<_Rep2, _Period2>& _Right) noexcept(is_arithmetic_v<_Rep1>
    && is_arithmetic_v<_Rep2>) /* strengthened */ {
    using _CD = common_type_t<duration<_Rep1, _Period1>, duration<_Rep2, _Period2>>;
    return _CD(_Left).count() / _CD(_Right).count();
}

// duration % scalar -> duration (same SFINAE gate as operator/ above).
_EXPORT_STD template <class _Rep1, class _Period1, class _Rep2>
_NODISCARD constexpr typename _Duration_div_mod<common_type_t<_Rep1, _Rep2>, _Period1, _Rep2>::type operator%(
    const duration<_Rep1, _Period1>& _Left,
    const _Rep2& _Right) noexcept(is_arithmetic_v<_Rep1> && is_arithmetic_v<_Rep2>) /* strengthened */ {
    using _CR = common_type_t<_Rep1, _Rep2>;
    using _CD = duration<_CR, _Period1>;
    return _CD(_CD(_Left).count() % _Right);
}

// duration % duration -> remainder as the common duration type.
_EXPORT_STD template <class _Rep1, class _Period1, class _Rep2, class _Period2>
_NODISCARD constexpr common_type_t<duration<_Rep1, _Period1>, duration<_Rep2, _Period2>>
    operator%(const duration<_Rep1, _Period1>& _Left, const duration<_Rep2, _Period2>& _Right) noexcept(
        is_arithmetic_v<_Rep1> && is_arithmetic_v<_Rep2>) /* strengthened */ {
    using _CD = common_type_t<duration<_Rep1, _Period1>, duration<_Rep2, _Period2>>;
    return _CD(_CD(_Left).count() % _CD(_Right).count());
}
370
+
371
// Heterogeneous duration comparisons: both sides are converted to their
// common type first, so comparisons are exact for integral reps.
_EXPORT_STD template <class _Rep1, class _Period1, class _Rep2, class _Period2>
_NODISCARD constexpr bool operator==(const duration<_Rep1, _Period1>& _Left,
    const duration<_Rep2, _Period2>& _Right) noexcept(is_arithmetic_v<_Rep1>
    && is_arithmetic_v<_Rep2>) /* strengthened */ {
    using _CT = common_type_t<duration<_Rep1, _Period1>, duration<_Rep2, _Period2>>;
    return _CT(_Left).count() == _CT(_Right).count();
}

#if !_HAS_CXX20
// In C++20 mode, != is synthesized from operator== by the language.
template <class _Rep1, class _Period1, class _Rep2, class _Period2>
_NODISCARD constexpr bool operator!=(const duration<_Rep1, _Period1>& _Left,
    const duration<_Rep2, _Period2>& _Right) noexcept(is_arithmetic_v<_Rep1>
    && is_arithmetic_v<_Rep2>) /* strengthened */ {
    return !(_Left == _Right);
}
#endif // !_HAS_CXX20

_EXPORT_STD template <class _Rep1, class _Period1, class _Rep2, class _Period2>
_NODISCARD constexpr bool operator<(const duration<_Rep1, _Period1>& _Left,
    const duration<_Rep2, _Period2>& _Right) noexcept(is_arithmetic_v<_Rep1>
    && is_arithmetic_v<_Rep2>) /* strengthened */ {
    using _CT = common_type_t<duration<_Rep1, _Period1>, duration<_Rep2, _Period2>>;
    return _CT(_Left).count() < _CT(_Right).count();
}

// <=, >, and >= are all expressed in terms of < to keep the ordering logic
// in one place.
_EXPORT_STD template <class _Rep1, class _Period1, class _Rep2, class _Period2>
_NODISCARD constexpr bool operator<=(const duration<_Rep1, _Period1>& _Left,
    const duration<_Rep2, _Period2>& _Right) noexcept(is_arithmetic_v<_Rep1>
    && is_arithmetic_v<_Rep2>) /* strengthened */ {
    return !(_Right < _Left);
}

_EXPORT_STD template <class _Rep1, class _Period1, class _Rep2, class _Period2>
_NODISCARD constexpr bool operator>(const duration<_Rep1, _Period1>& _Left,
    const duration<_Rep2, _Period2>& _Right) noexcept(is_arithmetic_v<_Rep1>
    && is_arithmetic_v<_Rep2>) /* strengthened */ {
    return _Right < _Left;
}

_EXPORT_STD template <class _Rep1, class _Period1, class _Rep2, class _Period2>
_NODISCARD constexpr bool operator>=(const duration<_Rep1, _Period1>& _Left,
    const duration<_Rep2, _Period2>& _Right) noexcept(is_arithmetic_v<_Rep1>
    && is_arithmetic_v<_Rep2>) /* strengthened */ {
    return !(_Left < _Right);
}

#if _HAS_CXX20
// Spaceship is constrained: the common rep itself must be three-way comparable.
_EXPORT_STD template <class _Rep1, class _Period1, class _Rep2, class _Period2>
    requires three_way_comparable<typename common_type_t<duration<_Rep1, _Period1>, duration<_Rep2, _Period2>>::rep>
_NODISCARD constexpr auto operator<=>(const duration<_Rep1, _Period1>& _Left,
    const duration<_Rep2, _Period2>& _Right) noexcept(is_arithmetic_v<_Rep1>
    && is_arithmetic_v<_Rep2>) /* strengthened */ {
    using _CT = common_type_t<duration<_Rep1, _Period1>, duration<_Rep2, _Period2>>;
    return _CT(_Left).count() <=> _CT(_Right).count();
}
#endif // _HAS_CXX20
427
+
428
// duration_cast definition (declared earlier with `= 0` as the SFINAE default).
_EXPORT_STD template <class _To, class _Rep, class _Period, enable_if_t<_Is_duration_v<_To>, int> /* = 0 */>
_NODISCARD constexpr _To duration_cast(const duration<_Rep, _Period>& _Dur) noexcept(
    is_arithmetic_v<_Rep> && is_arithmetic_v<typename _To::rep>) /* strengthened */ {
    // convert duration to another duration; truncate
    // _CF is the exact conversion factor: source period / target period.
    using _CF = ratio_divide<_Period, typename _To::period>;

    using _ToRep = typename _To::rep;
    // Intermediate math runs in the widest of the two reps and intmax_t,
    // as specified by [time.duration.cast].
    using _CR = common_type_t<_ToRep, _Rep, intmax_t>;

    constexpr bool _Num_is_one = _CF::num == 1;
    constexpr bool _Den_is_one = _CF::den == 1;

    // Compile-time dispatch: trivial factors reduce to a bare cast, and no
    // unnecessary multiply or divide is ever emitted.
    if constexpr (_Den_is_one) {
        if constexpr (_Num_is_one) {
            return static_cast<_To>(static_cast<_ToRep>(_Dur.count()));
        } else {
            return static_cast<_To>(
                static_cast<_ToRep>(static_cast<_CR>(_Dur.count()) * static_cast<_CR>(_CF::num)));
        }
    } else {
        if constexpr (_Num_is_one) {
            return static_cast<_To>(
                static_cast<_ToRep>(static_cast<_CR>(_Dur.count()) / static_cast<_CR>(_CF::den)));
        } else {
            return static_cast<_To>(static_cast<_ToRep>(
                static_cast<_CR>(_Dur.count()) * static_cast<_CR>(_CF::num) / static_cast<_CR>(_CF::den)));
        }
    }
}
457
+
458
+ _EXPORT_STD template <class _To, class _Rep, class _Period, enable_if_t<_Is_duration_v<_To>, int> = 0>
459
+ _NODISCARD constexpr _To floor(const duration<_Rep, _Period>& _Dur) noexcept(
460
+ is_arithmetic_v<_Rep> && is_arithmetic_v<typename _To::rep>) /* strengthened */ {
461
+ // convert duration to another duration; round towards negative infinity
462
+ // i.e. the greatest integral result such that the result <= _Dur
463
+ const _To _Casted{_CHRONO duration_cast<_To>(_Dur)};
464
+ if (_Casted > _Dur) {
465
+ return _To{_Casted.count() - static_cast<typename _To::rep>(1)};
466
+ }
467
+
468
+ return _Casted;
469
+ }
470
+
471
+ _EXPORT_STD template <class _To, class _Rep, class _Period, enable_if_t<_Is_duration_v<_To>, int> = 0>
472
+ _NODISCARD constexpr _To ceil(const duration<_Rep, _Period>& _Dur) noexcept(
473
+ is_arithmetic_v<_Rep> && is_arithmetic_v<typename _To::rep>) /* strengthened */ {
474
+ // convert duration to another duration; round towards positive infinity
475
+ // i.e. the least integral result such that _Dur <= the result
476
+ const _To _Casted{_CHRONO duration_cast<_To>(_Dur)};
477
+ if (_Casted < _Dur) {
478
+ return _To{_Casted.count() + static_cast<typename _To::rep>(1)};
479
+ }
480
+
481
+ return _Casted;
482
+ }
483
+
484
+ template <class _Rep>
485
+ constexpr bool _Is_even(_Rep _Val) noexcept(is_arithmetic_v<_Rep>) /* strengthened */ {
486
+ // Tests whether _Val is even
487
+ return _Val % 2 == 0;
488
+ }
489
+
490
// chrono::round: convert duration to _To, rounding to nearest with ties to
// even; disabled when the target rep is floating-point (duration_cast is
// already exact there).
_EXPORT_STD template <class _To, class _Rep, class _Period,
    enable_if_t<_Is_duration_v<_To> && !treat_as_floating_point_v<typename _To::rep>, int> = 0>
_NODISCARD constexpr _To round(const duration<_Rep, _Period>& _Dur) noexcept(
    is_arithmetic_v<_Rep> && is_arithmetic_v<typename _To::rep>) /* strengthened */ {
    // convert duration to another duration, round to nearest, ties to even
    const _To _Floored{_CHRONO floor<_To>(_Dur)};
    const _To _Ceiled{_Floored + _To{1}};
    const auto _Floor_adjustment = _Dur - _Floored;
    const auto _Ceil_adjustment = _Ceiled - _Dur;
    // Pick the nearer endpoint; on an exact tie prefer the even tick count.
    if (_Floor_adjustment < _Ceil_adjustment
        || (_Floor_adjustment == _Ceil_adjustment && _Is_even(_Floored.count()))) {
        return _Floored;
    }

    return _Ceiled;
}
506
+
507
+ _EXPORT_STD template <class _Rep, class _Period, enable_if_t<numeric_limits<_Rep>::is_signed, int> = 0>
508
+ _NODISCARD constexpr duration<_Rep, _Period> abs(const duration<_Rep, _Period> _Dur) noexcept(
509
+ is_arithmetic_v<_Rep>) /* strengthened */ {
510
+ // create a duration whose count() is the absolute value of _Dur.count()
511
+ if (_Dur < duration<_Rep, _Period>::zero()) {
512
+ return -_Dur;
513
+ } else {
514
+ return _Dur;
515
+ }
516
+ }
517
+
518
// Standard duration aliases. long long gives the sub-second units roughly
// +/-292 years of range at nanosecond resolution; minutes and hours fit
// comfortably in int.
_EXPORT_STD using nanoseconds = duration<long long, nano>;
_EXPORT_STD using microseconds = duration<long long, micro>;
_EXPORT_STD using milliseconds = duration<long long, milli>;
_EXPORT_STD using seconds = duration<long long>;
_EXPORT_STD using minutes = duration<int, ratio<60>>;
_EXPORT_STD using hours = duration<int, ratio<3600>>;
#if _HAS_CXX20
// C++20 calendar durations: a civil (Gregorian) year averages 146097/400 days,
// and a month is exactly 1/12 of that year.
_EXPORT_STD using days = duration<int, ratio_multiply<ratio<24>, hours::period>>;
_EXPORT_STD using weeks = duration<int, ratio_multiply<ratio<7>, days::period>>;
_EXPORT_STD using years = duration<int, ratio_multiply<ratio<146097, 400>, days::period>>;
_EXPORT_STD using months = duration<int, ratio_divide<years::period, ratio<12>>>;
#endif // _HAS_CXX20
530
+
531
// time_point +/- duration yields a time_point over the common duration type;
// time_point - time_point yields that common duration.
_EXPORT_STD template <class _Clock, class _Duration, class _Rep, class _Period>
_NODISCARD constexpr time_point<_Clock, common_type_t<_Duration, duration<_Rep, _Period>>>
    operator+(const time_point<_Clock, _Duration>& _Left, const duration<_Rep, _Period>& _Right) noexcept(
        is_arithmetic_v<typename _Duration::rep> && is_arithmetic_v<_Rep>) /* strengthened */ {
    using _RT = time_point<_Clock, common_type_t<_Duration, duration<_Rep, _Period>>>;
    return _RT(_Left.time_since_epoch() + _Right);
}

// duration + time_point: forwards to the overload above.
_EXPORT_STD template <class _Rep, class _Period, class _Clock, class _Duration>
_NODISCARD constexpr time_point<_Clock, common_type_t<duration<_Rep, _Period>, _Duration>>
    operator+(const duration<_Rep, _Period>& _Left, const time_point<_Clock, _Duration>& _Right) noexcept(
        is_arithmetic_v<_Rep> && is_arithmetic_v<typename _Duration::rep>) /* strengthened */ {
    return _Right + _Left;
}

_EXPORT_STD template <class _Clock, class _Duration, class _Rep, class _Period>
_NODISCARD constexpr time_point<_Clock, common_type_t<_Duration, duration<_Rep, _Period>>>
    operator-(const time_point<_Clock, _Duration>& _Left, const duration<_Rep, _Period>& _Right) noexcept(
        is_arithmetic_v<typename _Duration::rep> && is_arithmetic_v<_Rep>) /* strengthened */ {
    using _RT = time_point<_Clock, common_type_t<_Duration, duration<_Rep, _Period>>>;
    return _RT(_Left.time_since_epoch() - _Right);
}

// Both time_points must share the same _Clock; mixing clocks doesn't match.
_EXPORT_STD template <class _Clock, class _Duration1, class _Duration2>
_NODISCARD constexpr common_type_t<_Duration1, _Duration2>
    operator-(const time_point<_Clock, _Duration1>& _Left, const time_point<_Clock, _Duration2>& _Right) noexcept(
        is_arithmetic_v<typename _Duration1::rep> && is_arithmetic_v<typename _Duration2::rep>) /* strengthened */ {
    return _Left.time_since_epoch() - _Right.time_since_epoch();
}
560
+
561
// time_point comparisons (same clock only): forward to the underlying
// duration comparisons via time_since_epoch().
_EXPORT_STD template <class _Clock, class _Duration1, class _Duration2>
_NODISCARD constexpr bool
    operator==(const time_point<_Clock, _Duration1>& _Left, const time_point<_Clock, _Duration2>& _Right) noexcept(
        is_arithmetic_v<typename _Duration1::rep> && is_arithmetic_v<typename _Duration2::rep>) /* strengthened */ {
    return _Left.time_since_epoch() == _Right.time_since_epoch();
}

#if !_HAS_CXX20
// In C++20 mode, != is synthesized from operator== by the language.
template <class _Clock, class _Duration1, class _Duration2>
_NODISCARD constexpr bool
    operator!=(const time_point<_Clock, _Duration1>& _Left, const time_point<_Clock, _Duration2>& _Right) noexcept(
        is_arithmetic_v<typename _Duration1::rep> && is_arithmetic_v<typename _Duration2::rep>) /* strengthened */ {
    return !(_Left == _Right);
}
#endif // !_HAS_CXX20

_EXPORT_STD template <class _Clock, class _Duration1, class _Duration2>
_NODISCARD constexpr bool
    operator<(const time_point<_Clock, _Duration1>& _Left, const time_point<_Clock, _Duration2>& _Right) noexcept(
        is_arithmetic_v<typename _Duration1::rep> && is_arithmetic_v<typename _Duration2::rep>) /* strengthened */ {
    return _Left.time_since_epoch() < _Right.time_since_epoch();
}

// <=, >, and >= are derived from < to keep the ordering logic in one place.
_EXPORT_STD template <class _Clock, class _Duration1, class _Duration2>
_NODISCARD constexpr bool
    operator<=(const time_point<_Clock, _Duration1>& _Left, const time_point<_Clock, _Duration2>& _Right) noexcept(
        is_arithmetic_v<typename _Duration1::rep> && is_arithmetic_v<typename _Duration2::rep>) /* strengthened */ {
    return !(_Right < _Left);
}

_EXPORT_STD template <class _Clock, class _Duration1, class _Duration2>
_NODISCARD constexpr bool
    operator>(const time_point<_Clock, _Duration1>& _Left, const time_point<_Clock, _Duration2>& _Right) noexcept(
        is_arithmetic_v<typename _Duration1::rep> && is_arithmetic_v<typename _Duration2::rep>) /* strengthened */ {
    return _Right < _Left;
}

_EXPORT_STD template <class _Clock, class _Duration1, class _Duration2>
_NODISCARD constexpr bool
    operator>=(const time_point<_Clock, _Duration1>& _Left, const time_point<_Clock, _Duration2>& _Right) noexcept(
        is_arithmetic_v<typename _Duration1::rep> && is_arithmetic_v<typename _Duration2::rep>) /* strengthened */ {
    return !(_Left < _Right);
}

#if _HAS_CXX20
// Spaceship is constrained on the durations being three-way comparable.
_EXPORT_STD template <class _Clock, class _Duration1, three_way_comparable_with<_Duration1> _Duration2>
_NODISCARD constexpr auto
    operator<=>(const time_point<_Clock, _Duration1>& _Left, const time_point<_Clock, _Duration2>& _Right) noexcept(
        is_arithmetic_v<typename _Duration1::rep> && is_arithmetic_v<typename _Duration2::rep>) /* strengthened */ {
    return _Left.time_since_epoch() <=> _Right.time_since_epoch();
}
#endif // _HAS_CXX20
613
+
614
// time_point conversions: each forwards the corresponding duration conversion
// applied to time_since_epoch(), keeping the clock unchanged.
_EXPORT_STD template <class _To, class _Clock, class _Duration, enable_if_t<_Is_duration_v<_To>, int> = 0>
_NODISCARD constexpr time_point<_Clock, _To> time_point_cast(const time_point<_Clock, _Duration>& _Time) noexcept(
    is_arithmetic_v<typename _Duration::rep> && is_arithmetic_v<typename _To::rep>) /* strengthened */ {
    // change the duration type of a time_point; truncate
    return time_point<_Clock, _To>(_CHRONO duration_cast<_To>(_Time.time_since_epoch()));
}

_EXPORT_STD template <class _To, class _Clock, class _Duration, enable_if_t<_Is_duration_v<_To>, int> = 0>
_NODISCARD constexpr time_point<_Clock, _To> floor(const time_point<_Clock, _Duration>& _Time) noexcept(
    is_arithmetic_v<typename _Duration::rep> && is_arithmetic_v<typename _To::rep>) /* strengthened */ {
    // change the duration type of a time_point; round towards negative infinity
    return time_point<_Clock, _To>(_CHRONO floor<_To>(_Time.time_since_epoch()));
}

_EXPORT_STD template <class _To, class _Clock, class _Duration, enable_if_t<_Is_duration_v<_To>, int> = 0>
_NODISCARD constexpr time_point<_Clock, _To> ceil(const time_point<_Clock, _Duration>& _Time) noexcept(
    is_arithmetic_v<typename _Duration::rep> && is_arithmetic_v<typename _To::rep>) /* strengthened */ {
    // change the duration type of a time_point; round towards positive infinity
    return time_point<_Clock, _To>(_CHRONO ceil<_To>(_Time.time_since_epoch()));
}

// Like duration round, disabled for floating-point target reps.
_EXPORT_STD template <class _To, class _Clock, class _Duration,
    enable_if_t<_Is_duration_v<_To> && !treat_as_floating_point_v<typename _To::rep>, int> = 0>
_NODISCARD constexpr time_point<_Clock, _To> round(const time_point<_Clock, _Duration>& _Time) noexcept(
    is_arithmetic_v<typename _Duration::rep> && is_arithmetic_v<typename _To::rep>) /* strengthened */ {
    // change the duration type of a time_point; round to nearest, ties to even
    return time_point<_Clock, _To>(_CHRONO round<_To>(_Time.time_since_epoch()));
}
642
+
643
// system_clock: wall-clock time, ticking in 100ns FILETIME units. Not steady:
// the system time can be adjusted, so it may jump backwards.
_EXPORT_STD struct system_clock { // wraps GetSystemTimePreciseAsFileTime
    using rep = long long;
    using period = ratio<1, 10'000'000>; // 100 nanoseconds
    using duration = _CHRONO duration<rep, period>;
    using time_point = _CHRONO time_point<system_clock>;
    static constexpr bool is_steady = false;

    _NODISCARD static time_point now() noexcept { // get current time
        return time_point(duration(_Xtime_get_ticks()));
    }

    // to_time_t truncates toward zero to whole seconds since the epoch.
    _NODISCARD static __time64_t to_time_t(const time_point& _Time) noexcept { // convert to __time64_t
        return duration_cast<seconds>(_Time.time_since_epoch()).count();
    }

    _NODISCARD static time_point from_time_t(__time64_t _Tm) noexcept { // convert from __time64_t
        return time_point{seconds{_Tm}};
    }
};
662
+
663
#if _HAS_CXX20
// sys_time: a system_clock time_point with a caller-chosen duration,
// plus the two aliases the calendar machinery uses most.
_EXPORT_STD template <class _Duration>
using sys_time = time_point<system_clock, _Duration>;
_EXPORT_STD using sys_seconds = sys_time<seconds>;
_EXPORT_STD using sys_days = sys_time<days>;
#endif // _HAS_CXX20
669
+
670
// steady_clock: monotonic clock reported in nanoseconds; is_steady is true,
// so it never jumps backwards.
_EXPORT_STD struct steady_clock { // wraps QueryPerformanceCounter
    using rep = long long;
    using period = nano;
    using duration = nanoseconds;
    using time_point = _CHRONO time_point<steady_clock>;
    static constexpr bool is_steady = true;

// Per-architecture [[likely]] placement for the frequency checks in now():
// the 24 MHz branch is the expected one on ARM/ARM64, 10 MHz on x86/x64.
#if defined(_M_ARM) || defined(_M_ARM64) // vvv ARM or ARM64 arch vvv
#define _LIKELY_ARM_ARM64 _LIKELY
#define _LIKELY_X86_X64
#elif defined(_M_IX86) || defined(_M_X64) // ^^^ ARM or ARM64 arch / x86 or x64 arch vvv
#define _LIKELY_ARM_ARM64
#define _LIKELY_X86_X64 _LIKELY
#else // ^^^ x86 or x64 arch / other arch vvv
#define _LIKELY_ARM_ARM64
#define _LIKELY_X86_X64
#endif // ^^^ other arch ^^^
    _NODISCARD static time_point now() noexcept { // get current time
        const long long _Freq = _Query_perf_frequency(); // doesn't change after system boot
        const long long _Ctr = _Query_perf_counter();
        static_assert(period::num == 1, "This assumes period::num == 1.");
        // The compiler recognizes the constants for frequency and time period and uses shifts and
        // multiplies instead of divides to calculate the nanosecond value.
        constexpr long long _TenMHz = 10'000'000;
        constexpr long long _TwentyFourMHz = 24'000'000;
        // clang-format off
        if (_Freq == _TenMHz) _LIKELY_X86_X64 {
            // 10 MHz is a very common QPC frequency on modern x86/x64 PCs. Optimizing for
            // this specific frequency can double the performance of this function by
            // avoiding the expensive frequency conversion path.
            static_assert(period::den % _TenMHz == 0, "It should never fail.");
            constexpr long long _Multiplier = period::den / _TenMHz;
            return time_point(duration(_Ctr * _Multiplier));
        } else if (_Freq == _TwentyFourMHz) _LIKELY_ARM_ARM64 {
            // 24 MHz is a common frequency on ARM/ARM64, including cases where it emulates x86/x64.
            // Whole + fractional split below avoids overflowing _Ctr * period::den.
            const long long _Whole = (_Ctr / _TwentyFourMHz) * period::den;
            const long long _Part = (_Ctr % _TwentyFourMHz) * period::den / _TwentyFourMHz;
            return time_point(duration(_Whole + _Part));
        } else {
            // Instead of just having "(_Ctr * period::den) / _Freq",
            // the algorithm below prevents overflow when _Ctr is sufficiently large.
            // It assumes that _Freq * period::den does not overflow, which is currently true for nano period.
            // It is not realistic for _Ctr to accumulate to large values from zero with this assumption,
            // but the initial value of _Ctr could be large.
            const long long _Whole = (_Ctr / _Freq) * period::den;
            const long long _Part = (_Ctr % _Freq) * period::den / _Freq;
            return time_point(duration(_Whole + _Part));
        }
        // clang-format on
    }
#undef _LIKELY_ARM_ARM64
#undef _LIKELY_X86_X64
};
723
+
724
// On this implementation high_resolution_clock is an alias for steady_clock
// (permitted by [time.clock.hires]); it is not a distinct clock type.
_EXPORT_STD using high_resolution_clock = steady_clock;
725
+ } // namespace chrono
726
+
727
inline namespace literals {
    inline namespace chrono_literals {
        // UDL suffixes from [time.duration.literals]: an integral literal yields the
        // corresponding integer-based duration; a floating-point literal yields a
        // double-based duration with the matching period. noexcept is a conforming
        // extension, hence the "strengthened" markers.
        _EXPORT_STD _NODISCARD constexpr _CHRONO hours operator""h(unsigned long long _Val) noexcept
        /* strengthened */ {
            return _CHRONO hours(_Val);
        }

        _EXPORT_STD _NODISCARD constexpr _CHRONO duration<double, ratio<3600>> operator""h(long double _Val) noexcept
        /* strengthened */ {
            return _CHRONO duration<double, ratio<3600>>(_Val);
        }

        _EXPORT_STD _NODISCARD constexpr _CHRONO minutes operator""min(unsigned long long _Val) noexcept
        /* strengthened */ {
            return _CHRONO minutes(_Val);
        }

        _EXPORT_STD _NODISCARD constexpr _CHRONO duration<double, ratio<60>> operator""min(long double _Val) noexcept
        /* strengthened */ {
            return _CHRONO duration<double, ratio<60>>(_Val);
        }

        _EXPORT_STD _NODISCARD constexpr _CHRONO seconds operator""s(unsigned long long _Val) noexcept
        /* strengthened */ {
            return _CHRONO seconds(_Val);
        }

        _EXPORT_STD _NODISCARD constexpr _CHRONO duration<double> operator""s(long double _Val) noexcept
        /* strengthened */ {
            return _CHRONO duration<double>(_Val);
        }

        _EXPORT_STD _NODISCARD constexpr _CHRONO milliseconds operator""ms(unsigned long long _Val) noexcept
        /* strengthened */ {
            return _CHRONO milliseconds(_Val);
        }

        _EXPORT_STD _NODISCARD constexpr _CHRONO duration<double, milli> operator""ms(long double _Val) noexcept
        /* strengthened */ {
            return _CHRONO duration<double, milli>(_Val);
        }

        _EXPORT_STD _NODISCARD constexpr _CHRONO microseconds operator""us(unsigned long long _Val) noexcept
        /* strengthened */ {
            return _CHRONO microseconds(_Val);
        }

        _EXPORT_STD _NODISCARD constexpr _CHRONO duration<double, micro> operator""us(long double _Val) noexcept
        /* strengthened */ {
            return _CHRONO duration<double, micro>(_Val);
        }

        _EXPORT_STD _NODISCARD constexpr _CHRONO nanoseconds operator""ns(unsigned long long _Val) noexcept
        /* strengthened */ {
            return _CHRONO nanoseconds(_Val);
        }

        _EXPORT_STD _NODISCARD constexpr _CHRONO duration<double, nano> operator""ns(long double _Val) noexcept
        /* strengthened */ {
            return _CHRONO duration<double, nano>(_Val);
        }
    } // namespace chrono_literals
} // namespace literals

namespace chrono {
    // Make the duration UDLs visible via "using namespace std::chrono;" as
    // required by [time.duration.literals]/1.
    _EXPORT_STD using namespace literals::chrono_literals;
} // namespace chrono
794
+ _STD_END
795
+
796
+ #pragma pop_macro("new")
797
+ _STL_RESTORE_CLANG_WARNINGS
798
+ #pragma warning(pop)
799
+ #pragma pack(pop)
800
+ #endif // _STL_COMPILER_PREPROCESSOR
801
+ #endif // __MSVC_CHRONO_HPP
miniMSVC/VC/Tools/MSVC/14.42.34433/include/__msvc_cxx_stdatomic.hpp ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// __msvc_cxx_stdatomic.hpp internal header

// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

// Implements C++23 <stdatomic.h> interoperability: exposes the C atomic API
// names in the global namespace, backed by C++ std::atomic.

#ifndef __MSVC_CXX_STDATOMIC_HPP
#define __MSVC_CXX_STDATOMIC_HPP

// see _STL_COMPILER_PREPROCESSOR in yvals_core.h
#if !defined(RC_INVOKED) && !defined(Q_MOC_RUN) && !defined(__midl)

// provide a specific error message for C compilers, before the general error message in yvals_core.h
#ifndef __cplusplus
#error <__msvc_cxx_stdatomic.hpp> is an internal header. It is incompatible with C and should not be directly included.
#endif // !defined(__cplusplus)

#include <yvals.h>

#ifdef _M_CEE_PURE
#error <stdatomic.h> is not supported when compiling with /clr:pure.
#endif // defined(_M_CEE_PURE)

#if !_HAS_CXX23
_EMIT_STL_WARNING(STL4038, "The contents of <stdatomic.h> are available only with C++23 or later.");
#else // ^^^ !_HAS_CXX23 / _HAS_CXX23 vvv

#include <atomic>

#pragma pack(push, _CRT_PACKING)
#pragma warning(push, _STL_WARNING_LEVEL)
#pragma warning(disable : _STL_DISABLED_WARNINGS)
_STL_DISABLE_CLANG_WARNINGS
#pragma push_macro("new")
#undef new

// Map the C _Atomic(T) type specifier onto std::atomic<T>. An alias template
// is used so the macro argument expands as a single template argument.
template <class _Ty>
using _Std_atomic = _STD atomic<_Ty>;

#define _Atomic(T) _Std_atomic<T>

// Hoist the C-compatible names from namespace std into the global namespace,
// matching the declarations that C's <stdatomic.h> provides.
using _STD memory_order;
using _STD memory_order_relaxed;
using _STD memory_order_consume;
using _STD memory_order_acquire;
using _STD memory_order_release;
using _STD memory_order_acq_rel;
using _STD memory_order_seq_cst;

using _STD atomic_flag;

using _STD atomic_bool;
using _STD atomic_char;
using _STD atomic_schar;
using _STD atomic_uchar;
using _STD atomic_short;
using _STD atomic_ushort;
using _STD atomic_int;
using _STD atomic_uint;
using _STD atomic_long;
using _STD atomic_ulong;
using _STD atomic_llong;
using _STD atomic_ullong;

#ifdef __cpp_lib_char8_t
using _STD atomic_char8_t;
#endif // defined(__cpp_lib_char8_t)

using _STD atomic_char16_t;
using _STD atomic_char32_t;
using _STD atomic_wchar_t;
using _STD atomic_int8_t;
using _STD atomic_uint8_t;
using _STD atomic_int16_t;
using _STD atomic_uint16_t;
using _STD atomic_int32_t;
using _STD atomic_uint32_t;
using _STD atomic_int64_t;
using _STD atomic_uint64_t;
using _STD atomic_int_least8_t;
using _STD atomic_uint_least8_t;
using _STD atomic_int_least16_t;
using _STD atomic_uint_least16_t;
using _STD atomic_int_least32_t;
using _STD atomic_uint_least32_t;
using _STD atomic_int_least64_t;
using _STD atomic_uint_least64_t;
using _STD atomic_int_fast8_t;
using _STD atomic_uint_fast8_t;
using _STD atomic_int_fast16_t;
using _STD atomic_uint_fast16_t;
using _STD atomic_int_fast32_t;
using _STD atomic_uint_fast32_t;
using _STD atomic_int_fast64_t;
using _STD atomic_uint_fast64_t;
using _STD atomic_intptr_t;
using _STD atomic_uintptr_t;
using _STD atomic_size_t;
using _STD atomic_ptrdiff_t;
using _STD atomic_intmax_t;
using _STD atomic_uintmax_t;

// Free-function atomic operations (C-style API over std::atomic objects).
using _STD atomic_is_lock_free;
using _STD atomic_load;
using _STD atomic_load_explicit;
using _STD atomic_store;
using _STD atomic_store_explicit;
using _STD atomic_exchange;
using _STD atomic_exchange_explicit;
using _STD atomic_compare_exchange_strong;
using _STD atomic_compare_exchange_strong_explicit;
using _STD atomic_compare_exchange_weak;
using _STD atomic_compare_exchange_weak_explicit;
using _STD atomic_fetch_add;
using _STD atomic_fetch_add_explicit;
using _STD atomic_fetch_sub;
using _STD atomic_fetch_sub_explicit;
using _STD atomic_fetch_or;
using _STD atomic_fetch_or_explicit;
using _STD atomic_fetch_xor;
using _STD atomic_fetch_xor_explicit;
using _STD atomic_fetch_and;
using _STD atomic_fetch_and_explicit;
using _STD atomic_flag_test_and_set;
using _STD atomic_flag_test_and_set_explicit;
using _STD atomic_flag_clear;
using _STD atomic_flag_clear_explicit;

using _STD atomic_thread_fence;
using _STD atomic_signal_fence;

#pragma pop_macro("new")
_STL_RESTORE_CLANG_WARNINGS
#pragma warning(pop)
#pragma pack(pop)
#endif // ^^^ _HAS_CXX23 ^^^

#endif // !defined(RC_INVOKED) && !defined(Q_MOC_RUN) && !defined(__midl)
#endif // __MSVC_CXX_STDATOMIC_HPP
miniMSVC/VC/Tools/MSVC/14.42.34433/include/__msvc_filebuf.hpp ADDED
@@ -0,0 +1,790 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // __msvc_filebuf.hpp internal header
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ #ifndef __MSVC_FILEBUF_HPP
7
+ #define __MSVC_FILEBUF_HPP
8
+ #include <yvals.h>
9
+ #if _STL_COMPILER_PREPROCESSOR
10
+
11
+ #include <cstdio>
12
+ #include <streambuf>
13
+
14
+ #pragma pack(push, _CRT_PACKING)
15
+ #pragma warning(push, _STL_WARNING_LEVEL)
16
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
17
+ _STL_DISABLE_CLANG_WARNINGS
18
+ #pragma push_macro("new")
19
+ #undef new
20
+
21
+ // TRANSITION, ABI: The _Path_ish functions accepting filesystem::path or experimental::filesystem::path are templates
22
+ // which always use the same types as a workaround for user code deriving from iostreams types and
23
+ // __declspec(dllexport)ing the derived types. Adding member functions to iostreams broke the ABI of such DLLs.
24
+ // Deriving and __declspec(dllexport)ing standard library types is not supported, but in this particular case
25
+ // the workaround was inexpensive. The workaround will be removed in the next ABI breaking release of the
26
+ // Visual C++ Libraries.
27
_STD_BEGIN
#if _HAS_CXX17
// Forward declaration so the _Path_ish open() overloads can be declared
// without dragging <filesystem> into this header.
namespace filesystem {
    _EXPORT_STD class path;
}
#endif // _HAS_CXX17

// experimental::filesystem support is unavailable under /clr (_M_CEE).
#ifndef _FSTREAM_SUPPORTS_EXPERIMENTAL_FILESYSTEM
#ifdef _M_CEE
#define _FSTREAM_SUPPORTS_EXPERIMENTAL_FILESYSTEM 0
#else // ^^^ defined(_M_CEE) / !defined(_M_CEE) vvv
#define _FSTREAM_SUPPORTS_EXPERIMENTAL_FILESYSTEM 1
#endif // ^^^ !defined(_M_CEE) ^^^
#endif // _FSTREAM_SUPPORTS_EXPERIMENTAL_FILESYSTEM

#if _FSTREAM_SUPPORTS_EXPERIMENTAL_FILESYSTEM
namespace experimental {
    namespace filesystem {
        inline namespace v1 {
            class path;
        }
    } // namespace filesystem
} // namespace experimental
#endif // _FSTREAM_SUPPORTS_EXPERIMENTAL_FILESYSTEM

// _Is_any_path<_Ty>: true when _Ty is one of the supported path types
// (std::filesystem::path and/or experimental::filesystem::path, as configured).
// clang-format off
template <class _Ty>
constexpr bool _Is_any_path = _Is_any_of_v<_Ty
#if _FSTREAM_SUPPORTS_EXPERIMENTAL_FILESYSTEM
    , experimental::filesystem::path
#endif // _FSTREAM_SUPPORTS_EXPERIMENTAL_FILESYSTEM
#if _HAS_CXX17
    , filesystem::path
#endif // _HAS_CXX17
    >;
// clang-format on

// Opens a C stream for the given filename and ios_base::openmode; the int
// parameter is a Microsoft extension (presumably a protection/sharing flag,
// cf. ios_base::_Default_open_prot used by callers below — confirm in the CRT).
extern "C++" _CRTIMP2_PURE FILE* __CLRCALL_PURE_OR_CDECL _Fiopen(const char*, ios_base::openmode, int);
extern "C++" _CRTIMP2_PURE FILE* __CLRCALL_PURE_OR_CDECL _Fiopen(const wchar_t*, ios_base::openmode, int);
66
+
67
+ template <class _Elem>
68
+ bool _Fgetc(_Elem& _Ch, FILE* _File) { // get an element from a C stream
69
+ return _CSTD fread(&_Ch, sizeof(_Elem), 1, _File) == 1;
70
+ }
71
+
72
+ template <>
73
+ inline bool _Fgetc(char& _Byte, FILE* _File) { // get a char element from a C stream
74
+ int _Meta;
75
+ if ((_Meta = _CSTD fgetc(_File)) == EOF) {
76
+ return false;
77
+ } else { // got one, convert to char
78
+ _Byte = static_cast<char>(_Meta);
79
+ return true;
80
+ }
81
+ }
82
+
83
+ template <>
84
+ inline bool _Fgetc(wchar_t& _Wchar, FILE* _File) { // get a wchar_t element from a C stream
85
+ wint_t _Meta;
86
+ if ((_Meta = _CSTD fgetwc(_File)) == WEOF) {
87
+ return false;
88
+ } else { // got one, convert to wchar_t
89
+ _Wchar = static_cast<wchar_t>(_Meta);
90
+ return true;
91
+ }
92
+ }
93
+
94
+ #ifdef _CRTBLD
95
+ template <>
96
+ inline bool _Fgetc(unsigned short& _Wchar, FILE* _File) { // get an unsigned short element from a C stream
97
+ wint_t _Meta;
98
+ if ((_Meta = _CSTD fgetwc(_File)) == WEOF) {
99
+ return false;
100
+ } else { // got one, convert to unsigned short
101
+ _Wchar = static_cast<unsigned short>(_Meta);
102
+ return true;
103
+ }
104
+ }
105
+ #endif // defined(_CRTBLD)
106
+
107
+ template <class _Elem>
108
+ bool _Fputc(_Elem _Ch, FILE* _File) { // put an element to a C stream
109
+ return _CSTD fwrite(&_Ch, 1, sizeof(_Elem), _File) == sizeof(_Elem);
110
+ }
111
+
112
+ template <>
113
+ inline bool _Fputc(char _Byte, FILE* _File) { // put a char element to a C stream
114
+ return _CSTD fputc(_Byte, _File) != EOF;
115
+ }
116
+
117
+ template <>
118
+ inline bool _Fputc(wchar_t _Wchar, FILE* _File) { // put a wchar_t element to a C stream
119
+ return _CSTD fputwc(_Wchar, _File) != WEOF;
120
+ }
121
+
122
+ #ifdef _CRTBLD
123
+ template <>
124
+ inline bool _Fputc(unsigned short _Wchar, FILE* _File) { // put an unsigned short element to a C stream
125
+ return _CSTD fputwc(_Wchar, _File) != WEOF;
126
+ }
127
+ #endif // defined(_CRTBLD)
128
+
129
+ template <class _Elem>
130
+ bool _Ungetc(const _Elem&, FILE*) { // put back an arbitrary element to a C stream (always fail)
131
+ return false;
132
+ }
133
+
134
+ template <>
135
+ inline bool _Ungetc(const char& _Byte, FILE* _File) { // put back a char element to a C stream
136
+ return _CSTD ungetc(static_cast<unsigned char>(_Byte), _File) != EOF;
137
+ }
138
+
139
+ template <>
140
+ inline bool _Ungetc(const signed char& _Byte, FILE* _File) { // put back a signed char element to a C stream
141
+ return _CSTD ungetc(static_cast<unsigned char>(_Byte), _File) != EOF;
142
+ }
143
+
144
+ template <>
145
+ inline bool _Ungetc(const unsigned char& _Byte, FILE* _File) { // put back an unsigned char element to a C stream
146
+ return _CSTD ungetc(_Byte, _File) != EOF;
147
+ }
148
+
149
+ template <>
150
+ inline bool _Ungetc(const wchar_t& _Wchar, FILE* _File) { // put back a wchar_t element to a C stream
151
+ return _CSTD ungetwc(_Wchar, _File) != WEOF;
152
+ }
153
+
154
+ #ifdef _CRTBLD
155
+ template <>
156
+ inline bool _Ungetc(const unsigned short& _Wchar, FILE* _File) { // put back an unsigned short element to a C stream
157
+ return _CSTD ungetwc(_Wchar, _File) != WEOF;
158
+ }
159
+ #endif // defined(_CRTBLD)
160
+
161
+ _EXPORT_STD template <class _Elem, class _Traits>
162
+ class basic_filebuf : public basic_streambuf<_Elem, _Traits> { // stream buffer associated with a C stream
163
+ public:
164
+ using _Mysb = basic_streambuf<_Elem, _Traits>;
165
+ using _Cvt = codecvt<_Elem, char, typename _Traits::state_type>;
166
+
167
+ basic_filebuf() : _Mysb() {
168
+ _Init(nullptr, _Newfl);
169
+ }
170
+
171
+ explicit basic_filebuf(FILE* const _File) : _Mysb() { // extension
172
+ _Init(_File, _Newfl);
173
+ }
174
+
175
+ __CLR_OR_THIS_CALL ~basic_filebuf() noexcept override {
176
+ if (_Myfile) {
177
+ _Reset_back(); // revert from _Mychar buffer
178
+ }
179
+
180
+ if (_Closef) {
181
+ close();
182
+ }
183
+ }
184
+
185
+ using int_type = typename _Traits::int_type;
186
+ using pos_type = typename _Traits::pos_type;
187
+ using off_type = typename _Traits::off_type;
188
+
189
+ basic_filebuf(_Uninitialized) noexcept : _Mysb(_Noinit) {}
190
+
191
+ basic_filebuf(basic_filebuf&& _Right) {
192
+ _Init(_Right._Myfile, _Newfl); // match buffering styles
193
+ _Init(static_cast<FILE*>(nullptr), _Closefl); // then make *this look closed
194
+ _Assign_rv(_STD move(_Right));
195
+ }
196
+
197
+ basic_filebuf& operator=(basic_filebuf&& _Right) {
198
+ _Assign_rv(_STD move(_Right));
199
+ return *this;
200
+ }
201
+
202
+ void _Assign_rv(basic_filebuf&& _Right) {
203
+ if (this != _STD addressof(_Right)) {
204
+ close();
205
+ this->swap(_Right);
206
+ }
207
+ }
208
+
209
+ void swap(basic_filebuf& _Right) noexcept /* strengthened */ {
210
+ if (this != _STD addressof(_Right)) {
211
+ FILE* _Myfile_sav = _Myfile;
212
+ const _Cvt* _Pcvt_sav = _Pcvt;
213
+ typename _Traits::state_type _State_sav = _State;
214
+ bool _Wrotesome_sav = _Wrotesome;
215
+ bool _Closef_sav = _Closef;
216
+ bool _Set_eback_sav = _Mysb::eback() == &_Mychar;
217
+ bool _Set_eback_live = _Mysb::gptr() == &_Mychar;
218
+
219
+ _Elem* _Pfirst0 = _Mysb::pbase();
220
+ _Elem* _Pnext0 = _Mysb::pptr();
221
+ _Elem* _Pend = _Mysb::epptr();
222
+ _Elem* _Gfirst0 = _Mysb::eback();
223
+ _Elem* _Gnext0 = _Mysb::gptr();
224
+ _Elem* _Gend = _Mysb::egptr();
225
+
226
+ // reinitialize *this
227
+ _Init(_Right._Myfile, _Right._Myfile ? _Openfl : _Newfl);
228
+ _Mysb::setp(_Right.pbase(), _Right.pptr(), _Right.epptr());
229
+ if (_Right.eback() != &_Right._Mychar) {
230
+ _Mysb::setg(_Right.eback(), _Right.gptr(), _Right.egptr());
231
+ } else if (_Right.gptr() != &_Right._Mychar) {
232
+ _Mysb::setg(&_Mychar, &_Mychar + 1, &_Mychar + 1);
233
+ } else {
234
+ _Mysb::setg(&_Mychar, &_Mychar, &_Mychar + 1);
235
+ }
236
+
237
+ _Pcvt = _Right._Pcvt;
238
+ _State = _Right._State;
239
+ _Wrotesome = _Right._Wrotesome;
240
+ _Closef = _Right._Closef;
241
+
242
+ // reinitialize _Right
243
+ _Right._Init(_Myfile_sav, _Myfile_sav ? _Openfl : _Newfl);
244
+ _Right.setp(_Pfirst0, _Pnext0, _Pend);
245
+ if (!_Set_eback_sav) {
246
+ _Right.setg(_Gfirst0, _Gnext0, _Gend);
247
+ } else if (!_Set_eback_live) {
248
+ _Right.setg(&_Right._Mychar, &_Right._Mychar + 1, &_Right._Mychar + 1);
249
+ } else {
250
+ _Right.setg(&_Right._Mychar, &_Right._Mychar, &_Right._Mychar + 1);
251
+ }
252
+
253
+ _Right._Pcvt = _Pcvt_sav;
254
+ _Right._State = _State_sav;
255
+ _Right._Wrotesome = _Wrotesome_sav;
256
+ _Right._Closef = _Closef_sav;
257
+
258
+ // swap ancillary data
259
+ _STD swap(_Set_eback, _Right._Set_eback);
260
+ _STD swap(_Set_egptr, _Right._Set_egptr);
261
+
262
+ _STD swap(_Mychar, _Right._Mychar);
263
+ _STD swap(_Mysb::_Plocale, _Right._Plocale);
264
+ }
265
+ }
266
+
267
+ basic_filebuf(const basic_filebuf&) = delete;
268
+ basic_filebuf& operator=(const basic_filebuf&) = delete;
269
+
270
+ enum _Initfl { // reasons for a call to _Init
271
+ _Newfl,
272
+ _Openfl,
273
+ _Closefl
274
+ };
275
+
276
+ _NODISCARD bool is_open() const noexcept /* strengthened */ {
277
+ return static_cast<bool>(_Myfile);
278
+ }
279
+
280
+ basic_filebuf* open(const char* _Filename, ios_base::openmode _Mode, int _Prot = ios_base::_Default_open_prot) {
281
+ // _Prot is an extension
282
+ if (_Myfile) {
283
+ return nullptr;
284
+ }
285
+
286
+ const auto _File = _Fiopen(_Filename, _Mode, _Prot);
287
+ if (!_File) {
288
+ return nullptr; // open failed
289
+ }
290
+
291
+ _Init(_File, _Openfl);
292
+ _Initcvt(_STD use_facet<_Cvt>(_Mysb::getloc()));
293
+ return this; // open succeeded
294
+ }
295
+
296
+ basic_filebuf* open(const string& _Str, ios_base::openmode _Mode, int _Prot = ios_base::_Default_open_prot) {
297
+ // _Prot is an extension
298
+ return open(_Str.c_str(), _Mode, _Prot);
299
+ }
300
+
301
+ #if _HAS_OLD_IOSTREAMS_MEMBERS
302
+ basic_filebuf* open(const char* _Filename, ios_base::open_mode _Mode) {
303
+ return open(_Filename, static_cast<ios_base::openmode>(_Mode));
304
+ }
305
+ #endif // _HAS_OLD_IOSTREAMS_MEMBERS
306
+
307
+ basic_filebuf* open(const wchar_t* _Filename, ios_base::openmode _Mode, int _Prot = ios_base::_Default_open_prot) {
308
+ // in standard as const std::filesystem::path::value_type *; _Prot is an extension
309
+ if (_Myfile) {
310
+ return nullptr;
311
+ }
312
+
313
+ const auto _File = _Fiopen(_Filename, _Mode, _Prot);
314
+ if (!_File) {
315
+ return nullptr; // open failed
316
+ }
317
+
318
+ _Init(_File, _Openfl);
319
+ _Initcvt(_STD use_facet<_Cvt>(_Mysb::getloc()));
320
+ return this; // open succeeded
321
+ }
322
+
323
+ basic_filebuf* open(const wstring& _Str, ios_base::openmode _Mode, int _Prot = ios_base::_Default_open_prot) {
324
+ // extension
325
+ return open(_Str.c_str(), _Mode, _Prot);
326
+ }
327
+
328
+ #if _FSTREAM_SUPPORTS_EXPERIMENTAL_FILESYSTEM
329
+ template <class _Path_ish = experimental::filesystem::path>
330
+ basic_filebuf* open(
331
+ const _Identity_t<_Path_ish>& _Path, ios_base::openmode _Mode, int _Prot = ios_base::_Default_open_prot) {
332
+ // _Prot is an extension
333
+ return open(_Path.c_str(), _Mode, _Prot);
334
+ }
335
+ #endif // _FSTREAM_SUPPORTS_EXPERIMENTAL_FILESYSTEM
336
+
337
+ #if _HAS_CXX17
338
+ template <int = 0, class _Path_ish = filesystem::path>
339
+ basic_filebuf* open(
340
+ const _Identity_t<_Path_ish>& _Path, ios_base::openmode _Mode, int _Prot = ios_base::_Default_open_prot) {
341
+ // _Prot is an extension
342
+ return open(_Path.c_str(), _Mode, _Prot);
343
+ }
344
+ #endif // _HAS_CXX17
345
+
346
+ #if _HAS_OLD_IOSTREAMS_MEMBERS
347
+ basic_filebuf* open(const wchar_t* _Filename, ios_base::open_mode _Mode) {
348
+ // in standard as const std::filesystem::path::value_type *
349
+ return open(_Filename, static_cast<ios_base::openmode>(_Mode));
350
+ }
351
+ #endif // _HAS_OLD_IOSTREAMS_MEMBERS
352
+
353
+ basic_filebuf* close() {
354
+ basic_filebuf* _Ans;
355
+ if (_Myfile) { // put any homing sequence and close file
356
+ _Reset_back(); // revert from _Mychar buffer
357
+
358
+ _Ans = this;
359
+ if (!_Endwrite()) {
360
+ _Ans = nullptr;
361
+ }
362
+
363
+ if (_CSTD fclose(_Myfile) != 0) {
364
+ _Ans = nullptr;
365
+ }
366
+ } else {
367
+ _Ans = nullptr;
368
+ }
369
+
370
+ _Init(nullptr, _Closefl);
371
+ return _Ans;
372
+ }
373
+
374
+ void __CLR_OR_THIS_CALL _Lock() override { // lock file instead of stream buffer
375
+ if (_Myfile) {
376
+ _CSTD _lock_file(_Myfile);
377
+ }
378
+ }
379
+
380
+ void __CLR_OR_THIS_CALL _Unlock() override { // unlock file instead of stream buffer
381
+ if (_Myfile) {
382
+ _CSTD _unlock_file(_Myfile);
383
+ }
384
+ }
385
+
386
+ #if _HAS_CXX23 && defined(_CPPRTTI)
387
+ template <class _Filebuf_type>
388
+ friend ios_base::iostate _Print_noformat_unicode(ostream&, string_view);
389
+ #endif
390
+
391
+ protected:
392
+ int_type __CLR_OR_THIS_CALL overflow(int_type _Meta = _Traits::eof()) override { // put an element to stream
393
+ if (_Traits::eq_int_type(_Traits::eof(), _Meta)) {
394
+ return _Traits::not_eof(_Meta); // EOF, return success code
395
+ }
396
+
397
+ if (_Mysb::pptr() && _Mysb::pptr() < _Mysb::epptr()) { // room in buffer, store it
398
+ *_Mysb::_Pninc() = _Traits::to_char_type(_Meta);
399
+ return _Meta;
400
+ }
401
+
402
+ if (!_Myfile) {
403
+ return _Traits::eof(); // no open C stream, fail
404
+ }
405
+
406
+ _Reset_back(); // revert from _Mychar buffer
407
+ if (!_Pcvt) { // no codecvt facet, put as is
408
+ return _Fputc(_Traits::to_char_type(_Meta), _Myfile) ? _Meta : _Traits::eof();
409
+ }
410
+
411
+ // put using codecvt facet
412
+ constexpr size_t _Codecvt_temp_buf = 32;
413
+ char _Str[_Codecvt_temp_buf];
414
+ const _Elem _Ch = _Traits::to_char_type(_Meta);
415
+ const _Elem* _Src;
416
+ char* _Dest;
417
+
418
+ // test result of converting one element
419
+ switch (_Pcvt->out(_State, &_Ch, &_Ch + 1, _Src, _Str, _Str + _Codecvt_temp_buf, _Dest)) {
420
+ case codecvt_base::partial:
421
+ case codecvt_base::ok:
422
+ { // converted something, try to put it out
423
+ const auto _Count = static_cast<size_t>(_Dest - _Str);
424
+ if (0 < _Count && _Count != static_cast<size_t>(_CSTD fwrite(_Str, 1, _Count, _Myfile))) {
425
+ return _Traits::eof(); // write failed
426
+ }
427
+
428
+ _Wrotesome = true; // write succeeded
429
+ if (_Src != &_Ch) {
430
+ return _Meta; // converted whole element
431
+ }
432
+
433
+ return _Traits::eof(); // conversion failed
434
+ }
435
+
436
+ case codecvt_base::noconv:
437
+ // no conversion, put as is
438
+ return _Fputc(_Ch, _Myfile) ? _Meta : _Traits::eof();
439
+
440
+ default:
441
+ return _Traits::eof(); // conversion failed
442
+ }
443
+ }
444
+
445
+ int_type __CLR_OR_THIS_CALL pbackfail(int_type _Meta = _Traits::eof()) override {
446
+ // put an element back to stream
447
+ if (_Mysb::gptr() && _Mysb::eback() < _Mysb::gptr()
448
+ && (_Traits::eq_int_type(_Traits::eof(), _Meta)
449
+ || _Traits::eq_int_type(_Traits::to_int_type(_Mysb::gptr()[-1]),
450
+ _Meta))) { // just back up position
451
+ _Mysb::_Gndec();
452
+ return _Traits::not_eof(_Meta);
453
+ } else if (!_Myfile || _Traits::eq_int_type(_Traits::eof(), _Meta)) {
454
+ return _Traits::eof(); // no open C stream or EOF, fail
455
+ } else if (!_Pcvt && _Ungetc(_Traits::to_char_type(_Meta), _Myfile)) {
456
+ return _Meta; // no facet and unget succeeded, return
457
+ } else if (_Mysb::gptr() != &_Mychar) { // putback to _Mychar
458
+ _Mychar = _Traits::to_char_type(_Meta);
459
+ _Set_back(); // switch to _Mychar buffer
460
+ return _Meta;
461
+ } else {
462
+ return _Traits::eof(); // nowhere to put back
463
+ }
464
+ }
465
+
466
+ int_type __CLR_OR_THIS_CALL underflow() override { // get an element from stream, but don't point past it
467
+ int_type _Meta;
468
+ if (_Mysb::gptr() && _Mysb::gptr() < _Mysb::egptr()) {
469
+ return _Traits::to_int_type(*_Mysb::gptr()); // return buffered
470
+ } else if (_Traits::eq_int_type(_Traits::eof(), _Meta = uflow())) {
471
+ return _Meta; // uflow failed, return EOF
472
+ } else { // get a char, don't point past it
473
+ pbackfail(_Meta);
474
+ return _Meta;
475
+ }
476
+ }
477
+
478
+ int_type __CLR_OR_THIS_CALL uflow() override { // get an element from stream, point past it
479
+ if (_Mysb::gptr() && _Mysb::gptr() < _Mysb::egptr()) {
480
+ return _Traits::to_int_type(*_Mysb::_Gninc()); // return buffered
481
+ }
482
+
483
+ if (!_Myfile) {
484
+ return _Traits::eof(); // no open C stream, fail
485
+ }
486
+
487
+ _Reset_back(); // revert from _Mychar buffer
488
+ if (!_Pcvt) { // no codecvt facet, just get it
489
+ _Elem _Ch;
490
+ return _Fgetc(_Ch, _Myfile) ? _Traits::to_int_type(_Ch) : _Traits::eof();
491
+ }
492
+
493
+ // build string until codecvt succeeds
494
+ string _Str;
495
+
496
+ for (;;) { // get using codecvt facet
497
+ const char* _Src;
498
+ int _Meta = _CSTD fgetc(_Myfile);
499
+
500
+ if (_Meta == EOF) {
501
+ return _Traits::eof(); // partial char?
502
+ }
503
+
504
+ _Str.push_back(static_cast<char>(_Meta)); // append byte and convert
505
+
506
+ _Elem _Ch;
507
+ _Elem* _Dest;
508
+
509
+ // test result of converting one element
510
+ switch (_Pcvt->in(_State, _Str.data(), _Str.data() + _Str.size(), _Src, &_Ch, &_Ch + 1, _Dest)) {
511
+ case codecvt_base::partial:
512
+ case codecvt_base::ok:
513
+ if (_Dest != &_Ch) { // got an element, put back excess and deliver it
514
+ auto _Nleft = _Str.data() + _Str.size() - _Src;
515
+ while (0 < _Nleft) {
516
+ _CSTD ungetc(_Src[--_Nleft], _Myfile);
517
+ }
518
+
519
+ return _Traits::to_int_type(_Ch);
520
+ }
521
+
522
+ _Str.erase(0, static_cast<size_t>(_Src - _Str.data())); // partial, discard used input
523
+ break;
524
+
525
+ case codecvt_base::noconv:
526
+ // noconv is only possible if _Elem is char, so we can use it directly
527
+ return static_cast<int_type>(_Str.front());
528
+
529
+ default:
530
+ return _Traits::eof(); // conversion failed
531
+ }
532
+ }
533
+ }
534
+
535
+ streamsize __CLR_OR_THIS_CALL xsgetn(_Elem* _Ptr, streamsize _Count) override {
536
+ // get _Count characters from stream
537
+ if constexpr (sizeof(_Elem) == 1) {
538
+ if (_Count <= 0) {
539
+ return 0;
540
+ }
541
+
542
+ if (_Pcvt) { // if we need a nontrivial codecvt transform, do the default expensive thing
543
+ return _Mysb::xsgetn(_Ptr, _Count);
544
+ }
545
+
546
+ // assuming this is OK because _Ptr + _Count must be valid
547
+ auto _Count_s = static_cast<size_t>(_Count);
548
+ const auto _Start_count = _Count;
549
+ const auto _Available = static_cast<size_t>(_Mysb::_Gnavail());
550
+ if (0 < _Available) { // copy from get area
551
+ const auto _Read_size = (_STD min)(_Count_s, _Available);
552
+ _Traits::copy(_Ptr, _Mysb::gptr(), _Read_size);
553
+ _Ptr += _Read_size;
554
+ _Count_s -= _Read_size;
555
+ _Mysb::gbump(static_cast<int>(_Read_size));
556
+ }
557
+
558
+ if (_Myfile) { // open C stream, attempt read
559
+ _Reset_back(); // revert from _Mychar buffer
560
+ // process in 4k - 1 chunks to avoid tripping over fread's clobber-the-end behavior when
561
+ // doing \r\n -> \n translation
562
+ constexpr size_t _Read_size = 4095; // _INTERNAL_BUFSIZ - 1
563
+ while (_Read_size < _Count_s) {
564
+ const auto _Actual_read = _CSTD fread(_Ptr, sizeof(_Elem), _Read_size, _Myfile);
565
+ _Ptr += _Actual_read;
566
+ _Count_s -= _Actual_read;
567
+ if (_Actual_read != _Read_size) {
568
+ return static_cast<streamsize>(_Start_count - _Count_s);
569
+ }
570
+ }
571
+
572
+ if (0 < _Count_s) {
573
+ _Count_s -= _CSTD fread(_Ptr, sizeof(_Elem), _Count_s, _Myfile);
574
+ }
575
+ }
576
+
577
+ return static_cast<streamsize>(_Start_count - _Count_s);
578
+ } else { // non-chars always get element-by-element processing
579
+ return _Mysb::xsgetn(_Ptr, _Count);
580
+ }
581
+ }
582
+
583
+ streamsize __CLR_OR_THIS_CALL xsputn(const _Elem* _Ptr, streamsize _Count) override {
584
+ // put _Count characters to stream
585
+ if constexpr (sizeof(_Elem) == 1) {
586
+ if (_Pcvt) { // if we need a nontrivial codecvt transform, do the default expensive thing
587
+ return _Mysb::xsputn(_Ptr, _Count);
588
+ }
589
+
590
+ const streamsize _Start_count = _Count;
591
+ streamsize _Size = _Mysb::_Pnavail();
592
+ if (0 < _Count && 0 < _Size) { // copy to write buffer
593
+ if (_Count < _Size) {
594
+ _Size = _Count;
595
+ }
596
+
597
+ _Traits::copy(_Mysb::pptr(), _Ptr, static_cast<size_t>(_Size));
598
+ _Ptr += _Size;
599
+ _Count -= _Size;
600
+ _Mysb::pbump(static_cast<int>(_Size));
601
+ }
602
+
603
+ if (0 < _Count && _Myfile) { // open C stream, attempt write
604
+ _Count -= _CSTD fwrite(_Ptr, sizeof(_Elem), static_cast<size_t>(_Count), _Myfile);
605
+ }
606
+
607
+ return _Start_count - _Count;
608
+ } else { // non-chars always get element-by-element processing
609
+ return _Mysb::xsputn(_Ptr, _Count);
610
+ }
611
+ }
612
+
613
+ pos_type __CLR_OR_THIS_CALL seekoff(off_type _Off, ios_base::seekdir _Way,
614
+ ios_base::openmode = ios_base::in | ios_base::out) override { // change position by _Off
615
+ fpos_t _Fileposition;
616
+
617
+ if (_Mysb::gptr() == &_Mychar // something putback
618
+ && _Way == ios_base::cur // a relative seek
619
+ && !_Pcvt) { // not converting
620
+ _Off -= static_cast<off_type>(sizeof(_Elem)); // back up over _Elem bytes
621
+ }
622
+
623
+ if (!_Myfile || !_Endwrite()
624
+ || ((_Off != 0 || _Way != ios_base::cur) && _CSTD _fseeki64(_Myfile, _Off, _Way) != 0)
625
+ || _CSTD fgetpos(_Myfile, &_Fileposition) != 0) {
626
+ return pos_type{off_type{-1}}; // report failure
627
+ }
628
+
629
+ _Reset_back(); // revert from _Mychar buffer, discarding any putback
630
+ return pos_type{_State, _Fileposition}; // return new position
631
+ }
632
+
633
+ pos_type __CLR_OR_THIS_CALL seekpos(pos_type _Pos, ios_base::openmode = ios_base::in | ios_base::out) override {
634
+ // change position to _Pos
635
+ off_type _Off = static_cast<off_type>(_Pos);
636
+
637
+ if (!_Myfile || !_Endwrite() || _CSTD fsetpos(_Myfile, &_Off) != 0) {
638
+ return pos_type{off_type{-1}}; // report failure
639
+ }
640
+
641
+ _State = _Pos.state();
642
+ _Reset_back(); // revert from _Mychar buffer, discarding any putback
643
+ return pos_type{_State, _Off}; // return new position
644
+ }
645
+
646
+ _Mysb* __CLR_OR_THIS_CALL setbuf(_Elem* _Buffer, streamsize _Count) override { // offer _Buffer to C stream
647
+ int _Mode;
648
+ if (!_Buffer && _Count == 0) {
649
+ _Mode = _IONBF;
650
+ } else {
651
+ _Mode = _IOFBF;
652
+ }
653
+
654
+ const size_t _Size = static_cast<size_t>(_Count) * sizeof(_Elem);
655
+
656
+ if (!_Myfile || _CSTD setvbuf(_Myfile, reinterpret_cast<char*>(_Buffer), _Mode, _Size) != 0) {
657
+ return nullptr; // failed
658
+ }
659
+
660
+ // new buffer, reinitialize pointers
661
+ _Init(_Myfile, _Openfl);
662
+ return this;
663
+ }
664
+
665
+ int __CLR_OR_THIS_CALL sync() override { // synchronize C stream with external file
666
+ if (!_Myfile || _Traits::eq_int_type(_Traits::eof(), overflow()) || 0 <= _CSTD fflush(_Myfile)) {
667
+ return 0;
668
+ }
669
+
670
+ return -1;
671
+ }
672
+
673
+ void __CLR_OR_THIS_CALL imbue(const locale& _Loc) override {
674
+ // set locale to argument (capture nontrivial codecvt facet)
675
+ _Initcvt(_STD use_facet<_Cvt>(_Loc));
676
+ }
677
+
678
+ void _Init(FILE* _File, _Initfl _Which) noexcept { // initialize to C stream _File after {new, open, close}
679
+ using _State_type = typename _Traits::state_type;
680
+
681
+ __PURE_APPDOMAIN_GLOBAL static _State_type _Stinit; // initial state
682
+
683
+ _Closef = _Which == _Openfl;
684
+ _Wrotesome = false;
685
+
686
+ _Mysb::_Init(); // initialize stream buffer base object
687
+
688
+ if (_File && sizeof(_Elem) == 1) { // point inside C stream with [first, first + count) buffer
689
+ _Elem** _Pb = nullptr;
690
+ _Elem** _Pn = nullptr;
691
+ int* _Nr = nullptr;
692
+
693
+ ::_get_stream_buffer_pointers(
694
+ _File, reinterpret_cast<char***>(&_Pb), reinterpret_cast<char***>(&_Pn), &_Nr);
695
+ int* _Nw = _Nr;
696
+
697
+ _Mysb::_Init(_Pb, _Pn, _Nr, _Pb, _Pn, _Nw);
698
+ }
699
+
700
+ _Myfile = _File;
701
+ _State = _Stinit;
702
+ _Pcvt = nullptr; // pointer to codecvt facet
703
+ }
704
+
705
+ bool _Endwrite() { // put shift to initial conversion state, as needed
706
+ if (!_Pcvt || !_Wrotesome) {
707
+ return true;
708
+ }
709
+
710
+ // may have to put
711
+ if (_Traits::eq_int_type(_Traits::eof(), overflow())) {
712
+ return false;
713
+ }
714
+
715
+ constexpr size_t _Codecvt_temp_buf = 32;
716
+ char _Str[_Codecvt_temp_buf];
717
+ char* _Dest;
718
+ switch (_Pcvt->unshift(_State, _Str, _Str + _Codecvt_temp_buf, _Dest)) { // test result of homing conversion
719
+ case codecvt_base::ok:
720
+ _Wrotesome = false; // homed successfully
721
+ _FALLTHROUGH;
722
+
723
+ case codecvt_base::partial:
724
+ { // put any generated bytes
725
+ const auto _Count = static_cast<size_t>(_Dest - _Str);
726
+ if (0 < _Count && _Count != static_cast<size_t>(_CSTD fwrite(_Str, 1, _Count, _Myfile))) {
727
+ return false; // write failed
728
+ }
729
+
730
+ return !_Wrotesome;
731
+ }
732
+
733
+ case codecvt_base::noconv:
734
+ _Wrotesome = false; // homed successfully
735
+ return true; // nothing else to do
736
+
737
+ default:
738
+ return false; // conversion failed
739
+ }
740
+ }
741
+
742
+ void _Initcvt(const _Cvt& _Newcvt) noexcept { // initialize codecvt pointer
743
+ if (_Newcvt.always_noconv()) {
744
+ _Pcvt = nullptr; // nothing to do
745
+ } else { // set up for nontrivial codecvt facet
746
+ _Pcvt = _STD addressof(_Newcvt);
747
+ _Mysb::_Init(); // reset any buffering
748
+ }
749
+ }
750
+
751
+ private:
752
+ const _Cvt* _Pcvt; // pointer to codecvt facet (may be null)
753
+ _Elem _Mychar; // putback character, when _Ungetc fails
754
+ bool _Wrotesome; // true if homing sequence may be needed
755
+ typename _Traits::state_type _State; // current conversion state
756
+ bool _Closef; // true if C stream must be closed
757
+ FILE* _Myfile; // pointer to C stream
758
+
759
+ void _Reset_back() noexcept { // restore buffer after putback
760
+ if (_Mysb::eback() == &_Mychar) {
761
+ _Mysb::setg(_Set_eback, _Set_eback, _Set_egptr);
762
+ }
763
+ }
764
+
765
+ void _Set_back() noexcept { // set up putback area
766
+ if (_Mysb::eback() != &_Mychar) { // save current get buffer
767
+ _Set_eback = _Mysb::eback();
768
+ _Set_egptr = _Mysb::egptr();
769
+ }
770
+ _Mysb::setg(&_Mychar, &_Mychar, &_Mychar + 1);
771
+ }
772
+
773
+ _Elem* _Set_eback; // saves eback() during one-element putback
774
+ _Elem* _Set_egptr; // saves egptr()
775
+ };
776
+
777
+ _EXPORT_STD template <class _Elem, class _Traits>
778
+ void swap(basic_filebuf<_Elem, _Traits>& _Left, basic_filebuf<_Elem, _Traits>& _Right) noexcept /* strengthened */ {
779
+ _Left.swap(_Right);
780
+ }
781
+
782
+ _STD_END
783
+
784
+ #pragma pop_macro("new")
785
+ _STL_RESTORE_CLANG_WARNINGS
786
+ #pragma warning(pop)
787
+ #pragma pack(pop)
788
+
789
+ #endif // _STL_COMPILER_PREPROCESSOR
790
+ #endif // __MSVC_FILEBUF_HPP
miniMSVC/VC/Tools/MSVC/14.42.34433/include/__msvc_format_ucd_tables.hpp ADDED
@@ -0,0 +1,551 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // __msvc_format_ucd_tables.hpp internal header
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ // WARNING, this entire header is generated by
7
+ // tools/unicode_properties_parse/unicode_properties_data_gen.py
8
+ // DO NOT MODIFY!
9
+
10
+ // UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE
11
+ //
12
+ // See Terms of Use <https://www.unicode.org/copyright.html>
13
+ // for definitions of Unicode Inc.'s Data Files and Software.
14
+ //
15
+ // NOTICE TO USER: Carefully read the following legal agreement.
16
+ // BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S
17
+ // DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"),
18
+ // YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE
19
+ // TERMS AND CONDITIONS OF THIS AGREEMENT.
20
+ // IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE
21
+ // THE DATA FILES OR SOFTWARE.
22
+ //
23
+ // COPYRIGHT AND PERMISSION NOTICE
24
+ //
25
+ // Copyright (c) 1991-2022 Unicode, Inc. All rights reserved.
26
+ // Distributed under the Terms of Use in https://www.unicode.org/copyright.html.
27
+ //
28
+ // Permission is hereby granted, free of charge, to any person obtaining
29
+ // a copy of the Unicode data files and any associated documentation
30
+ // (the "Data Files") or Unicode software and any associated documentation
31
+ // (the "Software") to deal in the Data Files or Software
32
+ // without restriction, including without limitation the rights to use,
33
+ // copy, modify, merge, publish, distribute, and/or sell copies of
34
+ // the Data Files or Software, and to permit persons to whom the Data Files
35
+ // or Software are furnished to do so, provided that either
36
+ // (a) this copyright and permission notice appear with all copies
37
+ // of the Data Files or Software, or
38
+ // (b) this copyright and permission notice appear in associated
39
+ // Documentation.
40
+ //
41
+ // THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
42
+ // ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
43
+ // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
44
+ // NONINFRINGEMENT OF THIRD PARTY RIGHTS.
45
+ // IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
46
+ // NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
47
+ // DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
48
+ // DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
49
+ // TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
50
+ // PERFORMANCE OF THE DATA FILES OR SOFTWARE.
51
+ //
52
+ // Except as contained in this notice, the name of a copyright holder
53
+ // shall not be used in advertising or otherwise to promote the sale,
54
+ // use or other dealings in these Data Files or Software without prior
55
+ // written authorization of the copyright holder.
56
+
57
+ #ifndef __MSVC_FORMAT_UCD_TABLES_HPP
58
+ #define __MSVC_FORMAT_UCD_TABLES_HPP
59
+ #include <yvals_core.h>
60
+ #if _STL_COMPILER_PREPROCESSOR
61
+
62
+ #include <cstdint>
63
+ #include <xutility>
64
+
65
+ #pragma pack(push, _CRT_PACKING)
66
+ #pragma warning(push, _STL_WARNING_LEVEL)
67
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
68
+ _STL_DISABLE_CLANG_WARNINGS
69
+ #pragma push_macro("new")
70
+ #undef new
71
+
72
+ _STD_BEGIN
73
+
74
+ template <class _ValueEnum, size_t _NumRanges, bool _Is_binary_property>
75
+ struct _Unicode_property_data {
76
+ uint32_t _Lower_bounds[_NumRanges];
77
+ uint16_t _Props_and_size[_NumRanges];
78
+ _NODISCARD constexpr _ValueEnum _Get_property_for_codepoint(const uint32_t _Code_point) const noexcept {
79
+ ptrdiff_t _Upper_idx = _STD upper_bound(_Lower_bounds, _STD end(_Lower_bounds), _Code_point) - _Lower_bounds;
80
+ constexpr auto _No_value_constant = static_cast<_ValueEnum>(UINT8_MAX);
81
+ if (_Upper_idx == 0) {
82
+ return _No_value_constant;
83
+ }
84
+ --_Upper_idx;
85
+ const uint32_t _Lower_bound = _Lower_bounds[_Upper_idx];
86
+ const uint16_t _Data = _Props_and_size[_Upper_idx];
87
+ _STL_INTERNAL_CHECK(_Code_point >= _Lower_bound);
88
+ if constexpr (_Is_binary_property) {
89
+ if (_Code_point < _Lower_bound + _Data) {
90
+ return static_cast<_ValueEnum>(0);
91
+ }
92
+ } else {
93
+ const uint16_t _Size = static_cast<uint16_t>(_Data & 0x0FFF);
94
+ const _ValueEnum _Prop = static_cast<_ValueEnum>((_Data & 0xF000) >> 12);
95
+ if (_Code_point < _Lower_bound + _Size) {
96
+ return _Prop;
97
+ }
98
+ }
99
+ return _No_value_constant;
100
+ }
101
+ };
102
+
103
+ // The following static data tables are generated from the Unicode character database.
104
+ // _Grapheme_Break_property_data comes from ucd/auxiliary/GraphemeBreakProperty.txt.
105
+ //
106
+ // _Extended_Pictographic_property_data comes from ucd/emoji/emoji-data.txt.
107
+ //
108
+ // __printable_property_data comes from ucd/extracted/DerivedGeneralCategory.txt.
109
+ //
110
+ // _Grapheme_Extend_property_data comes from ucd/DerivedCoreProperties.txt.
111
+ //
112
+ // The enums containing the values for the properties are also generated, in order to ensure they match
113
+ // up correctly with how we're parsing them.
114
+ //
115
+ // All sets of data tables are generated by tools/unicode_properties_parse/unicode_properties_data_gen.py in the
116
+ // https://github.com/microsoft/stl repository.
117
+ //
118
+ // The data format is a set of arrays for each character property. The first is an array of uint32_t encoding
119
+ // the lower bound of each range of codepoints that has the given property.
120
+ // The second is an array of uint16_t.
121
+ // - For enumerated properties, this array encodes both the range size and property value as follows:
122
+ // 16 12 0
123
+ // +-----------------------------------------------------+
124
+ // | property_value | range_size |
125
+ // +-----------------------------------------------------+
126
+ // that is: the size is stored in the least significant 12 bits
127
+ // (leading to a max size of 4095), and the property value is stored in the most significant 4 bits,
128
+ // leading to a maximum of 16 property values.
129
+ // - For binary properties, this array simply stores the range size.
130
+ //
131
+ // Codepoint ranges may not overlap, and, within one property, a codepoint may only appear once. Furthermore the
132
+ // codepoint lower bounds appear in sorted (ascending) order.
133
+
134
+ // GraphemeBreakProperty-15.0.0.txt
135
+ // Date: 2022-04-27, 17:07:38 GMT
136
+ enum class _Grapheme_Break_property_values : uint8_t {
137
+ _CR_value,
138
+ _Control_value,
139
+ _Extend_value,
140
+ _L_value,
141
+ _LF_value,
142
+ _LV_value,
143
+ _LVT_value,
144
+ _Prepend_value,
145
+ _Regional_Indicator_value,
146
+ _SpacingMark_value,
147
+ _T_value,
148
+ _V_value,
149
+ _ZWJ_value,
150
+ _No_value = 255
151
+ };
152
+
153
+ // GraphemeBreakProperty-15.0.0.txt
154
+ // Date: 2022-04-27, 17:07:38 GMT
155
+ inline constexpr _Unicode_property_data<_Grapheme_Break_property_values, 1371, false> _Grapheme_Break_property_data{
156
+ {0x0, 0xa, 0xb, 0xd, 0xe, 0x7f, 0xad, 0x300, 0x483, 0x591, 0x5bf, 0x5c1, 0x5c4, 0x5c7, 0x600, 0x610, 0x61c, 0x64b,
157
+ 0x670, 0x6d6, 0x6dd, 0x6df, 0x6e7, 0x6ea, 0x70f, 0x711, 0x730, 0x7a6, 0x7eb, 0x7fd, 0x816, 0x81b, 0x825, 0x829,
158
+ 0x859, 0x890, 0x898, 0x8ca, 0x8e2, 0x8e3, 0x903, 0x93a, 0x93b, 0x93c, 0x93e, 0x941, 0x949, 0x94d, 0x94e, 0x951,
159
+ 0x962, 0x981, 0x982, 0x9bc, 0x9be, 0x9bf, 0x9c1, 0x9c7, 0x9cb, 0x9cd, 0x9d7, 0x9e2, 0x9fe, 0xa01, 0xa03, 0xa3c,
160
+ 0xa3e, 0xa41, 0xa47, 0xa4b, 0xa51, 0xa70, 0xa75, 0xa81, 0xa83, 0xabc, 0xabe, 0xac1, 0xac7, 0xac9, 0xacb, 0xacd,
161
+ 0xae2, 0xafa, 0xb01, 0xb02, 0xb3c, 0xb3e, 0xb40, 0xb41, 0xb47, 0xb4b, 0xb4d, 0xb55, 0xb62, 0xb82, 0xbbe, 0xbbf,
162
+ 0xbc0, 0xbc1, 0xbc6, 0xbca, 0xbcd, 0xbd7, 0xc00, 0xc01, 0xc04, 0xc3c, 0xc3e, 0xc41, 0xc46, 0xc4a, 0xc55, 0xc62,
163
+ 0xc81, 0xc82, 0xcbc, 0xcbe, 0xcbf, 0xcc0, 0xcc2, 0xcc3, 0xcc6, 0xcc7, 0xcca, 0xccc, 0xcd5, 0xce2, 0xcf3, 0xd00,
164
+ 0xd02, 0xd3b, 0xd3e, 0xd3f, 0xd41, 0xd46, 0xd4a, 0xd4d, 0xd4e, 0xd57, 0xd62, 0xd81, 0xd82, 0xdca, 0xdcf, 0xdd0,
165
+ 0xdd2, 0xdd6, 0xdd8, 0xddf, 0xdf2, 0xe31, 0xe33, 0xe34, 0xe47, 0xeb1, 0xeb3, 0xeb4, 0xec8, 0xf18, 0xf35, 0xf37,
166
+ 0xf39, 0xf3e, 0xf71, 0xf7f, 0xf80, 0xf86, 0xf8d, 0xf99, 0xfc6, 0x102d, 0x1031, 0x1032, 0x1039, 0x103b, 0x103d,
167
+ 0x1056, 0x1058, 0x105e, 0x1071, 0x1082, 0x1084, 0x1085, 0x108d, 0x109d, 0x1100, 0x1160, 0x11a8, 0x135d, 0x1712,
168
+ 0x1715, 0x1732, 0x1734, 0x1752, 0x1772, 0x17b4, 0x17b6, 0x17b7, 0x17be, 0x17c6, 0x17c7, 0x17c9, 0x17dd, 0x180b,
169
+ 0x180e, 0x180f, 0x1885, 0x18a9, 0x1920, 0x1923, 0x1927, 0x1929, 0x1930, 0x1932, 0x1933, 0x1939, 0x1a17, 0x1a19,
170
+ 0x1a1b, 0x1a55, 0x1a56, 0x1a57, 0x1a58, 0x1a60, 0x1a62, 0x1a65, 0x1a6d, 0x1a73, 0x1a7f, 0x1ab0, 0x1b00, 0x1b04,
171
+ 0x1b34, 0x1b3b, 0x1b3c, 0x1b3d, 0x1b42, 0x1b43, 0x1b6b, 0x1b80, 0x1b82, 0x1ba1, 0x1ba2, 0x1ba6, 0x1ba8, 0x1baa,
172
+ 0x1bab, 0x1be6, 0x1be7, 0x1be8, 0x1bea, 0x1bed, 0x1bee, 0x1bef, 0x1bf2, 0x1c24, 0x1c2c, 0x1c34, 0x1c36, 0x1cd0,
173
+ 0x1cd4, 0x1ce1, 0x1ce2, 0x1ced, 0x1cf4, 0x1cf7, 0x1cf8, 0x1dc0, 0x200b, 0x200c, 0x200d, 0x200e, 0x2028, 0x2060,
174
+ 0x20d0, 0x2cef, 0x2d7f, 0x2de0, 0x302a, 0x3099, 0xa66f, 0xa674, 0xa69e, 0xa6f0, 0xa802, 0xa806, 0xa80b, 0xa823,
175
+ 0xa825, 0xa827, 0xa82c, 0xa880, 0xa8b4, 0xa8c4, 0xa8e0, 0xa8ff, 0xa926, 0xa947, 0xa952, 0xa960, 0xa980, 0xa983,
176
+ 0xa9b3, 0xa9b4, 0xa9b6, 0xa9ba, 0xa9bc, 0xa9be, 0xa9e5, 0xaa29, 0xaa2f, 0xaa31, 0xaa33, 0xaa35, 0xaa43, 0xaa4c,
177
+ 0xaa4d, 0xaa7c, 0xaab0, 0xaab2, 0xaab7, 0xaabe, 0xaac1, 0xaaeb, 0xaaec, 0xaaee, 0xaaf5, 0xaaf6, 0xabe3, 0xabe5,
178
+ 0xabe6, 0xabe8, 0xabe9, 0xabec, 0xabed, 0xac00, 0xac01, 0xac1c, 0xac1d, 0xac38, 0xac39, 0xac54, 0xac55, 0xac70,
179
+ 0xac71, 0xac8c, 0xac8d, 0xaca8, 0xaca9, 0xacc4, 0xacc5, 0xace0, 0xace1, 0xacfc, 0xacfd, 0xad18, 0xad19, 0xad34,
180
+ 0xad35, 0xad50, 0xad51, 0xad6c, 0xad6d, 0xad88, 0xad89, 0xada4, 0xada5, 0xadc0, 0xadc1, 0xaddc, 0xaddd, 0xadf8,
181
+ 0xadf9, 0xae14, 0xae15, 0xae30, 0xae31, 0xae4c, 0xae4d, 0xae68, 0xae69, 0xae84, 0xae85, 0xaea0, 0xaea1, 0xaebc,
182
+ 0xaebd, 0xaed8, 0xaed9, 0xaef4, 0xaef5, 0xaf10, 0xaf11, 0xaf2c, 0xaf2d, 0xaf48, 0xaf49, 0xaf64, 0xaf65, 0xaf80,
183
+ 0xaf81, 0xaf9c, 0xaf9d, 0xafb8, 0xafb9, 0xafd4, 0xafd5, 0xaff0, 0xaff1, 0xb00c, 0xb00d, 0xb028, 0xb029, 0xb044,
184
+ 0xb045, 0xb060, 0xb061, 0xb07c, 0xb07d, 0xb098, 0xb099, 0xb0b4, 0xb0b5, 0xb0d0, 0xb0d1, 0xb0ec, 0xb0ed, 0xb108,
185
+ 0xb109, 0xb124, 0xb125, 0xb140, 0xb141, 0xb15c, 0xb15d, 0xb178, 0xb179, 0xb194, 0xb195, 0xb1b0, 0xb1b1, 0xb1cc,
186
+ 0xb1cd, 0xb1e8, 0xb1e9, 0xb204, 0xb205, 0xb220, 0xb221, 0xb23c, 0xb23d, 0xb258, 0xb259, 0xb274, 0xb275, 0xb290,
187
+ 0xb291, 0xb2ac, 0xb2ad, 0xb2c8, 0xb2c9, 0xb2e4, 0xb2e5, 0xb300, 0xb301, 0xb31c, 0xb31d, 0xb338, 0xb339, 0xb354,
188
+ 0xb355, 0xb370, 0xb371, 0xb38c, 0xb38d, 0xb3a8, 0xb3a9, 0xb3c4, 0xb3c5, 0xb3e0, 0xb3e1, 0xb3fc, 0xb3fd, 0xb418,
189
+ 0xb419, 0xb434, 0xb435, 0xb450, 0xb451, 0xb46c, 0xb46d, 0xb488, 0xb489, 0xb4a4, 0xb4a5, 0xb4c0, 0xb4c1, 0xb4dc,
190
+ 0xb4dd, 0xb4f8, 0xb4f9, 0xb514, 0xb515, 0xb530, 0xb531, 0xb54c, 0xb54d, 0xb568, 0xb569, 0xb584, 0xb585, 0xb5a0,
191
+ 0xb5a1, 0xb5bc, 0xb5bd, 0xb5d8, 0xb5d9, 0xb5f4, 0xb5f5, 0xb610, 0xb611, 0xb62c, 0xb62d, 0xb648, 0xb649, 0xb664,
192
+ 0xb665, 0xb680, 0xb681, 0xb69c, 0xb69d, 0xb6b8, 0xb6b9, 0xb6d4, 0xb6d5, 0xb6f0, 0xb6f1, 0xb70c, 0xb70d, 0xb728,
193
+ 0xb729, 0xb744, 0xb745, 0xb760, 0xb761, 0xb77c, 0xb77d, 0xb798, 0xb799, 0xb7b4, 0xb7b5, 0xb7d0, 0xb7d1, 0xb7ec,
194
+ 0xb7ed, 0xb808, 0xb809, 0xb824, 0xb825, 0xb840, 0xb841, 0xb85c, 0xb85d, 0xb878, 0xb879, 0xb894, 0xb895, 0xb8b0,
195
+ 0xb8b1, 0xb8cc, 0xb8cd, 0xb8e8, 0xb8e9, 0xb904, 0xb905, 0xb920, 0xb921, 0xb93c, 0xb93d, 0xb958, 0xb959, 0xb974,
196
+ 0xb975, 0xb990, 0xb991, 0xb9ac, 0xb9ad, 0xb9c8, 0xb9c9, 0xb9e4, 0xb9e5, 0xba00, 0xba01, 0xba1c, 0xba1d, 0xba38,
197
+ 0xba39, 0xba54, 0xba55, 0xba70, 0xba71, 0xba8c, 0xba8d, 0xbaa8, 0xbaa9, 0xbac4, 0xbac5, 0xbae0, 0xbae1, 0xbafc,
198
+ 0xbafd, 0xbb18, 0xbb19, 0xbb34, 0xbb35, 0xbb50, 0xbb51, 0xbb6c, 0xbb6d, 0xbb88, 0xbb89, 0xbba4, 0xbba5, 0xbbc0,
199
+ 0xbbc1, 0xbbdc, 0xbbdd, 0xbbf8, 0xbbf9, 0xbc14, 0xbc15, 0xbc30, 0xbc31, 0xbc4c, 0xbc4d, 0xbc68, 0xbc69, 0xbc84,
200
+ 0xbc85, 0xbca0, 0xbca1, 0xbcbc, 0xbcbd, 0xbcd8, 0xbcd9, 0xbcf4, 0xbcf5, 0xbd10, 0xbd11, 0xbd2c, 0xbd2d, 0xbd48,
201
+ 0xbd49, 0xbd64, 0xbd65, 0xbd80, 0xbd81, 0xbd9c, 0xbd9d, 0xbdb8, 0xbdb9, 0xbdd4, 0xbdd5, 0xbdf0, 0xbdf1, 0xbe0c,
202
+ 0xbe0d, 0xbe28, 0xbe29, 0xbe44, 0xbe45, 0xbe60, 0xbe61, 0xbe7c, 0xbe7d, 0xbe98, 0xbe99, 0xbeb4, 0xbeb5, 0xbed0,
203
+ 0xbed1, 0xbeec, 0xbeed, 0xbf08, 0xbf09, 0xbf24, 0xbf25, 0xbf40, 0xbf41, 0xbf5c, 0xbf5d, 0xbf78, 0xbf79, 0xbf94,
204
+ 0xbf95, 0xbfb0, 0xbfb1, 0xbfcc, 0xbfcd, 0xbfe8, 0xbfe9, 0xc004, 0xc005, 0xc020, 0xc021, 0xc03c, 0xc03d, 0xc058,
205
+ 0xc059, 0xc074, 0xc075, 0xc090, 0xc091, 0xc0ac, 0xc0ad, 0xc0c8, 0xc0c9, 0xc0e4, 0xc0e5, 0xc100, 0xc101, 0xc11c,
206
+ 0xc11d, 0xc138, 0xc139, 0xc154, 0xc155, 0xc170, 0xc171, 0xc18c, 0xc18d, 0xc1a8, 0xc1a9, 0xc1c4, 0xc1c5, 0xc1e0,
207
+ 0xc1e1, 0xc1fc, 0xc1fd, 0xc218, 0xc219, 0xc234, 0xc235, 0xc250, 0xc251, 0xc26c, 0xc26d, 0xc288, 0xc289, 0xc2a4,
208
+ 0xc2a5, 0xc2c0, 0xc2c1, 0xc2dc, 0xc2dd, 0xc2f8, 0xc2f9, 0xc314, 0xc315, 0xc330, 0xc331, 0xc34c, 0xc34d, 0xc368,
209
+ 0xc369, 0xc384, 0xc385, 0xc3a0, 0xc3a1, 0xc3bc, 0xc3bd, 0xc3d8, 0xc3d9, 0xc3f4, 0xc3f5, 0xc410, 0xc411, 0xc42c,
210
+ 0xc42d, 0xc448, 0xc449, 0xc464, 0xc465, 0xc480, 0xc481, 0xc49c, 0xc49d, 0xc4b8, 0xc4b9, 0xc4d4, 0xc4d5, 0xc4f0,
211
+ 0xc4f1, 0xc50c, 0xc50d, 0xc528, 0xc529, 0xc544, 0xc545, 0xc560, 0xc561, 0xc57c, 0xc57d, 0xc598, 0xc599, 0xc5b4,
212
+ 0xc5b5, 0xc5d0, 0xc5d1, 0xc5ec, 0xc5ed, 0xc608, 0xc609, 0xc624, 0xc625, 0xc640, 0xc641, 0xc65c, 0xc65d, 0xc678,
213
+ 0xc679, 0xc694, 0xc695, 0xc6b0, 0xc6b1, 0xc6cc, 0xc6cd, 0xc6e8, 0xc6e9, 0xc704, 0xc705, 0xc720, 0xc721, 0xc73c,
214
+ 0xc73d, 0xc758, 0xc759, 0xc774, 0xc775, 0xc790, 0xc791, 0xc7ac, 0xc7ad, 0xc7c8, 0xc7c9, 0xc7e4, 0xc7e5, 0xc800,
215
+ 0xc801, 0xc81c, 0xc81d, 0xc838, 0xc839, 0xc854, 0xc855, 0xc870, 0xc871, 0xc88c, 0xc88d, 0xc8a8, 0xc8a9, 0xc8c4,
216
+ 0xc8c5, 0xc8e0, 0xc8e1, 0xc8fc, 0xc8fd, 0xc918, 0xc919, 0xc934, 0xc935, 0xc950, 0xc951, 0xc96c, 0xc96d, 0xc988,
217
+ 0xc989, 0xc9a4, 0xc9a5, 0xc9c0, 0xc9c1, 0xc9dc, 0xc9dd, 0xc9f8, 0xc9f9, 0xca14, 0xca15, 0xca30, 0xca31, 0xca4c,
218
+ 0xca4d, 0xca68, 0xca69, 0xca84, 0xca85, 0xcaa0, 0xcaa1, 0xcabc, 0xcabd, 0xcad8, 0xcad9, 0xcaf4, 0xcaf5, 0xcb10,
219
+ 0xcb11, 0xcb2c, 0xcb2d, 0xcb48, 0xcb49, 0xcb64, 0xcb65, 0xcb80, 0xcb81, 0xcb9c, 0xcb9d, 0xcbb8, 0xcbb9, 0xcbd4,
220
+ 0xcbd5, 0xcbf0, 0xcbf1, 0xcc0c, 0xcc0d, 0xcc28, 0xcc29, 0xcc44, 0xcc45, 0xcc60, 0xcc61, 0xcc7c, 0xcc7d, 0xcc98,
221
+ 0xcc99, 0xccb4, 0xccb5, 0xccd0, 0xccd1, 0xccec, 0xcced, 0xcd08, 0xcd09, 0xcd24, 0xcd25, 0xcd40, 0xcd41, 0xcd5c,
222
+ 0xcd5d, 0xcd78, 0xcd79, 0xcd94, 0xcd95, 0xcdb0, 0xcdb1, 0xcdcc, 0xcdcd, 0xcde8, 0xcde9, 0xce04, 0xce05, 0xce20,
223
+ 0xce21, 0xce3c, 0xce3d, 0xce58, 0xce59, 0xce74, 0xce75, 0xce90, 0xce91, 0xceac, 0xcead, 0xcec8, 0xcec9, 0xcee4,
224
+ 0xcee5, 0xcf00, 0xcf01, 0xcf1c, 0xcf1d, 0xcf38, 0xcf39, 0xcf54, 0xcf55, 0xcf70, 0xcf71, 0xcf8c, 0xcf8d, 0xcfa8,
225
+ 0xcfa9, 0xcfc4, 0xcfc5, 0xcfe0, 0xcfe1, 0xcffc, 0xcffd, 0xd018, 0xd019, 0xd034, 0xd035, 0xd050, 0xd051, 0xd06c,
226
+ 0xd06d, 0xd088, 0xd089, 0xd0a4, 0xd0a5, 0xd0c0, 0xd0c1, 0xd0dc, 0xd0dd, 0xd0f8, 0xd0f9, 0xd114, 0xd115, 0xd130,
227
+ 0xd131, 0xd14c, 0xd14d, 0xd168, 0xd169, 0xd184, 0xd185, 0xd1a0, 0xd1a1, 0xd1bc, 0xd1bd, 0xd1d8, 0xd1d9, 0xd1f4,
228
+ 0xd1f5, 0xd210, 0xd211, 0xd22c, 0xd22d, 0xd248, 0xd249, 0xd264, 0xd265, 0xd280, 0xd281, 0xd29c, 0xd29d, 0xd2b8,
229
+ 0xd2b9, 0xd2d4, 0xd2d5, 0xd2f0, 0xd2f1, 0xd30c, 0xd30d, 0xd328, 0xd329, 0xd344, 0xd345, 0xd360, 0xd361, 0xd37c,
230
+ 0xd37d, 0xd398, 0xd399, 0xd3b4, 0xd3b5, 0xd3d0, 0xd3d1, 0xd3ec, 0xd3ed, 0xd408, 0xd409, 0xd424, 0xd425, 0xd440,
231
+ 0xd441, 0xd45c, 0xd45d, 0xd478, 0xd479, 0xd494, 0xd495, 0xd4b0, 0xd4b1, 0xd4cc, 0xd4cd, 0xd4e8, 0xd4e9, 0xd504,
232
+ 0xd505, 0xd520, 0xd521, 0xd53c, 0xd53d, 0xd558, 0xd559, 0xd574, 0xd575, 0xd590, 0xd591, 0xd5ac, 0xd5ad, 0xd5c8,
233
+ 0xd5c9, 0xd5e4, 0xd5e5, 0xd600, 0xd601, 0xd61c, 0xd61d, 0xd638, 0xd639, 0xd654, 0xd655, 0xd670, 0xd671, 0xd68c,
234
+ 0xd68d, 0xd6a8, 0xd6a9, 0xd6c4, 0xd6c5, 0xd6e0, 0xd6e1, 0xd6fc, 0xd6fd, 0xd718, 0xd719, 0xd734, 0xd735, 0xd750,
235
+ 0xd751, 0xd76c, 0xd76d, 0xd788, 0xd789, 0xd7b0, 0xd7cb, 0xfb1e, 0xfe00, 0xfe20, 0xfeff, 0xff9e, 0xfff0, 0x101fd,
236
+ 0x102e0, 0x10376, 0x10a01, 0x10a05, 0x10a0c, 0x10a38, 0x10a3f, 0x10ae5, 0x10d24, 0x10eab, 0x10efd, 0x10f46,
237
+ 0x10f82, 0x11000, 0x11001, 0x11002, 0x11038, 0x11070, 0x11073, 0x1107f, 0x11082, 0x110b0, 0x110b3, 0x110b7,
238
+ 0x110b9, 0x110bd, 0x110c2, 0x110cd, 0x11100, 0x11127, 0x1112c, 0x1112d, 0x11145, 0x11173, 0x11180, 0x11182,
239
+ 0x111b3, 0x111b6, 0x111bf, 0x111c2, 0x111c9, 0x111ce, 0x111cf, 0x1122c, 0x1122f, 0x11232, 0x11234, 0x11235,
240
+ 0x11236, 0x1123e, 0x11241, 0x112df, 0x112e0, 0x112e3, 0x11300, 0x11302, 0x1133b, 0x1133e, 0x1133f, 0x11340,
241
+ 0x11341, 0x11347, 0x1134b, 0x11357, 0x11362, 0x11366, 0x11370, 0x11435, 0x11438, 0x11440, 0x11442, 0x11445,
242
+ 0x11446, 0x1145e, 0x114b0, 0x114b1, 0x114b3, 0x114b9, 0x114ba, 0x114bb, 0x114bd, 0x114be, 0x114bf, 0x114c1,
243
+ 0x114c2, 0x115af, 0x115b0, 0x115b2, 0x115b8, 0x115bc, 0x115be, 0x115bf, 0x115dc, 0x11630, 0x11633, 0x1163b,
244
+ 0x1163d, 0x1163e, 0x1163f, 0x116ab, 0x116ac, 0x116ad, 0x116ae, 0x116b0, 0x116b6, 0x116b7, 0x1171d, 0x11722,
245
+ 0x11726, 0x11727, 0x1182c, 0x1182f, 0x11838, 0x11839, 0x11930, 0x11931, 0x11937, 0x1193b, 0x1193d, 0x1193e,
246
+ 0x1193f, 0x11940, 0x11941, 0x11942, 0x11943, 0x119d1, 0x119d4, 0x119da, 0x119dc, 0x119e0, 0x119e4, 0x11a01,
247
+ 0x11a33, 0x11a39, 0x11a3a, 0x11a3b, 0x11a47, 0x11a51, 0x11a57, 0x11a59, 0x11a84, 0x11a8a, 0x11a97, 0x11a98,
248
+ 0x11c2f, 0x11c30, 0x11c38, 0x11c3e, 0x11c3f, 0x11c92, 0x11ca9, 0x11caa, 0x11cb1, 0x11cb2, 0x11cb4, 0x11cb5,
249
+ 0x11d31, 0x11d3a, 0x11d3c, 0x11d3f, 0x11d46, 0x11d47, 0x11d8a, 0x11d90, 0x11d93, 0x11d95, 0x11d96, 0x11d97,
250
+ 0x11ef3, 0x11ef5, 0x11f00, 0x11f02, 0x11f03, 0x11f34, 0x11f36, 0x11f3e, 0x11f40, 0x11f41, 0x11f42, 0x13430,
251
+ 0x13440, 0x13447, 0x16af0, 0x16b30, 0x16f4f, 0x16f51, 0x16f8f, 0x16fe4, 0x16ff0, 0x1bc9d, 0x1bca0, 0x1cf00,
252
+ 0x1cf30, 0x1d165, 0x1d166, 0x1d167, 0x1d16d, 0x1d16e, 0x1d173, 0x1d17b, 0x1d185, 0x1d1aa, 0x1d242, 0x1da00,
253
+ 0x1da3b, 0x1da75, 0x1da84, 0x1da9b, 0x1daa1, 0x1e000, 0x1e008, 0x1e01b, 0x1e023, 0x1e026, 0x1e08f, 0x1e130,
254
+ 0x1e2ae, 0x1e2ec, 0x1e4ec, 0x1e8d0, 0x1e944, 0x1f1e6, 0x1f3fb, 0xe0000, 0xe0020, 0xe0080, 0xe0100, 0xe01f0},
255
+ {0x100a, 0x4001, 0x1002, 0x1, 0x1012, 0x1021, 0x1001, 0x2070, 0x2007, 0x202d, 0x2001, 0x2002, 0x2002, 0x2001,
256
+ 0x7006, 0x200b, 0x1001, 0x2015, 0x2001, 0x2007, 0x7001, 0x2006, 0x2002, 0x2004, 0x7001, 0x2001, 0x201b, 0x200b,
257
+ 0x2009, 0x2001, 0x2004, 0x2009, 0x2003, 0x2005, 0x2003, 0x7002, 0x2008, 0x2018, 0x7001, 0x2020, 0x9001, 0x2001,
258
+ 0x9001, 0x2001, 0x9003, 0x2008, 0x9004, 0x2001, 0x9002, 0x2007, 0x2002, 0x2001, 0x9002, 0x2001, 0x2001, 0x9002,
259
+ 0x2004, 0x9002, 0x9002, 0x2001, 0x2001, 0x2002, 0x2001, 0x2002, 0x9001, 0x2001, 0x9003, 0x2002, 0x2002, 0x2003,
260
+ 0x2001, 0x2002, 0x2001, 0x2002, 0x9001, 0x2001, 0x9003, 0x2005, 0x2002, 0x9001, 0x9002, 0x2001, 0x2002, 0x2006,
261
+ 0x2001, 0x9002, 0x2001, 0x2002, 0x9001, 0x2004, 0x9002, 0x9002, 0x2001, 0x2003, 0x2002, 0x2001, 0x2001, 0x9001,
262
+ 0x2001, 0x9002, 0x9003, 0x9003, 0x2001, 0x2001, 0x2001, 0x9003, 0x2001, 0x2001, 0x2003, 0x9004, 0x2003, 0x2004,
263
+ 0x2002, 0x2002, 0x2001, 0x9002, 0x2001, 0x9001, 0x2001, 0x9002, 0x2001, 0x9002, 0x2001, 0x9002, 0x9002, 0x2002,
264
+ 0x2002, 0x2002, 0x9001, 0x2002, 0x9002, 0x2002, 0x2001, 0x9002, 0x2004, 0x9003, 0x9003, 0x2001, 0x7001, 0x2001,
265
+ 0x2002, 0x2001, 0x9002, 0x2001, 0x2001, 0x9002, 0x2003, 0x2001, 0x9007, 0x2001, 0x9002, 0x2001, 0x9001, 0x2007,
266
+ 0x2008, 0x2001, 0x9001, 0x2009, 0x2007, 0x2002, 0x2001, 0x2001, 0x2001, 0x9002, 0x200e, 0x9001, 0x2005, 0x2002,
267
+ 0x200b, 0x2024, 0x2001, 0x2004, 0x9001, 0x2006, 0x2002, 0x9002, 0x2002, 0x9002, 0x2002, 0x2003, 0x2004, 0x2001,
268
+ 0x9001, 0x2002, 0x2001, 0x2001, 0x3060, 0xb048, 0xa058, 0x2003, 0x2003, 0x9001, 0x2002, 0x9001, 0x2002, 0x2002,
269
+ 0x2002, 0x9001, 0x2007, 0x9008, 0x2001, 0x9002, 0x200b, 0x2001, 0x2003, 0x1001, 0x2001, 0x2002, 0x2001, 0x2003,
270
+ 0x9004, 0x2002, 0x9003, 0x9002, 0x2001, 0x9006, 0x2003, 0x2002, 0x9002, 0x2001, 0x9001, 0x2001, 0x9001, 0x2007,
271
+ 0x2001, 0x2001, 0x2008, 0x9006, 0x200a, 0x2001, 0x201f, 0x2004, 0x9001, 0x2007, 0x9001, 0x2001, 0x9005, 0x2001,
272
+ 0x9002, 0x2009, 0x2002, 0x9001, 0x9001, 0x2004, 0x9002, 0x2002, 0x9001, 0x2003, 0x2001, 0x9001, 0x2002, 0x9003,
273
+ 0x2001, 0x9001, 0x2003, 0x9002, 0x9008, 0x2008, 0x9002, 0x2002, 0x2003, 0x200d, 0x9001, 0x2007, 0x2001, 0x2001,
274
+ 0x9001, 0x2002, 0x2040, 0x1001, 0x2001, 0xc001, 0x1002, 0x1007, 0x1010, 0x2021, 0x2003, 0x2001, 0x2020, 0x2006,
275
+ 0x2002, 0x2004, 0x200a, 0x2002, 0x2002, 0x2001, 0x2001, 0x2001, 0x9002, 0x2002, 0x9001, 0x2001, 0x9002, 0x9010,
276
+ 0x2002, 0x2012, 0x2001, 0x2008, 0x200b, 0x9002, 0x301d, 0x2003, 0x9001, 0x2001, 0x9002, 0x2004, 0x9002, 0x2002,
277
+ 0x9003, 0x2001, 0x2006, 0x9002, 0x2002, 0x9002, 0x2002, 0x2001, 0x2001, 0x9001, 0x2001, 0x2001, 0x2003, 0x2002,
278
+ 0x2002, 0x2001, 0x9001, 0x2002, 0x9002, 0x9001, 0x2001, 0x9002, 0x2001, 0x9002, 0x2001, 0x9002, 0x9001, 0x2001,
279
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
280
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
281
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
282
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
283
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
284
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
285
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
286
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
287
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
288
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
289
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
290
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
291
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
292
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
293
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
294
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
295
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
296
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
297
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
298
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
299
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
300
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
301
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
302
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
303
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
304
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
305
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
306
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
307
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
308
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
309
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
310
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
311
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
312
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
313
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
314
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
315
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
316
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
317
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
318
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
319
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
320
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
321
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
322
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
323
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
324
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
325
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
326
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
327
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
328
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
329
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
330
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
331
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
332
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
333
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
334
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
335
+ 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b, 0x5001, 0x601b,
336
+ 0xb017, 0xa031, 0x2001, 0x2010, 0x2010, 0x1001, 0x2002, 0x100c, 0x2001, 0x2001, 0x2005, 0x2003, 0x2002, 0x2004,
337
+ 0x2003, 0x2001, 0x2002, 0x2004, 0x2002, 0x2003, 0x200b, 0x2004, 0x9001, 0x2001, 0x9001, 0x200f, 0x2001, 0x2002,
338
+ 0x2003, 0x9001, 0x9003, 0x2004, 0x9002, 0x2002, 0x7001, 0x2001, 0x7001, 0x2003, 0x2005, 0x9001, 0x2008, 0x9002,
339
+ 0x2001, 0x2002, 0x9001, 0x9003, 0x2009, 0x9002, 0x7002, 0x2004, 0x9001, 0x2001, 0x9003, 0x2003, 0x9002, 0x2001,
340
+ 0x9001, 0x2002, 0x2001, 0x2001, 0x2001, 0x9003, 0x2008, 0x2002, 0x9002, 0x2002, 0x2001, 0x9001, 0x2001, 0x9004,
341
+ 0x9002, 0x9003, 0x2001, 0x9002, 0x2007, 0x2005, 0x9003, 0x2008, 0x9002, 0x2003, 0x9001, 0x2001, 0x2001, 0x2001,
342
+ 0x9002, 0x2006, 0x9001, 0x2001, 0x9002, 0x2001, 0x9001, 0x2002, 0x9001, 0x2002, 0x2001, 0x9002, 0x2004, 0x9004,
343
+ 0x2002, 0x9001, 0x2002, 0x2002, 0x9003, 0x2008, 0x9002, 0x2001, 0x9001, 0x2002, 0x2001, 0x9001, 0x2001, 0x9002,
344
+ 0x2006, 0x9001, 0x2001, 0x2003, 0x2004, 0x9001, 0x2005, 0x9003, 0x2009, 0x9001, 0x2002, 0x2001, 0x9005, 0x9002,
345
+ 0x2002, 0x9001, 0x2001, 0x7001, 0x9001, 0x7001, 0x9001, 0x2001, 0x9003, 0x2004, 0x2002, 0x9004, 0x2001, 0x9001,
346
+ 0x200a, 0x2006, 0x9001, 0x7001, 0x2004, 0x2001, 0x2006, 0x9002, 0x2003, 0x7006, 0x200d, 0x9001, 0x2002, 0x9001,
347
+ 0x2007, 0x2006, 0x9001, 0x2001, 0x2016, 0x9001, 0x2007, 0x9001, 0x2002, 0x9001, 0x2002, 0x2006, 0x2001, 0x2002,
348
+ 0x2007, 0x7001, 0x2001, 0x9005, 0x2002, 0x9002, 0x2001, 0x9001, 0x2001, 0x2002, 0x9002, 0x2002, 0x7001, 0x9001,
349
+ 0x9002, 0x2005, 0x9002, 0x2001, 0x9001, 0x2001, 0x1010, 0x2001, 0x200f, 0x2005, 0x2007, 0x2001, 0x9037, 0x2004,
350
+ 0x2001, 0x9002, 0x2002, 0x1004, 0x202e, 0x2017, 0x2001, 0x9001, 0x2003, 0x9001, 0x2005, 0x1008, 0x2008, 0x2007,
351
+ 0x2004, 0x2003, 0x2037, 0x2032, 0x2001, 0x2001, 0x2005, 0x200f, 0x2007, 0x2011, 0x2007, 0x2002, 0x2005, 0x2001,
352
+ 0x2007, 0x2001, 0x2004, 0x2004, 0x2007, 0x2007, 0x801a, 0x2005, 0x1020, 0x2060, 0x1080, 0x20f0, 0x1e10}};
353
+
354
+ // emoji-data.txt
355
+ // Date: 2022-08-02, 00:26:10 GMT
356
+ enum class _Extended_Pictographic_property_values : uint8_t { _Extended_Pictographic_value, _No_value = 255 };
357
+
358
+ // emoji-data.txt
359
+ // Date: 2022-08-02, 00:26:10 GMT
360
+ inline constexpr _Unicode_property_data<_Extended_Pictographic_property_values, 78, true>
361
+ _Extended_Pictographic_property_data{
362
+ {0xa9, 0xae, 0x203c, 0x2049, 0x2122, 0x2139, 0x2194, 0x21a9, 0x231a, 0x2328, 0x2388, 0x23cf, 0x23e9, 0x23f8,
363
+ 0x24c2, 0x25aa, 0x25b6, 0x25c0, 0x25fb, 0x2600, 0x2607, 0x2614, 0x2690, 0x2708, 0x2714, 0x2716, 0x271d,
364
+ 0x2721, 0x2728, 0x2733, 0x2744, 0x2747, 0x274c, 0x274e, 0x2753, 0x2757, 0x2763, 0x2795, 0x27a1, 0x27b0,
365
+ 0x27bf, 0x2934, 0x2b05, 0x2b1b, 0x2b50, 0x2b55, 0x3030, 0x303d, 0x3297, 0x3299, 0x1f000, 0x1f10d, 0x1f12f,
366
+ 0x1f16c, 0x1f17e, 0x1f18e, 0x1f191, 0x1f1ad, 0x1f201, 0x1f21a, 0x1f22f, 0x1f232, 0x1f23c, 0x1f249, 0x1f400,
367
+ 0x1f546, 0x1f680, 0x1f774, 0x1f7d5, 0x1f80c, 0x1f848, 0x1f85a, 0x1f888, 0x1f8ae, 0x1f90c, 0x1f93c, 0x1f947,
368
+ 0x1fc00},
369
+ {0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x6, 0x2, 0x2, 0x1, 0x1, 0x1, 0xb, 0x3, 0x1, 0x2, 0x1, 0x1, 0x4, 0x6, 0xc, 0x72,
370
+ 0x76, 0xb, 0x1, 0x1, 0x1, 0x1, 0x1, 0x2, 0x1, 0x1, 0x1, 0x1, 0x3, 0x1, 0x5, 0x3, 0x1, 0x1, 0x1, 0x2, 0x3,
371
+ 0x2, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x100, 0x3, 0x1, 0x6, 0x2, 0x1, 0xa, 0x39, 0xf, 0x1, 0x1, 0x9, 0x4,
372
+ 0x1b2, 0x13e, 0x10a, 0x80, 0xc, 0x2b, 0x4, 0x8, 0x6, 0x8, 0x52, 0x2f, 0xa, 0x1b9, 0x3fe}};
373
+
374
+ // DerivedGeneralCategory-15.0.0.txt
375
+ // Date: 2022-04-26, 23:14:35 GMT
376
+ enum class __printable_property_values : uint8_t { _Yes_value, _No_value = 255 };
377
+
378
+ // DerivedGeneralCategory-15.0.0.txt
379
+ // Date: 2022-04-26, 23:14:35 GMT
380
+ inline constexpr _Unicode_property_data<__printable_property_values, 711, true> __printable_property_data{
381
+ {0x20, 0xa1, 0xae, 0x37a, 0x384, 0x38c, 0x38e, 0x3a3, 0x531, 0x559, 0x58d, 0x591, 0x5d0, 0x5ef, 0x606, 0x61d, 0x6de,
382
+ 0x710, 0x74d, 0x7c0, 0x7fd, 0x830, 0x840, 0x85e, 0x860, 0x870, 0x898, 0x8e3, 0x985, 0x98f, 0x993, 0x9aa, 0x9b2,
383
+ 0x9b6, 0x9bc, 0x9c7, 0x9cb, 0x9d7, 0x9dc, 0x9df, 0x9e6, 0xa01, 0xa05, 0xa0f, 0xa13, 0xa2a, 0xa32, 0xa35, 0xa38,
384
+ 0xa3c, 0xa3e, 0xa47, 0xa4b, 0xa51, 0xa59, 0xa5e, 0xa66, 0xa81, 0xa85, 0xa8f, 0xa93, 0xaaa, 0xab2, 0xab5, 0xabc,
385
+ 0xac7, 0xacb, 0xad0, 0xae0, 0xae6, 0xaf9, 0xb01, 0xb05, 0xb0f, 0xb13, 0xb2a, 0xb32, 0xb35, 0xb3c, 0xb47, 0xb4b,
386
+ 0xb55, 0xb5c, 0xb5f, 0xb66, 0xb82, 0xb85, 0xb8e, 0xb92, 0xb99, 0xb9c, 0xb9e, 0xba3, 0xba8, 0xbae, 0xbbe, 0xbc6,
387
+ 0xbca, 0xbd0, 0xbd7, 0xbe6, 0xc00, 0xc0e, 0xc12, 0xc2a, 0xc3c, 0xc46, 0xc4a, 0xc55, 0xc58, 0xc5d, 0xc60, 0xc66,
388
+ 0xc77, 0xc8e, 0xc92, 0xcaa, 0xcb5, 0xcbc, 0xcc6, 0xcca, 0xcd5, 0xcdd, 0xce0, 0xce6, 0xcf1, 0xd00, 0xd0e, 0xd12,
389
+ 0xd46, 0xd4a, 0xd54, 0xd66, 0xd81, 0xd85, 0xd9a, 0xdb3, 0xdbd, 0xdc0, 0xdca, 0xdcf, 0xdd6, 0xdd8, 0xde6, 0xdf2,
390
+ 0xe01, 0xe3f, 0xe81, 0xe84, 0xe86, 0xe8c, 0xea5, 0xea7, 0xec0, 0xec6, 0xec8, 0xed0, 0xedc, 0xf00, 0xf49, 0xf71,
391
+ 0xf99, 0xfbe, 0xfce, 0x1000, 0x10c7, 0x10cd, 0x10d0, 0x124a, 0x1250, 0x1258, 0x125a, 0x1260, 0x128a, 0x1290,
392
+ 0x12b2, 0x12b8, 0x12c0, 0x12c2, 0x12c8, 0x12d8, 0x1312, 0x1318, 0x135d, 0x1380, 0x13a0, 0x13f8, 0x1400, 0x1681,
393
+ 0x16a0, 0x1700, 0x171f, 0x1740, 0x1760, 0x176e, 0x1772, 0x1780, 0x17e0, 0x17f0, 0x1800, 0x180f, 0x1820, 0x1880,
394
+ 0x18b0, 0x1900, 0x1920, 0x1930, 0x1940, 0x1944, 0x1970, 0x1980, 0x19b0, 0x19d0, 0x19de, 0x1a1e, 0x1a60, 0x1a7f,
395
+ 0x1a90, 0x1aa0, 0x1ab0, 0x1b00, 0x1b50, 0x1b80, 0x1bfc, 0x1c3b, 0x1c4d, 0x1c90, 0x1cbd, 0x1cd0, 0x1d00, 0x1f18,
396
+ 0x1f20, 0x1f48, 0x1f50, 0x1f59, 0x1f5b, 0x1f5d, 0x1f5f, 0x1f80, 0x1fb6, 0x1fc6, 0x1fd6, 0x1fdd, 0x1ff2, 0x1ff6,
397
+ 0x2010, 0x2030, 0x2070, 0x2074, 0x2090, 0x20a0, 0x20d0, 0x2100, 0x2190, 0x2440, 0x2460, 0x2b76, 0x2b97, 0x2cf9,
398
+ 0x2d27, 0x2d2d, 0x2d30, 0x2d6f, 0x2d7f, 0x2da0, 0x2da8, 0x2db0, 0x2db8, 0x2dc0, 0x2dc8, 0x2dd0, 0x2dd8, 0x2de0,
399
+ 0x2e80, 0x2e9b, 0x2f00, 0x2ff0, 0x3001, 0x3041, 0x3099, 0x3105, 0x3131, 0x3190, 0x31f0, 0x3220, 0xa490, 0xa4d0,
400
+ 0xa640, 0xa700, 0xa7d0, 0xa7d3, 0xa7d5, 0xa7f2, 0xa830, 0xa840, 0xa880, 0xa8ce, 0xa8e0, 0xa95f, 0xa980, 0xa9cf,
401
+ 0xa9de, 0xaa00, 0xaa40, 0xaa50, 0xaa5c, 0xaadb, 0xab01, 0xab09, 0xab11, 0xab20, 0xab28, 0xab30, 0xab70, 0xabf0,
402
+ 0xac00, 0xd7b0, 0xd7cb, 0xf900, 0xfa70, 0xfb00, 0xfb13, 0xfb1d, 0xfb38, 0xfb3e, 0xfb40, 0xfb43, 0xfb46, 0xfbd3,
403
+ 0xfd92, 0xfdcf, 0xfdf0, 0xfe20, 0xfe54, 0xfe68, 0xfe70, 0xfe76, 0xff01, 0xffc2, 0xffca, 0xffd2, 0xffda, 0xffe0,
404
+ 0xffe8, 0xfffc, 0x10000, 0x1000d, 0x10028, 0x1003c, 0x1003f, 0x10050, 0x10080, 0x10100, 0x10107, 0x10137,
405
+ 0x10190, 0x101a0, 0x101d0, 0x10280, 0x102a0, 0x102e0, 0x10300, 0x1032d, 0x10350, 0x10380, 0x1039f, 0x103c8,
406
+ 0x10400, 0x104a0, 0x104b0, 0x104d8, 0x10500, 0x10530, 0x1056f, 0x1057c, 0x1058c, 0x10594, 0x10597, 0x105a3,
407
+ 0x105b3, 0x105bb, 0x10600, 0x10740, 0x10760, 0x10780, 0x10787, 0x107b2, 0x10800, 0x10808, 0x1080a, 0x10837,
408
+ 0x1083c, 0x1083f, 0x10857, 0x108a7, 0x108e0, 0x108f4, 0x108fb, 0x1091f, 0x1093f, 0x10980, 0x109bc, 0x109d2,
409
+ 0x10a05, 0x10a0c, 0x10a15, 0x10a19, 0x10a38, 0x10a3f, 0x10a50, 0x10a60, 0x10ac0, 0x10aeb, 0x10b00, 0x10b39,
410
+ 0x10b58, 0x10b78, 0x10b99, 0x10ba9, 0x10c00, 0x10c80, 0x10cc0, 0x10cfa, 0x10d30, 0x10e60, 0x10e80, 0x10eab,
411
+ 0x10eb0, 0x10efd, 0x10f30, 0x10f70, 0x10fb0, 0x10fe0, 0x11000, 0x11052, 0x1107f, 0x110be, 0x110d0, 0x110f0,
412
+ 0x11100, 0x11136, 0x11150, 0x11180, 0x111e1, 0x11200, 0x11213, 0x11280, 0x11288, 0x1128a, 0x1128f, 0x1129f,
413
+ 0x112b0, 0x112f0, 0x11300, 0x11305, 0x1130f, 0x11313, 0x1132a, 0x11332, 0x11335, 0x1133b, 0x11347, 0x1134b,
414
+ 0x11350, 0x11357, 0x1135d, 0x11366, 0x11370, 0x11400, 0x1145d, 0x11480, 0x114d0, 0x11580, 0x115b8, 0x11600,
415
+ 0x11650, 0x11660, 0x11680, 0x116c0, 0x11700, 0x1171d, 0x11730, 0x11800, 0x118a0, 0x118ff, 0x11909, 0x1190c,
416
+ 0x11915, 0x11918, 0x11937, 0x1193b, 0x11950, 0x119a0, 0x119aa, 0x119da, 0x11a00, 0x11a50, 0x11ab0, 0x11b00,
417
+ 0x11c00, 0x11c0a, 0x11c38, 0x11c50, 0x11c70, 0x11c92, 0x11ca9, 0x11d00, 0x11d08, 0x11d0b, 0x11d3a, 0x11d3c,
418
+ 0x11d3f, 0x11d50, 0x11d60, 0x11d67, 0x11d6a, 0x11d90, 0x11d93, 0x11da0, 0x11ee0, 0x11f00, 0x11f12, 0x11f3e,
419
+ 0x11fb0, 0x11fc0, 0x11fff, 0x12400, 0x12470, 0x12480, 0x12f90, 0x13000, 0x13440, 0x14400, 0x16800, 0x16a40,
420
+ 0x16a60, 0x16a6e, 0x16ac0, 0x16ad0, 0x16af0, 0x16b00, 0x16b50, 0x16b5b, 0x16b63, 0x16b7d, 0x16e40, 0x16f00,
421
+ 0x16f4f, 0x16f8f, 0x16fe0, 0x16ff0, 0x17000, 0x18800, 0x18d00, 0x1aff0, 0x1aff5, 0x1affd, 0x1b000, 0x1b132,
422
+ 0x1b150, 0x1b155, 0x1b164, 0x1b170, 0x1bc00, 0x1bc70, 0x1bc80, 0x1bc90, 0x1bc9c, 0x1cf00, 0x1cf30, 0x1cf50,
423
+ 0x1d000, 0x1d100, 0x1d129, 0x1d17b, 0x1d200, 0x1d2c0, 0x1d2e0, 0x1d300, 0x1d360, 0x1d400, 0x1d456, 0x1d49e,
424
+ 0x1d4a2, 0x1d4a5, 0x1d4a9, 0x1d4ae, 0x1d4bb, 0x1d4bd, 0x1d4c5, 0x1d507, 0x1d50d, 0x1d516, 0x1d51e, 0x1d53b,
425
+ 0x1d540, 0x1d546, 0x1d54a, 0x1d552, 0x1d6a8, 0x1d7ce, 0x1da9b, 0x1daa1, 0x1df00, 0x1df25, 0x1e000, 0x1e008,
426
+ 0x1e01b, 0x1e023, 0x1e026, 0x1e030, 0x1e08f, 0x1e100, 0x1e130, 0x1e140, 0x1e14e, 0x1e290, 0x1e2c0, 0x1e2ff,
427
+ 0x1e4d0, 0x1e7e0, 0x1e7e8, 0x1e7ed, 0x1e7f0, 0x1e800, 0x1e8c7, 0x1e900, 0x1e950, 0x1e95e, 0x1ec71, 0x1ed01,
428
+ 0x1ee00, 0x1ee05, 0x1ee21, 0x1ee24, 0x1ee27, 0x1ee29, 0x1ee34, 0x1ee39, 0x1ee3b, 0x1ee42, 0x1ee47, 0x1ee49,
429
+ 0x1ee4b, 0x1ee4d, 0x1ee51, 0x1ee54, 0x1ee57, 0x1ee59, 0x1ee5b, 0x1ee5d, 0x1ee5f, 0x1ee61, 0x1ee64, 0x1ee67,
430
+ 0x1ee6c, 0x1ee74, 0x1ee79, 0x1ee7e, 0x1ee80, 0x1ee8b, 0x1eea1, 0x1eea5, 0x1eeab, 0x1eef0, 0x1f000, 0x1f030,
431
+ 0x1f0a0, 0x1f0b1, 0x1f0c1, 0x1f0d1, 0x1f100, 0x1f1e6, 0x1f210, 0x1f240, 0x1f250, 0x1f260, 0x1f300, 0x1f6dc,
432
+ 0x1f6f0, 0x1f700, 0x1f77b, 0x1f7e0, 0x1f7f0, 0x1f800, 0x1f810, 0x1f850, 0x1f860, 0x1f890, 0x1f8b0, 0x1f900,
433
+ 0x1fa60, 0x1fa70, 0x1fa80, 0x1fa90, 0x1fabf, 0x1face, 0x1fae0, 0x1faf0, 0x1fb00, 0x1fb94, 0x1fbf0, 0x20000,
434
+ 0x2a700, 0x2b740, 0x2b820, 0x2ceb0, 0x2f800, 0x30000, 0x31350, 0xe0100},
435
+ {0x5f, 0xc, 0x2ca, 0x6, 0x7, 0x1, 0x14, 0x18d, 0x26, 0x32, 0x3, 0x37, 0x1b, 0x6, 0x16, 0xc0, 0x30, 0x3b, 0x65, 0x3b,
436
+ 0x31, 0xf, 0x1c, 0x1, 0xb, 0x1f, 0x4a, 0xa1, 0x8, 0x2, 0x16, 0x7, 0x1, 0x4, 0x9, 0x2, 0x4, 0x1, 0x2, 0x5, 0x19,
437
+ 0x3, 0x6, 0x2, 0x16, 0x7, 0x2, 0x2, 0x2, 0x1, 0x5, 0x2, 0x3, 0x1, 0x4, 0x1, 0x11, 0x3, 0x9, 0x3, 0x16, 0x7, 0x2,
438
+ 0x5, 0xa, 0x3, 0x3, 0x1, 0x4, 0xc, 0x7, 0x3, 0x8, 0x2, 0x16, 0x7, 0x2, 0x5, 0x9, 0x2, 0x3, 0x3, 0x2, 0x5, 0x12,
439
+ 0x2, 0x6, 0x3, 0x4, 0x2, 0x1, 0x2, 0x2, 0x3, 0xc, 0x5, 0x3, 0x4, 0x1, 0x1, 0x15, 0xd, 0x3, 0x17, 0x10, 0x9, 0x3,
440
+ 0x4, 0x2, 0x3, 0x1, 0x4, 0xa, 0x16, 0x3, 0x17, 0xa, 0x5, 0x9, 0x3, 0x4, 0x2, 0x2, 0x4, 0xa, 0x3, 0xd, 0x3, 0x33,
441
+ 0x3, 0x6, 0x10, 0x1a, 0x3, 0x12, 0x18, 0x9, 0x1, 0x7, 0x1, 0x6, 0x1, 0x8, 0xa, 0x3, 0x3a, 0x1d, 0x2, 0x1, 0x5,
442
+ 0x18, 0x1, 0x17, 0x5, 0x1, 0x7, 0xa, 0x4, 0x48, 0x24, 0x27, 0x24, 0xf, 0xd, 0xc6, 0x1, 0x1, 0x179, 0x4, 0x7,
443
+ 0x1, 0x4, 0x29, 0x4, 0x21, 0x4, 0x7, 0x1, 0x4, 0xf, 0x39, 0x4, 0x43, 0x20, 0x1a, 0x56, 0x6, 0x280, 0x1c, 0x59,
444
+ 0x16, 0x18, 0x14, 0xd, 0x3, 0x2, 0x5e, 0xa, 0xa, 0xe, 0xb, 0x59, 0x2b, 0x46, 0x1f, 0xc, 0xc, 0x1, 0x2a, 0x5,
445
+ 0x2c, 0x1a, 0xb, 0x3e, 0x41, 0x1d, 0xb, 0xa, 0xe, 0x1f, 0x4d, 0x2f, 0x74, 0x3c, 0xf, 0x3c, 0x2b, 0xb, 0x2b,
446
+ 0x216, 0x6, 0x26, 0x6, 0x8, 0x1, 0x1, 0x1, 0x1f, 0x35, 0xf, 0xe, 0x6, 0x13, 0x3, 0x9, 0x18, 0x2f, 0x2, 0x1b,
447
+ 0xd, 0x21, 0x21, 0x8c, 0x297, 0xb, 0x714, 0x20, 0x15d, 0x2d, 0x1, 0x1, 0x38, 0x2, 0x18, 0x7, 0x7, 0x7, 0x7, 0x7,
448
+ 0x7, 0x7, 0x7, 0x7e, 0x1a, 0x59, 0xd6, 0xc, 0x3f, 0x56, 0x67, 0x2b, 0x5e, 0x54, 0x2f, 0x726d, 0x37, 0x15c, 0xb8,
449
+ 0xcb, 0x2, 0x1, 0x5, 0x3b, 0xa, 0x38, 0x46, 0xc, 0x74, 0x1e, 0x4e, 0xb, 0x21, 0x37, 0xe, 0xa, 0x67, 0x1c, 0x6,
450
+ 0x6, 0x6, 0x7, 0x7, 0x3c, 0x7e, 0xa, 0x2ba4, 0x17, 0x31, 0x16e, 0x6a, 0x7, 0x5, 0x1a, 0x5, 0x1, 0x2, 0x2, 0x7d,
451
+ 0x1bd, 0x36, 0x1, 0x2a, 0x33, 0x13, 0x4, 0x5, 0x87, 0xbe, 0x6, 0x6, 0x6, 0x3, 0x7, 0x7, 0x2, 0xc, 0x1a, 0x13,
452
+ 0x2, 0xf, 0xe, 0x7b, 0x3, 0x2d, 0x58, 0xd, 0x1, 0x2e, 0x1d, 0x31, 0x1c, 0x24, 0x1e, 0x2b, 0x1e, 0x25, 0xe, 0x9e,
453
+ 0xa, 0x24, 0x24, 0x28, 0x34, 0xc, 0xf, 0x7, 0x2, 0xb, 0xf, 0x7, 0x2, 0x137, 0x16, 0x8, 0x6, 0x2a, 0x9, 0x6, 0x1,
454
+ 0x2c, 0x2, 0x1, 0x17, 0x48, 0x9, 0x13, 0x2, 0x21, 0x1b, 0x1, 0x38, 0x14, 0x32, 0x2, 0x8, 0x3, 0x1d, 0x3, 0xa,
455
+ 0x9, 0x40, 0x27, 0xc, 0x36, 0x1d, 0x1b, 0x1a, 0x4, 0x7, 0x49, 0x33, 0x33, 0x2e, 0xa, 0x1f, 0x2a, 0x3, 0x2, 0x2b,
456
+ 0x2a, 0x1a, 0x1c, 0x17, 0x4e, 0x24, 0x3e, 0x5, 0x19, 0xa, 0x35, 0x12, 0x27, 0x60, 0x14, 0x12, 0x2f, 0x7, 0x1,
457
+ 0x4, 0xf, 0xb, 0x3b, 0xa, 0x4, 0x8, 0x2, 0x16, 0x7, 0x2, 0x5, 0xa, 0x2, 0x3, 0x1, 0x1, 0x7, 0x7, 0x5, 0x5c, 0x5,
458
+ 0x48, 0xa, 0x36, 0x26, 0x45, 0xa, 0xd, 0x3a, 0xa, 0x1b, 0xf, 0x17, 0x3c, 0x53, 0x8, 0x1, 0x8, 0x2, 0x1e, 0x2,
459
+ 0xc, 0xa, 0x8, 0x2e, 0xb, 0x48, 0x53, 0x49, 0xa, 0x9, 0x2d, 0xe, 0x1d, 0x20, 0x16, 0xe, 0x7, 0x2, 0x2c, 0x1,
460
+ 0x2, 0x9, 0xa, 0x6, 0x2, 0x25, 0x2, 0x6, 0xa, 0x19, 0x11, 0x29, 0x1c, 0x1, 0x32, 0x39b, 0x6f, 0x5, 0xc4, 0x63,
461
+ 0x430, 0x16, 0x247, 0x239, 0x1f, 0xa, 0x51, 0xa, 0x1e, 0x6, 0x46, 0xa, 0x7, 0x15, 0x13, 0x5b, 0x4b, 0x39, 0x11,
462
+ 0x5, 0x2, 0x17f8, 0x4d6, 0x9, 0x4, 0x7, 0x2, 0x123, 0x1, 0x3, 0x1, 0x4, 0x18c, 0x6b, 0xd, 0x9, 0xa, 0x4, 0x2e,
463
+ 0x17, 0x74, 0xf6, 0x27, 0x4a, 0x70, 0x46, 0x14, 0x14, 0x57, 0x19, 0x55, 0x47, 0x2, 0x1, 0x2, 0x4, 0xc, 0x1, 0x7,
464
+ 0x41, 0x4, 0x8, 0x7, 0x1c, 0x4, 0x5, 0x1, 0x7, 0x154, 0x124, 0x2be, 0x5, 0xf, 0x1f, 0x6, 0x7, 0x11, 0x7, 0x2,
465
+ 0x5, 0x3e, 0x1, 0x2d, 0xe, 0xa, 0x2, 0x1f, 0x3a, 0x1, 0x2a, 0x7, 0x4, 0x2, 0xf, 0xc5, 0x10, 0x4c, 0xa, 0x2,
466
+ 0x44, 0x3d, 0x4, 0x1b, 0x2, 0x1, 0x1, 0xa, 0x4, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x3, 0x2, 0x1, 0x1, 0x1, 0x1, 0x1,
467
+ 0x1, 0x2, 0x1, 0x4, 0x7, 0x4, 0x4, 0x1, 0xa, 0x11, 0x3, 0x5, 0x11, 0x2, 0x2c, 0x64, 0xf, 0xf, 0xf, 0x25, 0xae,
468
+ 0x1d, 0x2c, 0x9, 0x2, 0x6, 0x3d8, 0x11, 0xd, 0x77, 0x5f, 0xc, 0x1, 0xc, 0x38, 0xa, 0x28, 0x1e, 0x2, 0x154, 0xe,
469
+ 0xd, 0x9, 0x2e, 0x7, 0xe, 0x9, 0x9, 0x93, 0x37, 0xa, 0xa6e0, 0x103a, 0xde, 0x1682, 0x1d31, 0x21e, 0x134b,
470
+ 0x1060, 0xf0}};
471
+
472
+ // DerivedCoreProperties-15.0.0.txt
473
+ // Date: 2022-08-05, 22:17:05 GMT
474
+ enum class _Grapheme_Extend_property_values : uint8_t { _Grapheme_Extend_value, _No_value = 255 };
475
+
476
+ // DerivedCoreProperties-15.0.0.txt
477
+ // Date: 2022-08-05, 22:17:05 GMT
478
+ inline constexpr _Unicode_property_data<_Grapheme_Extend_property_values, 363, true> _Grapheme_Extend_property_data{
479
+ {0x300, 0x483, 0x591, 0x5bf, 0x5c1, 0x5c4, 0x5c7, 0x610, 0x64b, 0x670, 0x6d6, 0x6df, 0x6e7, 0x6ea, 0x711, 0x730,
480
+ 0x7a6, 0x7eb, 0x7fd, 0x816, 0x81b, 0x825, 0x829, 0x859, 0x898, 0x8ca, 0x8e3, 0x93a, 0x93c, 0x941, 0x94d, 0x951,
481
+ 0x962, 0x981, 0x9bc, 0x9be, 0x9c1, 0x9cd, 0x9d7, 0x9e2, 0x9fe, 0xa01, 0xa3c, 0xa41, 0xa47, 0xa4b, 0xa51, 0xa70,
482
+ 0xa75, 0xa81, 0xabc, 0xac1, 0xac7, 0xacd, 0xae2, 0xafa, 0xb01, 0xb3c, 0xb3e, 0xb41, 0xb4d, 0xb55, 0xb62, 0xb82,
483
+ 0xbbe, 0xbc0, 0xbcd, 0xbd7, 0xc00, 0xc04, 0xc3c, 0xc3e, 0xc46, 0xc4a, 0xc55, 0xc62, 0xc81, 0xcbc, 0xcbf, 0xcc2,
484
+ 0xcc6, 0xccc, 0xcd5, 0xce2, 0xd00, 0xd3b, 0xd3e, 0xd41, 0xd4d, 0xd57, 0xd62, 0xd81, 0xdca, 0xdcf, 0xdd2, 0xdd6,
485
+ 0xddf, 0xe31, 0xe34, 0xe47, 0xeb1, 0xeb4, 0xec8, 0xf18, 0xf35, 0xf37, 0xf39, 0xf71, 0xf80, 0xf86, 0xf8d, 0xf99,
486
+ 0xfc6, 0x102d, 0x1032, 0x1039, 0x103d, 0x1058, 0x105e, 0x1071, 0x1082, 0x1085, 0x108d, 0x109d, 0x135d, 0x1712,
487
+ 0x1732, 0x1752, 0x1772, 0x17b4, 0x17b7, 0x17c6, 0x17c9, 0x17dd, 0x180b, 0x180f, 0x1885, 0x18a9, 0x1920, 0x1927,
488
+ 0x1932, 0x1939, 0x1a17, 0x1a1b, 0x1a56, 0x1a58, 0x1a60, 0x1a62, 0x1a65, 0x1a73, 0x1a7f, 0x1ab0, 0x1b00, 0x1b34,
489
+ 0x1b3c, 0x1b42, 0x1b6b, 0x1b80, 0x1ba2, 0x1ba8, 0x1bab, 0x1be6, 0x1be8, 0x1bed, 0x1bef, 0x1c2c, 0x1c36, 0x1cd0,
490
+ 0x1cd4, 0x1ce2, 0x1ced, 0x1cf4, 0x1cf8, 0x1dc0, 0x200c, 0x20d0, 0x2cef, 0x2d7f, 0x2de0, 0x302a, 0x3099, 0xa66f,
491
+ 0xa674, 0xa69e, 0xa6f0, 0xa802, 0xa806, 0xa80b, 0xa825, 0xa82c, 0xa8c4, 0xa8e0, 0xa8ff, 0xa926, 0xa947, 0xa980,
492
+ 0xa9b3, 0xa9b6, 0xa9bc, 0xa9e5, 0xaa29, 0xaa31, 0xaa35, 0xaa43, 0xaa4c, 0xaa7c, 0xaab0, 0xaab2, 0xaab7, 0xaabe,
493
+ 0xaac1, 0xaaec, 0xaaf6, 0xabe5, 0xabe8, 0xabed, 0xfb1e, 0xfe00, 0xfe20, 0xff9e, 0x101fd, 0x102e0, 0x10376,
494
+ 0x10a01, 0x10a05, 0x10a0c, 0x10a38, 0x10a3f, 0x10ae5, 0x10d24, 0x10eab, 0x10efd, 0x10f46, 0x10f82, 0x11001,
495
+ 0x11038, 0x11070, 0x11073, 0x1107f, 0x110b3, 0x110b9, 0x110c2, 0x11100, 0x11127, 0x1112d, 0x11173, 0x11180,
496
+ 0x111b6, 0x111c9, 0x111cf, 0x1122f, 0x11234, 0x11236, 0x1123e, 0x11241, 0x112df, 0x112e3, 0x11300, 0x1133b,
497
+ 0x1133e, 0x11340, 0x11357, 0x11366, 0x11370, 0x11438, 0x11442, 0x11446, 0x1145e, 0x114b0, 0x114b3, 0x114ba,
498
+ 0x114bd, 0x114bf, 0x114c2, 0x115af, 0x115b2, 0x115bc, 0x115bf, 0x115dc, 0x11633, 0x1163d, 0x1163f, 0x116ab,
499
+ 0x116ad, 0x116b0, 0x116b7, 0x1171d, 0x11722, 0x11727, 0x1182f, 0x11839, 0x11930, 0x1193b, 0x1193e, 0x11943,
500
+ 0x119d4, 0x119da, 0x119e0, 0x11a01, 0x11a33, 0x11a3b, 0x11a47, 0x11a51, 0x11a59, 0x11a8a, 0x11a98, 0x11c30,
501
+ 0x11c38, 0x11c3f, 0x11c92, 0x11caa, 0x11cb2, 0x11cb5, 0x11d31, 0x11d3a, 0x11d3c, 0x11d3f, 0x11d47, 0x11d90,
502
+ 0x11d95, 0x11d97, 0x11ef3, 0x11f00, 0x11f36, 0x11f40, 0x11f42, 0x13440, 0x13447, 0x16af0, 0x16b30, 0x16f4f,
503
+ 0x16f8f, 0x16fe4, 0x1bc9d, 0x1cf00, 0x1cf30, 0x1d165, 0x1d167, 0x1d16e, 0x1d17b, 0x1d185, 0x1d1aa, 0x1d242,
504
+ 0x1da00, 0x1da3b, 0x1da75, 0x1da84, 0x1da9b, 0x1daa1, 0x1e000, 0x1e008, 0x1e01b, 0x1e023, 0x1e026, 0x1e08f,
505
+ 0x1e130, 0x1e2ae, 0x1e2ec, 0x1e4ec, 0x1e8d0, 0x1e944, 0xe0020, 0xe0100},
506
+ {0x70, 0x7, 0x2d, 0x1, 0x2, 0x2, 0x1, 0xb, 0x15, 0x1, 0x7, 0x6, 0x2, 0x4, 0x1, 0x1b, 0xb, 0x9, 0x1, 0x4, 0x9, 0x3,
507
+ 0x5, 0x3, 0x8, 0x18, 0x20, 0x1, 0x1, 0x8, 0x1, 0x7, 0x2, 0x1, 0x1, 0x1, 0x4, 0x1, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2,
508
+ 0x2, 0x3, 0x1, 0x2, 0x1, 0x2, 0x1, 0x5, 0x2, 0x1, 0x2, 0x6, 0x1, 0x1, 0x2, 0x4, 0x1, 0x3, 0x2, 0x1, 0x1, 0x1,
509
+ 0x1, 0x1, 0x1, 0x1, 0x1, 0x3, 0x3, 0x4, 0x2, 0x2, 0x1, 0x1, 0x1, 0x1, 0x1, 0x2, 0x2, 0x2, 0x2, 0x2, 0x1, 0x4,
510
+ 0x1, 0x1, 0x2, 0x1, 0x1, 0x1, 0x3, 0x1, 0x1, 0x1, 0x7, 0x8, 0x1, 0x9, 0x7, 0x2, 0x1, 0x1, 0x1, 0xe, 0x5, 0x2,
511
+ 0xb, 0x24, 0x1, 0x4, 0x6, 0x2, 0x2, 0x2, 0x3, 0x4, 0x1, 0x2, 0x1, 0x1, 0x3, 0x3, 0x2, 0x2, 0x2, 0x2, 0x7, 0x1,
512
+ 0xb, 0x1, 0x3, 0x1, 0x2, 0x1, 0x3, 0x2, 0x1, 0x3, 0x2, 0x1, 0x1, 0x7, 0x1, 0x1, 0x8, 0xa, 0x1, 0x1f, 0x4, 0x7,
513
+ 0x1, 0x1, 0x9, 0x2, 0x4, 0x2, 0x3, 0x1, 0x2, 0x1, 0x3, 0x8, 0x2, 0x3, 0xd, 0x7, 0x1, 0x1, 0x2, 0x40, 0x1, 0x21,
514
+ 0x3, 0x1, 0x20, 0x6, 0x2, 0x4, 0xa, 0x2, 0x2, 0x1, 0x1, 0x1, 0x2, 0x1, 0x2, 0x12, 0x1, 0x8, 0xb, 0x3, 0x1, 0x4,
515
+ 0x2, 0x1, 0x6, 0x2, 0x2, 0x1, 0x1, 0x1, 0x1, 0x3, 0x2, 0x2, 0x1, 0x2, 0x1, 0x1, 0x1, 0x1, 0x1, 0x10, 0x10, 0x2,
516
+ 0x1, 0x1, 0x5, 0x3, 0x2, 0x4, 0x3, 0x1, 0x2, 0x4, 0x2, 0x3, 0xb, 0x4, 0x1, 0xf, 0x1, 0x2, 0x3, 0x4, 0x2, 0x1,
517
+ 0x3, 0x5, 0x8, 0x1, 0x2, 0x9, 0x4, 0x1, 0x3, 0x1, 0x2, 0x1, 0x1, 0x1, 0x8, 0x2, 0x2, 0x1, 0x1, 0x1, 0x7, 0x5,
518
+ 0x8, 0x3, 0x1, 0x1, 0x1, 0x6, 0x1, 0x1, 0x2, 0x2, 0x1, 0x4, 0x2, 0x2, 0x2, 0x8, 0x1, 0x2, 0x1, 0x1, 0x6, 0x1,
519
+ 0x3, 0x4, 0x5, 0x9, 0x2, 0x1, 0x2, 0x1, 0x1, 0x4, 0x2, 0x1, 0xa, 0x6, 0x4, 0x1, 0x6, 0x3, 0xd, 0x2, 0x7, 0x6,
520
+ 0x1, 0x16, 0x7, 0x2, 0x2, 0x6, 0x1, 0x2, 0x7, 0x1, 0x2, 0x1, 0x1, 0x2, 0x2, 0x5, 0x1, 0x1, 0x1, 0xf, 0x5, 0x7,
521
+ 0x1, 0x4, 0x1, 0x2, 0x2e, 0x17, 0x1, 0x3, 0x5, 0x8, 0x7, 0x4, 0x3, 0x37, 0x32, 0x1, 0x1, 0x5, 0xf, 0x7, 0x11,
522
+ 0x7, 0x2, 0x5, 0x1, 0x7, 0x1, 0x4, 0x4, 0x7, 0x7, 0x60, 0xf0}};
523
+
524
+ // EastAsianWidth-15.0.0.txt
525
+ // Date: 2022-05-24, 17:40:20 GMT [KW, LI]
526
+ inline constexpr char32_t _Width_estimate_intervals_v2[] = {0x1100, 0x1160, 0x231a, 0x231c, 0x2329, 0x232b, 0x23e9,
527
+ 0x23ed, 0x23f0, 0x23f1, 0x23f3, 0x23f4, 0x25fd, 0x25ff, 0x2614, 0x2616, 0x2648, 0x2654, 0x267f, 0x2680, 0x2693,
528
+ 0x2694, 0x26a1, 0x26a2, 0x26aa, 0x26ac, 0x26bd, 0x26bf, 0x26c4, 0x26c6, 0x26ce, 0x26cf, 0x26d4, 0x26d5, 0x26ea,
529
+ 0x26eb, 0x26f2, 0x26f4, 0x26f5, 0x26f6, 0x26fa, 0x26fb, 0x26fd, 0x26fe, 0x2705, 0x2706, 0x270a, 0x270c, 0x2728,
530
+ 0x2729, 0x274c, 0x274d, 0x274e, 0x274f, 0x2753, 0x2756, 0x2757, 0x2758, 0x2795, 0x2798, 0x27b0, 0x27b1, 0x27bf,
531
+ 0x27c0, 0x2b1b, 0x2b1d, 0x2b50, 0x2b51, 0x2b55, 0x2b56, 0x2e80, 0x2e9a, 0x2e9b, 0x2ef4, 0x2f00, 0x2fd6, 0x2ff0,
532
+ 0x2ffc, 0x3000, 0x303f, 0x3041, 0x3097, 0x3099, 0x3100, 0x3105, 0x3130, 0x3131, 0x318f, 0x3190, 0x31e4, 0x31f0,
533
+ 0x321f, 0x3220, 0x3248, 0x3250, 0xa48d, 0xa490, 0xa4c7, 0xa960, 0xa97d, 0xac00, 0xd7a4, 0xf900, 0xfb00, 0xfe10,
534
+ 0xfe1a, 0xfe30, 0xfe53, 0xfe54, 0xfe67, 0xfe68, 0xfe6c, 0xff01, 0xff61, 0xffe0, 0xffe7, 0x16fe0, 0x16fe5, 0x16ff0,
535
+ 0x16ff2, 0x17000, 0x187f8, 0x18800, 0x18cd6, 0x18d00, 0x18d09, 0x1aff0, 0x1aff4, 0x1aff5, 0x1affc, 0x1affd, 0x1afff,
536
+ 0x1b000, 0x1b123, 0x1b132, 0x1b133, 0x1b150, 0x1b153, 0x1b155, 0x1b156, 0x1b164, 0x1b168, 0x1b170, 0x1b2fc, 0x1f004,
537
+ 0x1f005, 0x1f0cf, 0x1f0d0, 0x1f18e, 0x1f18f, 0x1f191, 0x1f19b, 0x1f200, 0x1f203, 0x1f210, 0x1f23c, 0x1f240, 0x1f249,
538
+ 0x1f250, 0x1f252, 0x1f260, 0x1f266, 0x1f300, 0x1f650, 0x1f680, 0x1f6c6, 0x1f6cc, 0x1f6cd, 0x1f6d0, 0x1f6d3, 0x1f6d5,
539
+ 0x1f6d8, 0x1f6dc, 0x1f6e0, 0x1f6eb, 0x1f6ed, 0x1f6f4, 0x1f6fd, 0x1f7e0, 0x1f7ec, 0x1f7f0, 0x1f7f1, 0x1f900, 0x1fa00,
540
+ 0x1fa70, 0x1fa7d, 0x1fa80, 0x1fa89, 0x1fa90, 0x1fabe, 0x1fabf, 0x1fac6, 0x1face, 0x1fadc, 0x1fae0, 0x1fae9, 0x1faf0,
541
+ 0x1faf9, 0x20000, 0x2fffe, 0x30000, 0x3fffe};
542
+
543
+ _STD_END
544
+
545
+ #pragma pop_macro("new")
546
+ _STL_RESTORE_CLANG_WARNINGS
547
+ #pragma warning(pop)
548
+ #pragma pack(pop)
549
+
550
+ #endif // _STL_COMPILER_PREPROCESSOR
551
+ #endif // __MSVC_FORMAT_UCD_TABLES_HPP
miniMSVC/VC/Tools/MSVC/14.42.34433/include/__msvc_formatter.hpp ADDED
@@ -0,0 +1,373 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // __msvc_formatter.hpp internal header
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ // NOTE:
7
+ // The contents of this header are derived in part from libfmt under the following license:
8
+
9
+ // Copyright (c) 2012 - present, Victor Zverovich
10
+ //
11
+ // Permission is hereby granted, free of charge, to any person obtaining
12
+ // a copy of this software and associated documentation files (the
13
+ // "Software"), to deal in the Software without restriction, including
14
+ // without limitation the rights to use, copy, modify, merge, publish,
15
+ // distribute, sublicense, and/or sell copies of the Software, and to
16
+ // permit persons to whom the Software is furnished to do so, subject to
17
+ // the following conditions:
18
+ //
19
+ // The above copyright notice and this permission notice shall be
20
+ // included in all copies or substantial portions of the Software.
21
+
22
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23
+ // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24
+ // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
25
+ // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
26
+ // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
27
+ // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
28
+ // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29
+ //
30
+ // --- Optional exception to the license ---
31
+ //
32
+ // As an exception, if, as a result of your compiling your source code, portions
33
+ // of this Software are embedded into a machine-executable object form of such
34
+ // source code, you may redistribute such embedded portions in such object form
35
+ // without including the above copyright and permission notices.
36
+
37
+ #ifndef __MSVC_FORMATTER_HPP
38
+ #define __MSVC_FORMATTER_HPP
39
+ #include <yvals_core.h>
40
+ #if _STL_COMPILER_PREPROCESSOR
41
+
42
+ #if !_HAS_CXX20
43
+ #error The contents of <format> are only available with C++20. (Also, you should not include this internal header.)
44
+ #endif // !_HAS_CXX20
45
+
46
+ #include <concepts>
47
+ #include <cstddef>
48
+ #include <cstdint>
49
+ #include <type_traits>
50
+ #if _HAS_CXX23
51
+ #include <xutility>
52
+ #endif // _HAS_CXX23
53
+
54
+ #pragma pack(push, _CRT_PACKING)
55
+ #pragma warning(push, _STL_WARNING_LEVEL)
56
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
57
+ _STL_DISABLE_CLANG_WARNINGS
58
+ #pragma push_macro("new")
59
+ #undef new
60
+
61
+ _STD_BEGIN
62
+ #if _HAS_CXX23
63
+ #define _FMT_P2286_BEGIN inline namespace __p2286 {
64
+ #define _FMT_P2286_END }
65
+ #else // ^^^ _HAS_CXX23 / !_HAS_CXX23 vvv
66
+ #define _FMT_P2286_BEGIN
67
+ #define _FMT_P2286_END
68
+ #endif // ^^^ !_HAS_CXX23 ^^^
69
+
70
+ enum class _Fmt_align : uint8_t { _None, _Left, _Right, _Center };
71
+
72
+ enum class _Fmt_sign : uint8_t { _None, _Plus, _Minus, _Space };
73
+
74
+ enum class _Basic_format_arg_type : uint8_t {
75
+ _None,
76
+ _Int_type,
77
+ _UInt_type,
78
+ _Long_long_type,
79
+ _ULong_long_type,
80
+ _Bool_type,
81
+ _Char_type,
82
+ _Float_type,
83
+ _Double_type,
84
+ _Long_double_type,
85
+ _Pointer_type,
86
+ _CString_type,
87
+ _String_type,
88
+ _Custom_type,
89
+ };
90
+ static_assert(static_cast<int>(_Basic_format_arg_type::_Custom_type) < 16, "must fit in 4-bit bitfield");
91
+
92
+ #if _HAS_CXX23
93
+ _NODISCARD consteval bool _Is_debug_enabled_fmt_type(_Basic_format_arg_type _Ty) {
94
+ return _Ty == _Basic_format_arg_type::_Char_type || _Ty == _Basic_format_arg_type::_CString_type
95
+ || _Ty == _Basic_format_arg_type::_String_type;
96
+ }
97
+ #endif // _HAS_CXX23
98
+
99
+ template <class _CharT>
100
+ struct _Basic_format_specs {
101
+ int _Width = 0;
102
+ int _Precision = -1;
103
+ char _Type = '\0';
104
+ _Fmt_align _Alignment = _Fmt_align::_None;
105
+ _Fmt_sign _Sgn = _Fmt_sign::_None;
106
+ bool _Alt = false;
107
+ bool _Localized = false;
108
+ bool _Leading_zero = false;
109
+ uint8_t _Fill_length = 1;
110
+ // At most one codepoint (so one char32_t or four utf-8 char8_t).
111
+ _CharT _Fill[4 / sizeof(_CharT)] = {_CharT{' '}};
112
+ };
113
+
114
+ // Adds width and precision references to _Basic_format_specs.
115
+ // This is required for std::formatter implementations because we must
116
+ // parse the format specs without having access to the format args (via a format context).
117
+ template <class _CharT>
118
+ struct _Dynamic_format_specs : _Basic_format_specs<_CharT> {
119
+ int _Dynamic_width_index = -1;
120
+ int _Dynamic_precision_index = -1;
121
+ };
122
+
123
+ _EXPORT_STD template <class _CharT>
124
+ class basic_format_parse_context;
125
+
126
+ template <class _CharT>
127
+ concept _Format_supported_charT = _Is_any_of_v<_CharT, char, wchar_t>;
128
+
129
+ // Generic formatter definition, the deleted default constructor
130
+ // makes it "disabled" as per N4950 [format.formatter.spec]/5
131
+ _EXPORT_STD template <class _Ty, class _CharT = char>
132
+ struct formatter {
133
+ formatter() = delete;
134
+ formatter(const formatter&) = delete;
135
+ formatter& operator=(const formatter&) = delete;
136
+ };
137
+
138
+ _FMT_P2286_BEGIN
139
+ // TRANSITION, VSO-1236041: Avoid declaring and defining member functions in different headers.
140
+ template <_Basic_format_arg_type _ArgType, class _CharT, class _Pc>
141
+ constexpr _Pc::iterator _Formatter_base_parse(_Dynamic_format_specs<_CharT>& _Specs, _Pc& _ParseCtx);
142
+
143
+ template <class _Ty, class _CharT, class _FormatContext>
144
+ _FormatContext::iterator _Formatter_base_format(
145
+ const _Dynamic_format_specs<_CharT>& _Specs, const _Ty& _Val, _FormatContext& _FormatCtx);
146
+
147
+ template <class _Ty, class _CharT, _Basic_format_arg_type _ArgType>
148
+ struct _Formatter_base {
149
+ public:
150
+ #if _HAS_CXX23
151
+ constexpr void _Set_debug_format() noexcept
152
+ requires (_Is_debug_enabled_fmt_type(_ArgType))
153
+ {
154
+ _Specs._Type = '?';
155
+ }
156
+ #endif // _HAS_CXX23
157
+
158
+ template <class _Pc = basic_format_parse_context<_CharT>>
159
+ constexpr _Pc::iterator parse(type_identity_t<_Pc&> _ParseCtx) {
160
+ return _Formatter_base_parse<_ArgType>(_Specs, _ParseCtx);
161
+ }
162
+
163
+ template <class _FormatContext>
164
+ _FormatContext::iterator format(const _Ty& _Val, _FormatContext& _FormatCtx) const {
165
+ return _Formatter_base_format(_Specs, _Val, _FormatCtx);
166
+ }
167
+
168
+ private:
169
+ _Dynamic_format_specs<_CharT> _Specs;
170
+ };
171
+ _FMT_P2286_END
172
+
173
+ #define _FORMAT_SPECIALIZE_FOR(_Type, _ArgType) \
174
+ template <_Format_supported_charT _CharT> \
175
+ struct formatter<_Type, _CharT> : _Formatter_base<_Type, _CharT, _ArgType> {}
176
+
177
+ _FORMAT_SPECIALIZE_FOR(int, _Basic_format_arg_type::_Int_type);
178
+ _FORMAT_SPECIALIZE_FOR(unsigned int, _Basic_format_arg_type::_UInt_type);
179
+ _FORMAT_SPECIALIZE_FOR(long long, _Basic_format_arg_type::_Long_long_type);
180
+ _FORMAT_SPECIALIZE_FOR(unsigned long long, _Basic_format_arg_type::_ULong_long_type);
181
+ _FORMAT_SPECIALIZE_FOR(bool, _Basic_format_arg_type::_Bool_type);
182
+ _FORMAT_SPECIALIZE_FOR(float, _Basic_format_arg_type::_Float_type);
183
+ _FORMAT_SPECIALIZE_FOR(double, _Basic_format_arg_type::_Double_type);
184
+ _FORMAT_SPECIALIZE_FOR(long double, _Basic_format_arg_type::_Long_double_type);
185
+ _FORMAT_SPECIALIZE_FOR(nullptr_t, _Basic_format_arg_type::_Pointer_type);
186
+ _FORMAT_SPECIALIZE_FOR(void*, _Basic_format_arg_type::_Pointer_type);
187
+ _FORMAT_SPECIALIZE_FOR(const void*, _Basic_format_arg_type::_Pointer_type);
188
+ _FORMAT_SPECIALIZE_FOR(short, _Basic_format_arg_type::_Int_type);
189
+ _FORMAT_SPECIALIZE_FOR(unsigned short, _Basic_format_arg_type::_UInt_type);
190
+ _FORMAT_SPECIALIZE_FOR(long, _Basic_format_arg_type::_Int_type);
191
+ _FORMAT_SPECIALIZE_FOR(unsigned long, _Basic_format_arg_type::_UInt_type);
192
+ _FORMAT_SPECIALIZE_FOR(signed char, _Basic_format_arg_type::_Int_type);
193
+ _FORMAT_SPECIALIZE_FOR(unsigned char, _Basic_format_arg_type::_UInt_type);
194
+
195
+ #undef _FORMAT_SPECIALIZE_FOR
196
+
197
+ // not using the macro because we'd like to add 'set_debug_format' member function in C++23 mode
198
+ template <_Format_supported_charT _CharT>
199
+ struct formatter<char, _CharT> : _Formatter_base<char, _CharT, _Basic_format_arg_type::_Char_type> {
200
+ #if _HAS_CXX23
201
+ constexpr void set_debug_format() noexcept {
202
+ this->_Set_debug_format();
203
+ }
204
+ #endif // _HAS_CXX23
205
+ };
206
+
207
+ // not using the macro because we'd like to avoid the formatter<wchar_t, char> specialization
208
+ template <>
209
+ struct formatter<wchar_t, wchar_t> : _Formatter_base<wchar_t, wchar_t, _Basic_format_arg_type::_Char_type> {
210
+ #if _HAS_CXX23
211
+ constexpr void set_debug_format() noexcept {
212
+ _Set_debug_format();
213
+ }
214
+ #endif // _HAS_CXX23
215
+ };
216
+
217
+ // We could use the macro for these specializations, but it's confusing to refer to symbols that are defined
218
+ // inside the macro in the macro's "call".
219
+ template <_Format_supported_charT _CharT>
220
+ struct formatter<_CharT*, _CharT> : _Formatter_base<_CharT*, _CharT, _Basic_format_arg_type::_CString_type> {
221
+ #if _HAS_CXX23
222
+ constexpr void set_debug_format() noexcept {
223
+ this->_Set_debug_format();
224
+ }
225
+ #endif // _HAS_CXX23
226
+ };
227
+
228
+ template <_Format_supported_charT _CharT>
229
+ struct formatter<const _CharT*, _CharT>
230
+ : _Formatter_base<const _CharT*, _CharT, _Basic_format_arg_type::_CString_type> {
231
+ #if _HAS_CXX23
232
+ constexpr void set_debug_format() noexcept {
233
+ this->_Set_debug_format();
234
+ }
235
+ #endif // _HAS_CXX23
236
+ };
237
+
238
+ template <_Format_supported_charT _CharT, size_t _Nx>
239
+ struct formatter<_CharT[_Nx], _CharT> : _Formatter_base<_CharT[_Nx], _CharT, _Basic_format_arg_type::_CString_type> {
240
+ #if _HAS_CXX23
241
+ constexpr void set_debug_format() noexcept {
242
+ this->_Set_debug_format();
243
+ }
244
+ #endif // _HAS_CXX23
245
+ };
246
+
247
+ _EXPORT_STD template <class _Elem, class _Traits, class _Alloc>
248
+ class basic_string;
249
+
250
+ _EXPORT_STD template <class _Elem, class _Traits>
251
+ class basic_string_view;
252
+
253
+ template <_Format_supported_charT _CharT, class _Traits, class _Allocator>
254
+ struct formatter<basic_string<_CharT, _Traits, _Allocator>, _CharT>
255
+ : _Formatter_base<basic_string<_CharT, _Traits, _Allocator>, _CharT, _Basic_format_arg_type::_String_type> {
256
+ #if _HAS_CXX23
257
+ constexpr void set_debug_format() noexcept {
258
+ this->_Set_debug_format();
259
+ }
260
+ #endif // _HAS_CXX23
261
+ };
262
+
263
+ template <_Format_supported_charT _CharT, class _Traits>
264
+ struct formatter<basic_string_view<_CharT, _Traits>, _CharT>
265
+ : _Formatter_base<basic_string_view<_CharT, _Traits>, _CharT, _Basic_format_arg_type::_String_type> {
266
+ #if _HAS_CXX23
267
+ constexpr void set_debug_format() noexcept {
268
+ this->_Set_debug_format();
269
+ }
270
+ #endif // _HAS_CXX23
271
+ };
272
+
273
+ #if _HAS_CXX23
274
+ template <>
275
+ struct formatter<char*, wchar_t> {
276
+ formatter() = delete;
277
+ formatter(const formatter&) = delete;
278
+ formatter& operator=(const formatter&) = delete;
279
+ };
280
+
281
+ template <>
282
+ struct formatter<const char*, wchar_t> {
283
+ formatter() = delete;
284
+ formatter(const formatter&) = delete;
285
+ formatter& operator=(const formatter&) = delete;
286
+ };
287
+
288
+ template <size_t _Size>
289
+ struct formatter<char[_Size], wchar_t> {
290
+ formatter() = delete;
291
+ formatter(const formatter&) = delete;
292
+ formatter& operator=(const formatter&) = delete;
293
+ };
294
+
295
+ template <class _Traits, class _Allocator>
296
+ struct formatter<basic_string<char, _Traits, _Allocator>, wchar_t> {
297
+ formatter() = delete;
298
+ formatter(const formatter&) = delete;
299
+ formatter& operator=(const formatter&) = delete;
300
+ };
301
+
302
+ template <class _Traits>
303
+ struct formatter<basic_string_view<char, _Traits>, wchar_t> {
304
+ formatter() = delete;
305
+ formatter(const formatter&) = delete;
306
+ formatter& operator=(const formatter&) = delete;
307
+ };
308
+
309
+ _EXPORT_STD enum class range_format { disabled, map, set, sequence, string, debug_string };
310
+
311
+ template <class _Ty>
312
+ struct _Invalid_format_kind {
313
+ static_assert(_Always_false<_Ty>, "A program that instantiates the primary template of format_kind is ill-formed. "
314
+ "(N4981 [format.range.fmtkind]/1)");
315
+ };
316
+
317
+ _EXPORT_STD template <class _Ty>
318
+ constexpr _Invalid_format_kind<_Ty> format_kind;
319
+
320
+ template <class _Ty>
321
+ constexpr bool _Is_two_tuple = false;
322
+
323
+ template <class _Ty, class _Uty>
324
+ constexpr bool _Is_two_tuple<pair<_Ty, _Uty>> = true;
325
+
326
+ template <class _Ty, class _Uty>
327
+ constexpr bool _Is_two_tuple<tuple<_Ty, _Uty>> = true;
328
+
329
+ template <_RANGES input_range _Rng>
330
+ requires same_as<_Rng, remove_cvref_t<_Rng>>
331
+ constexpr range_format format_kind<_Rng> = []() consteval {
332
+ using _Ref_value_t = remove_cvref_t<_RANGES range_reference_t<_Rng>>;
333
+ if constexpr (same_as<_Ref_value_t, _Rng>) {
334
+ return range_format::disabled;
335
+ } else if constexpr (requires { typename _Rng::key_type; }) {
336
+ if constexpr (requires { typename _Rng::mapped_type; } && _Is_two_tuple<_Ref_value_t>) {
337
+ return range_format::map;
338
+ } else {
339
+ return range_format::set;
340
+ }
341
+ } else {
342
+ return range_format::sequence;
343
+ }
344
+ }();
345
+
346
+ // Specializations for pairs, tuples, and ranges are forward-declared to avoid any risk of using the disabled primary
347
+ // template.
348
+
349
+ // Per LWG-3997, `_CharT` in library-provided `formatter` specializations is
350
+ // constrained to character types supported by `format`.
351
+
352
+ template <class _Rng>
353
+ concept _Formatting_enabled_range = format_kind<_Rng> != range_format::disabled;
354
+
355
+ template <_RANGES input_range _Rng, _Format_supported_charT _CharT>
356
+ requires _Formatting_enabled_range<_Rng>
357
+ struct formatter<_Rng, _CharT>;
358
+
359
+ template <_Format_supported_charT _CharT, class _Ty1, class _Ty2>
360
+ struct formatter<pair<_Ty1, _Ty2>, _CharT>;
361
+
362
+ template <_Format_supported_charT _CharT, class... _Types>
363
+ struct formatter<tuple<_Types...>, _CharT>;
364
+ #endif // _HAS_CXX23
365
+ _STD_END
366
+
367
+ #pragma pop_macro("new")
368
+ _STL_RESTORE_CLANG_WARNINGS
369
+ #pragma warning(pop)
370
+ #pragma pack(pop)
371
+
372
+ #endif // _STL_COMPILER_PREPROCESSOR
373
+ #endif // __MSVC_FORMATTER_HPP
miniMSVC/VC/Tools/MSVC/14.42.34433/include/system_error ADDED
@@ -0,0 +1,738 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // system_error standard header
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ #ifndef _SYSTEM_ERROR_
7
+ #define _SYSTEM_ERROR_
8
+ #include <yvals.h>
9
+ #if _STL_COMPILER_PREPROCESSOR
10
+ #include <__msvc_system_error_abi.hpp>
11
+ #include <cerrno>
12
+ #include <cstdlib>
13
+ #include <stdexcept>
14
+ #include <xcall_once.h>
15
+ #include <xerrc.h>
16
+ #ifndef _M_CEE_PURE
17
+ #include <atomic>
18
+ #endif // !defined(_M_CEE_PURE)
19
+
20
+ #if _HAS_CXX20
21
+ #include <compare>
22
+ #endif // _HAS_CXX20
23
+
24
+ #pragma pack(push, _CRT_PACKING)
25
+ #pragma warning(push, _STL_WARNING_LEVEL)
26
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
27
+ _STL_DISABLE_CLANG_WARNINGS
28
+ #pragma push_macro("new")
29
+ #undef new
30
+
31
+ // TRANSITION, non-_Ugly attribute tokens
32
+ #pragma push_macro("msvc")
33
+ #pragma push_macro("noop_dtor")
34
+ #undef msvc
35
+ #undef noop_dtor
36
+
37
+ _STD_BEGIN
38
+ _EXPORT_STD enum class io_errc { // error codes for ios_base::failure
39
+ stream = 1
40
+ };
41
+
42
+ _EXPORT_STD template <class _Enum>
43
+ struct is_error_code_enum : false_type {};
44
+
45
+ template <>
46
+ struct is_error_code_enum<io_errc> : true_type {};
47
+
48
+ _EXPORT_STD template <class _Ty>
49
+ constexpr bool is_error_code_enum_v = is_error_code_enum<_Ty>::value;
50
+
51
+ _EXPORT_STD template <class _Enum>
52
+ struct is_error_condition_enum : false_type {};
53
+
54
+ template <>
55
+ struct is_error_condition_enum<errc> : true_type {};
56
+
57
+ _EXPORT_STD template <class _Ty>
58
+ constexpr bool is_error_condition_enum_v = is_error_condition_enum<_Ty>::value;
59
+
60
+ _EXPORT_STD class error_code;
61
+ _EXPORT_STD class error_condition;
62
+
63
+ namespace _Ensure_adl {
64
+ void make_error_code() = delete;
65
+ void make_error_condition() = delete;
66
+ } // namespace _Ensure_adl
67
+
68
+ _EXPORT_STD class error_category;
69
+
70
+ _EXPORT_STD _NODISCARD const error_category& generic_category() noexcept;
71
+ _EXPORT_STD _NODISCARD const error_category& iostream_category() noexcept;
72
+ _EXPORT_STD _NODISCARD const error_category& system_category() noexcept;
73
+
74
+ _EXPORT_STD class __declspec(novtable) error_category { // categorize an error
75
+ public:
76
+ #ifdef _M_CEE_PURE
77
+ /* constexpr */ error_category() noexcept { // TRANSITION, ABI
78
+ _Addr = reinterpret_cast<uintptr_t>(this);
79
+ }
80
+ #else // ^^^ defined(_M_CEE_PURE) / !defined(_M_CEE_PURE) vvv
81
+ #pragma warning(push)
82
+ #pragma warning(disable : 4355) // 'this': used in base member initializer list
83
+ constexpr error_category() noexcept : _Addr(this) {}
84
+ #pragma warning(pop)
85
+ #endif // ^^^ !defined(_M_CEE_PURE) ^^^
86
+
87
+ _CONSTEXPR20 virtual ~error_category() noexcept = default;
88
+
89
+ _NODISCARD virtual const char* name() const noexcept = 0;
90
+
91
+ _NODISCARD virtual string message(int _Errval) const = 0;
92
+
93
+ _NODISCARD virtual error_condition default_error_condition(int _Errval) const noexcept;
94
+
95
+ _NODISCARD virtual bool equivalent(int _Errval, const error_condition& _Cond) const noexcept;
96
+
97
+ _NODISCARD virtual bool equivalent(const error_code& _Code, int _Errval) const noexcept;
98
+
99
+ _NODISCARD bool operator==(const error_category& _Right) const noexcept {
100
+ #ifdef _M_CEE_PURE
101
+ return _Addr == _Right._Addr;
102
+ #else // ^^^ defined(_M_CEE_PURE) / !defined(_M_CEE_PURE) vvv
103
+ return _Bit_cast<uintptr_t>(_Addr) == _Bit_cast<uintptr_t>(_Right._Addr);
104
+ #endif // ^^^ !defined(_M_CEE_PURE) ^^^
105
+ }
106
+
107
+ #if _HAS_CXX20
108
+ _NODISCARD strong_ordering operator<=>(const error_category& _Right) const noexcept {
109
+ return _Bit_cast<uintptr_t>(_Addr) <=> _Bit_cast<uintptr_t>(_Right._Addr);
110
+ }
111
+ #else // ^^^ _HAS_CXX20 / !_HAS_CXX20 vvv
112
+ _NODISCARD bool operator!=(const error_category& _Right) const noexcept {
113
+ return !(*this == _Right);
114
+ }
115
+
116
+ _NODISCARD bool operator<(const error_category& _Right) const noexcept {
117
+ #ifdef _M_CEE_PURE
118
+ return _Addr < _Right._Addr;
119
+ #else // ^^^ defined(_M_CEE_PURE) / !defined(_M_CEE_PURE) vvv
120
+ return _Bit_cast<uintptr_t>(_Addr) < _Bit_cast<uintptr_t>(_Right._Addr);
121
+ #endif // ^^^ !defined(_M_CEE_PURE) ^^^
122
+ }
123
+ #endif // ^^^ !_HAS_CXX20 ^^^
124
+
125
+ error_category(const error_category&) = delete;
126
+ error_category& operator=(const error_category&) = delete;
127
+
128
+ protected:
129
+ #ifdef _M_CEE_PURE
130
+ uintptr_t _Addr;
131
+ #else // ^^^ defined(_M_CEE_PURE) / !defined(_M_CEE_PURE) vvv
132
+ union _Addr_storage {
133
+ private:
134
+ uintptr_t _Num;
135
+ error_category* _Ptr;
136
+
137
+ public:
138
+ constexpr explicit _Addr_storage(const uintptr_t _Addr_num) noexcept : _Num(_Addr_num) {}
139
+ constexpr explicit _Addr_storage(error_category* const _Addr_ptr) noexcept : _Ptr(_Addr_ptr) {}
140
+
141
+ // TRANSITION: As of Boost 1.80.0, boost::system::detail::std_category assigns to _Addr.
142
+ constexpr _Addr_storage& operator=(const uintptr_t _Addr_num) noexcept {
143
+ _Num = _Addr_num;
144
+ return *this;
145
+ }
146
+ };
147
+ _STL_INTERNAL_STATIC_ASSERT(sizeof(_Addr_storage) == sizeof(uintptr_t));
148
+ _STL_INTERNAL_STATIC_ASSERT(alignof(_Addr_storage) == alignof(uintptr_t));
149
+
150
+ _Addr_storage _Addr;
151
+ #endif // ^^^ !defined(_M_CEE_PURE) ^^^
152
+
153
+ constexpr explicit error_category(const uintptr_t _Addr_) noexcept : _Addr(_Addr_) {}
154
+
155
+ enum : uintptr_t { // symbolic addresses for Standard error_category objects
156
+ _Future_addr = 1,
157
+ _Generic_addr = 3,
158
+ _Iostream_addr = 5,
159
+ _System_addr = 7
160
+ };
161
+ };
162
+
163
+ #if _STL_OPTIMIZE_SYSTEM_ERROR_OPERATORS
164
+ _NODISCARD inline bool _System_error_equal(const error_code&, const error_condition&) noexcept;
165
+ #endif // _STL_OPTIMIZE_SYSTEM_ERROR_OPERATORS
166
+
167
+ _EXPORT_STD class error_code { // store an implementation-specific error code and category
168
+ public:
169
+ error_code() noexcept : _Myval(0), _Mycat(&_STD system_category()) {} // construct non-error
170
+
171
+ error_code(int _Val, const error_category& _Cat) noexcept : _Myval(_Val), _Mycat(&_Cat) {}
172
+
173
+ template <class _Enum, enable_if_t<is_error_code_enum_v<_Enum>, int> = 0>
174
+ error_code(_Enum _Errcode) noexcept : _Myval(0), _Mycat(nullptr) {
175
+ using _Ensure_adl::make_error_code;
176
+ *this = make_error_code(_Errcode); // intentional ADL
177
+ }
178
+
179
+ void assign(int _Val, const error_category& _Cat) noexcept {
180
+ _Myval = _Val;
181
+ _Mycat = &_Cat;
182
+ }
183
+
184
+ template <class _Enum, enable_if_t<is_error_code_enum_v<_Enum>, int> = 0>
185
+ error_code& operator=(_Enum _Errcode) noexcept {
186
+ using _Ensure_adl::make_error_code;
187
+ *this = make_error_code(_Errcode); // intentional ADL
188
+ return *this;
189
+ }
190
+
191
+ void clear() noexcept {
192
+ _Myval = 0;
193
+ _Mycat = &_STD system_category();
194
+ }
195
+
196
+ _NODISCARD int value() const noexcept {
197
+ return _Myval;
198
+ }
199
+
200
+ _NODISCARD const error_category& category() const noexcept {
201
+ return *_Mycat;
202
+ }
203
+
204
+ _NODISCARD error_condition default_error_condition() const noexcept;
205
+
206
+ _NODISCARD string message() const {
207
+ return category().message(value());
208
+ }
209
+
210
+ explicit operator bool() const noexcept {
211
+ return value() != 0;
212
+ }
213
+
214
+ #if _STL_OPTIMIZE_SYSTEM_ERROR_OPERATORS
215
+ _NODISCARD friend bool operator==(const error_code& _Left, const error_code& _Right) noexcept {
216
+ return _Left.category() == _Right.category() && _Left.value() == _Right.value();
217
+ }
218
+
219
+ _NODISCARD friend bool operator==(const error_code& _Left, const error_condition& _Right) noexcept {
220
+ return _System_error_equal(_Left, _Right);
221
+ }
222
+
223
+ #if _HAS_CXX20
224
+ _NODISCARD friend strong_ordering operator<=>(const error_code& _Left, const error_code& _Right) noexcept {
225
+ if (const auto _Result = _Left.category() <=> _Right.category(); _Result != 0) {
226
+ return _Result;
227
+ }
228
+ return _Left.value() <=> _Right.value();
229
+ }
230
+ #else // ^^^ _HAS_CXX20 / !_HAS_CXX20 vvv
231
+ _NODISCARD friend bool operator<(const error_code& _Left, const error_code& _Right) noexcept {
232
+ return _Left.category() < _Right.category()
233
+ || (_Left.category() == _Right.category() && _Left.value() < _Right.value());
234
+ }
235
+ _NODISCARD friend bool operator==(const error_condition& _Left, const error_code& _Right) noexcept {
236
+ return _System_error_equal(_Right, _Left);
237
+ }
238
+
239
+ _NODISCARD friend bool operator!=(const error_code& _Left, const error_code& _Right) noexcept {
240
+ return !(_Left == _Right);
241
+ }
242
+
243
+ _NODISCARD friend bool operator!=(const error_code& _Left, const error_condition& _Right) noexcept {
244
+ return !_System_error_equal(_Left, _Right);
245
+ }
246
+
247
+ _NODISCARD friend bool operator!=(const error_condition& _Left, const error_code& _Right) noexcept {
248
+ return !_System_error_equal(_Right, _Left);
249
+ }
250
+ #endif // ^^^ !_HAS_CXX20 ^^^
251
+ #endif // _STL_OPTIMIZE_SYSTEM_ERROR_OPERATORS
252
+
253
+ private:
254
+ int _Myval; // the stored error number
255
+ const error_category* _Mycat; // pointer to error category
256
+ };
257
+
258
+ _EXPORT_STD class error_condition { // store an abstract error code and category
259
+ public:
260
+ error_condition() noexcept : _Myval(0), _Mycat(&_STD generic_category()) {} // construct non-error
261
+
262
+ error_condition(int _Val, const error_category& _Cat) noexcept : _Myval(_Val), _Mycat(&_Cat) {}
263
+
264
+ template <class _Enum, enable_if_t<is_error_condition_enum_v<_Enum>, int> = 0>
265
+ error_condition(_Enum _Errcode) noexcept : _Myval(0), _Mycat(nullptr) {
266
+ using _Ensure_adl::make_error_condition;
267
+ *this = make_error_condition(_Errcode); // intentional ADL
268
+ }
269
+
270
+ void assign(int _Val, const error_category& _Cat) noexcept {
271
+ _Myval = _Val;
272
+ _Mycat = &_Cat;
273
+ }
274
+
275
+ template <class _Enum, enable_if_t<is_error_condition_enum_v<_Enum>, int> = 0>
276
+ error_condition& operator=(_Enum _Errcode) noexcept {
277
+ using _Ensure_adl::make_error_condition;
278
+ *this = make_error_condition(_Errcode); // intentional ADL
279
+ return *this;
280
+ }
281
+
282
+ void clear() noexcept {
283
+ _Myval = 0;
284
+ _Mycat = &_STD generic_category();
285
+ }
286
+
287
+ _NODISCARD int value() const noexcept {
288
+ return _Myval;
289
+ }
290
+
291
+ _NODISCARD const error_category& category() const noexcept {
292
+ return *_Mycat;
293
+ }
294
+
295
+ _NODISCARD string message() const {
296
+ return category().message(value());
297
+ }
298
+
299
+ explicit operator bool() const noexcept {
300
+ return value() != 0;
301
+ }
302
+
303
+ #if _STL_OPTIMIZE_SYSTEM_ERROR_OPERATORS
304
+ _NODISCARD friend bool operator==(const error_condition& _Left, const error_condition& _Right) noexcept {
305
+ return _Left.category() == _Right.category() && _Left.value() == _Right.value();
306
+ }
307
+
308
+ #if _HAS_CXX20
309
+ _NODISCARD friend strong_ordering operator<=>(
310
+ const error_condition& _Left, const error_condition& _Right) noexcept {
311
+ if (const auto _Result = _Left.category() <=> _Right.category(); _Result != 0) {
312
+ return _Result;
313
+ }
314
+ return _Left.value() <=> _Right.value();
315
+ }
316
+ #else // ^^^ _HAS_CXX20 / !_HAS_CXX20 vvv
317
+ _NODISCARD friend bool operator<(const error_condition& _Left, const error_condition& _Right) noexcept {
318
+ return _Left.category() < _Right.category()
319
+ || (_Left.category() == _Right.category() && _Left.value() < _Right.value());
320
+ }
321
+ _NODISCARD friend bool operator!=(const error_condition& _Left, const error_condition& _Right) noexcept {
322
+ return !(_Left == _Right);
323
+ }
324
+ #endif // ^^^ !_HAS_CXX20 ^^^
325
+
326
+ // We grant friendship to the operators from error_code here to allow is_error_code_enum_v but not
327
+ // is_error_condition_enum_v enums to be compared directly with error_condition; for example:
328
+ // io_errc::stream == make_error_condition(errc::out_of_memory)
329
+ friend bool operator==(const error_code& _Left, const error_condition& _Right) noexcept;
330
+ #if !_HAS_CXX20
331
+ friend bool operator==(const error_condition& _Left, const error_code& _Right) noexcept;
332
+ friend bool operator!=(const error_code& _Left, const error_condition& _Right) noexcept;
333
+ friend bool operator!=(const error_condition& _Left, const error_code& _Right) noexcept;
334
+ #endif // !_HAS_CXX20
335
+ #endif // _STL_OPTIMIZE_SYSTEM_ERROR_OPERATORS
336
+
337
+ private:
338
+ int _Myval; // the stored error number
339
+ const error_category* _Mycat; // pointer to error category
340
+ };
341
+
342
+ #if _STL_OPTIMIZE_SYSTEM_ERROR_OPERATORS
343
+ _NODISCARD inline bool _System_error_equal(const error_code& _Left, const error_condition& _Right) noexcept {
344
+ return _Left.category().equivalent(_Left.value(), _Right) || _Right.category().equivalent(_Left, _Right.value());
345
+ }
346
+ #else // ^^^ _STL_OPTIMIZE_SYSTEM_ERROR_OPERATORS / !_STL_OPTIMIZE_SYSTEM_ERROR_OPERATORS vvv
347
+ _EXPORT_STD _NODISCARD inline bool operator==(const error_code& _Left, const error_code& _Right) noexcept {
348
+ return _Left.category() == _Right.category() && _Left.value() == _Right.value();
349
+ }
350
+
351
+ _EXPORT_STD _NODISCARD inline bool operator==(const error_code& _Left, const error_condition& _Right) noexcept {
352
+ return _Left.category().equivalent(_Left.value(), _Right) || _Right.category().equivalent(_Left, _Right.value());
353
+ }
354
+
355
+ _EXPORT_STD _NODISCARD inline bool operator==(const error_condition& _Left, const error_condition& _Right) noexcept {
356
+ return _Left.category() == _Right.category() && _Left.value() == _Right.value();
357
+ }
358
+
359
+ #if _HAS_CXX20
360
+ _EXPORT_STD _NODISCARD inline strong_ordering operator<=>(const error_code& _Left, const error_code& _Right) noexcept {
361
+ if (const auto _Result = _Left.category() <=> _Right.category(); _Result != 0) {
362
+ return _Result;
363
+ }
364
+ return _Left.value() <=> _Right.value();
365
+ }
366
+
367
+ _EXPORT_STD _NODISCARD inline strong_ordering operator<=>(
368
+ const error_condition& _Left, const error_condition& _Right) noexcept {
369
+ if (const auto _Result = _Left.category() <=> _Right.category(); _Result != 0) {
370
+ return _Result;
371
+ }
372
+ return _Left.value() <=> _Right.value();
373
+ }
374
+ #else // ^^^ _HAS_CXX20 / !_HAS_CXX20 vvv
375
+ _NODISCARD inline bool operator<(const error_code& _Left, const error_code& _Right) noexcept {
376
+ return _Left.category() < _Right.category()
377
+ || (_Left.category() == _Right.category() && _Left.value() < _Right.value());
378
+ }
379
+
380
+ _NODISCARD inline bool operator<(const error_condition& _Left, const error_condition& _Right) noexcept {
381
+ return _Left.category() < _Right.category()
382
+ || (_Left.category() == _Right.category() && _Left.value() < _Right.value());
383
+ }
384
+
385
+ _NODISCARD inline bool operator==(const error_condition& _Left, const error_code& _Right) noexcept {
386
+ return _Right.category().equivalent(_Right.value(), _Left) || _Left.category().equivalent(_Right, _Left.value());
387
+ }
388
+
389
+ _NODISCARD inline bool operator!=(const error_code& _Left, const error_code& _Right) noexcept {
390
+ return !(_Left == _Right);
391
+ }
392
+
393
+ _NODISCARD inline bool operator!=(const error_code& _Left, const error_condition& _Right) noexcept {
394
+ return !(_Left == _Right);
395
+ }
396
+
397
+ _NODISCARD inline bool operator!=(const error_condition& _Left, const error_code& _Right) noexcept {
398
+ return !(_Left == _Right);
399
+ }
400
+
401
+ _NODISCARD inline bool operator!=(const error_condition& _Left, const error_condition& _Right) noexcept {
402
+ return !(_Left == _Right);
403
+ }
404
+ #endif // ^^^ !_HAS_CXX20 ^^^
405
+ #endif // ^^^ !_STL_OPTIMIZE_SYSTEM_ERROR_OPERATORS ^^^
406
+
407
+ _NODISCARD inline error_condition error_category::default_error_condition(int _Errval) const noexcept {
408
+ // make error_condition for error code
409
+ return error_condition(_Errval, *this);
410
+ }
411
+
412
+ _NODISCARD inline bool error_category::equivalent(int _Errval, const error_condition& _Cond) const noexcept {
413
+ return default_error_condition(_Errval) == _Cond;
414
+ }
415
+
416
+ _NODISCARD inline bool error_category::equivalent(const error_code& _Code, int _Errval) const noexcept {
417
+ return *this == _Code.category() && _Code.value() == _Errval;
418
+ }
419
+
420
+ _NODISCARD inline error_condition error_code::default_error_condition() const noexcept {
421
+ // make error_condition for error code
422
+ return category().default_error_condition(value());
423
+ }
424
+
425
+ _EXPORT_STD _NODISCARD inline error_code make_error_code(errc _Ec) noexcept {
426
+ return error_code(static_cast<int>(_Ec), _STD generic_category());
427
+ }
428
+
429
+ _EXPORT_STD _NODISCARD inline error_code make_error_code(io_errc _Ec) noexcept {
430
+ return error_code(static_cast<int>(_Ec), _STD iostream_category());
431
+ }
432
+
433
+ _EXPORT_STD _NODISCARD inline error_condition make_error_condition(errc _Ec) noexcept {
434
+ return error_condition(static_cast<int>(_Ec), _STD generic_category());
435
+ }
436
+
437
+ _EXPORT_STD _NODISCARD inline error_condition make_error_condition(io_errc _Ec) noexcept {
438
+ return error_condition(static_cast<int>(_Ec), _STD iostream_category());
439
+ }
440
+
441
+ template <>
442
+ struct hash<error_code> {
443
+ using _ARGUMENT_TYPE_NAME _CXX17_DEPRECATE_ADAPTOR_TYPEDEFS = error_code;
444
+ using _RESULT_TYPE_NAME _CXX17_DEPRECATE_ADAPTOR_TYPEDEFS = size_t;
445
+
446
+ _NODISCARD _STATIC_CALL_OPERATOR size_t operator()(const error_code& _Keyval) _CONST_CALL_OPERATOR noexcept {
447
+ return hash<int>{}(_Keyval.value());
448
+ }
449
+ };
450
+
451
+ template <>
452
+ struct hash<error_condition> {
453
+ using _ARGUMENT_TYPE_NAME _CXX17_DEPRECATE_ADAPTOR_TYPEDEFS = error_condition;
454
+ using _RESULT_TYPE_NAME _CXX17_DEPRECATE_ADAPTOR_TYPEDEFS = size_t;
455
+
456
+ _NODISCARD _STATIC_CALL_OPERATOR size_t operator()(const error_condition& _Keyval) _CONST_CALL_OPERATOR noexcept {
457
+ return hash<int>{}(_Keyval.value());
458
+ }
459
+ };
460
+
461
+ class _System_error : public runtime_error { // base of all system-error exceptions
462
+ private:
463
+ static string _Makestr(error_code _Errcode, string _Message) { // compose error message
464
+ if (!_Message.empty()) {
465
+ _Message.append(": ");
466
+ }
467
+
468
+ _Message.append(_Errcode.message());
469
+ return _Message;
470
+ }
471
+
472
+ protected:
473
+ _System_error(error_code _Errcode) : runtime_error(_Errcode.message()), _Mycode(_Errcode) {}
474
+
475
+ _System_error(error_code _Errcode, const string& _Message)
476
+ : runtime_error(_Makestr(_Errcode, _Message)), _Mycode(_Errcode) {}
477
+
478
+ error_code _Mycode; // the stored error code
479
+ };
480
+
481
+ _EXPORT_STD class system_error : public _System_error { // base of all system-error exceptions
482
+ private:
483
+ using _Mybase = _System_error;
484
+
485
+ public:
486
+ system_error(error_code _Errcode) : _Mybase(_Errcode) {}
487
+
488
+ system_error(error_code _Errcode, const string& _Message) : _Mybase(_Errcode, _Message) {}
489
+
490
+ system_error(error_code _Errcode, const char* _Message) : _Mybase(_Errcode, _Message) {}
491
+
492
+ system_error(int _Errval, const error_category& _Errcat) : _Mybase(error_code(_Errval, _Errcat)) {}
493
+
494
+ system_error(int _Errval, const error_category& _Errcat, const string& _Message)
495
+ : _Mybase(error_code(_Errval, _Errcat), _Message) {}
496
+
497
+ system_error(int _Errval, const error_category& _Errcat, const char* _Message)
498
+ : _Mybase(error_code(_Errval, _Errcat), _Message) {}
499
+
500
+ _NODISCARD const error_code& code() const noexcept {
501
+ return _Mycode;
502
+ }
503
+
504
+ #if !_HAS_EXCEPTIONS
505
+ protected:
506
+ void _Doraise() const override { // perform class-specific exception handling
507
+ _RAISE(*this);
508
+ }
509
+ #endif // !_HAS_EXCEPTIONS
510
+ };
511
+
512
+ [[noreturn]] inline void _Throw_system_error(const errc _Ec) {
513
+ _THROW(system_error{_STD make_error_code(_Ec)});
514
+ }
515
+
516
+ extern "C++" _CRTIMP2_PURE const char* __CLRCALL_PURE_OR_CDECL _Syserror_map(int);
517
+ extern "C++" _CRTIMP2_PURE int __CLRCALL_PURE_OR_CDECL _Winerror_map(int);
518
+
519
+ struct _System_error_message {
520
+ char* _Str;
521
+ size_t _Length;
522
+
523
+ explicit _System_error_message(const unsigned long _Ec) noexcept
524
+ : _Str(nullptr), _Length(_CSTD __std_system_error_allocate_message(_Ec, &_Str)) {}
525
+
526
+ _System_error_message(const _System_error_message&) = delete;
527
+ _System_error_message& operator=(const _System_error_message&) = delete;
528
+
529
+ ~_System_error_message() {
530
+ _CSTD __std_system_error_deallocate_message(_Str);
531
+ }
532
+ };
533
+
534
+ class _Generic_error_category : public error_category { // categorize a generic error
535
+ public:
536
+ constexpr _Generic_error_category() noexcept : error_category(_Generic_addr) {}
537
+
538
+ _NODISCARD const char* name() const noexcept override {
539
+ return "generic";
540
+ }
541
+
542
+ _NODISCARD string message(int _Errcode) const override {
543
+ return _Syserror_map(_Errcode);
544
+ }
545
+ };
546
+
547
+ class _Iostream_error_category2 : public error_category { // categorize an iostream error
548
+ public:
549
+ constexpr _Iostream_error_category2() noexcept : error_category(_Iostream_addr) {}
550
+
551
+ _NODISCARD const char* name() const noexcept override {
552
+ return "iostream";
553
+ }
554
+
555
+ _NODISCARD string message(int _Errcode) const override {
556
+ if (_Errcode == static_cast<int>(io_errc::stream)) {
557
+ static constexpr char _Iostream_error[] = "iostream stream error";
558
+ constexpr size_t _Iostream_error_length = sizeof(_Iostream_error) - 1; // TRANSITION, DevCom-906503
559
+ return string{_Iostream_error, _Iostream_error_length};
560
+ } else {
561
+ return _Syserror_map(_Errcode);
562
+ }
563
+ }
564
+ };
565
+
566
+ class _System_error_category : public error_category { // categorize an operating system error
567
+ public:
568
+ constexpr _System_error_category() noexcept : error_category(_System_addr) {}
569
+
570
+ _NODISCARD const char* name() const noexcept override {
571
+ return "system";
572
+ }
573
+
574
+ _NODISCARD string message(int _Errcode) const override {
575
+ const _System_error_message _Msg(static_cast<unsigned long>(_Errcode));
576
+
577
+ if (_Msg._Str && _Msg._Length != 0) {
578
+ // CodeQL [SM02310] _Msg's ctor inits _Str(nullptr) before doing work, then we test _Msg._Str above.
579
+ return string{_Msg._Str, _Msg._Length};
580
+ } else {
581
+ static constexpr char _Unknown_error[] = "unknown error";
582
+ constexpr size_t _Unknown_error_length = sizeof(_Unknown_error) - 1; // TRANSITION, DevCom-906503
583
+ return string{_Unknown_error, _Unknown_error_length};
584
+ }
585
+ }
586
+
587
+ _NODISCARD error_condition default_error_condition(int _Errval) const noexcept override {
588
+ if (_Errval == 0) {
589
+ return error_condition(0, _STD generic_category());
590
+ }
591
+
592
+ // make error_condition for error code (generic if possible)
593
+ const int _Posv = _Winerror_map(_Errval);
594
+ if (_Posv == 0) {
595
+ return error_condition(_Errval, _STD system_category());
596
+ } else {
597
+ return error_condition(_Posv, _STD generic_category());
598
+ }
599
+ }
600
+ };
601
+
602
+ // _Immortalize_memcpy_image is used to provide a nonstandard guarantee.
603
+ // Specifically, we want the error category objects returned from things like std::system_category() to always
604
+ // be available, even during DLL unload (otherwise, <system_error> would be a huge regression vs. legacy error codes).
605
+ // Moreover, we need to be very conservative in the runtime support we request. Thus, we have these constraints:
606
+ //
607
+ // * can't use magic statics in standard modes, because that would inject a .TLS section into all binaries using
608
+ // <system_error> and would likely put borderline programs over the TLS slot count limit, and would destroy the
609
+ // variable during DLL unload
610
+ // * can't declare the error_category as an ordinary constexpr variable for most compilers before C++20, because
611
+ // error_category has a virtual destructor
612
+ // * can't declare the error_category as an ordinary non-constexpr variable even with a constexpr constructor, because
613
+ // the compiler will emit code to destroy it which invalidates its use in these DLL shutdown scenarios
614
+ //
615
+ // As a result, we use a workaround: We create an atomic<uintptr_t> array to store the error_category instance, test
616
+ // if the first atomic is nonzero (acquire), and if so, we know we have formed the instance and can return a
617
+ // reinterpreted pointer to that storage. If the first atomic is zero, we write all except the first atomic (relaxed),
618
+ // then write the first one as a store-release. (The non-first values are transferred to other threads in the
619
+ // release sequence).
620
+ //
621
+ // Acknowledged undefined and implementation-defined behavior happening here:
622
+ // * There is a data race when filling in the values other than the first atomic; this is OK on all hardware we target
623
+ // because the racing threads are all writing identical values that never change afterwards.
624
+ // * We are reaching into the layout of atomic<uintptr_t>[N] and assuming we can reinterpret that as some other type.
625
+ // * We are assuming that virtual functions are implemented with a vfptr located as the first member of an object.
626
+ // (there are probably others)
627
+ //
628
+ // Inspecting the resulting assembly of any callers of _Immortalize_memcpy_image is recommended.
629
+ //
630
+
631
+ #if defined(_M_CEE_PURE)
632
+ // /clr:pure doesn't ever do constant initialization, so rely on the CLR and magic statics
633
+ template <class _Ty>
634
+ _NODISCARD const _Ty& _Immortalize_memcpy_image() noexcept {
635
+ /* MAGIC */ static _Immortalizer_impl<_Ty> _Static;
636
+ return _Static._Storage;
637
+ }
638
+ #elif _HAS_CXX20
639
+ template <class _Ty>
640
+ _NODISCARD const _Ty& _Immortalize_memcpy_image() noexcept {
641
+ static constexpr _Ty _Static;
642
+ return _Static;
643
+ }
644
+ #elif defined(__clang__)
645
+ template <class _Ty>
646
+ _NODISCARD const _Ty& _Immortalize_memcpy_image() noexcept {
647
+ [[_Clang::__require_constant_initialization__]] static _Ty _Static;
648
+ return _Static;
649
+ }
650
+ #elif !defined(_M_CEE)
651
+ template <class _Ty>
652
+ struct _Constexpr_immortalize_impl {
653
+ union {
654
+ _Ty _Storage;
655
+ };
656
+
657
+ constexpr _Constexpr_immortalize_impl() noexcept : _Storage{} {}
658
+
659
+ _Constexpr_immortalize_impl(const _Constexpr_immortalize_impl&) = delete;
660
+ _Constexpr_immortalize_impl& operator=(const _Constexpr_immortalize_impl&) = delete;
661
+
662
+ _MSVC_NOOP_DTOR ~_Constexpr_immortalize_impl() {
663
+ // do nothing, allowing _Ty to be used during shutdown
664
+ }
665
+ };
666
+
667
+ template <class _Ty>
668
+ _NODISCARD const _Ty& _Immortalize_memcpy_image() noexcept {
669
+ static _Constexpr_immortalize_impl<_Ty> _Static;
670
+ return _Static._Storage;
671
+ }
672
+ #else // ^^^ !defined(_M_CEE) / defined(_M_CEE), TRANSITION, VSO-1153256 vvv
673
+ template <class _Ty>
674
+ _NODISCARD const _Ty& _Immortalize_memcpy_image() noexcept {
675
+ // return reference to a memcpy'd default-initialized _Ty
676
+ // pre: A default-initialized _Ty sets the first pointer-sized field to nonzero
677
+ constexpr size_t _Pointer_count = sizeof(_Ty) / sizeof(uintptr_t);
678
+ static atomic<uintptr_t> _Storage[_Pointer_count];
679
+ static_assert(sizeof(_Storage) == sizeof(_Ty), "Bad storage size");
680
+ static_assert(alignof(decltype(_Storage)) >= alignof(_Ty), "Bad alignment assumptions");
681
+ if (_Storage[0].load(memory_order_acquire) != 0) {
682
+ return reinterpret_cast<_Ty&>(_Storage);
683
+ }
684
+
685
+ const _Ty _Target;
686
+ const auto _Target_iter = reinterpret_cast<const uintptr_t*>(_STD addressof(_Target));
687
+ _CSTD memcpy(_Storage + 1, _Target_iter + 1, sizeof(_Ty) - sizeof(uintptr_t));
688
+ _Storage[0].store(_Target_iter[0], memory_order_release);
689
+ return reinterpret_cast<_Ty&>(_Storage);
690
+ }
691
+ #endif // ^^^ defined(_M_CEE), TRANSITION, VSO-1153256 ^^^
692
+
693
+ _EXPORT_STD _NODISCARD inline const error_category& generic_category() noexcept {
694
+ return _Immortalize_memcpy_image<_Generic_error_category>();
695
+ }
696
+
697
+ _EXPORT_STD _NODISCARD inline const error_category& iostream_category() noexcept {
698
+ return _Immortalize_memcpy_image<_Iostream_error_category2>();
699
+ }
700
+
701
+ _EXPORT_STD _NODISCARD inline const error_category& system_category() noexcept {
702
+ return _Immortalize_memcpy_image<_System_error_category>();
703
+ }
704
+ _STD_END
705
+
706
+ #if _HAS_CXX17
707
+ extern "C" {
708
+ enum class __std_win_error : unsigned long;
709
+ } // extern "C"
710
+
711
+ _STD_BEGIN
712
+ // We would really love to use the proper way of building error_code by specializing
713
+ // is_error_code_enum and make_error_code for __std_win_error, but because:
714
+ // 1. We would like to keep the definition of __std_win_error in xfilesystem_abi.h
715
+ // 2. and xfilesystem_abi.h cannot include <system_error>
716
+ // 3. and specialization of is_error_code_enum and overload of make_error_code
717
+ // need to be kept together with the enum (see limerick in N4950 [temp.expl.spec]/8)
718
+ // we resort to using this _Make_ec helper.
719
+ _NODISCARD inline error_code _Make_ec(__std_win_error _Errno) noexcept { // make an error_code
720
+ return {static_cast<int>(_Errno), _STD system_category()};
721
+ }
722
+
723
+ [[noreturn]] inline void _Throw_system_error_from_std_win_error(const __std_win_error _Errno) {
724
+ _THROW(system_error{_Make_ec(_Errno)});
725
+ }
726
+ _STD_END
727
+ #endif // _HAS_CXX17
728
+
729
+ // TRANSITION, non-_Ugly attribute tokens
730
+ #pragma pop_macro("noop_dtor")
731
+ #pragma pop_macro("msvc")
732
+
733
+ #pragma pop_macro("new")
734
+ _STL_RESTORE_CLANG_WARNINGS
735
+ #pragma warning(pop)
736
+ #pragma pack(pop)
737
+ #endif // _STL_COMPILER_PREPROCESSOR
738
+ #endif // _SYSTEM_ERROR_
miniMSVC/VC/Tools/MSVC/14.42.34433/include/thread ADDED
@@ -0,0 +1,446 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // thread standard header
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ #ifndef _THREAD_
7
+ #define _THREAD_
8
+ #include <yvals_core.h>
9
+ #if _STL_COMPILER_PREPROCESSOR
10
+
11
+ #ifdef _M_CEE_PURE
12
+ #error <thread> is not supported when compiling with /clr:pure.
13
+ #endif // defined(_M_CEE_PURE)
14
+
15
+ #include <__msvc_chrono.hpp>
16
+ #include <memory>
17
+ #include <process.h>
18
+ #include <tuple>
19
+ #include <xthreads.h>
20
+
21
+ #if _HAS_CXX20
22
+ #include <compare>
23
+ #include <stop_token>
24
+ #endif // _HAS_CXX20
25
+
26
+ #if _HAS_CXX23
27
+ #include <format>
28
+ #endif // _HAS_CXX23
29
+
30
+ #pragma pack(push, _CRT_PACKING)
31
+ #pragma warning(push, _STL_WARNING_LEVEL)
32
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
33
+ _STL_DISABLE_CLANG_WARNINGS
34
+ #pragma push_macro("new")
35
+ #undef new
36
+
37
+ _STD_BEGIN
38
+ #if _HAS_CXX20
39
+ _EXPORT_STD class jthread;
40
+ #endif // _HAS_CXX20
41
+
42
+ _EXPORT_STD class thread { // class for observing and managing threads
43
+ public:
44
+ class id;
45
+
46
+ using native_handle_type = void*;
47
+
48
+ thread() noexcept : _Thr{} {}
49
+
50
+ private:
51
+ #if _HAS_CXX20
52
+ friend jthread;
53
+ #endif // _HAS_CXX20
54
+
55
+ template <class _Tuple, size_t... _Indices>
56
+ static unsigned int __stdcall _Invoke(void* _RawVals) noexcept /* terminates */ {
57
+ // adapt invoke of user's callable object to _beginthreadex's thread procedure
58
+ const unique_ptr<_Tuple> _FnVals(static_cast<_Tuple*>(_RawVals));
59
+ _Tuple& _Tup = *_FnVals.get(); // avoid ADL, handle incomplete types
60
+ _STD invoke(_STD move(_STD get<_Indices>(_Tup))...);
61
+ _Cnd_do_broadcast_at_thread_exit(); // TRANSITION, ABI
62
+ return 0;
63
+ }
64
+
65
+ template <class _Tuple, size_t... _Indices>
66
+ _NODISCARD static constexpr auto _Get_invoke(index_sequence<_Indices...>) noexcept {
67
+ return &_Invoke<_Tuple, _Indices...>;
68
+ }
69
+
70
+ #pragma warning(push) // pointer or reference to potentially throwing function passed to 'extern "C"' function under
71
+ #pragma warning(disable : 5039) // -EHc. Undefined behavior may occur if this function throws an exception. (/Wall)
72
+ template <class _Fn, class... _Args>
73
+ void _Start(_Fn&& _Fx, _Args&&... _Ax) {
74
+ using _Tuple = tuple<decay_t<_Fn>, decay_t<_Args>...>;
75
+ auto _Decay_copied = _STD make_unique<_Tuple>(_STD forward<_Fn>(_Fx), _STD forward<_Args>(_Ax)...);
76
+ constexpr auto _Invoker_proc = _Get_invoke<_Tuple>(make_index_sequence<1 + sizeof...(_Args)>{});
77
+
78
+ _Thr._Hnd =
79
+ reinterpret_cast<void*>(_CSTD _beginthreadex(nullptr, 0, _Invoker_proc, _Decay_copied.get(), 0, &_Thr._Id));
80
+
81
+ if (_Thr._Hnd) { // ownership transferred to the thread
82
+ (void) _Decay_copied.release();
83
+ } else { // failed to start thread
84
+ _Thr._Id = 0;
85
+ _Throw_Cpp_error(_RESOURCE_UNAVAILABLE_TRY_AGAIN);
86
+ }
87
+ }
88
+ #pragma warning(pop)
89
+
90
+ public:
91
+ template <class _Fn, class... _Args, enable_if_t<!is_same_v<_Remove_cvref_t<_Fn>, thread>, int> = 0>
92
+ _NODISCARD_CTOR_THREAD explicit thread(_Fn&& _Fx, _Args&&... _Ax) {
93
+ _Start(_STD forward<_Fn>(_Fx), _STD forward<_Args>(_Ax)...);
94
+ }
95
+
96
+ ~thread() noexcept {
97
+ if (joinable()) {
98
+ _STD terminate(); // per N4950 [thread.thread.destr]/1
99
+ }
100
+ }
101
+
102
+ thread(thread&& _Other) noexcept : _Thr(_STD exchange(_Other._Thr, {})) {}
103
+
104
+ thread& operator=(thread&& _Other) noexcept {
105
+ if (joinable()) {
106
+ _STD terminate(); // per N4950 [thread.thread.assign]/1
107
+ }
108
+
109
+ _Thr = _STD exchange(_Other._Thr, {});
110
+ return *this;
111
+ }
112
+
113
+ thread(const thread&) = delete;
114
+ thread& operator=(const thread&) = delete;
115
+
116
+ void swap(thread& _Other) noexcept {
117
+ _STD swap(_Thr, _Other._Thr);
118
+ }
119
+
120
+ _NODISCARD bool joinable() const noexcept {
121
+ return _Thr._Id != 0;
122
+ }
123
+
124
+ void join() {
125
+ if (!joinable()) {
126
+ _Throw_Cpp_error(_INVALID_ARGUMENT);
127
+ }
128
+
129
+ if (_Thr._Id == _Thrd_id()) {
130
+ _Throw_Cpp_error(_RESOURCE_DEADLOCK_WOULD_OCCUR);
131
+ }
132
+
133
+ if (_Thrd_join(_Thr, nullptr) != _Thrd_result::_Success) {
134
+ _Throw_Cpp_error(_NO_SUCH_PROCESS);
135
+ }
136
+
137
+ _Thr = {};
138
+ }
139
+
140
+ void detach() {
141
+ if (!joinable()) {
142
+ _Throw_Cpp_error(_INVALID_ARGUMENT);
143
+ }
144
+
145
+ if (_Thrd_detach(_Thr) != _Thrd_result::_Success) {
146
+ _Throw_Cpp_error(_INVALID_ARGUMENT);
147
+ }
148
+
149
+ _Thr = {};
150
+ }
151
+
152
+ _NODISCARD id get_id() const noexcept;
153
+
154
+ _NODISCARD native_handle_type native_handle() noexcept /* strengthened */ { // return Win32 HANDLE as void *
155
+ return _Thr._Hnd;
156
+ }
157
+
158
+ _NODISCARD static unsigned int hardware_concurrency() noexcept {
159
+ return _Thrd_hardware_concurrency();
160
+ }
161
+
162
+ private:
163
+ _Thrd_t _Thr;
164
+ };
165
+
166
+ template <class _Rep, class _Period>
167
+ _NODISCARD auto _To_absolute_time(const chrono::duration<_Rep, _Period>& _Rel_time) noexcept {
168
+ constexpr auto _Zero = chrono::duration<_Rep, _Period>::zero();
169
+ const auto _Now = chrono::steady_clock::now();
170
+ decltype(_Now + _Rel_time) _Abs_time = _Now; // return common type
171
+ if (_Rel_time > _Zero) {
172
+ constexpr auto _Forever = (chrono::steady_clock::time_point::max)();
173
+ if (_Abs_time < _Forever - _Rel_time) {
174
+ _Abs_time += _Rel_time;
175
+ } else {
176
+ _Abs_time = _Forever;
177
+ }
178
+ }
179
+ return _Abs_time;
180
+ }
181
+
182
+ struct _Clamped_rel_time_ms_count_result {
183
+ unsigned long _Count;
184
+ bool _Clamped;
185
+ };
186
+
187
+ template <class _Duration>
188
+ _NODISCARD _Clamped_rel_time_ms_count_result _Clamped_rel_time_ms_count(const _Duration& _Rel) {
189
+ // _Clamp must be less than 2^32 - 1 (INFINITE) milliseconds, but is otherwise arbitrary.
190
+ constexpr chrono::milliseconds _Clamp{chrono::hours{24}};
191
+
192
+ if (_Rel > _Clamp) {
193
+ return {static_cast<unsigned long>(_Clamp.count()), true};
194
+ } else {
195
+ const auto _Rel_ms = chrono::ceil<chrono::milliseconds>(_Rel);
196
+ return {static_cast<unsigned long>(_Rel_ms.count()), false};
197
+ }
198
+ }
199
+
200
+ namespace this_thread {
201
+ _EXPORT_STD _NODISCARD thread::id get_id() noexcept;
202
+
203
+ _EXPORT_STD inline void yield() noexcept {
204
+ _Thrd_yield();
205
+ }
206
+
207
+ _EXPORT_STD template <class _Clock, class _Duration>
208
+ void sleep_until(const chrono::time_point<_Clock, _Duration>& _Abs_time) {
209
+ static_assert(chrono::_Is_clock_v<_Clock>, "Clock type required");
210
+ for (;;) {
211
+ const auto _Now = _Clock::now();
212
+ if (_Abs_time <= _Now) {
213
+ return;
214
+ }
215
+
216
+ const unsigned long _Rel_ms_count = _Clamped_rel_time_ms_count(_Abs_time - _Now)._Count;
217
+ _Thrd_sleep_for(_Rel_ms_count);
218
+ }
219
+ }
220
+
221
+ _EXPORT_STD template <class _Rep, class _Period>
222
+ void sleep_for(const chrono::duration<_Rep, _Period>& _Rel_time) {
223
+ sleep_until(_To_absolute_time(_Rel_time));
224
+ }
225
+ } // namespace this_thread
226
+
227
+ class thread::id { // thread id
228
+ public:
229
+ id() noexcept = default; // id for no thread
230
+
231
+ #if _HAS_CXX23
232
+ _NODISCARD _Thrd_id_t _Get_underlying_id() const noexcept {
233
+ return _Id;
234
+ }
235
+ #endif // _HAS_CXX23
236
+
237
+ private:
238
+ explicit id(_Thrd_id_t _Other_id) noexcept : _Id(_Other_id) {}
239
+
240
+ _Thrd_id_t _Id = 0;
241
+
242
+ friend thread::id thread::get_id() const noexcept;
243
+ friend thread::id this_thread::get_id() noexcept;
244
+ friend bool operator==(thread::id _Left, thread::id _Right) noexcept;
245
+ #if _HAS_CXX20
246
+ friend strong_ordering operator<=>(thread::id _Left, thread::id _Right) noexcept;
247
+ #else // ^^^ _HAS_CXX20 / !_HAS_CXX20 vvv
248
+ friend bool operator<(thread::id _Left, thread::id _Right) noexcept;
249
+ #endif // ^^^ !_HAS_CXX20 ^^^
250
+ template <class _Ch, class _Tr>
251
+ friend basic_ostream<_Ch, _Tr>& operator<<(basic_ostream<_Ch, _Tr>& _Str, thread::id _Id);
252
+ friend hash<thread::id>;
253
+ };
254
+
255
+ _NODISCARD inline thread::id thread::get_id() const noexcept {
256
+ return thread::id{_Thr._Id};
257
+ }
258
+
259
+ _EXPORT_STD _NODISCARD inline thread::id this_thread::get_id() noexcept {
260
+ return thread::id{_Thrd_id()};
261
+ }
262
+
263
+ _EXPORT_STD inline void swap(thread& _Left, thread& _Right) noexcept {
264
+ _Left.swap(_Right);
265
+ }
266
+
267
+ _EXPORT_STD _NODISCARD inline bool operator==(thread::id _Left, thread::id _Right) noexcept {
268
+ return _Left._Id == _Right._Id;
269
+ }
270
+
271
+ #if _HAS_CXX20
272
+ _EXPORT_STD _NODISCARD inline strong_ordering operator<=>(thread::id _Left, thread::id _Right) noexcept {
273
+ return _Left._Id <=> _Right._Id;
274
+ }
275
+ #else // ^^^ _HAS_CXX20 / !_HAS_CXX20 vvv
276
+ _NODISCARD inline bool operator!=(thread::id _Left, thread::id _Right) noexcept {
277
+ return !(_Left == _Right);
278
+ }
279
+
280
+ _NODISCARD inline bool operator<(thread::id _Left, thread::id _Right) noexcept {
281
+ return _Left._Id < _Right._Id;
282
+ }
283
+
284
+ _NODISCARD inline bool operator<=(thread::id _Left, thread::id _Right) noexcept {
285
+ return !(_Right < _Left);
286
+ }
287
+
288
+ _NODISCARD inline bool operator>(thread::id _Left, thread::id _Right) noexcept {
289
+ return _Right < _Left;
290
+ }
291
+
292
+ _NODISCARD inline bool operator>=(thread::id _Left, thread::id _Right) noexcept {
293
+ return !(_Left < _Right);
294
+ }
295
+ #endif // ^^^ !_HAS_CXX20 ^^^
296
+
297
+ _EXPORT_STD template <class _Ch, class _Tr>
298
+ basic_ostream<_Ch, _Tr>& operator<<(basic_ostream<_Ch, _Tr>& _Str, thread::id _Id) {
299
+ _STL_INTERNAL_STATIC_ASSERT(sizeof(_Thrd_id_t) == 4);
300
+ _Ch _Buff[11]; // can hold 2^32 - 1, plus terminating null
301
+ _Ch* _RNext = _STD end(_Buff);
302
+ *--_RNext = static_cast<_Ch>('\0');
303
+ _RNext = _STD _UIntegral_to_buff(_RNext, _Id._Id);
304
+ return _Str << _RNext;
305
+ }
306
+
307
+ #if _HAS_CXX23
308
+ // Per LWG-3997, `_CharT` in library-provided `formatter` specializations is
309
+ // constrained to character types supported by `format`.
310
+ template <_Format_supported_charT _CharT>
311
+ struct formatter<thread::id, _CharT> {
312
+ private:
313
+ using _Pc = basic_format_parse_context<_CharT>;
314
+
315
+ public:
316
+ constexpr _Pc::iterator parse(_Pc& _Parse_ctx) {
317
+ return _Impl._Parse(_Parse_ctx);
318
+ }
319
+
320
+ template <class _FormatContext>
321
+ _FormatContext::iterator format(thread::id _Val, _FormatContext& _Format_ctx) const {
322
+ _STL_INTERNAL_STATIC_ASSERT(sizeof(_Thrd_id_t) == 4);
323
+ _CharT _Buff[10]; // can hold 2^32 - 1
324
+ _CharT* const _Last = _STD end(_Buff);
325
+ const _CharT* const _First = _STD _UIntegral_to_buff(_Last, _Val._Get_underlying_id());
326
+ return _Impl._Format(_Format_ctx, static_cast<int>(_Last - _First), _Fmt_align::_Right,
327
+ [&](_FormatContext::iterator _Out) { return _RANGES copy(_First, _Last, _STD move(_Out)).out; });
328
+ }
329
+
330
+ private:
331
+ _Fill_align_and_width_formatter<_CharT> _Impl;
332
+ };
333
+ #endif // _HAS_CXX23
334
+
335
+ template <>
336
+ struct hash<thread::id> {
337
+ using _ARGUMENT_TYPE_NAME _CXX17_DEPRECATE_ADAPTOR_TYPEDEFS = thread::id;
338
+ using _RESULT_TYPE_NAME _CXX17_DEPRECATE_ADAPTOR_TYPEDEFS = size_t;
339
+
340
+ _NODISCARD _STATIC_CALL_OPERATOR size_t operator()(const thread::id _Keyval) _CONST_CALL_OPERATOR noexcept {
341
+ return _Hash_representation(_Keyval._Id);
342
+ }
343
+ };
344
+
345
+ #if _HAS_CXX20
346
+ _EXPORT_STD class jthread {
347
+ public:
348
+ using id = thread::id;
349
+ using native_handle_type = thread::native_handle_type;
350
+
351
+ jthread() noexcept : _Impl{}, _Ssource{nostopstate} {}
352
+
353
+ template <class _Fn, class... _Args>
354
+ requires (!is_same_v<remove_cvref_t<_Fn>, jthread>)
355
+ _NODISCARD_CTOR_JTHREAD explicit jthread(_Fn&& _Fx, _Args&&... _Ax) {
356
+ if constexpr (is_invocable_v<decay_t<_Fn>, stop_token, decay_t<_Args>...>) {
357
+ _Impl._Start(_STD forward<_Fn>(_Fx), _Ssource.get_token(), _STD forward<_Args>(_Ax)...);
358
+ } else {
359
+ _Impl._Start(_STD forward<_Fn>(_Fx), _STD forward<_Args>(_Ax)...);
360
+ }
361
+ }
362
+
363
+ ~jthread() {
364
+ _Try_cancel_and_join();
365
+ }
366
+
367
+ jthread(const jthread&) = delete;
368
+ jthread(jthread&&) noexcept = default;
369
+ jthread& operator=(const jthread&) = delete;
370
+
371
+ jthread& operator=(jthread&& _Other) noexcept {
372
+ if (this == _STD addressof(_Other)) {
373
+ return *this;
374
+ }
375
+
376
+ _Try_cancel_and_join();
377
+ _Impl = _STD move(_Other._Impl);
378
+ _Ssource = _STD move(_Other._Ssource);
379
+ return *this;
380
+ }
381
+
382
+ void swap(jthread& _Other) noexcept {
383
+ _Impl.swap(_Other._Impl);
384
+ _Ssource.swap(_Other._Ssource);
385
+ }
386
+
387
+ _NODISCARD bool joinable() const noexcept {
388
+ return _Impl.joinable();
389
+ }
390
+
391
+ void join() {
392
+ _Impl.join();
393
+ }
394
+
395
+ void detach() {
396
+ _Impl.detach();
397
+ }
398
+
399
+ _NODISCARD id get_id() const noexcept {
400
+ return _Impl.get_id();
401
+ }
402
+
403
+ _NODISCARD native_handle_type native_handle() noexcept /* strengthened */ {
404
+ return _Impl.native_handle();
405
+ }
406
+
407
+ _NODISCARD stop_source get_stop_source() noexcept {
408
+ return _Ssource;
409
+ }
410
+
411
+ _NODISCARD stop_token get_stop_token() const noexcept {
412
+ return _Ssource.get_token();
413
+ }
414
+
415
+ bool request_stop() noexcept {
416
+ return _Ssource.request_stop();
417
+ }
418
+
419
+ friend void swap(jthread& _Lhs, jthread& _Rhs) noexcept {
420
+ _Lhs.swap(_Rhs);
421
+ }
422
+
423
+ _NODISCARD static unsigned int hardware_concurrency() noexcept {
424
+ return thread::hardware_concurrency();
425
+ }
426
+
427
+ private:
428
+ void _Try_cancel_and_join() noexcept {
429
+ if (_Impl.joinable()) {
430
+ _Ssource.request_stop();
431
+ _Impl.join();
432
+ }
433
+ }
434
+
435
+ thread _Impl;
436
+ stop_source _Ssource;
437
+ };
438
+ #endif // _HAS_CXX20
439
+ _STD_END
440
+
441
+ #pragma pop_macro("new")
442
+ _STL_RESTORE_CLANG_WARNINGS
443
+ #pragma warning(pop)
444
+ #pragma pack(pop)
445
+ #endif // _STL_COMPILER_PREPROCESSOR
446
+ #endif // _THREAD_
miniMSVC/VC/Tools/MSVC/14.42.34433/include/threads.h ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation. All rights reserved.
2
+
3
+ #pragma once
4
+ #define _THREADS_H
5
+
6
+ #include <vcruntime.h>
7
+ #include <corecrt.h>
8
+ #include <stdint.h>
9
+ #include <time.h>
10
+
11
+ #pragma warning(push)
12
+ #pragma warning(disable : _UCRT_DISABLED_WARNINGS)
13
+ _UCRT_DISABLE_CLANG_WARNINGS
14
+ _CRT_BEGIN_C_HEADER
15
+
16
+ #ifndef __cplusplus
17
+ #define thread_local _Thread_local
18
+ #endif
19
+
20
+ enum {
21
+ mtx_plain = 0,
22
+ mtx_recursive = 1 << 0,
23
+ mtx_timed = 1 << 1,
24
+ };
25
+
26
+ typedef struct {
27
+ uintptr_t _Type;
28
+ void* _Ptr;
29
+ void* _Cv;
30
+ uint32_t _Owner;
31
+ uint32_t _Cnt;
32
+ } mtx_t;
33
+
34
+ void __cdecl mtx_destroy(mtx_t* _Mtx);
35
+ int __cdecl mtx_init(_Out_ mtx_t* _Mtx, int _Type);
36
+ _Acquires_lock_(*_Mtx) int __cdecl mtx_lock(mtx_t* _Mtx);
37
+
38
+ int __cdecl _mtx_timedlock32(
39
+ mtx_t* __restrict _Mtx, const struct _timespec32* __restrict _Ts);
40
+ int __cdecl _mtx_timedlock64(
41
+ mtx_t* __restrict _Mtx, const struct _timespec64* __restrict _Ts);
42
+
43
+ #ifndef _CRT_NO_TIME_T
44
+ #ifdef _USE_32BIT_TIME_T
45
+ static inline int __cdecl mtx_timedlock(mtx_t* __restrict _Mtx, const struct timespec* __restrict _Ts) {
46
+ return _mtx_timedlock32(_Mtx, (struct _timespec32*) _Ts);
47
+ }
48
+ #else // ^^^ _CRT_32BIT_TIME_T / vvv 64-bit
49
+ static inline int __cdecl mtx_timedlock(mtx_t* __restrict _Mtx, const struct timespec* __restrict _Ts) {
50
+ return _mtx_timedlock64(_Mtx, (struct _timespec64*) _Ts);
51
+ }
52
+ #endif // _CRT_32BIT_TIME_T
53
+ #endif // _CRT_NO_TIME_T
54
+
55
+ int __cdecl mtx_trylock(mtx_t* _Mtx);
56
+
57
+ _Releases_lock_(*_Mtx) int __cdecl mtx_unlock(mtx_t* _Mtx);
58
+
59
+ typedef struct {
60
+ void* _Ptr;
61
+ } cnd_t;
62
+
63
+ int __cdecl cnd_broadcast(cnd_t* _Cond);
64
+ void __cdecl cnd_destroy(cnd_t* _Cond);
65
+ int __cdecl cnd_init(_Out_ cnd_t* _Cond);
66
+ int __cdecl cnd_signal(cnd_t* _Cond);
67
+ int __cdecl _cnd_timedwait32(cnd_t* _Cond, mtx_t* _Mtx, const struct _timespec32* _Ts);
68
+ int __cdecl _cnd_timedwait64(cnd_t* _Cond, mtx_t* _Mtx, const struct _timespec64* _Ts);
69
+
70
+ #ifndef _CRT_NO_TIME_T
71
+ #ifdef _USE_32BIT_TIME_T
72
+ static inline int __cdecl cnd_timedwait(cnd_t* _Cond, mtx_t* _Mtx, const struct timespec* _Ts) {
73
+ return _cnd_timedwait32(_Cond, _Mtx, (struct _timespec32*) _Ts);
74
+ }
75
+ #else
76
+ static inline int __cdecl cnd_timedwait(cnd_t* _Cond, mtx_t* _Mtx, const struct timespec* _Ts) {
77
+ return _cnd_timedwait64(_Cond, _Mtx, (struct _timespec64*) _Ts);
78
+ }
79
+ #endif
80
+ #endif // _CRT_NO_TIME_T
81
+ int cnd_wait(cnd_t* _Cond, mtx_t* _Mtx);
82
+
83
+
84
+ typedef struct {
85
+ void* _Handle;
86
+ uint32_t _Tid;
87
+ } thrd_t;
88
+
89
+ enum { thrd_success, thrd_nomem, thrd_timedout, thrd_busy, thrd_error };
90
+
91
+ typedef int(__cdecl* thrd_start_t)(void*);
92
+
93
+ _Success_(return == thrd_success) int __cdecl thrd_create(_Out_ thrd_t* _Thr, thrd_start_t _Func, void* _Arg);
94
+ thrd_t __cdecl thrd_current(void);
95
+ int __cdecl thrd_detach(thrd_t _Thr);
96
+ int __cdecl thrd_equal(thrd_t _Thr0, thrd_t _Thr1);
97
+
98
+ #ifdef __cplusplus // TRANSITION, [[_Noreturn]]
99
+ [[noreturn]] void __cdecl thrd_exit(int _Res);
100
+ #else
101
+ _Noreturn void __cdecl thrd_exit(int _Res);
102
+ #endif
103
+
104
+ int __cdecl thrd_join(thrd_t _Thr, int* _Res);
105
+ int __cdecl _thrd_sleep32(
106
+ const struct _timespec32* duration, struct _timespec32* remaining);
107
+ int __cdecl _thrd_sleep64(
108
+ const struct _timespec64* duration, struct _timespec64* remaining);
109
+
110
+ #ifndef _CRT_NO_TIME_T
111
+ #ifdef _USE_32BIT_TIME_T
112
+ static inline int __cdecl thrd_sleep(const struct timespec* duration, struct timespec* remaining) {
113
+ return _thrd_sleep32((struct _timespec32*) duration, (struct _timespec32*) remaining);
114
+ }
115
+ #else
116
+ static inline int __cdecl thrd_sleep(const struct timespec* duration, struct timespec* remaining) {
117
+ return _thrd_sleep64((struct _timespec64*) duration, (struct _timespec64*) remaining);
118
+ }
119
+ #endif
120
+ #endif
121
+
122
+ void __cdecl thrd_yield(void);
123
+
124
+ #define TSS_DTOR_ITERATIONS 1
125
+ typedef struct {
126
+ uint32_t _Idx;
127
+ } tss_t;
128
+
129
+ typedef void (*tss_dtor_t)(void*);
130
+
131
+ int __cdecl tss_create(tss_t* _Key, tss_dtor_t _Dtor);
132
+ void __cdecl tss_delete(tss_t _Key);
133
+ void* __cdecl tss_get(tss_t _Key);
134
+ int __cdecl tss_set(tss_t _Key, void* _Val);
135
+
136
+ typedef struct {
137
+ void* _Opaque;
138
+ } once_flag;
139
+
140
+ void __cdecl call_once(once_flag* _Flag, void(*_Func)(void));
141
+
142
+ #define ONCE_FLAG_INIT { 0 }
143
+
144
+ _CRT_END_C_HEADER
145
+ _UCRT_RESTORE_CLANG_WARNINGS
146
+ #pragma warning(pop) // _UCRT_DISABLED_WARNINGS
miniMSVC/VC/Tools/MSVC/14.42.34433/include/tmmintrin.h ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright (C) 1985-2015 Intel Corporation.
3
+ *
4
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+ */
6
+
7
+ #pragma once
8
+
9
+ #if !defined(_M_IX86) && !defined(_M_X64) && !(defined(_M_ARM64) && defined(USE_SOFT_INTRINSICS))
10
+ #error This header is specific to X86, X64, ARM64, and ARM64EC targets
11
+ #endif
12
+
13
+ #if (defined(_M_ARM64) || defined(_M_ARM64EC)) && !defined(__INTRIN_H_)
14
+ #error this header should only be included through <intrin.h>
15
+ #endif
16
+
17
+ #ifndef _INCLUDED_TMM
18
+ #define _INCLUDED_TMM
19
+ #ifndef __midl
20
+
21
+ #if defined (_M_CEE_PURE)
22
+ #error ERROR: XMM intrinsics not supported in the pure mode!
23
+ #else /* defined (_M_CEE_PURE) */
24
+
25
+ #include <pmmintrin.h>
26
+
27
+ #ifdef _MM2_FUNCTIONALITY
28
+ /* support old notation */
29
+ #ifndef _MM_FUNCTIONALITY
30
+ #define _MM_FUNCTIONALITY
31
+ #endif /* _MM_FUNCTIONALITY */
32
+ #endif /* _MM2_FUNCTIONALITY */
33
+
34
+ #ifdef __cplusplus
35
+ extern "C" {
36
+ #endif /* __cplusplus */
37
+
38
+ // Horizontal Add: add pairs of adjacent words or double words.
39
+ // Each field in the result is the sum of two adjacent fields
40
+ // from the arguments, with the lower result fields coming from
41
+ // the first argument and the upper result fields coming from
42
+ // the second argument. The "hadds" forms saturate the signed
43
+ // addition rather than wrapping.
44
+
45
+ extern __m128i _mm_hadd_epi16 (__m128i, __m128i);
46
+ extern __m128i _mm_hadd_epi32 (__m128i, __m128i);
47
+ extern __m128i _mm_hadds_epi16 (__m128i, __m128i);
48
+
49
+ #if defined(_M_IX86)
50
+ extern __m64 _mm_hadd_pi16 (__m64, __m64);
51
+ extern __m64 _mm_hadd_pi32 (__m64, __m64);
52
+ extern __m64 _mm_hadds_pi16 (__m64, __m64);
53
+ #endif
54
+
55
+ // Horizontal Subtract: subtract pairs of adjacent words or double
56
+ // words. Each field in the result is the difference of two adjacent
57
+ // fields from the arguments, where the upper field is subtracted
58
+ // from the lower field. The lower result fields come from
59
+ // the first argument and the upper result fields come from
60
+ // the second argument. The "hsubs" forms saturate the signed
61
+ // subtraction rather than wrapping.
62
+
63
+ extern __m128i _mm_hsub_epi16 (__m128i, __m128i);
64
+ extern __m128i _mm_hsub_epi32 (__m128i, __m128i);
65
+ extern __m128i _mm_hsubs_epi16 (__m128i, __m128i);
66
+
67
+ #if defined(_M_IX86)
68
+ extern __m64 _mm_hsub_pi16 (__m64, __m64);
69
+ extern __m64 _mm_hsub_pi32 (__m64, __m64);
70
+ extern __m64 _mm_hsubs_pi16 (__m64, __m64);
71
+ #endif
72
+
73
+ // Multiply unsigned bytes by signed bytes and sum the word
74
+ // results in pairs with saturation. Each byte of the first
75
+ // argument is zero-extended to a word field and each byte
76
+ // of the second argument is sign-extended to a word field,
77
+ // then each pair of words is multiplied together to give
78
+ // signed word intermediate results. Pairs of words from
79
+ // that result are added horizontally with saturation
80
+ // to give the final result.
81
+
82
+ extern __m128i _mm_maddubs_epi16 (__m128i, __m128i);
83
+
84
+ #if defined(_M_IX86)
85
+ extern __m64 _mm_maddubs_pi16 (__m64, __m64);
86
+ #endif
87
+
88
+ // Packed multiply high integers with round and scaling,
89
+ // {X,}MM2/m{128,64} (b) to {X,}MM1 (a).
90
+
91
+ extern __m128i _mm_mulhrs_epi16 (__m128i, __m128i);
92
+
93
+ #if defined(_M_IX86)
94
+ extern __m64 _mm_mulhrs_pi16 (__m64, __m64);
95
+ #endif
96
+
97
+ // Packed shuffle bytes
98
+ // {X,}MM2/m{128,64} (b) by {X,}MM1 (a).
99
+
100
+ extern __m128i _mm_shuffle_epi8 (__m128i, __m128i);
101
+
102
+ #if defined(_M_IX86)
103
+ extern __m64 _mm_shuffle_pi8 (__m64, __m64);
104
+ #endif
105
+
106
+ // Packed byte, word, double word sign, {X,}MM2/m{128,64} (b) to
107
+ // {X,}MM1 (a).
108
+
109
+ extern __m128i _mm_sign_epi8 (__m128i, __m128i);
110
+ extern __m128i _mm_sign_epi16 (__m128i, __m128i);
111
+ extern __m128i _mm_sign_epi32 (__m128i, __m128i);
112
+
113
+ #if defined(_M_IX86)
114
+ extern __m64 _mm_sign_pi8 (__m64, __m64);
115
+ extern __m64 _mm_sign_pi16 (__m64, __m64);
116
+ extern __m64 _mm_sign_pi32 (__m64, __m64);
117
+ #endif
118
+
119
+ // Packed align and shift right by n*8 bits,
120
+ // {X,}MM2/m{128,64} (b) to {X,}MM1 (a).
121
+
122
+ extern __m128i _mm_alignr_epi8 (__m128i, __m128i, int);
123
+
124
+ #if defined(_M_IX86)
125
+ extern __m64 _mm_alignr_pi8 (__m64, __m64, int);
126
+ #endif
127
+
128
+ // Packed byte, word, double word absolute value,
129
+ // {X,}MM2/m{128,64} (b) to {X,}MM1 (a).
130
+
131
+ extern __m128i _mm_abs_epi8 (__m128i);
132
+ extern __m128i _mm_abs_epi16 (__m128i);
133
+ extern __m128i _mm_abs_epi32 (__m128i);
134
+
135
+ #if defined(_M_IX86)
136
+ extern __m64 _mm_abs_pi8 (__m64);
137
+ extern __m64 _mm_abs_pi16 (__m64);
138
+ extern __m64 _mm_abs_pi32 (__m64);
139
+ #endif
140
+
141
+ #ifdef __cplusplus
142
+ };
143
+ #endif /* __cplusplus */
144
+
145
+ #endif /* defined (_M_CEE_PURE) */
146
+ #endif /* __midl */
147
+ #endif /* _INCLUDED_TMM */
miniMSVC/VC/Tools/MSVC/14.42.34433/include/tuple ADDED
@@ -0,0 +1,1177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // tuple standard header (core)
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ #ifndef _TUPLE_
7
+ #define _TUPLE_
8
+ #include <yvals_core.h>
9
+ #if _STL_COMPILER_PREPROCESSOR
10
+ #if _HAS_CXX20
11
+ #include <compare>
12
+ #endif // _HAS_CXX20
13
+ #include <__msvc_iter_core.hpp>
14
+ #include <type_traits>
15
+
16
+ #pragma pack(push, _CRT_PACKING)
17
+ #pragma warning(push, _STL_WARNING_LEVEL)
18
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
19
+ _STL_DISABLE_CLANG_WARNINGS
20
+ #pragma push_macro("new")
21
+ #undef new
22
+
23
+ _STD_BEGIN
24
+ template <bool _Same, class _Dest, class... _Srcs>
25
+ constexpr bool _Tuple_conditional_explicit_v0 = false;
26
+
27
+ template <class... _Dests, class... _Srcs>
28
+ constexpr bool _Tuple_conditional_explicit_v0<true, tuple<_Dests...>, _Srcs...> =
29
+ !conjunction_v<is_convertible<_Srcs, _Dests>...>;
30
+
31
+ template <class _Dest, class... _Srcs>
32
+ constexpr bool _Tuple_conditional_explicit_v =
33
+ _Tuple_conditional_explicit_v0<tuple_size_v<_Dest> == sizeof...(_Srcs), _Dest, _Srcs...>;
34
+
35
+ template <bool _Same, class _Dest, class... _Srcs>
36
+ constexpr bool _Tuple_constructible_v0 = false;
37
+
38
+ template <class... _Dests, class... _Srcs>
39
+ constexpr bool _Tuple_constructible_v0<true, tuple<_Dests...>, _Srcs...> =
40
+ conjunction_v<is_constructible<_Dests, _Srcs>...>;
41
+
42
+ template <class _Dest, class... _Srcs>
43
+ constexpr bool _Tuple_constructible_v =
44
+ _Tuple_constructible_v0<tuple_size_v<_Dest> == sizeof...(_Srcs), _Dest, _Srcs...>;
45
+
46
+ template <class _Dest, class... _Srcs>
47
+ struct _Tuple_constructible_val : bool_constant<_Tuple_constructible_v<_Dest, _Srcs...>> {};
48
+
49
+ template <bool _Same, class _Dest, class... _Srcs>
50
+ constexpr bool _Tuple_nothrow_constructible_v0 = false;
51
+
52
+ template <class... _Dests, class... _Srcs>
53
+ constexpr bool _Tuple_nothrow_constructible_v0<true, tuple<_Dests...>, _Srcs...> =
54
+ conjunction_v<is_nothrow_constructible<_Dests, _Srcs>...>;
55
+
56
+ template <class _Dest, class... _Srcs>
57
+ constexpr bool _Tuple_nothrow_constructible_v =
58
+ _Tuple_nothrow_constructible_v0<tuple_size_v<_Dest> == sizeof...(_Srcs), _Dest, _Srcs...>;
59
+
60
+ template <bool _Same, class _Dest, class... _Srcs>
61
+ constexpr bool _Tuple_assignable_v0 = false;
62
+
63
+ template <class... _Dests, class... _Srcs>
64
+ constexpr bool _Tuple_assignable_v0<true, tuple<_Dests...>, _Srcs...> =
65
+ conjunction_v<is_assignable<_Dests&, _Srcs>...>; // note _Dests& instead of _Dests
66
+
67
+ #if _HAS_CXX23
68
+ template <class... _Dests, class... _Srcs>
69
+ constexpr bool _Tuple_assignable_v0<true, const tuple<_Dests...>, _Srcs...> =
70
+ conjunction_v<is_assignable<const _Dests&, _Srcs>...>;
71
+ #endif // _HAS_CXX23
72
+
73
+ template <class _Dest, class... _Srcs>
74
+ constexpr bool _Tuple_assignable_v = _Tuple_assignable_v0<tuple_size_v<_Dest> == sizeof...(_Srcs), _Dest, _Srcs...>;
75
+
76
+ template <class _Dest, class... _Srcs>
77
+ struct _Tuple_assignable_val : bool_constant<_Tuple_assignable_v<_Dest, _Srcs...>> {};
78
+
79
+ template <bool _Same, class _Dest, class... _Srcs>
80
+ constexpr bool _Tuple_nothrow_assignable_v0 = false;
81
+
82
+ template <class... _Dests, class... _Srcs>
83
+ constexpr bool _Tuple_nothrow_assignable_v0<true, tuple<_Dests...>, _Srcs...> =
84
+ conjunction_v<is_nothrow_assignable<_Dests&, _Srcs>...>; // note _Dests& instead of _Dests
85
+
86
+ #if _HAS_CXX23
87
+ template <class... _Dests, class... _Srcs>
88
+ constexpr bool _Tuple_nothrow_assignable_v0<true, const tuple<_Dests...>, _Srcs...> =
89
+ conjunction_v<is_nothrow_assignable<const _Dests&, _Srcs>...>;
90
+ #endif // _HAS_CXX23
91
+
92
+ template <class _Dest, class... _Srcs>
93
+ constexpr bool _Tuple_nothrow_assignable_v =
94
+ _Tuple_nothrow_assignable_v0<tuple_size_v<_Dest> == sizeof...(_Srcs), _Dest, _Srcs...>;
95
+
96
+ // Constrain tuple's converting constructors
97
+ template <class _Myself, class _OtherTuple, class... _Other>
98
+ struct _Tuple_convert_val : true_type {};
99
+
100
+ template <class _This, class _OtherTuple, class _Uty>
101
+ struct _Tuple_convert_val<tuple<_This>, _OtherTuple, _Uty>
102
+ : bool_constant<!disjunction_v<is_same<_This, _Uty>, is_constructible<_This, _OtherTuple>,
103
+ is_convertible<_OtherTuple, _This>>> {};
104
+
105
+ // Constrain tuple's perfect forwarding constructor (LWG-3121)
106
+ template <class _Myself, class _This2, class... _Rest2>
107
+ struct _Tuple_perfect_val : true_type {};
108
+
109
+ template <class _Myself, class _This2>
110
+ struct _Tuple_perfect_val<_Myself, _This2> : bool_constant<!is_same_v<_Myself, _Remove_cvref_t<_This2>>> {};
111
+
112
+ template <class _Ty0, class _Ty1, class _Uty0, class _Uty1>
113
+ struct _Tuple_perfect_val<tuple<_Ty0, _Ty1>, _Uty0, _Uty1>
114
+ : bool_constant<disjunction_v<negation<is_same<_Remove_cvref_t<_Uty0>, allocator_arg_t>>,
115
+ is_same<_Remove_cvref_t<_Ty0>, allocator_arg_t>>> {};
116
+
117
+ template <class _Ty0, class _Ty1, class _Ty2, class _Uty0, class _Uty1, class _Uty2>
118
+ struct _Tuple_perfect_val<tuple<_Ty0, _Ty1, _Ty2>, _Uty0, _Uty1, _Uty2>
119
+ : bool_constant<disjunction_v<negation<is_same<_Remove_cvref_t<_Uty0>, allocator_arg_t>>,
120
+ is_same<_Remove_cvref_t<_Ty0>, allocator_arg_t>>> {};
121
+
122
+ // Note: To improve throughput, this file uses extra _STD qualification for names that appear in the
123
+ // arguments of enable_if_t. Specifically, we qualify names which appear anywhere in the STL as members of
124
+ // some class - including injected-class-names! - that we know are not members of the class being defined.
125
+ // This avoids pointless class-member lookup for those names in this context.
126
+
127
+ template <class _Ty>
128
+ struct _Tuple_val { // stores each value in a tuple
129
+ constexpr _Tuple_val() : _Val() {}
130
+
131
+ template <class _Other>
132
+ constexpr _Tuple_val(_Other&& _Arg) : _Val(_STD forward<_Other>(_Arg)) {}
133
+
134
+ template <class _Alloc, class... _Other, enable_if_t<!uses_allocator_v<_Ty, _Alloc>, int> = 0>
135
+ constexpr _Tuple_val(const _Alloc&, allocator_arg_t, _Other&&... _Arg) : _Val(_STD forward<_Other>(_Arg)...) {}
136
+
137
+ template <class _Alloc, class... _Other,
138
+ enable_if_t<conjunction_v<_STD uses_allocator<_Ty, _Alloc>,
139
+ _STD is_constructible<_Ty, _STD allocator_arg_t, const _Alloc&, _Other...>>,
140
+ int> = 0>
141
+ constexpr _Tuple_val(const _Alloc& _Al, allocator_arg_t, _Other&&... _Arg)
142
+ : _Val(allocator_arg, _Al, _STD forward<_Other>(_Arg)...) {}
143
+
144
+ template <class _Alloc, class... _Other,
145
+ enable_if_t<conjunction_v<_STD uses_allocator<_Ty, _Alloc>,
146
+ _STD negation<_STD is_constructible<_Ty, _STD allocator_arg_t, const _Alloc&, _Other...>>>,
147
+ int> = 0>
148
+ constexpr _Tuple_val(const _Alloc& _Al, allocator_arg_t, _Other&&... _Arg)
149
+ : _Val(_STD forward<_Other>(_Arg)..., _Al) {}
150
+
151
+ _Ty _Val;
152
+ };
153
+
154
+ struct _Exact_args_t {
155
+ explicit _Exact_args_t() = default;
156
+ }; // tag type to disambiguate construction (from one arg per element)
157
+
158
+ struct _Unpack_tuple_t {
159
+ explicit _Unpack_tuple_t() = default;
160
+ }; // tag type to disambiguate construction (from unpacking a tuple/pair)
161
+
162
+ struct _Alloc_exact_args_t {
163
+ explicit _Alloc_exact_args_t() = default;
164
+ }; // tag type to disambiguate construction (from an allocator and one arg per element)
165
+
166
+ struct _Alloc_unpack_tuple_t {
167
+ explicit _Alloc_unpack_tuple_t() = default;
168
+ }; // tag type to disambiguate construction (from an allocator and unpacking a tuple/pair)
169
+
170
+ #if _HAS_CXX23
171
+ template <class _Tuple, class _Other, class _Indices = make_index_sequence<tuple_size_v<_Tuple>>>
172
+ constexpr bool _Can_construct_values_from_tuple_like_v = false;
173
+
174
+ template <class... _Types, class _Other, size_t... _Indices>
175
+ constexpr bool _Can_construct_values_from_tuple_like_v<tuple<_Types...>, _Other, index_sequence<_Indices...>> =
176
+ conjunction_v<is_constructible<_Types, decltype(_STD get<_Indices>(_STD declval<_Other>()))>...>;
177
+
178
+ #if defined(__clang__) || defined(__EDG__) // TRANSITION, LLVM-59827 and VSO-1900279
179
+ template <class _TupleLike, class _Tuple>
180
+ concept _Can_construct_from_tuple_like =
181
+ _Different_from<_TupleLike, _Tuple> && _Tuple_like<_TupleLike> && !_Is_subrange_v<remove_cvref_t<_TupleLike>>
182
+ && (tuple_size_v<_Tuple> == tuple_size_v<remove_cvref_t<_TupleLike>>) //
183
+ &&_Can_construct_values_from_tuple_like_v<_Tuple, _TupleLike>
184
+ && (tuple_size_v<_Tuple> != 1
185
+ || (!is_convertible_v<_TupleLike, tuple_element_t<0, _Tuple>>
186
+ && !is_constructible_v<tuple_element_t<0, _Tuple>, _TupleLike>) );
187
+ #endif // ^^^ workaround ^^^
188
+
189
+ template <class _TTuple, class _UTuple, class _Indices = make_index_sequence<tuple_size_v<_UTuple>>>
190
+ struct _Three_way_comparison_result_with_tuple_like {};
191
+
192
+ template <class... _TTypes, class _UTuple, size_t... _Indices>
193
+ requires
194
+ #if !defined(__clang__) && !defined(__EDG__) // TRANSITION, DevCom-10265237
195
+ (sizeof...(_TTypes) == sizeof...(_Indices)) &&
196
+ #endif // ^^^ workaround ^^^
197
+ (requires { typename _Synth_three_way_result<_TTypes, tuple_element_t<_Indices, _UTuple>>; } && ...)
198
+ struct _Three_way_comparison_result_with_tuple_like<tuple<_TTypes...>, _UTuple, index_sequence<_Indices...>> {
199
+ using type = common_comparison_category_t<_Synth_three_way_result<_TTypes, tuple_element_t<_Indices, _UTuple>>...>;
200
+ };
201
+
202
+ template <class _TTuple, _Tuple_like _UTuple>
203
+ using _Three_way_comparison_result_with_tuple_like_t =
204
+ _Three_way_comparison_result_with_tuple_like<_TTuple, _UTuple>::type;
205
+
206
+ template <class _Ty>
207
+ concept _Tuple_like_non_tuple = !_Is_specialization_v<_Ty, tuple> && _Tuple_like<_Ty>;
208
+ #endif // _HAS_CXX23
209
+
210
+ template <>
211
+ class tuple<> { // empty tuple
212
+ public:
213
+ constexpr tuple() noexcept = default; /* strengthened */
214
+
215
+ constexpr tuple(const tuple&) noexcept /* strengthened */ {} // TRANSITION, ABI: should be defaulted
216
+
217
+ #if _HAS_CXX23
218
+ template <_Different_from<tuple> _Other>
219
+ requires _Tuple_like<_Other> && (tuple_size_v<remove_cvref_t<_Other>> == 0)
220
+ constexpr tuple(_Other&&) noexcept /* strengthened */ {}
221
+ #endif // _HAS_CXX23
222
+
223
+ template <class _Alloc>
224
+ _CONSTEXPR20 tuple(allocator_arg_t, const _Alloc&) noexcept /* strengthened */ {}
225
+
226
+ template <class _Alloc>
227
+ _CONSTEXPR20 tuple(allocator_arg_t, const _Alloc&, const tuple&) noexcept /* strengthened */ {}
228
+
229
+ #if _HAS_CXX23
230
+ template <class _Alloc, _Different_from<tuple> _Other>
231
+ requires _Tuple_like<_Other> && (tuple_size_v<remove_cvref_t<_Other>> == 0)
232
+ constexpr tuple(allocator_arg_t, const _Alloc&, _Other&&) noexcept /* strengthened */ {}
233
+ #endif // _HAS_CXX23
234
+
235
+ template <class _Tag, enable_if_t<is_same_v<_Tag, _STD _Exact_args_t>, int> = 0>
236
+ constexpr tuple(_Tag) noexcept /* strengthened */ {}
237
+
238
+ template <class _Tag, class _Alloc, enable_if_t<is_same_v<_Tag, _STD _Alloc_exact_args_t>, int> = 0>
239
+ constexpr tuple(_Tag, const _Alloc&) noexcept /* strengthened */ {}
240
+
241
+ constexpr tuple& operator=(const tuple&) = default;
242
+ #if _HAS_CXX23
243
+ constexpr const tuple& operator=(const tuple&) const noexcept /* strengthened */ {
244
+ return *this;
245
+ }
246
+
247
+ template <_Different_from<tuple> _Other>
248
+ requires _Tuple_like<_Other> && (tuple_size_v<remove_cvref_t<_Other>> == 0)
249
+ constexpr tuple& operator=(_Other&&) noexcept /* strengthened */ {
250
+ return *this;
251
+ }
252
+
253
+ template <_Different_from<tuple> _Other>
254
+ requires _Tuple_like<_Other> && (tuple_size_v<remove_cvref_t<_Other>> == 0)
255
+ constexpr const tuple& operator=(_Other&&) const noexcept /* strengthened */ {
256
+ return *this;
257
+ }
258
+ #endif // _HAS_CXX23
259
+
260
+ _CONSTEXPR20 void swap(tuple&) noexcept {}
261
+ #if _HAS_CXX23
262
+ constexpr void swap(const tuple&) const noexcept {}
263
+ #endif // _HAS_CXX23
264
+
265
+ constexpr bool _Equals(const tuple&) const noexcept {
266
+ return true;
267
+ }
268
+
269
+ #if _HAS_CXX20
270
+ _NODISCARD constexpr strong_ordering _Three_way_compare(const tuple&) const noexcept {
271
+ return strong_ordering::equal;
272
+ }
273
+ #else // ^^^ _HAS_CXX20 / !_HAS_CXX20 vvv
274
+ _NODISCARD constexpr bool _Less(const tuple&) const noexcept {
275
+ return false;
276
+ }
277
+ #endif // ^^^ !_HAS_CXX20 ^^^
278
+
279
+ #if _HAS_CXX23
280
+ template <_Tuple_like_non_tuple _Other>
281
+ _NODISCARD friend constexpr bool operator==(const tuple&, const _Other&) noexcept /* strengthened */ {
282
+ static_assert(tuple_size_v<_Other> == 0, "Cannot compare tuples of different sizes (N4950 [tuple.rel]/2).");
283
+ return true;
284
+ }
285
+
286
+ template <_Tuple_like_non_tuple _Other>
287
+ requires (tuple_size_v<remove_cvref_t<_Other>> == 0)
288
+ _NODISCARD friend constexpr strong_ordering operator<=>(const tuple&, const _Other&) noexcept /* strengthened */ {
289
+ return strong_ordering::equal;
290
+ }
291
+ #endif // _HAS_CXX23
292
+ };
293
+
294
+ template <class _This, class... _Rest>
295
+ class tuple<_This, _Rest...> : private tuple<_Rest...> { // recursive tuple definition
296
+ public:
297
+ using _This_type = _This;
298
+ using _Mybase = tuple<_Rest...>;
299
+
300
+ template <class _Tag, class _This2, class... _Rest2, enable_if_t<is_same_v<_Tag, _STD _Exact_args_t>, int> = 0>
301
+ constexpr tuple(_Tag, _This2&& _This_arg, _Rest2&&... _Rest_arg)
302
+ : _Mybase(_Exact_args_t{}, _STD forward<_Rest2>(_Rest_arg)...), _Myfirst(_STD forward<_This2>(_This_arg)) {}
303
+
304
+ template <class _Tag, class _Tpl, size_t... _Indices, enable_if_t<is_same_v<_Tag, _STD _Unpack_tuple_t>, int> = 0>
305
+ constexpr tuple(_Tag, _Tpl&& _Right, index_sequence<_Indices...>);
306
+
307
+ template <class _Tag, class _Tpl, enable_if_t<is_same_v<_Tag, _STD _Unpack_tuple_t>, int> = 0>
308
+ constexpr tuple(_Tag, _Tpl&& _Right)
309
+ : tuple(_Unpack_tuple_t{}, _STD forward<_Tpl>(_Right),
310
+ make_index_sequence<tuple_size_v<remove_reference_t<_Tpl>>>{}) {}
311
+
312
+ template <class _Tag, class _Alloc, class _This2, class... _Rest2,
313
+ enable_if_t<is_same_v<_Tag, _STD _Alloc_exact_args_t>, int> = 0>
314
+ constexpr tuple(_Tag, const _Alloc& _Al, _This2&& _This_arg, _Rest2&&... _Rest_arg)
315
+ : _Mybase(_Alloc_exact_args_t{}, _Al, _STD forward<_Rest2>(_Rest_arg)...),
316
+ _Myfirst(_Al, allocator_arg, _STD forward<_This2>(_This_arg)) {}
317
+
318
+ template <class _Tag, class _Alloc, class _Tpl, size_t... _Indices,
319
+ enable_if_t<is_same_v<_Tag, _STD _Alloc_unpack_tuple_t>, int> = 0>
320
+ constexpr tuple(_Tag, const _Alloc& _Al, _Tpl&& _Right, index_sequence<_Indices...>);
321
+
322
+ template <class _Tag, class _Alloc, class _Tpl, enable_if_t<is_same_v<_Tag, _STD _Alloc_unpack_tuple_t>, int> = 0>
323
+ constexpr tuple(_Tag, const _Alloc& _Al, _Tpl&& _Right)
324
+ : tuple(_Alloc_unpack_tuple_t{}, _Al, _STD forward<_Tpl>(_Right),
325
+ make_index_sequence<tuple_size_v<remove_reference_t<_Tpl>>>{}) {}
326
+
327
+ template <class _This2 = _This,
328
+ enable_if_t<conjunction_v<_STD is_default_constructible<_This2>, _STD is_default_constructible<_Rest>...>,
329
+ int> = 0>
330
+ constexpr explicit(
331
+ !conjunction_v<_Is_implicitly_default_constructible<_This2>, _Is_implicitly_default_constructible<_Rest>...>)
332
+ tuple() noexcept(conjunction_v<is_nothrow_default_constructible<_This2>,
333
+ is_nothrow_default_constructible<_Rest>...>) // strengthened
334
+ : _Mybase(), _Myfirst() {}
335
+
336
+ template <class _This2 = _This, enable_if_t<_Tuple_constructible_v<tuple, const _This2&, const _Rest&...>, int> = 0>
337
+ constexpr explicit(_Tuple_conditional_explicit_v<tuple, const _This2&, const _Rest&...>) tuple(
338
+ const _This& _This_arg, const _Rest&... _Rest_arg) noexcept(conjunction_v<is_nothrow_copy_constructible<_This2>,
339
+ is_nothrow_copy_constructible<_Rest>...>) // strengthened
340
+ : tuple(_Exact_args_t{}, _This_arg, _Rest_arg...) {}
341
+
342
+ template <class _This2, class... _Rest2,
343
+ enable_if_t<conjunction_v<_STD _Tuple_perfect_val<tuple, _This2, _Rest2...>,
344
+ _STD _Tuple_constructible_val<tuple, _This2, _Rest2...>>,
345
+ int> = 0>
346
+ constexpr explicit(_Tuple_conditional_explicit_v<tuple, _This2, _Rest2...>) tuple(_This2&& _This_arg,
347
+ _Rest2&&... _Rest_arg) noexcept(_Tuple_nothrow_constructible_v<tuple, _This2, _Rest2...>) // strengthened
348
+ : tuple(_Exact_args_t{}, _STD forward<_This2>(_This_arg), _STD forward<_Rest2>(_Rest_arg)...) {}
349
+
350
+ tuple(const tuple&) = default;
351
+ tuple(tuple&&) = default;
352
+
353
+ #if _HAS_CXX23
354
+ template <class... _Other, enable_if_t<conjunction_v<_STD _Tuple_constructible_val<tuple, _Other&...>,
355
+ _STD _Tuple_convert_val<tuple, tuple<_Other...>&, _Other...>>,
356
+ int> = 0>
357
+ constexpr explicit(_Tuple_conditional_explicit_v<tuple, _Other&...>)
358
+ tuple(tuple<_Other...>& _Right) noexcept(_Tuple_nothrow_constructible_v<tuple, _Other&...>) // strengthened
359
+ : tuple(_Unpack_tuple_t{}, _Right) {}
360
+ #endif // _HAS_CXX23
361
+
362
+ template <class... _Other, enable_if_t<conjunction_v<_STD _Tuple_constructible_val<tuple, const _Other&...>,
363
+ _STD _Tuple_convert_val<tuple, const tuple<_Other...>&, _Other...>>,
364
+ int> = 0>
365
+ constexpr explicit(_Tuple_conditional_explicit_v<tuple, const _Other&...>)
366
+ tuple(const tuple<_Other...>& _Right) noexcept(
367
+ _Tuple_nothrow_constructible_v<tuple, const _Other&...>) // strengthened
368
+ : tuple(_Unpack_tuple_t{}, _Right) {}
369
+
370
+ template <class... _Other, enable_if_t<conjunction_v<_STD _Tuple_constructible_val<tuple, _Other...>,
371
+ _STD _Tuple_convert_val<tuple, tuple<_Other...>, _Other...>>,
372
+ int> = 0>
373
+ constexpr explicit(_Tuple_conditional_explicit_v<tuple, _Other...>)
374
+ tuple(tuple<_Other...>&& _Right) noexcept(_Tuple_nothrow_constructible_v<tuple, _Other...>) // strengthened
375
+ : tuple(_Unpack_tuple_t{}, _STD move(_Right)) {}
376
+
377
+ #if _HAS_CXX23
378
+ template <class... _Other, enable_if_t<conjunction_v<_STD _Tuple_constructible_val<tuple, const _Other...>,
379
+ _STD _Tuple_convert_val<tuple, const tuple<_Other...>, _Other...>>,
380
+ int> = 0>
381
+ constexpr explicit(_Tuple_conditional_explicit_v<tuple, const _Other...>)
382
+ tuple(const tuple<_Other...>&& _Right) noexcept(
383
+ _Tuple_nothrow_constructible_v<tuple, const _Other...>) // strengthened
384
+ : tuple(_Unpack_tuple_t{}, _STD move(_Right)) {}
385
+
386
+ template <class _First, class _Second, enable_if_t<_Tuple_constructible_v<tuple, _First&, _Second&>, int> = 0>
387
+ constexpr explicit(_Tuple_conditional_explicit_v<tuple, _First&, _Second&>)
388
+ tuple(pair<_First, _Second>& _Right) noexcept(
389
+ _Tuple_nothrow_constructible_v<tuple, _First&, _Second&>) // strengthened
390
+ : tuple(_Unpack_tuple_t{}, _Right) {}
391
+ #endif // _HAS_CXX23
392
+
393
+ template <class _First, class _Second,
394
+ enable_if_t<_Tuple_constructible_v<tuple, const _First&, const _Second&>, int> = 0>
395
+ constexpr explicit(_Tuple_conditional_explicit_v<tuple, const _First&, const _Second&>)
396
+ tuple(const pair<_First, _Second>& _Right) noexcept(
397
+ _Tuple_nothrow_constructible_v<tuple, const _First&, const _Second&>) // strengthened
398
+ : tuple(_Unpack_tuple_t{}, _Right) {}
399
+
400
+ template <class _First, class _Second, enable_if_t<_Tuple_constructible_v<tuple, _First, _Second>, int> = 0>
401
+ constexpr explicit(_Tuple_conditional_explicit_v<tuple, _First, _Second>) tuple(
402
+ pair<_First, _Second>&& _Right) noexcept(_Tuple_nothrow_constructible_v<tuple, _First, _Second>) // strengthened
403
+ : tuple(_Unpack_tuple_t{}, _STD move(_Right)) {}
404
+
405
+ #if _HAS_CXX23
406
+ template <class _First, class _Second,
407
+ enable_if_t<_Tuple_constructible_v<tuple, const _First, const _Second>, int> = 0>
408
+ constexpr explicit(_Tuple_conditional_explicit_v<tuple, const _First, const _Second>)
409
+ tuple(const pair<_First, _Second>&& _Right) noexcept(
410
+ _Tuple_nothrow_constructible_v<tuple, const _First, const _Second>) // strengthened
411
+ : tuple(_Unpack_tuple_t{}, _STD move(_Right)) {}
412
+
413
+ template <class _Other, class _Indices = index_sequence_for<_Rest...>>
414
+ static constexpr bool _Is_tuple_like_constructor_explicit_v = false;
415
+
416
+ template <_Tuple_like _Other, size_t... _Indices>
417
+ static constexpr bool _Is_tuple_like_constructor_explicit_v<_Other, index_sequence<_Indices...>> =
418
+ negation_v<conjunction<is_convertible<decltype(_STD get<0>(_STD declval<_Other>())), _This>,
419
+ is_convertible<decltype(_STD get<_Indices + 1>(_STD declval<_Other>())), _Rest>...>>;
420
+
421
+ #if defined(__clang__) || defined(__EDG__) // TRANSITION, LLVM-59827 and VSO-1900279
422
+ template <class _Other, enable_if_t<_Can_construct_from_tuple_like<_Other, tuple>, int> = 0>
423
+ #else // ^^^ workaround / no workaround vvv
424
+ template <_Different_from<tuple> _Other>
425
+ requires _Tuple_like<_Other> && (!_Is_subrange_v<remove_cvref_t<_Other>>)
426
+ && (1 + sizeof...(_Rest) == tuple_size_v<remove_cvref_t<_Other>>)
427
+ && _Can_construct_values_from_tuple_like_v<tuple, _Other>
428
+ && (sizeof...(_Rest) != 0 || (!is_convertible_v<_Other, _This> && !is_constructible_v<_This, _Other>) )
429
+ #endif // ^^^ no workaround ^^^
430
+ constexpr explicit(_Is_tuple_like_constructor_explicit_v<_Other>) tuple(_Other&& _Right)
431
+ : tuple(_Unpack_tuple_t{}, _STD forward<_Other>(_Right)) {
432
+ }
433
+ #endif // _HAS_CXX23
434
+
435
+ template <class _Alloc, class _This2 = _This,
436
+ enable_if_t<conjunction_v<_STD is_default_constructible<_This2>, _STD is_default_constructible<_Rest>...>,
437
+ int> = 0>
438
+ _CONSTEXPR20 explicit(
439
+ !conjunction_v<_Is_implicitly_default_constructible<_This2>, _Is_implicitly_default_constructible<_Rest>...>)
440
+ tuple(allocator_arg_t, const _Alloc& _Al)
441
+ : _Mybase(allocator_arg, _Al), _Myfirst(_Al, allocator_arg) {}
442
+
443
+ template <class _Alloc, class _This2 = _This,
444
+ enable_if_t<_Tuple_constructible_v<tuple, const _This2&, const _Rest&...>, int> = 0>
445
+ _CONSTEXPR20 explicit(_Tuple_conditional_explicit_v<tuple, const _This2&, const _Rest&...>)
446
+ tuple(allocator_arg_t, const _Alloc& _Al, const _This& _This_arg, const _Rest&... _Rest_arg)
447
+ : tuple(_Alloc_exact_args_t{}, _Al, _This_arg, _Rest_arg...) {}
448
+
449
+ template <class _Alloc, class _This2, class... _Rest2,
450
+ enable_if_t<conjunction_v<_STD _Tuple_perfect_val<tuple, _This2, _Rest2...>,
451
+ _STD _Tuple_constructible_val<tuple, _This2, _Rest2...>>,
452
+ int> = 0>
453
+ _CONSTEXPR20 explicit(_Tuple_conditional_explicit_v<tuple, _This2, _Rest2...>)
454
+ tuple(allocator_arg_t, const _Alloc& _Al, _This2&& _This_arg, _Rest2&&... _Rest_arg)
455
+ : tuple(_Alloc_exact_args_t{}, _Al, _STD forward<_This2>(_This_arg), _STD forward<_Rest2>(_Rest_arg)...) {}
456
+
457
+ template <class _Alloc, class _This2 = _This,
458
+ enable_if_t<_Tuple_constructible_v<tuple, const _This2&, const _Rest&...>, int> = 0>
459
+ _CONSTEXPR20 tuple(allocator_arg_t, const _Alloc& _Al, const tuple& _Right)
460
+ : tuple(_Alloc_unpack_tuple_t{}, _Al, _Right) {}
461
+
462
+ template <class _Alloc, class _This2 = _This, enable_if_t<_Tuple_constructible_v<tuple, _This2, _Rest...>, int> = 0>
463
+ _CONSTEXPR20 tuple(allocator_arg_t, const _Alloc& _Al, tuple&& _Right)
464
+ : tuple(_Alloc_unpack_tuple_t{}, _Al, _STD move(_Right)) {}
465
+
466
+ #if _HAS_CXX23
467
+ template <class _Alloc, class... _Other,
468
+ enable_if_t<conjunction_v<_STD _Tuple_constructible_val<tuple, _Other&...>,
469
+ _STD _Tuple_convert_val<tuple, tuple<_Other...>&, _Other...>>,
470
+ int> = 0>
471
+ constexpr explicit(_Tuple_conditional_explicit_v<tuple, _Other&...>)
472
+ tuple(allocator_arg_t, const _Alloc& _Al, tuple<_Other...>& _Right)
473
+ : tuple(_Alloc_unpack_tuple_t{}, _Al, _Right) {}
474
+ #endif // _HAS_CXX23
475
+
476
+ template <class _Alloc, class... _Other,
477
+ enable_if_t<conjunction_v<_STD _Tuple_constructible_val<tuple, const _Other&...>,
478
+ _STD _Tuple_convert_val<tuple, const tuple<_Other...>&, _Other...>>,
479
+ int> = 0>
480
+ _CONSTEXPR20 explicit(_Tuple_conditional_explicit_v<tuple, const _Other&...>)
481
+ tuple(allocator_arg_t, const _Alloc& _Al, const tuple<_Other...>& _Right)
482
+ : tuple(_Alloc_unpack_tuple_t{}, _Al, _Right) {}
483
+
484
+ template <class _Alloc, class... _Other,
485
+ enable_if_t<conjunction_v<_STD _Tuple_constructible_val<tuple, _Other...>,
486
+ _STD _Tuple_convert_val<tuple, tuple<_Other...>, _Other...>>,
487
+ int> = 0>
488
+ _CONSTEXPR20 explicit(_Tuple_conditional_explicit_v<tuple, _Other...>)
489
+ tuple(allocator_arg_t, const _Alloc& _Al, tuple<_Other...>&& _Right)
490
+ : tuple(_Alloc_unpack_tuple_t{}, _Al, _STD move(_Right)) {}
491
+
492
+ #if _HAS_CXX23
493
+ template <class _Alloc, class... _Other,
494
+ enable_if_t<conjunction_v<_STD _Tuple_constructible_val<tuple, const _Other...>,
495
+ _STD _Tuple_convert_val<tuple, const tuple<_Other...>, _Other...>>,
496
+ int> = 0>
497
+ constexpr explicit(_Tuple_conditional_explicit_v<tuple, const _Other...>)
498
+ tuple(allocator_arg_t, const _Alloc& _Al, const tuple<_Other...>&& _Right)
499
+ : tuple(_Alloc_unpack_tuple_t{}, _Al, _STD move(_Right)) {}
500
+
501
+ template <class _Alloc, class _First, class _Second,
502
+ enable_if_t<_Tuple_constructible_v<tuple, _First&, _Second&>, int> = 0>
503
+ constexpr explicit(_Tuple_conditional_explicit_v<tuple, _First&, _Second&>)
504
+ tuple(allocator_arg_t, const _Alloc& _Al, pair<_First, _Second>& _Right)
505
+ : tuple(_Alloc_unpack_tuple_t{}, _Al, _Right) {}
506
+ #endif // _HAS_CXX23
507
+
508
+ template <class _Alloc, class _First, class _Second,
509
+ enable_if_t<_Tuple_constructible_v<tuple, const _First&, const _Second&>, int> = 0>
510
+ _CONSTEXPR20 explicit(_Tuple_conditional_explicit_v<tuple, const _First&, const _Second&>)
511
+ tuple(allocator_arg_t, const _Alloc& _Al, const pair<_First, _Second>& _Right)
512
+ : tuple(_Alloc_unpack_tuple_t{}, _Al, _Right) {}
513
+
514
+ template <class _Alloc, class _First, class _Second,
515
+ enable_if_t<_Tuple_constructible_v<tuple, _First, _Second>, int> = 0>
516
+ _CONSTEXPR20 explicit(_Tuple_conditional_explicit_v<tuple, _First, _Second>)
517
+ tuple(allocator_arg_t, const _Alloc& _Al, pair<_First, _Second>&& _Right)
518
+ : tuple(_Alloc_unpack_tuple_t{}, _Al, _STD move(_Right)) {}
519
+
520
+ #if _HAS_CXX23
521
+ template <class _Alloc, class _First, class _Second,
522
+ enable_if_t<_Tuple_constructible_v<tuple, const _First, const _Second>, int> = 0>
523
+ constexpr explicit(_Tuple_conditional_explicit_v<tuple, const _First, const _Second>)
524
+ tuple(allocator_arg_t, const _Alloc& _Al, const pair<_First, _Second>&& _Right)
525
+ : tuple(_Alloc_unpack_tuple_t{}, _Al, _STD move(_Right)) {}
526
+
527
+ #if defined(__clang__) || defined(__EDG__) // TRANSITION, LLVM-59827 (Clang), VSO-1900279 (EDG)
528
+ template <class _Alloc, class _Other, enable_if_t<_Can_construct_from_tuple_like<_Other, tuple>, int> = 0>
529
+ #else // ^^^ workaround / no workaround vvv
530
+ template <class _Alloc, _Different_from<tuple> _Other>
531
+ requires _Tuple_like<_Other> && (!_Is_subrange_v<remove_cvref_t<_Other>>)
532
+ && (1 + sizeof...(_Rest) == tuple_size_v<remove_cvref_t<_Other>>)
533
+ && _Can_construct_values_from_tuple_like_v<tuple, _Other>
534
+ && (sizeof...(_Rest) != 0 || (!is_convertible_v<_Other, _This> && !is_constructible_v<_This, _Other>) )
535
+ #endif // ^^^ no workaround ^^^
536
+ constexpr explicit(_Is_tuple_like_constructor_explicit_v<_Other>)
537
+ tuple(allocator_arg_t, const _Alloc& _Al, _Other&& _Right)
538
+ : tuple(_Alloc_unpack_tuple_t{}, _Al, _STD forward<_Other>(_Right)) {
539
+ }
540
+ #endif // _HAS_CXX23
541
+
542
+ tuple& operator=(const volatile tuple&) = delete;
543
+
544
+ template <class _Myself = tuple, class _This2 = _This,
545
+ enable_if_t<conjunction_v<_STD _Is_copy_assignable_no_precondition_check<_This2>,
546
+ _STD _Is_copy_assignable_no_precondition_check<_Rest>...>,
547
+ int> = 0>
548
+ _CONSTEXPR20 tuple& operator=(_Identity_t<const _Myself&> _Right) noexcept(
549
+ conjunction_v<is_nothrow_copy_assignable<_This2>, is_nothrow_copy_assignable<_Rest>...>) /* strengthened */ {
550
+ _Myfirst._Val = _Right._Myfirst._Val;
551
+ _Get_rest() = _Right._Get_rest();
552
+ return *this;
553
+ }
554
+
555
+ #if _HAS_CXX23
556
+ template <class _Myself = tuple>
557
+ requires conjunction_v<_STD _Is_copy_assignable_no_precondition_check<const _This>,
558
+ _STD _Is_copy_assignable_no_precondition_check<const _Rest>...>
559
+ constexpr const tuple& operator=(_Identity_t<const _Myself&> _Right) const
560
+ noexcept(conjunction_v<is_nothrow_copy_assignable<const _This>,
561
+ is_nothrow_copy_assignable<const _Rest>...>) /* strengthened */ {
562
+ _Myfirst._Val = _Right._Myfirst._Val;
563
+ _Get_rest() = _Right._Get_rest();
564
+ return *this;
565
+ }
566
+ #endif // _HAS_CXX23
567
+
568
+ template <class _Myself = tuple, class _This2 = _This,
569
+ enable_if_t<conjunction_v<_STD _Is_move_assignable_no_precondition_check<_This2>,
570
+ _STD _Is_move_assignable_no_precondition_check<_Rest>...>,
571
+ int> = 0>
572
+ _CONSTEXPR20 tuple& operator=(_Identity_t<_Myself&&> _Right) noexcept(
573
+ conjunction_v<is_nothrow_move_assignable<_This2>, is_nothrow_move_assignable<_Rest>...>) {
574
+ _Myfirst._Val = _STD forward<_This>(_Right._Myfirst._Val);
575
+ _Get_rest() = _STD forward<_Mybase>(_Right._Get_rest());
576
+ return *this;
577
+ }
578
+
579
+ #if _HAS_CXX23
580
+ template <class _Myself = tuple>
581
+ requires conjunction_v<_STD _Is_assignable_no_precondition_check<const _This&, _This>,
582
+ _STD _Is_assignable_no_precondition_check<const _Rest&, _Rest>...>
583
+ constexpr const tuple& operator=(_Identity_t<_Myself&&> _Right) const
584
+ noexcept(conjunction_v<is_nothrow_assignable<const _This&, _This>,
585
+ is_nothrow_assignable<const _Rest&, _Rest>...>) /* strengthened */ {
586
+ _Myfirst._Val = _STD forward<_This>(_Right._Myfirst._Val);
587
+ _Get_rest() = _STD forward<_Mybase>(_Right._Get_rest());
588
+ return *this;
589
+ }
590
+ #endif // _HAS_CXX23
591
+
592
+ template <class... _Other, enable_if_t<conjunction_v<_STD negation<_STD is_same<tuple, _STD tuple<_Other...>>>,
593
+ _STD _Tuple_assignable_val<tuple, const _Other&...>>,
594
+ int> = 0>
595
+ _CONSTEXPR20 tuple& operator=(const tuple<_Other...>& _Right) noexcept(
596
+ _Tuple_nothrow_assignable_v<tuple, const _Other&...>) /* strengthened */ {
597
+ _Myfirst._Val = _Right._Myfirst._Val;
598
+ _Get_rest() = _Right._Get_rest();
599
+ return *this;
600
+ }
601
+
602
+ #if _HAS_CXX23
603
+ template <class... _Other>
604
+ requires (!is_same_v<tuple, _STD tuple<_Other...>>) && _Tuple_assignable_v<const tuple, const _Other&...>
605
+ constexpr const tuple& operator=(const tuple<_Other...>& _Right) const
606
+ noexcept(_Tuple_nothrow_assignable_v<const tuple, const _Other&...>) /* strengthened */ {
607
+ _Myfirst._Val = _Right._Myfirst._Val;
608
+ _Get_rest() = _Right._Get_rest();
609
+ return *this;
610
+ }
611
+ #endif // _HAS_CXX23
612
+
613
+ template <class... _Other, enable_if_t<conjunction_v<_STD negation<_STD is_same<tuple, _STD tuple<_Other...>>>,
614
+ _STD _Tuple_assignable_val<tuple, _Other...>>,
615
+ int> = 0>
616
+ _CONSTEXPR20 tuple& operator=(tuple<_Other...>&& _Right) noexcept(
617
+ _Tuple_nothrow_assignable_v<tuple, _Other...>) /* strengthened */ {
618
+ _Myfirst._Val = _STD forward<typename tuple<_Other...>::_This_type>(_Right._Myfirst._Val);
619
+ _Get_rest() = _STD forward<typename tuple<_Other...>::_Mybase>(_Right._Get_rest());
620
+ return *this;
621
+ }
622
+
623
+ #if _HAS_CXX23
624
+ template <class... _Other>
625
+ requires (!is_same_v<tuple, _STD tuple<_Other...>>) && _Tuple_assignable_v<const tuple, _Other...>
626
+ constexpr const tuple& operator=(tuple<_Other...>&& _Right) const
627
+ noexcept(_Tuple_nothrow_assignable_v<const tuple, _Other...>) /* strengthened */ {
628
+ _Myfirst._Val = _STD forward<typename tuple<_Other...>::_This_type>(_Right._Myfirst._Val);
629
+ _Get_rest() = _STD forward<typename tuple<_Other...>::_Mybase>(_Right._Get_rest());
630
+ return *this;
631
+ }
632
+ #endif // _HAS_CXX23
633
+
634
+ template <class _First, class _Second,
635
+ enable_if_t<_Tuple_assignable_v<tuple, const _First&, const _Second&>, int> = 0>
636
+ _CONSTEXPR20 tuple& operator=(const pair<_First, _Second>& _Right) noexcept(
637
+ _Tuple_nothrow_assignable_v<tuple, const _First&, const _Second&>) /* strengthened */ {
638
+ _Myfirst._Val = _Right.first;
639
+ _Get_rest()._Myfirst._Val = _Right.second;
640
+ return *this;
641
+ }
642
+
643
+ #if _HAS_CXX23
644
+ template <class _First, class _Second>
645
+ requires _Tuple_assignable_v<const tuple, const _First&, const _Second&>
646
+ constexpr const tuple& operator=(const pair<_First, _Second>& _Right) const
647
+ noexcept(_Tuple_nothrow_assignable_v<const tuple, const _First&, const _Second&>) /* strengthened */ {
648
+ _Myfirst._Val = _Right.first;
649
+ _Get_rest()._Myfirst._Val = _Right.second;
650
+ return *this;
651
+ }
652
+ #endif // _HAS_CXX23
653
+
654
+ template <class _First, class _Second, enable_if_t<_Tuple_assignable_v<tuple, _First, _Second>, int> = 0>
655
+ _CONSTEXPR20 tuple& operator=(pair<_First, _Second>&& _Right) noexcept(
656
+ _Tuple_nothrow_assignable_v<tuple, _First, _Second>) /* strengthened */ {
657
+ _Myfirst._Val = _STD forward<_First>(_Right.first);
658
+ _Get_rest()._Myfirst._Val = _STD forward<_Second>(_Right.second);
659
+ return *this;
660
+ }
661
+
662
+ #if _HAS_CXX23
663
+ template <class _First, class _Second>
664
+ requires _Tuple_assignable_v<const tuple, _First, _Second>
665
+ constexpr const tuple& operator=(pair<_First, _Second>&& _Right) const
666
+ noexcept(_Tuple_nothrow_assignable_v<const tuple, _First, _Second>) /* strengthened */ {
667
+ _Myfirst._Val = _STD forward<_First>(_Right.first);
668
+ _Get_rest()._Myfirst._Val = _STD forward<_Second>(_Right.second);
669
+ return *this;
670
+ }
671
+
672
+ template <bool _Const_assignment, class _Other, class _Indices = index_sequence_for<_Rest...>>
673
+ static constexpr bool _Can_assign_values_from_tuple_like_v = false;
674
+
675
+ template <_Tuple_like _Other, size_t... _Indices>
676
+ static constexpr bool _Can_assign_values_from_tuple_like_v<false, _Other, index_sequence<_Indices...>> =
677
+ conjunction_v<is_assignable<_This&, decltype(_STD get<0>(_STD declval<_Other>()))>,
678
+ is_assignable<_Rest&, decltype(_STD get<_Indices + 1>(_STD declval<_Other>()))>...>;
679
+
680
+ template <_Tuple_like _Other, size_t... _Indices>
681
+ static constexpr bool _Can_assign_values_from_tuple_like_v<true, _Other, index_sequence<_Indices...>> =
682
+ conjunction_v<is_assignable<const _This&, decltype(_STD get<0>(_STD declval<_Other>()))>,
683
+ is_assignable<const _Rest&, decltype(_STD get<_Indices + 1>(_STD declval<_Other>()))>...>;
684
+
685
+ template <_Tuple_like _Other, size_t... _Indices>
686
+ constexpr void _Assign_tuple_like(_Other&& _Right, index_sequence<_Indices...>) {
687
+ ((void) (_STD get<_Indices>(*this) = _STD get<_Indices>(_STD forward<_Other>(_Right))), ...);
688
+ }
689
+
690
+ template <_Tuple_like _Other, size_t... _Indices>
691
+ constexpr void _Assign_tuple_like(_Other&& _Right, index_sequence<_Indices...>) const {
692
+ ((void) (_STD get<_Indices>(*this) = _STD get<_Indices>(_STD forward<_Other>(_Right))), ...);
693
+ }
694
+
695
+ template <_Different_from<tuple> _Other>
696
+ requires _Tuple_like<_Other> && (!_Is_subrange_v<remove_cvref_t<_Other>>)
697
+ && (1 + sizeof...(_Rest) == tuple_size_v<remove_cvref_t<_Other>>)
698
+ && _Can_assign_values_from_tuple_like_v<false, _Other>
699
+ constexpr tuple& operator=(_Other&& _Right) {
700
+ _Assign_tuple_like(_STD forward<_Other>(_Right), make_index_sequence<1 + sizeof...(_Rest)>{});
701
+ return *this;
702
+ }
703
+
704
+ template <_Different_from<tuple> _Other>
705
+ requires _Tuple_like<_Other> && (!_Is_subrange_v<remove_cvref_t<_Other>>)
706
+ && (1 + sizeof...(_Rest) == tuple_size_v<remove_cvref_t<_Other>>)
707
+ && _Can_assign_values_from_tuple_like_v<true, _Other>
708
+ constexpr const tuple& operator=(_Other&& _Right) const {
709
+ _Assign_tuple_like(_STD forward<_Other>(_Right), make_index_sequence<1 + sizeof...(_Rest)>{});
710
+ return *this;
711
+ }
712
+ #endif // _HAS_CXX23
713
+
714
+ _CONSTEXPR20 void swap(tuple& _Right) noexcept(
715
+ conjunction_v<_Is_nothrow_swappable<_This>, _Is_nothrow_swappable<_Rest>...>) {
716
+ using _STD swap;
717
+ swap(_Myfirst._Val, _Right._Myfirst._Val); // intentional ADL
718
+ _Mybase::swap(_Right._Get_rest());
719
+ }
720
+
721
+ #if _HAS_CXX23
722
+ template <int = 0> // see GH-3013
723
+ constexpr void swap(const tuple& _Right) const
724
+ noexcept(conjunction_v<is_nothrow_swappable<const _This>, is_nothrow_swappable<const _Rest>...>) {
725
+ using _STD swap;
726
+ swap(_Myfirst._Val, _Right._Myfirst._Val); // intentional ADL
727
+ _Mybase::swap(_Right._Get_rest());
728
+ }
729
+ #endif // _HAS_CXX23
730
+
731
+ constexpr _Mybase& _Get_rest() noexcept { // get reference to rest of elements
732
+ return *this;
733
+ }
734
+
735
+ constexpr const _Mybase& _Get_rest() const noexcept { // get const reference to rest of elements
736
+ return *this;
737
+ }
738
+
739
+ template <class... _Other>
740
+ constexpr bool _Equals(const tuple<_Other...>& _Right) const {
741
+ return _Myfirst._Val == _Right._Myfirst._Val && _Mybase::_Equals(_Right._Get_rest());
742
+ }
743
+
744
+ #if _HAS_CXX20
745
+ template <class _First, class... _Other>
746
+ _NODISCARD constexpr common_comparison_category_t<_Synth_three_way_result<_This, _First>,
747
+ _Synth_three_way_result<_Rest, _Other>...>
748
+ _Three_way_compare(const tuple<_First, _Other...>& _Right) const {
749
+ if (auto _Result = _Synth_three_way{}(_Myfirst._Val, _Right._Myfirst._Val); _Result != 0) {
750
+ return _Result;
751
+ }
752
+ return _Mybase::_Three_way_compare(_Right._Get_rest());
753
+ }
754
+
755
+ #if _HAS_CXX23
756
+ template <class _Other, class _Indices = make_index_sequence<1 + sizeof...(_Rest)>>
757
+ static constexpr bool _Can_equal_compare_with_tuple_like_v = false;
758
+
759
+ template <class _Other, size_t... _Indices>
760
+ static constexpr bool _Can_equal_compare_with_tuple_like_v<_Other, index_sequence<_Indices...>> =
761
+ (requires(const tuple& _Left, const _Other& _Right) {
762
+ { _STD get<_Indices>(_Left) == _STD get<_Indices>(_Right) } -> _Boolean_testable;
763
+ } && ...);
764
+
765
+ template <class _Other, size_t... _Indices>
766
+ _NODISCARD constexpr bool _Equals_to_tuple_like(const _Other& _Right, index_sequence<_Indices...>) const {
767
+ return ((_STD get<_Indices>(*this) == _STD get<_Indices>(_Right)) && ...);
768
+ }
769
+
770
+ template <_Tuple_like_non_tuple _Other>
771
+ _NODISCARD friend constexpr bool operator==(const tuple& _Left, const _Other& _Right) {
772
+ static_assert(1 + sizeof...(_Rest) == tuple_size_v<_Other>,
773
+ "Cannot compare tuples of different sizes (N4950 [tuple.rel]/2).");
774
+ static_assert(_Can_equal_compare_with_tuple_like_v<_Other>,
775
+ "For all i, where 0 <= i < sizeof...(TTypes), get<i>(t) == get<i>(u) must be a valid expression (N4950 "
776
+ "[tuple.rel]/2).");
777
+ return _Left._Equals_to_tuple_like(_Right, make_index_sequence<1 + sizeof...(_Rest)>{});
778
+ }
779
+
780
+ template <class _Other, size_t... _Indices>
781
+ _NODISCARD constexpr auto _Three_way_compare_with_tuple_like(
782
+ const _Other& _Right, index_sequence<_Indices...>) const {
783
+ _Three_way_comparison_result_with_tuple_like_t<tuple, _Other> _Result = strong_ordering::equal;
784
+ (void) (((_Result = _Synth_three_way{}(_STD get<_Indices>(*this), _STD get<_Indices>(_Right))) == 0) && ...);
785
+ return _Result;
786
+ }
787
+
788
+ template <_Tuple_like_non_tuple _Other>
789
+ _NODISCARD friend constexpr auto operator<=>(const tuple& _Left, const _Other& _Right)
790
+ -> _Three_way_comparison_result_with_tuple_like_t<tuple, _Other> {
791
+ return _Left._Three_way_compare_with_tuple_like(_Right, make_index_sequence<1 + sizeof...(_Rest)>{});
792
+ }
793
+ #endif // _HAS_CXX23
794
+ #else // ^^^ _HAS_CXX20 / !_HAS_CXX20 vvv
795
+ template <class... _Other>
796
+ _NODISCARD constexpr bool _Less(const tuple<_Other...>& _Right) const {
797
+ return _Myfirst._Val < _Right._Myfirst._Val
798
+ || (!(_Right._Myfirst._Val < _Myfirst._Val) && _Mybase::_Less(_Right._Get_rest()));
799
+ }
800
+ #endif // ^^^ !_HAS_CXX20 ^^^
801
+
802
+ template <size_t _Index, class... _Types>
803
+ friend constexpr tuple_element_t<_Index, tuple<_Types...>>& get(tuple<_Types...>& _Tuple) noexcept;
804
+
805
+ template <size_t _Index, class... _Types>
806
+ friend constexpr const tuple_element_t<_Index, tuple<_Types...>>& get(const tuple<_Types...>& _Tuple) noexcept;
807
+
808
+ template <size_t _Index, class... _Types>
809
+ friend constexpr tuple_element_t<_Index, tuple<_Types...>>&& get(tuple<_Types...>&& _Tuple) noexcept;
810
+
811
+ template <size_t _Index, class... _Types>
812
+ friend constexpr const tuple_element_t<_Index, tuple<_Types...>>&& get(const tuple<_Types...>&& _Tuple) noexcept;
813
+
814
+ template <size_t _Index, class... _Types>
815
+ friend constexpr auto&& _Tuple_get(tuple<_Types...>&& _Tuple) noexcept;
816
+
817
+ template <class _Ty, class... _Types>
818
+ friend constexpr _Ty& get(tuple<_Types...>& _Tuple) noexcept;
819
+
820
+ template <class _Ty, class... _Types>
821
+ friend constexpr const _Ty& get(const tuple<_Types...>& _Tuple) noexcept;
822
+
823
+ template <class _Ty, class... _Types>
824
+ friend constexpr _Ty&& get(tuple<_Types...>&& _Tuple) noexcept;
825
+
826
+ template <class _Ty, class... _Types>
827
+ friend constexpr const _Ty&& get(const tuple<_Types...>&& _Tuple) noexcept;
828
+
829
+ _Tuple_val<_This> _Myfirst; // the stored element
830
+ };
831
+
832
+ #if _HAS_CXX17
833
+ template <class... _Types>
834
+ tuple(_Types...) -> tuple<_Types...>;
835
+
836
+ template <class _Ty1, class _Ty2>
837
+ tuple(pair<_Ty1, _Ty2>) -> tuple<_Ty1, _Ty2>;
838
+
839
+ template <class _Alloc, class... _Types>
840
+ tuple(allocator_arg_t, _Alloc, _Types...) -> tuple<_Types...>;
841
+
842
+ template <class _Alloc, class _Ty1, class _Ty2>
843
+ tuple(allocator_arg_t, _Alloc, pair<_Ty1, _Ty2>) -> tuple<_Ty1, _Ty2>;
844
+
845
+ template <class _Alloc, class... _Types>
846
+ tuple(allocator_arg_t, _Alloc, tuple<_Types...>) -> tuple<_Types...>;
847
+ #endif // _HAS_CXX17
848
+
849
+ _EXPORT_STD template <class... _Types1, class... _Types2>
850
+ _NODISCARD constexpr bool operator==(const tuple<_Types1...>& _Left, const tuple<_Types2...>& _Right) {
851
+ static_assert(
852
+ sizeof...(_Types1) == sizeof...(_Types2), "Cannot compare tuples of different sizes (N4950 [tuple.rel]/2).");
853
+ return _Left._Equals(_Right);
854
+ }
855
+
856
+ #if _HAS_CXX20
857
+ _EXPORT_STD template <class... _Types1, class... _Types2>
858
+ _NODISCARD constexpr common_comparison_category_t<_Synth_three_way_result<_Types1, _Types2>...> operator<=>(
859
+ const tuple<_Types1...>& _Left, const tuple<_Types2...>& _Right) {
860
+ return _Left._Three_way_compare(_Right);
861
+ }
862
+ #else // ^^^ _HAS_CXX20 / !_HAS_CXX20 vvv
863
+ template <class... _Types1, class... _Types2>
864
+ _NODISCARD constexpr bool operator!=(const tuple<_Types1...>& _Left, const tuple<_Types2...>& _Right) {
865
+ return !(_Left == _Right);
866
+ }
867
+
868
+ template <class... _Types1, class... _Types2>
869
+ _NODISCARD constexpr bool operator<(const tuple<_Types1...>& _Left, const tuple<_Types2...>& _Right) {
870
+ static_assert(sizeof...(_Types1) == sizeof...(_Types2), "cannot compare tuples of different sizes");
871
+ return _Left._Less(_Right);
872
+ }
873
+
874
+ template <class... _Types1, class... _Types2>
875
+ _NODISCARD constexpr bool operator>=(const tuple<_Types1...>& _Left, const tuple<_Types2...>& _Right) {
876
+ return !(_Left < _Right);
877
+ }
878
+
879
+ template <class... _Types1, class... _Types2>
880
+ _NODISCARD constexpr bool operator>(const tuple<_Types1...>& _Left, const tuple<_Types2...>& _Right) {
881
+ return _Right < _Left;
882
+ }
883
+
884
+ template <class... _Types1, class... _Types2>
885
+ _NODISCARD constexpr bool operator<=(const tuple<_Types1...>& _Left, const tuple<_Types2...>& _Right) {
886
+ return !(_Right < _Left);
887
+ }
888
+ #endif // ^^^ !_HAS_CXX20 ^^^
889
+
890
+ _EXPORT_STD template <class... _Types, enable_if_t<conjunction_v<_Is_swappable<_Types>...>, int> = 0>
891
+ _CONSTEXPR20 void swap(tuple<_Types...>& _Left, tuple<_Types...>& _Right) noexcept(noexcept(_Left.swap(_Right))) {
892
+ _Left.swap(_Right);
893
+ }
894
+
895
+ #if _HAS_CXX23
896
+ _EXPORT_STD template <class... _Types>
897
+ requires conjunction_v<is_swappable<const _Types>...>
898
+ constexpr void swap(const tuple<_Types...>& _Left, const tuple<_Types...>& _Right) noexcept(
899
+ noexcept(_Left.swap(_Right))) {
900
+ _Left.swap(_Right);
901
+ }
902
+ #endif // _HAS_CXX23
903
+
904
+ _EXPORT_STD template <size_t _Index, class... _Types>
905
+ _NODISCARD constexpr tuple_element_t<_Index, tuple<_Types...>>& get(tuple<_Types...>& _Tuple) noexcept {
906
+ using _Ttype = typename tuple_element<_Index, tuple<_Types...>>::_Ttype;
907
+ return static_cast<_Ttype&>(_Tuple)._Myfirst._Val;
908
+ }
909
+
910
+ _EXPORT_STD template <size_t _Index, class... _Types>
911
+ _NODISCARD constexpr const tuple_element_t<_Index, tuple<_Types...>>& get(const tuple<_Types...>& _Tuple) noexcept {
912
+ using _Ttype = typename tuple_element<_Index, tuple<_Types...>>::_Ttype;
913
+ return static_cast<const _Ttype&>(_Tuple)._Myfirst._Val;
914
+ }
915
+
916
+ _EXPORT_STD template <size_t _Index, class... _Types>
917
+ _NODISCARD constexpr tuple_element_t<_Index, tuple<_Types...>>&& get(tuple<_Types...>&& _Tuple) noexcept {
918
+ using _Ty = tuple_element_t<_Index, tuple<_Types...>>;
919
+ using _Ttype = typename tuple_element<_Index, tuple<_Types...>>::_Ttype;
920
+ return static_cast<_Ty&&>(static_cast<_Ttype&>(_Tuple)._Myfirst._Val);
921
+ }
922
+
923
+ _EXPORT_STD template <size_t _Index, class... _Types>
924
+ _NODISCARD constexpr const tuple_element_t<_Index, tuple<_Types...>>&& get(const tuple<_Types...>&& _Tuple) noexcept {
925
+ using _Ty = tuple_element_t<_Index, tuple<_Types...>>;
926
+ using _Ttype = typename tuple_element<_Index, tuple<_Types...>>::_Ttype;
927
+ return static_cast<const _Ty&&>(static_cast<const _Ttype&>(_Tuple)._Myfirst._Val);
928
+ }
929
+
930
+ template <size_t _Index, class... _Types>
931
+ _NODISCARD constexpr auto&& _Tuple_get(tuple<_Types...>&& _Tuple) noexcept {
932
+ // used by pair's piecewise constructor
933
+ using _Ty = tuple_element_t<_Index, tuple<_Types...>>;
934
+ using _Ttype = typename tuple_element<_Index, tuple<_Types...>>::_Ttype;
935
+ return static_cast<_Ty&&>(static_cast<_Ttype&>(_Tuple)._Myfirst._Val);
936
+ }
937
+
938
+ _EXPORT_STD template <class _Ty, class... _Types>
939
+ _NODISCARD constexpr _Ty& get(tuple<_Types...>& _Tuple) noexcept {
940
+ constexpr size_t _Idx = _Meta_find_unique_index<tuple<_Types...>, _Ty>::value;
941
+ if constexpr (_Idx < sizeof...(_Types)) {
942
+ using _Ttype = typename tuple_element<_Idx, tuple<_Types...>>::_Ttype;
943
+ return static_cast<_Ttype&>(_Tuple)._Myfirst._Val;
944
+ } else {
945
+ static_assert(false, "get<T>(tuple<Types...>&) "
946
+ "requires T to occur exactly once in Types. (N4971 [tuple.elem]/5)");
947
+ }
948
+ }
949
+
950
+ _EXPORT_STD template <class _Ty, class... _Types>
951
+ _NODISCARD constexpr const _Ty& get(const tuple<_Types...>& _Tuple) noexcept {
952
+ constexpr size_t _Idx = _Meta_find_unique_index<tuple<_Types...>, _Ty>::value;
953
+ if constexpr (_Idx < sizeof...(_Types)) {
954
+ using _Ttype = typename tuple_element<_Idx, tuple<_Types...>>::_Ttype;
955
+ return static_cast<const _Ttype&>(_Tuple)._Myfirst._Val;
956
+ } else {
957
+ static_assert(false, "get<T>(const tuple<Types...>&) "
958
+ "requires T to occur exactly once in Types. (N4971 [tuple.elem]/5)");
959
+ }
960
+ }
961
+
962
+ _EXPORT_STD template <class _Ty, class... _Types>
963
+ _NODISCARD constexpr _Ty&& get(tuple<_Types...>&& _Tuple) noexcept {
964
+ constexpr size_t _Idx = _Meta_find_unique_index<tuple<_Types...>, _Ty>::value;
965
+ if constexpr (_Idx < sizeof...(_Types)) {
966
+ using _Ttype = typename tuple_element<_Idx, tuple<_Types...>>::_Ttype;
967
+ return static_cast<_Ty&&>(static_cast<_Ttype&>(_Tuple)._Myfirst._Val);
968
+ } else {
969
+ static_assert(false, "get<T>(tuple<Types...>&&) "
970
+ "requires T to occur exactly once in Types. (N4971 [tuple.elem]/5)");
971
+ }
972
+ }
973
+
974
+ _EXPORT_STD template <class _Ty, class... _Types>
975
+ _NODISCARD constexpr const _Ty&& get(const tuple<_Types...>&& _Tuple) noexcept {
976
+ constexpr size_t _Idx = _Meta_find_unique_index<tuple<_Types...>, _Ty>::value;
977
+ if constexpr (_Idx < sizeof...(_Types)) {
978
+ using _Ttype = typename tuple_element<_Idx, tuple<_Types...>>::_Ttype;
979
+ return static_cast<const _Ty&&>(static_cast<const _Ttype&>(_Tuple)._Myfirst._Val);
980
+ } else {
981
+ static_assert(false, "get<T>(const tuple<Types...>&&) "
982
+ "requires T to occur exactly once in Types. (N4971 [tuple.elem]/5)");
983
+ }
984
+ }
985
+
986
+ template <class _This, class... _Rest>
987
+ template <class _Tag, class _Tpl, size_t... _Indices, enable_if_t<is_same_v<_Tag, _STD _Unpack_tuple_t>, int> /* = 0 */>
988
+ constexpr tuple<_This, _Rest...>::tuple(_Tag, _Tpl&& _Right, index_sequence<_Indices...>)
989
+ : tuple(_Exact_args_t{}, _STD get<_Indices>(_STD forward<_Tpl>(_Right))...) {}
990
+
991
+ template <class _This, class... _Rest>
992
+ template <class _Tag, class _Alloc, class _Tpl, size_t... _Indices,
993
+ enable_if_t<is_same_v<_Tag, _STD _Alloc_unpack_tuple_t>, int> /* = 0 */>
994
+ constexpr tuple<_This, _Rest...>::tuple(_Tag, const _Alloc& _Al, _Tpl&& _Right, index_sequence<_Indices...>)
995
+ : tuple(_Alloc_exact_args_t{}, _Al, _STD get<_Indices>(_STD forward<_Tpl>(_Right))...) {}
996
+
997
+ _EXPORT_STD template <class... _Types>
998
+ _NODISCARD constexpr tuple<_Unrefwrap_t<_Types>...> make_tuple(_Types&&... _Args) { // make tuple from elements
999
+ using _Ttype = tuple<_Unrefwrap_t<_Types>...>;
1000
+ return _Ttype(_STD forward<_Types>(_Args)...);
1001
+ }
1002
+
1003
+ _EXPORT_STD template <class... _Types>
1004
+ _NODISCARD constexpr tuple<_Types&...> tie(_Types&... _Args) noexcept { // make tuple from elements
1005
+ using _Ttype = tuple<_Types&...>;
1006
+ return _Ttype(_Args...);
1007
+ }
1008
+
1009
+ _EXPORT_STD template <class... _Types>
1010
+ _NODISCARD constexpr tuple<_Types&&...> forward_as_tuple(_Types&&... _Args) noexcept { // forward arguments in a tuple
1011
+ return tuple<_Types&&...>(_STD forward<_Types>(_Args)...);
1012
+ }
1013
+
1014
+ template <class _Ty, class _Kx_arg, class _Ix_arg, size_t _Ix_next, class... _Sequences>
1015
+ struct _Tuple_cat2;
1016
+
1017
+ template <class _Ty, size_t... _Kx, size_t... _Ix, size_t _Ix_next>
1018
+ struct _Tuple_cat2<_Ty, index_sequence<_Kx...>, index_sequence<_Ix...>, _Ix_next> {
1019
+ using _Ret = tuple<tuple_element_t<_Kx, _Remove_cvref_t<tuple_element_t<_Ix, _Ty>>>...>;
1020
+ using _Kx_seq = index_sequence<_Kx...>;
1021
+ using _Ix_seq = index_sequence<_Ix...>;
1022
+ };
1023
+
1024
+ template <class _Ty, size_t... _Kx, size_t... _Ix, size_t _Ix_next, size_t... _Kx_next, class... _Rest>
1025
+ struct _Tuple_cat2<_Ty, index_sequence<_Kx...>, index_sequence<_Ix...>, _Ix_next, index_sequence<_Kx_next...>, _Rest...>
1026
+ : _Tuple_cat2<_Ty, index_sequence<_Kx..., _Kx_next...>,
1027
+ index_sequence<_Ix..., (_Ix_next + 0 * _Kx_next)...>, // repeat _Ix_next, ignoring the elements of _Kx_next
1028
+ _Ix_next + 1, _Rest...> {};
1029
+
1030
+ #if _HAS_CXX23
1031
+ template <_Tuple_like... _Tuples>
1032
+ #else // ^^^ _HAS_CXX23 / !_HAS_CXX23 vvv
1033
+ template <class... _Tuples>
1034
+ #endif // ^^^ !_HAS_CXX23 ^^^
1035
+ using _Tuple_cat1 = _Tuple_cat2<tuple<_Tuples&&...>, index_sequence<>, index_sequence<>, 0,
1036
+ make_index_sequence<tuple_size_v<_Remove_cvref_t<_Tuples>>>...>;
1037
+
1038
+ template <class _Ret, size_t... _Kx, size_t... _Ix, class _Ty>
1039
+ constexpr _Ret _Tuple_cat(index_sequence<_Kx...>, index_sequence<_Ix...>, _Ty _Arg) {
1040
+ return _Ret{_STD get<_Kx>(_STD get<_Ix>(_STD move(_Arg)))...};
1041
+ }
1042
+
1043
+ #if _HAS_CXX23
1044
+ _EXPORT_STD template <_Tuple_like... _Tuples>
1045
+ #else // ^^^ _HAS_CXX23 / !_HAS_CXX23 vvv
1046
+ _EXPORT_STD template <class... _Tuples>
1047
+ #endif // ^^^ !_HAS_CXX23 ^^^
1048
+ _NODISCARD constexpr typename _Tuple_cat1<_Tuples...>::_Ret tuple_cat(_Tuples&&... _Tpls) { // concatenate tuples
1049
+ using _Cat1 = _Tuple_cat1<_Tuples...>;
1050
+ using _Ret = typename _Cat1::_Ret;
1051
+ using _Kx_seq = typename _Cat1::_Kx_seq;
1052
+ using _Ix_seq = typename _Cat1::_Ix_seq;
1053
+ return _STD _Tuple_cat<_Ret>(_Kx_seq{}, _Ix_seq{}, _STD forward_as_tuple(_STD forward<_Tuples>(_Tpls)...));
1054
+ }
1055
+
1056
+ #if _HAS_CXX17
1057
+ #if _HAS_CXX23
1058
+ template <class _Callable, _Tuple_like _Tuple, size_t... _Indices>
1059
+ #else // ^^^ _HAS_CXX23 / !_HAS_CXX23 vvv
1060
+ template <class _Callable, class _Tuple, size_t... _Indices>
1061
+ #endif // ^^^ !_HAS_CXX23 ^^^
1062
+ constexpr decltype(auto) _Apply_impl(_Callable&& _Obj, _Tuple&& _Tpl, index_sequence<_Indices...>) noexcept(
1063
+ noexcept(_STD invoke(_STD forward<_Callable>(_Obj), _STD get<_Indices>(_STD forward<_Tuple>(_Tpl))...))) {
1064
+ return _STD invoke(_STD forward<_Callable>(_Obj), _STD get<_Indices>(_STD forward<_Tuple>(_Tpl))...);
1065
+ }
1066
+
1067
+ #if _HAS_CXX23
1068
+ _EXPORT_STD template <class _Callable, _Tuple_like _Tuple>
1069
+ #else // ^^^ _HAS_CXX23 / !_HAS_CXX23 vvv
1070
+ _EXPORT_STD template <class _Callable, class _Tuple>
1071
+ #endif // ^^^ !_HAS_CXX23 ^^^
1072
+ constexpr decltype(auto) apply(_Callable&& _Obj, _Tuple&& _Tpl) noexcept(
1073
+ noexcept(_STD _Apply_impl(_STD forward<_Callable>(_Obj), _STD forward<_Tuple>(_Tpl),
1074
+ make_index_sequence<tuple_size_v<remove_reference_t<_Tuple>>>{}))) {
1075
+ return _STD _Apply_impl(_STD forward<_Callable>(_Obj), _STD forward<_Tuple>(_Tpl),
1076
+ make_index_sequence<tuple_size_v<remove_reference_t<_Tuple>>>{});
1077
+ }
1078
+
1079
+ template <class _Ty, class _Tuple, class _Seq = make_index_sequence<tuple_size_v<remove_reference_t<_Tuple>>>>
1080
+ constexpr bool _Can_make_from_tuple = false;
1081
+ template <class _Ty, class _Tuple, size_t... _Indices>
1082
+ constexpr bool _Can_make_from_tuple<_Ty, _Tuple, index_sequence<_Indices...>> =
1083
+ is_constructible_v<_Ty, decltype(_STD get<_Indices>(_STD declval<_Tuple>()))...>;
1084
+
1085
+ template <class _Ty, class _Tuple, size_t... _Indices>
1086
+ constexpr _Ty _Make_from_tuple_impl(_Tuple&& _Tpl, index_sequence<_Indices...>) noexcept(
1087
+ is_nothrow_constructible_v<_Ty, decltype(_STD get<_Indices>(_STD forward<_Tuple>(_Tpl)))...>) {
1088
+ return _Ty(_STD get<_Indices>(_STD forward<_Tuple>(_Tpl))...);
1089
+ }
1090
+
1091
+ #if _HAS_CXX23
1092
+ _EXPORT_STD template <class _Ty, _Tuple_like _Tuple>
1093
+ requires _Can_make_from_tuple<_Ty, _Tuple>
1094
+ #elif _HAS_CXX20
1095
+ _EXPORT_STD template <class _Ty, class _Tuple>
1096
+ requires _Can_make_from_tuple<_Ty, _Tuple>
1097
+ #else // ^^^ _HAS_CXX20 / !_HAS_CXX20 vvv
1098
+ template <class _Ty, class _Tuple, enable_if_t<_Can_make_from_tuple<_Ty, _Tuple>, int> = 0>
1099
+ #endif // ^^^ !_HAS_CXX20 ^^^
1100
+ _NODISCARD constexpr _Ty make_from_tuple(_Tuple&& _Tpl) noexcept(noexcept(_STD _Make_from_tuple_impl<_Ty>(
1101
+ _STD forward<_Tuple>(_Tpl), make_index_sequence<tuple_size_v<remove_reference_t<_Tuple>>>{}))) /* strengthened */ {
1102
+ // construct _Ty from the elements of _Tpl
1103
+ return _STD _Make_from_tuple_impl<_Ty>(
1104
+ _STD forward<_Tuple>(_Tpl), make_index_sequence<tuple_size_v<remove_reference_t<_Tuple>>>{});
1105
+ }
1106
+ #endif // _HAS_CXX17
1107
+
1108
+ template <class... _Types, class _Alloc>
1109
+ struct uses_allocator<tuple<_Types...>, _Alloc> : true_type {}; // true_type if container allocator enabled
1110
+
1111
+ #if _HAS_CXX23
1112
+ template <_Tuple_like _TTuple, _Tuple_like _UTuple, template <class> class _TQual, template <class> class _UQual,
1113
+ class _Indices = make_index_sequence<tuple_size_v<_TTuple>>>
1114
+ struct _Tuple_like_common_reference;
1115
+
1116
+ template <class _TTuple, class _UTuple, template <class> class _TQual, template <class> class _UQual,
1117
+ size_t... _Indices>
1118
+ requires requires {
1119
+ typename tuple<common_reference_t<_TQual<tuple_element_t<_Indices, _TTuple>>,
1120
+ _UQual<tuple_element_t<_Indices, _UTuple>>>...>;
1121
+ }
1122
+ struct _Tuple_like_common_reference<_TTuple, _UTuple, _TQual, _UQual, index_sequence<_Indices...>> {
1123
+ using type = tuple<
1124
+ common_reference_t<_TQual<tuple_element_t<_Indices, _TTuple>>, _UQual<tuple_element_t<_Indices, _UTuple>>>...>;
1125
+ };
1126
+
1127
+ template <_Tuple_like _TTuple, _Tuple_like _UTuple, template <class> class _TQual, template <class> class _UQual>
1128
+ requires (_Is_specialization_v<_TTuple, tuple> || _Is_specialization_v<_UTuple, tuple>)
1129
+ && is_same_v<_TTuple, decay_t<_TTuple>> && is_same_v<_UTuple, decay_t<_UTuple>>
1130
+ && (tuple_size_v<_TTuple> == tuple_size_v<_UTuple>) && requires {
1131
+ typename _Tuple_like_common_reference<_TTuple, _UTuple, _TQual, _UQual>::type;
1132
+ }
1133
+ struct basic_common_reference<_TTuple, _UTuple, _TQual, _UQual> {
1134
+ using type = _Tuple_like_common_reference<_TTuple, _UTuple, _TQual, _UQual>::type;
1135
+ };
1136
+
1137
+ template <_Tuple_like _TTuple, _Tuple_like _UTuple, class _Indices = make_index_sequence<tuple_size_v<_TTuple>>>
1138
+ struct _Tuple_like_common_type;
1139
+
1140
+ template <class _TTuple, class _UTuple, size_t... _Indices>
1141
+ requires requires {
1142
+ typename tuple<common_type_t<tuple_element_t<_Indices, _TTuple>, tuple_element_t<_Indices, _UTuple>>...>;
1143
+ }
1144
+ struct _Tuple_like_common_type<_TTuple, _UTuple, index_sequence<_Indices...>> {
1145
+ using type = tuple<common_type_t<tuple_element_t<_Indices, _TTuple>, tuple_element_t<_Indices, _UTuple>>...>;
1146
+ };
1147
+
1148
+ template <_Tuple_like _TTuple, _Tuple_like _UTuple>
1149
+ requires (_Is_specialization_v<_TTuple, tuple> || _Is_specialization_v<_UTuple, tuple>)
1150
+ && is_same_v<_TTuple, decay_t<_TTuple>> && is_same_v<_UTuple, decay_t<_UTuple>>
1151
+ && (tuple_size_v<_TTuple> == tuple_size_v<_UTuple>) && requires {
1152
+ typename _Tuple_like_common_type<_TTuple, _UTuple>::type;
1153
+ }
1154
+ struct common_type<_TTuple, _UTuple> {
1155
+ using type = _Tuple_like_common_type<_TTuple, _UTuple>::type;
1156
+ };
1157
+ #endif // _HAS_CXX23
1158
+
1159
+ #if _HAS_TR1_NAMESPACE
1160
+ namespace _DEPRECATE_TR1_NAMESPACE tr1 {
1161
+ using _STD get;
1162
+ using _STD ignore;
1163
+ using _STD make_tuple;
1164
+ using _STD ref;
1165
+ using _STD tie;
1166
+ using _STD tuple;
1167
+ } // namespace _DEPRECATE_TR1_NAMESPACE tr1
1168
+ #endif // _HAS_TR1_NAMESPACE
1169
+
1170
+ _STD_END
1171
+
1172
+ #pragma pop_macro("new")
1173
+ _STL_RESTORE_CLANG_WARNINGS
1174
+ #pragma warning(pop)
1175
+ #pragma pack(pop)
1176
+ #endif // _STL_COMPILER_PREPROCESSOR
1177
+ #endif // _TUPLE_
miniMSVC/VC/Tools/MSVC/14.42.34433/include/type_traits ADDED
The diff for this file is too large to render. See raw diff
 
miniMSVC/VC/Tools/MSVC/14.42.34433/include/typeindex ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // typeindex standard header
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ #ifndef _TYPEINDEX_
7
+ #define _TYPEINDEX_
8
+ #include <yvals_core.h>
9
+ #if _STL_COMPILER_PREPROCESSOR
10
+ #include <typeinfo>
11
+
12
+ #if _HAS_CXX20
13
+ #include <compare>
14
+ #include <cstring>
15
+ #endif // _HAS_CXX20
16
+
17
+ #pragma pack(push, _CRT_PACKING)
18
+ #pragma warning(push, _STL_WARNING_LEVEL)
19
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
20
+ _STL_DISABLE_CLANG_WARNINGS
21
+ #pragma push_macro("new")
22
+ #undef new
23
+
24
+ _STD_BEGIN
25
+ _EXPORT_STD class type_index { // wraps a typeinfo for indexing
26
+ public:
27
+ type_index(const type_info& _Tinfo) noexcept : _Tptr(&_Tinfo) {}
28
+
29
+ _NODISCARD size_t hash_code() const noexcept {
30
+ return _Tptr->hash_code();
31
+ }
32
+
33
+ _NODISCARD const char* name() const noexcept {
34
+ return _Tptr->name();
35
+ }
36
+
37
+ _NODISCARD bool operator==(const type_index& _Right) const noexcept {
38
+ return *_Tptr == *_Right._Tptr;
39
+ }
40
+
41
+ #if _HAS_CXX20
42
+ _NODISCARD strong_ordering operator<=>(const type_index& _Right) const noexcept {
43
+ // TRANSITION, DevCom-10326599, should rely on a stable interface
44
+ if (_Tptr == _Right._Tptr) {
45
+ return strong_ordering::equal;
46
+ }
47
+
48
+ #pragma push_macro("raw_name") // TRANSITION, GH-2195
49
+ #undef raw_name
50
+ return _CSTD strcmp(_Tptr->raw_name() + 1, _Right._Tptr->raw_name() + 1) <=> 0;
51
+ #pragma pop_macro("raw_name")
52
+ }
53
+ #else // ^^^ _HAS_CXX20 / !_HAS_CXX20 vvv
54
+ _NODISCARD bool operator!=(const type_index& _Right) const noexcept {
55
+ return !(*this == _Right);
56
+ }
57
+ #endif // ^^^ !_HAS_CXX20 ^^^
58
+
59
+ _NODISCARD bool operator<(const type_index& _Right) const noexcept {
60
+ return _Tptr->before(*_Right._Tptr);
61
+ }
62
+
63
+ _NODISCARD bool operator>=(const type_index& _Right) const noexcept {
64
+ return !(*this < _Right);
65
+ }
66
+
67
+ _NODISCARD bool operator>(const type_index& _Right) const noexcept {
68
+ return _Right < *this;
69
+ }
70
+
71
+ _NODISCARD bool operator<=(const type_index& _Right) const noexcept {
72
+ return !(_Right < *this);
73
+ }
74
+
75
+ private:
76
+ const type_info* _Tptr;
77
+ };
78
+
79
+ template <>
80
+ struct hash<type_index> {
81
+ using _ARGUMENT_TYPE_NAME _CXX17_DEPRECATE_ADAPTOR_TYPEDEFS = type_index;
82
+ using _RESULT_TYPE_NAME _CXX17_DEPRECATE_ADAPTOR_TYPEDEFS = size_t;
83
+
84
+ _NODISCARD _STATIC_CALL_OPERATOR size_t operator()(const type_index& _Keyval) _CONST_CALL_OPERATOR noexcept {
85
+ return _Keyval.hash_code();
86
+ }
87
+ };
88
+ _STD_END
89
+
90
+ #pragma pop_macro("new")
91
+ _STL_RESTORE_CLANG_WARNINGS
92
+ #pragma warning(pop)
93
+ #pragma pack(pop)
94
+ #endif // _STL_COMPILER_PREPROCESSOR
95
+ #endif // _TYPEINDEX_
miniMSVC/VC/Tools/MSVC/14.42.34433/include/typeinfo ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // typeinfo standard header
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ #ifndef _TYPEINFO_
7
+ #define _TYPEINFO_
8
+ #include <yvals_core.h>
9
+ #if _STL_COMPILER_PREPROCESSOR
10
+ #include <exception>
11
+
12
+ #pragma pack(push, _CRT_PACKING)
13
+ #pragma warning(push, _STL_WARNING_LEVEL)
14
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
15
+ _STL_DISABLE_CLANG_WARNINGS
16
+ #pragma push_macro("new")
17
+ #undef new
18
+
19
+ #pragma warning(disable : 4275) // non dll-interface class 'X' used as base for dll-interface class 'Y'
20
+
21
+ #pragma push_macro("raw_name") // TRANSITION, GH-2195
22
+ #undef raw_name
23
+ #include <vcruntime_typeinfo.h>
24
+ #pragma pop_macro("raw_name")
25
+
26
+ _STD_BEGIN
27
+
28
+ // size in pointers of std::function and std::any (roughly 3 pointers larger than std::string when building debug)
29
+ _INLINE_VAR constexpr int _Small_object_num_ptrs = 6 + 16 / sizeof(void*);
30
+
31
+ #if !_HAS_EXCEPTIONS
32
+ _EXPORT_STD class bad_cast : public exception { // base of all bad cast exceptions
33
+ public:
34
+ bad_cast(const char* _Message = "bad cast") noexcept : exception(_Message) {}
35
+
36
+ ~bad_cast() noexcept override {}
37
+
38
+ protected:
39
+ void _Doraise() const override { // perform class-specific exception handling
40
+ _RAISE(*this);
41
+ }
42
+ };
43
+
44
+ _EXPORT_STD class bad_typeid : public exception { // base of all bad typeid exceptions
45
+ public:
46
+ bad_typeid(const char* _Message = "bad typeid") noexcept : exception(_Message) {}
47
+
48
+ ~bad_typeid() noexcept override {}
49
+
50
+ protected:
51
+ void _Doraise() const override { // perform class-specific exception handling
52
+ _RAISE(*this);
53
+ }
54
+ };
55
+
56
+ class __non_rtti_object : public bad_typeid { // report a non-RTTI object
57
+ public:
58
+ __non_rtti_object(const char* _Message) : bad_typeid(_Message) {}
59
+ };
60
+ #endif // ^^^ !_HAS_EXCEPTIONS ^^^
61
+
62
+ [[noreturn]] inline void _Throw_bad_cast() {
63
+ _THROW(bad_cast{});
64
+ }
65
+
66
+ _STD_END
67
+
68
+ #pragma pop_macro("new")
69
+ _STL_RESTORE_CLANG_WARNINGS
70
+ #pragma pack(pop)
71
+ #pragma warning(pop)
72
+ #endif // _STL_COMPILER_PREPROCESSOR
73
+ #endif // _TYPEINFO_
miniMSVC/VC/Tools/MSVC/14.42.34433/include/unordered_map ADDED
@@ -0,0 +1,944 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // unordered_map standard header
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ #ifndef _UNORDERED_MAP_
7
+ #define _UNORDERED_MAP_
8
+ #include <yvals_core.h>
9
+ #if _STL_COMPILER_PREPROCESSOR
10
+ #include <xhash>
11
+
12
+ #if _HAS_CXX17
13
+ #include <xpolymorphic_allocator.h>
14
+ #endif // _HAS_CXX17
15
+
16
+ #pragma pack(push, _CRT_PACKING)
17
+ #pragma warning(push, _STL_WARNING_LEVEL)
18
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
19
+ _STL_DISABLE_CLANG_WARNINGS
20
+ #pragma push_macro("new")
21
+ #undef new
22
+
23
+ _STD_BEGIN
24
+ template <class _Kty, // key type
25
+ class _Ty, // mapped type
26
+ class _Tr, // comparator predicate type
27
+ class _Alloc, // actual allocator type (should be value allocator)
28
+ bool _Mfl> // true if multiple equivalent keys are permitted
29
+ class _Umap_traits : public _Tr { // traits required to make _Hash behave like a map
30
+ public:
31
+ using key_type = _Kty;
32
+ using value_type = pair<const _Kty, _Ty>;
33
+ using _Mutable_value_type = pair<_Kty, _Ty>;
34
+ using key_compare = _Tr;
35
+ using allocator_type = _Alloc;
36
+ #if _HAS_CXX17
37
+ using node_type = _Node_handle<_List_node<value_type, typename allocator_traits<_Alloc>::void_pointer>, _Alloc,
38
+ _Node_handle_map_base, _Kty, _Ty>;
39
+ #endif // _HAS_CXX17
40
+
41
+ static constexpr bool _Multi = _Mfl;
42
+ static constexpr bool _Standard = true;
43
+
44
+ template <class... _Args>
45
+ using _In_place_key_extractor = _In_place_key_extract_map<_Kty, _Args...>;
46
+
47
+ _Umap_traits() = default;
48
+
49
+ explicit _Umap_traits(const _Tr& _Traits) noexcept(is_nothrow_copy_constructible_v<_Tr>) : _Tr(_Traits) {}
50
+
51
+ using value_compare = void; // TRANSITION, remove when _Standard becomes unconditionally true
52
+
53
+ template <class _Ty1, class _Ty2>
54
+ static const _Kty& _Kfn(const pair<_Ty1, _Ty2>& _Val) noexcept { // extract key from element value
55
+ return _Val.first;
56
+ }
57
+ };
58
+
59
+ _EXPORT_STD template <class _Kty, class _Ty, class _Hasher = hash<_Kty>, class _Keyeq = equal_to<_Kty>,
60
+ class _Alloc = allocator<pair<const _Kty, _Ty>>>
61
+ class unordered_map : public _Hash<_Umap_traits<_Kty, _Ty, _Uhash_compare<_Kty, _Hasher, _Keyeq>, _Alloc, false>> {
62
+ // hash table of {key, mapped} values, unique keys
63
+ public:
64
+ static_assert(!_ENFORCE_MATCHING_ALLOCATORS || is_same_v<pair<const _Kty, _Ty>, typename _Alloc::value_type>,
65
+ _MISMATCHED_ALLOCATOR_MESSAGE("unordered_map<Key, Value, Hasher, Eq, Allocator>", "pair<const Key, Value>"));
66
+ static_assert(is_object_v<_Kty>, "The C++ Standard forbids containers of non-object types "
67
+ "because of [container.requirements].");
68
+
69
+ private:
70
+ using _Mytraits = _Uhash_compare<_Kty, _Hasher, _Keyeq>;
71
+ using _Mybase = _Hash<_Umap_traits<_Kty, _Ty, _Mytraits, _Alloc, false>>;
72
+ using _Alnode = typename _Mybase::_Alnode;
73
+ using _Alnode_traits = typename _Mybase::_Alnode_traits;
74
+ using _Nodeptr = typename _Mybase::_Nodeptr;
75
+ using _Key_compare = typename _Mybase::_Key_compare;
76
+
77
+ public:
78
+ using hasher = _Hasher;
79
+ using key_type = _Kty;
80
+ using mapped_type = _Ty;
81
+ using key_equal = _Keyeq;
82
+
83
+ using value_type = pair<const _Kty, _Ty>;
84
+ using allocator_type = typename _Mybase::allocator_type;
85
+ using size_type = typename _Mybase::size_type;
86
+ using difference_type = typename _Mybase::difference_type;
87
+ using pointer = typename _Mybase::pointer;
88
+ using const_pointer = typename _Mybase::const_pointer;
89
+ using reference = value_type&;
90
+ using const_reference = const value_type&;
91
+ using iterator = typename _Mybase::iterator;
92
+ using const_iterator = typename _Mybase::const_iterator;
93
+
94
+ using local_iterator = typename _Mybase::iterator;
95
+ using const_local_iterator = typename _Mybase::const_iterator;
96
+
97
+ #if _HAS_CXX17
98
+ using insert_return_type = _Insert_return_type<iterator, typename _Mybase::node_type>;
99
+ #endif // _HAS_CXX17
100
+
101
+ unordered_map() : _Mybase(_Key_compare(), allocator_type()) {}
102
+
103
+ explicit unordered_map(const allocator_type& _Al) : _Mybase(_Key_compare(), _Al) {}
104
+
105
+ unordered_map(const unordered_map& _Right)
106
+ : _Mybase(_Right, _Alnode_traits::select_on_container_copy_construction(_Right._Getal())) {}
107
+
108
+ unordered_map(const unordered_map& _Right, const allocator_type& _Al) : _Mybase(_Right, _Al) {}
109
+
110
+ explicit unordered_map(size_type _Buckets) : _Mybase(_Key_compare(), allocator_type()) {
111
+ _Mybase::rehash(_Buckets);
112
+ }
113
+
114
+ unordered_map(size_type _Buckets, const allocator_type& _Al) : _Mybase(_Key_compare(), _Al) {
115
+ _Mybase::rehash(_Buckets);
116
+ }
117
+
118
+ unordered_map(size_type _Buckets, const hasher& _Hasharg) : _Mybase(_Key_compare(_Hasharg), allocator_type()) {
119
+ _Mybase::rehash(_Buckets);
120
+ }
121
+
122
+ unordered_map(size_type _Buckets, const hasher& _Hasharg, const allocator_type& _Al)
123
+ : _Mybase(_Key_compare(_Hasharg), _Al) {
124
+ _Mybase::rehash(_Buckets);
125
+ }
126
+
127
+ unordered_map(size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg)
128
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), allocator_type()) {
129
+ _Mybase::rehash(_Buckets);
130
+ }
131
+
132
+ unordered_map(size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg, const allocator_type& _Al)
133
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), _Al) {
134
+ _Mybase::rehash(_Buckets);
135
+ }
136
+
137
+ template <class _Iter>
138
+ unordered_map(_Iter _First, _Iter _Last) : _Mybase(_Key_compare(), allocator_type()) {
139
+ insert(_First, _Last);
140
+ }
141
+
142
+ template <class _Iter>
143
+ unordered_map(_Iter _First, _Iter _Last, const allocator_type& _Al) : _Mybase(_Key_compare(), _Al) {
144
+ insert(_First, _Last);
145
+ }
146
+
147
+ template <class _Iter>
148
+ unordered_map(_Iter _First, _Iter _Last, size_type _Buckets) : _Mybase(_Key_compare(), allocator_type()) {
149
+ _Mybase::rehash(_Buckets);
150
+ insert(_First, _Last);
151
+ }
152
+
153
+ template <class _Iter>
154
+ unordered_map(_Iter _First, _Iter _Last, size_type _Buckets, const allocator_type& _Al)
155
+ : _Mybase(_Key_compare(), _Al) {
156
+ _Mybase::rehash(_Buckets);
157
+ insert(_First, _Last);
158
+ }
159
+
160
+ template <class _Iter>
161
+ unordered_map(_Iter _First, _Iter _Last, size_type _Buckets, const hasher& _Hasharg)
162
+ : _Mybase(_Key_compare(_Hasharg), allocator_type()) {
163
+ _Mybase::rehash(_Buckets);
164
+ insert(_First, _Last);
165
+ }
166
+
167
+ template <class _Iter>
168
+ unordered_map(_Iter _First, _Iter _Last, size_type _Buckets, const hasher& _Hasharg, const allocator_type& _Al)
169
+ : _Mybase(_Key_compare(_Hasharg), _Al) {
170
+ _Mybase::rehash(_Buckets);
171
+ insert(_First, _Last);
172
+ }
173
+
174
+ template <class _Iter>
175
+ unordered_map(_Iter _First, _Iter _Last, size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg)
176
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), allocator_type()) {
177
+ _Mybase::rehash(_Buckets);
178
+ insert(_First, _Last);
179
+ }
180
+
181
+ template <class _Iter>
182
+ unordered_map(_Iter _First, _Iter _Last, size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg,
183
+ const allocator_type& _Al)
184
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), _Al) {
185
+ _Mybase::rehash(_Buckets);
186
+ insert(_First, _Last);
187
+ }
188
+
189
+ #if _HAS_CXX23
190
+ template <_Container_compatible_range<value_type> _Rng>
191
+ unordered_map(from_range_t, _Rng&& _Range) : _Mybase(_Key_compare(), allocator_type()) {
192
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
193
+ }
194
+
195
+ template <_Container_compatible_range<value_type> _Rng>
196
+ unordered_map(from_range_t, _Rng&& _Range, const allocator_type& _Al) : _Mybase(_Key_compare(), _Al) {
197
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
198
+ }
199
+
200
+ template <_Container_compatible_range<value_type> _Rng>
201
+ unordered_map(from_range_t, _Rng&& _Range, size_type _Buckets) : _Mybase(_Key_compare(), allocator_type()) {
202
+ _Mybase::rehash(_Buckets);
203
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
204
+ }
205
+
206
+ template <_Container_compatible_range<value_type> _Rng>
207
+ unordered_map(from_range_t, _Rng&& _Range, size_type _Buckets, const allocator_type& _Al)
208
+ : _Mybase(_Key_compare(), _Al) {
209
+ _Mybase::rehash(_Buckets);
210
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
211
+ }
212
+
213
+ template <_Container_compatible_range<value_type> _Rng>
214
+ unordered_map(from_range_t, _Rng&& _Range, size_type _Buckets, const hasher& _Hasharg)
215
+ : _Mybase(_Key_compare(_Hasharg), allocator_type()) {
216
+ _Mybase::rehash(_Buckets);
217
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
218
+ }
219
+
220
+ template <_Container_compatible_range<value_type> _Rng>
221
+ unordered_map(from_range_t, _Rng&& _Range, size_type _Buckets, const hasher& _Hasharg, const allocator_type& _Al)
222
+ : _Mybase(_Key_compare(_Hasharg), _Al) {
223
+ _Mybase::rehash(_Buckets);
224
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
225
+ }
226
+
227
+ template <_Container_compatible_range<value_type> _Rng>
228
+ unordered_map(from_range_t, _Rng&& _Range, size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg)
229
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), allocator_type()) {
230
+ _Mybase::rehash(_Buckets);
231
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
232
+ }
233
+
234
+ template <_Container_compatible_range<value_type> _Rng>
235
+ unordered_map(from_range_t, _Rng&& _Range, size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg,
236
+ const allocator_type& _Al)
237
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), _Al) {
238
+ _Mybase::rehash(_Buckets);
239
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
240
+ }
241
+ #endif // _HAS_CXX23
242
+
243
+ unordered_map& operator=(const unordered_map& _Right) {
244
+ _Mybase::operator=(_Right);
245
+ return *this;
246
+ }
247
+
248
+ unordered_map(unordered_map&& _Right) : _Mybase(_STD move(_Right)) {}
249
+
250
+ unordered_map(unordered_map&& _Right, const allocator_type& _Al) : _Mybase(_STD move(_Right), _Al) {}
251
+
252
+ unordered_map& operator=(unordered_map&& _Right) noexcept(_Alnode_traits::is_always_equal::value
253
+ && is_nothrow_move_assignable_v<_Hasher>
254
+ && is_nothrow_move_assignable_v<_Keyeq>) {
255
+ _Mybase::operator=(_STD move(_Right));
256
+ return *this;
257
+ }
258
+
259
+ mapped_type& operator[](key_type&& _Keyval) {
260
+ return this->_Try_emplace(_STD move(_Keyval)).first->_Myval.second;
261
+ }
262
+
263
+ void swap(unordered_map& _Right) noexcept(noexcept(_Mybase::swap(_Right))) {
264
+ _Mybase::swap(_Right);
265
+ }
266
+
267
+ using _Mybase::insert;
268
+
269
+ template <class _Valty, enable_if_t<is_constructible_v<value_type, _Valty>, int> = 0>
270
+ pair<iterator, bool> insert(_Valty&& _Val) {
271
+ return this->emplace(_STD forward<_Valty>(_Val));
272
+ }
273
+
274
+ template <class _Valty, enable_if_t<is_constructible_v<value_type, _Valty>, int> = 0>
275
+ iterator insert(const_iterator _Where, _Valty&& _Val) {
276
+ return this->emplace_hint(_Where, _STD forward<_Valty>(_Val));
277
+ }
278
+
279
+ template <class... _Mappedty>
280
+ pair<iterator, bool> try_emplace(const key_type& _Keyval, _Mappedty&&... _Mapval) {
281
+ const auto _Result = this->_Try_emplace(_Keyval, _STD forward<_Mappedty>(_Mapval)...);
282
+ return {this->_List._Make_iter(_Result.first), _Result.second};
283
+ }
284
+
285
+ template <class... _Mappedty>
286
+ pair<iterator, bool> try_emplace(key_type&& _Keyval, _Mappedty&&... _Mapval) {
287
+ const auto _Result = this->_Try_emplace(_STD move(_Keyval), _STD forward<_Mappedty>(_Mapval)...);
288
+ return {this->_List._Make_iter(_Result.first), _Result.second};
289
+ }
290
+
291
+ template <class... _Mappedty>
292
+ iterator try_emplace(const const_iterator _Hint, const key_type& _Keyval, _Mappedty&&... _Mapval) {
293
+ return this->_List._Make_iter(
294
+ this->_Try_emplace_hint(_Hint._Ptr, _Keyval, _STD forward<_Mappedty>(_Mapval)...));
295
+ }
296
+
297
+ template <class... _Mappedty>
298
+ iterator try_emplace(const const_iterator _Hint, key_type&& _Keyval, _Mappedty&&... _Mapval) {
299
+ return this->_List._Make_iter(
300
+ this->_Try_emplace_hint(_Hint._Ptr, _STD move(_Keyval), _STD forward<_Mappedty>(_Mapval)...));
301
+ }
302
+
303
+ private:
304
+ template <class _Keyty, class _Mappedty>
305
+ pair<iterator, bool> _Insert_or_assign(_Keyty&& _Keyval_arg, _Mappedty&& _Mapval) {
306
+ const auto& _Keyval = _Keyval_arg;
307
+ const size_t _Hashval = this->_Traitsobj(_Keyval);
308
+ auto _Target = this->_Find_last(_Keyval, _Hashval);
309
+ if (_Target._Duplicate) {
310
+ _Target._Duplicate->_Myval.second = _STD forward<_Mappedty>(_Mapval);
311
+ return {this->_List._Make_iter(_Target._Duplicate), false};
312
+ }
313
+
314
+ this->_Check_max_size();
315
+ // invalidates _Keyval:
316
+ _List_node_emplace_op2<_Alnode> _Newnode(
317
+ this->_Getal(), _STD forward<_Keyty>(_Keyval_arg), _STD forward<_Mappedty>(_Mapval));
318
+ if (this->_Check_rehash_required_1()) {
319
+ this->_Rehash_for_1();
320
+ _Target = this->_Find_last(_Newnode._Ptr->_Myval.first, _Hashval);
321
+ }
322
+
323
+ return {this->_List._Make_iter(
324
+ this->_Insert_new_node_before(_Hashval, _Target._Insert_before, _Newnode._Release())),
325
+ true};
326
+ }
327
+
328
+ template <class _Keyty, class _Mappedty>
329
+ iterator _Insert_or_assign(const _Nodeptr _Hint, _Keyty&& _Keyval_arg, _Mappedty&& _Mapval) {
330
+ const auto& _Keyval = _Keyval_arg;
331
+ const size_t _Hashval = this->_Traitsobj(_Keyval);
332
+ auto _Target = this->_Find_hint(_Hint, _Keyval, _Hashval);
333
+ if (_Target._Duplicate) {
334
+ _Target._Duplicate->_Myval.second = _STD forward<_Mappedty>(_Mapval);
335
+ return this->_List._Make_iter(_Target._Duplicate);
336
+ }
337
+
338
+ this->_Check_max_size();
339
+ // invalidates _Keyval:
340
+ _List_node_emplace_op2<_Alnode> _Newnode(
341
+ this->_Getal(), _STD forward<_Keyty>(_Keyval_arg), _STD forward<_Mappedty>(_Mapval));
342
+ if (this->_Check_rehash_required_1()) {
343
+ this->_Rehash_for_1();
344
+ _Target = this->_Find_hint(_Hint, _Newnode._Ptr->_Myval.first, _Hashval);
345
+ }
346
+
347
+ return this->_List._Make_iter(
348
+ this->_Insert_new_node_before(_Hashval, _Target._Insert_before, _Newnode._Release()));
349
+ }
350
+
351
+ public:
352
+ template <class _Mappedty>
353
+ pair<iterator, bool> insert_or_assign(const key_type& _Keyval, _Mappedty&& _Mapval) {
354
+ return _Insert_or_assign(_Keyval, _STD forward<_Mappedty>(_Mapval));
355
+ }
356
+
357
+ template <class _Mappedty>
358
+ pair<iterator, bool> insert_or_assign(key_type&& _Keyval, _Mappedty&& _Mapval) {
359
+ return _Insert_or_assign(_STD move(_Keyval), _STD forward<_Mappedty>(_Mapval));
360
+ }
361
+
362
+ template <class _Mappedty>
363
+ iterator insert_or_assign(const_iterator _Hint, const key_type& _Keyval, _Mappedty&& _Mapval) {
364
+ return _Insert_or_assign(_Hint._Ptr, _Keyval, _STD forward<_Mappedty>(_Mapval));
365
+ }
366
+
367
+ template <class _Mappedty>
368
+ iterator insert_or_assign(const_iterator _Hint, key_type&& _Keyval, _Mappedty&& _Mapval) {
369
+ return _Insert_or_assign(_Hint._Ptr, _STD move(_Keyval), _STD forward<_Mappedty>(_Mapval));
370
+ }
371
+
372
+ unordered_map(initializer_list<value_type> _Ilist) : _Mybase(_Key_compare(), allocator_type()) {
373
+ insert(_Ilist);
374
+ }
375
+
376
+ unordered_map(initializer_list<value_type> _Ilist, const allocator_type& _Al) : _Mybase(_Key_compare(), _Al) {
377
+ insert(_Ilist);
378
+ }
379
+
380
+ unordered_map(initializer_list<value_type> _Ilist, size_type _Buckets) : _Mybase(_Key_compare(), allocator_type()) {
381
+ _Mybase::rehash(_Buckets);
382
+ insert(_Ilist);
383
+ }
384
+
385
+ unordered_map(initializer_list<value_type> _Ilist, size_type _Buckets, const allocator_type& _Al)
386
+ : _Mybase(_Key_compare(), _Al) {
387
+ _Mybase::rehash(_Buckets);
388
+ insert(_Ilist);
389
+ }
390
+
391
+ unordered_map(initializer_list<value_type> _Ilist, size_type _Buckets, const hasher& _Hasharg)
392
+ : _Mybase(_Key_compare(_Hasharg), allocator_type()) {
393
+ _Mybase::rehash(_Buckets);
394
+ insert(_Ilist);
395
+ }
396
+
397
+ unordered_map(
398
+ initializer_list<value_type> _Ilist, size_type _Buckets, const hasher& _Hasharg, const allocator_type& _Al)
399
+ : _Mybase(_Key_compare(_Hasharg), _Al) {
400
+ _Mybase::rehash(_Buckets);
401
+ insert(_Ilist);
402
+ }
403
+
404
+ unordered_map(
405
+ initializer_list<value_type> _Ilist, size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg)
406
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), allocator_type()) {
407
+ _Mybase::rehash(_Buckets);
408
+ insert(_Ilist);
409
+ }
410
+
411
+ unordered_map(initializer_list<value_type> _Ilist, size_type _Buckets, const hasher& _Hasharg,
412
+ const _Keyeq& _Keyeqarg, const allocator_type& _Al)
413
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), _Al) {
414
+ _Mybase::rehash(_Buckets);
415
+ insert(_Ilist);
416
+ }
417
+
418
+ unordered_map& operator=(initializer_list<value_type> _Ilist) {
419
+ _Mybase::clear();
420
+ insert(_Ilist);
421
+ return *this;
422
+ }
423
+
424
+ _NODISCARD hasher hash_function() const {
425
+ return _Mybase::_Traitsobj._Mypair._Get_first();
426
+ }
427
+
428
+ _NODISCARD key_equal key_eq() const {
429
+ return _Mybase::_Traitsobj._Mypair._Myval2._Get_first();
430
+ }
431
+
432
+ mapped_type& operator[](const key_type& _Keyval) {
433
+ return this->_Try_emplace(_Keyval).first->_Myval.second;
434
+ }
435
+
436
+ _NODISCARD mapped_type& at(const key_type& _Keyval) {
437
+ const auto _Target = this->_Find_last(_Keyval, this->_Traitsobj(_Keyval));
438
+ if (_Target._Duplicate) {
439
+ return _Target._Duplicate->_Myval.second;
440
+ }
441
+
442
+ _Xout_of_range("invalid unordered_map<K, T> key");
443
+ }
444
+
445
+ _NODISCARD const mapped_type& at(const key_type& _Keyval) const {
446
+ const auto _Target = this->_Find_last(_Keyval, this->_Traitsobj(_Keyval));
447
+ if (_Target._Duplicate) {
448
+ return _Target._Duplicate->_Myval.second;
449
+ }
450
+
451
+ _Xout_of_range("invalid unordered_map<K, T> key");
452
+ }
453
+
454
+ using _Mybase::_Unchecked_begin;
455
+ using _Mybase::_Unchecked_end;
456
+ };
457
+
458
+ #if _HAS_CXX17
459
+ template <class _Iter, class _Hasher = hash<_Guide_key_t<_Iter>>, class _Keyeq = equal_to<_Guide_key_t<_Iter>>,
460
+ class _Alloc = allocator<_Guide_pair_t<_Iter>>,
461
+ enable_if_t<
462
+ conjunction_v<_Is_iterator<_Iter>, _Is_hasher<_Hasher>, negation<_Is_allocator<_Keyeq>>, _Is_allocator<_Alloc>>,
463
+ int> = 0>
464
+ unordered_map(_Iter, _Iter, _Guide_size_type_t<_Alloc> = 0, _Hasher = _Hasher(), _Keyeq = _Keyeq(), _Alloc = _Alloc())
465
+ -> unordered_map<_Guide_key_t<_Iter>, _Guide_val_t<_Iter>, _Hasher, _Keyeq, _Alloc>;
466
+
467
+ template <class _Kty, class _Ty, class _Hasher = hash<_Kty>, class _Keyeq = equal_to<_Kty>,
468
+ class _Alloc = allocator<pair<const _Kty, _Ty>>,
469
+ enable_if_t<conjunction_v<_Is_hasher<_Hasher>, negation<_Is_allocator<_Keyeq>>, _Is_allocator<_Alloc>>, int> = 0>
470
+ unordered_map(initializer_list<pair<_Kty, _Ty>>, _Guide_size_type_t<_Alloc> = 0, _Hasher = _Hasher(), _Keyeq = _Keyeq(),
471
+ _Alloc = _Alloc()) -> unordered_map<_Kty, _Ty, _Hasher, _Keyeq, _Alloc>;
472
+
473
+ template <class _Iter, class _Alloc, enable_if_t<conjunction_v<_Is_iterator<_Iter>, _Is_allocator<_Alloc>>, int> = 0>
474
+ unordered_map(_Iter, _Iter, _Alloc) -> unordered_map<_Guide_key_t<_Iter>, _Guide_val_t<_Iter>,
475
+ hash<_Guide_key_t<_Iter>>, equal_to<_Guide_key_t<_Iter>>, _Alloc>;
476
+
477
+ template <class _Iter, class _Alloc, enable_if_t<conjunction_v<_Is_iterator<_Iter>, _Is_allocator<_Alloc>>, int> = 0>
478
+ unordered_map(_Iter, _Iter, _Guide_size_type_t<_Alloc>, _Alloc) -> unordered_map<_Guide_key_t<_Iter>,
479
+ _Guide_val_t<_Iter>, hash<_Guide_key_t<_Iter>>, equal_to<_Guide_key_t<_Iter>>, _Alloc>;
480
+
481
+ template <class _Iter, class _Hasher, class _Alloc,
482
+ enable_if_t<conjunction_v<_Is_iterator<_Iter>, _Is_hasher<_Hasher>, _Is_allocator<_Alloc>>, int> = 0>
483
+ unordered_map(_Iter, _Iter, _Guide_size_type_t<_Alloc>, _Hasher, _Alloc)
484
+ -> unordered_map<_Guide_key_t<_Iter>, _Guide_val_t<_Iter>, _Hasher, equal_to<_Guide_key_t<_Iter>>, _Alloc>;
485
+
486
+ template <class _Kty, class _Ty, class _Alloc, enable_if_t<_Is_allocator<_Alloc>::value, int> = 0>
487
+ unordered_map(initializer_list<pair<_Kty, _Ty>>, _Alloc)
488
+ -> unordered_map<_Kty, _Ty, hash<_Kty>, equal_to<_Kty>, _Alloc>;
489
+
490
+ template <class _Kty, class _Ty, class _Alloc, enable_if_t<_Is_allocator<_Alloc>::value, int> = 0>
491
+ unordered_map(initializer_list<pair<_Kty, _Ty>>, _Guide_size_type_t<_Alloc>, _Alloc)
492
+ -> unordered_map<_Kty, _Ty, hash<_Kty>, equal_to<_Kty>, _Alloc>;
493
+
494
+ template <class _Kty, class _Ty, class _Hasher, class _Alloc,
495
+ enable_if_t<conjunction_v<_Is_hasher<_Hasher>, _Is_allocator<_Alloc>>, int> = 0>
496
+ unordered_map(initializer_list<pair<_Kty, _Ty>>, _Guide_size_type_t<_Alloc>, _Hasher, _Alloc)
497
+ -> unordered_map<_Kty, _Ty, _Hasher, equal_to<_Kty>, _Alloc>;
498
+
499
+ #if _HAS_CXX23
500
+ template <_RANGES input_range _Rng, _Hasher_for_container _Hasher = hash<_Range_key_type<_Rng>>,
501
+ class _Keyeq = equal_to<_Range_key_type<_Rng>>,
502
+ _Allocator_for_container _Alloc = allocator<_Range_to_alloc_type<_Rng>>>
503
+ requires (!_Allocator_for_container<_Keyeq>)
504
+ unordered_map(from_range_t, _Rng&&, _Guide_size_type_t<_Alloc> = 0, _Hasher = _Hasher(), _Keyeq = _Keyeq(),
505
+ _Alloc = _Alloc()) -> unordered_map<_Range_key_type<_Rng>, _Range_mapped_type<_Rng>, _Hasher, _Keyeq, _Alloc>;
506
+
507
+ template <_RANGES input_range _Rng, _Allocator_for_container _Alloc>
508
+ unordered_map(from_range_t, _Rng&&, _Guide_size_type_t<_Alloc>, _Alloc) -> unordered_map<_Range_key_type<_Rng>,
509
+ _Range_mapped_type<_Rng>, hash<_Range_key_type<_Rng>>, equal_to<_Range_key_type<_Rng>>, _Alloc>;
510
+
511
+ template <_RANGES input_range _Rng, _Allocator_for_container _Alloc>
512
+ unordered_map(from_range_t, _Rng&&, _Alloc) -> unordered_map<_Range_key_type<_Rng>, _Range_mapped_type<_Rng>,
513
+ hash<_Range_key_type<_Rng>>, equal_to<_Range_key_type<_Rng>>, _Alloc>;
514
+
515
+ template <_RANGES input_range _Rng, _Hasher_for_container _Hasher, _Allocator_for_container _Alloc>
516
+ unordered_map(from_range_t, _Rng&&, _Guide_size_type_t<_Alloc>, _Hasher, _Alloc)
517
+ -> unordered_map<_Range_key_type<_Rng>, _Range_mapped_type<_Rng>, _Hasher, equal_to<_Range_key_type<_Rng>>, _Alloc>;
518
+ #endif // _HAS_CXX23
519
+ #endif // _HAS_CXX17
520
+
521
+ _EXPORT_STD template <class _Kty, class _Ty, class _Hasher, class _Keyeq, class _Alloc>
522
+ void swap(unordered_map<_Kty, _Ty, _Hasher, _Keyeq, _Alloc>& _Left,
523
+ unordered_map<_Kty, _Ty, _Hasher, _Keyeq, _Alloc>& _Right) noexcept(noexcept(_Left.swap(_Right))) {
524
+ _Left.swap(_Right);
525
+ }
526
+
527
+ #if _HAS_CXX20
528
+ _EXPORT_STD template <class _Kty, class _Ty, class _Hasher, class _Keyeq, class _Alloc, class _Pr>
529
+ unordered_map<_Kty, _Ty, _Hasher, _Keyeq, _Alloc>::size_type erase_if(
530
+ unordered_map<_Kty, _Ty, _Hasher, _Keyeq, _Alloc>& _Cont, _Pr _Pred) {
531
+ return _STD _Erase_nodes_if(_Cont, _STD _Pass_fn(_Pred));
532
+ }
533
+ #endif // _HAS_CXX20
534
+
535
+ _EXPORT_STD template <class _Kty, class _Ty, class _Hasher, class _Keyeq, class _Alloc>
536
+ _NODISCARD bool operator==(const unordered_map<_Kty, _Ty, _Hasher, _Keyeq, _Alloc>& _Left,
537
+ const unordered_map<_Kty, _Ty, _Hasher, _Keyeq, _Alloc>& _Right) {
538
+ return _Hash_equal(_Left, _Right);
539
+ }
540
+
541
+ #if !_HAS_CXX20
542
+ template <class _Kty, class _Ty, class _Hasher, class _Keyeq, class _Alloc>
543
+ _NODISCARD bool operator!=(const unordered_map<_Kty, _Ty, _Hasher, _Keyeq, _Alloc>& _Left,
544
+ const unordered_map<_Kty, _Ty, _Hasher, _Keyeq, _Alloc>& _Right) {
545
+ return !(_Left == _Right);
546
+ }
547
+ #endif // !_HAS_CXX20
548
+
549
+ _EXPORT_STD template <class _Kty, class _Ty, class _Hasher = hash<_Kty>, class _Keyeq = equal_to<_Kty>,
550
+ class _Alloc = allocator<pair<const _Kty, _Ty>>>
551
+ class unordered_multimap : public _Hash<_Umap_traits<_Kty, _Ty, _Uhash_compare<_Kty, _Hasher, _Keyeq>, _Alloc, true>> {
552
+ // hash table of {key, mapped} values, non-unique keys
553
+ public:
554
+ static_assert(!_ENFORCE_MATCHING_ALLOCATORS || is_same_v<pair<const _Kty, _Ty>, typename _Alloc::value_type>,
555
+ _MISMATCHED_ALLOCATOR_MESSAGE(
556
+ "unordered_multimap<Key, Value, Hasher, Eq, Allocator>", "pair<const Key, Value>"));
557
+ static_assert(is_object_v<_Kty>, "The C++ Standard forbids containers of non-object types "
558
+ "because of [container.requirements].");
559
+
560
+ private:
561
+ using _Mytraits = _Uhash_compare<_Kty, _Hasher, _Keyeq>;
562
+ using _Mybase = _Hash<_Umap_traits<_Kty, _Ty, _Mytraits, _Alloc, true>>;
563
+ using _Alnode = typename _Mybase::_Alnode;
564
+ using _Alnode_traits = typename _Mybase::_Alnode_traits;
565
+ using _Key_compare = typename _Mybase::_Key_compare;
566
+
567
+ public:
568
+ using hasher = _Hasher;
569
+ using key_type = _Kty;
570
+ using mapped_type = _Ty;
571
+ using key_equal = _Keyeq;
572
+
573
+ using value_type = pair<const _Kty, _Ty>;
574
+ using allocator_type = typename _Mybase::allocator_type;
575
+ using size_type = typename _Mybase::size_type;
576
+ using difference_type = typename _Mybase::difference_type;
577
+ using pointer = typename _Mybase::pointer;
578
+ using const_pointer = typename _Mybase::const_pointer;
579
+ using reference = value_type&;
580
+ using const_reference = const value_type&;
581
+ using iterator = typename _Mybase::iterator;
582
+ using const_iterator = typename _Mybase::const_iterator;
583
+
584
+ using local_iterator = typename _Mybase::iterator;
585
+ using const_local_iterator = typename _Mybase::const_iterator;
586
+
587
+ unordered_multimap() : _Mybase(_Key_compare(), allocator_type()) {} // construct empty map from defaults
588
+
589
+ explicit unordered_multimap(const allocator_type& _Al)
590
+ : _Mybase(_Key_compare(), _Al) {} // construct empty map from defaults, allocator
591
+
592
+ unordered_multimap(const unordered_multimap& _Right)
593
+ : _Mybase(_Right, _Alnode_traits::select_on_container_copy_construction(_Right._Getal())) {}
594
+
595
+ unordered_multimap(const unordered_multimap& _Right, const allocator_type& _Al) : _Mybase(_Right, _Al) {}
596
+
597
+ explicit unordered_multimap(size_type _Buckets) : _Mybase(_Key_compare(), allocator_type()) {
598
+ _Mybase::rehash(_Buckets);
599
+ }
600
+
601
+ unordered_multimap(size_type _Buckets, const allocator_type& _Al) : _Mybase(_Key_compare(), _Al) {
602
+ _Mybase::rehash(_Buckets);
603
+ }
604
+
605
+ unordered_multimap(size_type _Buckets, const hasher& _Hasharg) : _Mybase(_Key_compare(_Hasharg), allocator_type()) {
606
+ _Mybase::rehash(_Buckets);
607
+ }
608
+
609
+ unordered_multimap(size_type _Buckets, const hasher& _Hasharg, const allocator_type& _Al)
610
+ : _Mybase(_Key_compare(_Hasharg), _Al) {
611
+ _Mybase::rehash(_Buckets);
612
+ }
613
+
614
+ unordered_multimap(size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg)
615
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), allocator_type()) {
616
+ _Mybase::rehash(_Buckets);
617
+ }
618
+
619
+ unordered_multimap(size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg, const allocator_type& _Al)
620
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), _Al) {
621
+ _Mybase::rehash(_Buckets);
622
+ }
623
+
624
+ template <class _Iter>
625
+ unordered_multimap(_Iter _First, _Iter _Last) : _Mybase(_Key_compare(), allocator_type()) {
626
+ insert(_First, _Last);
627
+ }
628
+
629
+ template <class _Iter>
630
+ unordered_multimap(_Iter _First, _Iter _Last, const allocator_type& _Al) : _Mybase(_Key_compare(), _Al) {
631
+ insert(_First, _Last);
632
+ }
633
+
634
+ template <class _Iter>
635
+ unordered_multimap(_Iter _First, _Iter _Last, size_type _Buckets) : _Mybase(_Key_compare(), allocator_type()) {
636
+ _Mybase::rehash(_Buckets);
637
+ insert(_First, _Last);
638
+ }
639
+
640
+ template <class _Iter>
641
+ unordered_multimap(_Iter _First, _Iter _Last, size_type _Buckets, const allocator_type& _Al)
642
+ : _Mybase(_Key_compare(), _Al) {
643
+ _Mybase::rehash(_Buckets);
644
+ insert(_First, _Last);
645
+ }
646
+
647
+ template <class _Iter>
648
+ unordered_multimap(_Iter _First, _Iter _Last, size_type _Buckets, const hasher& _Hasharg)
649
+ : _Mybase(_Key_compare(_Hasharg), allocator_type()) {
650
+ _Mybase::rehash(_Buckets);
651
+ insert(_First, _Last);
652
+ }
653
+
654
+ template <class _Iter>
655
+ unordered_multimap(_Iter _First, _Iter _Last, size_type _Buckets, const hasher& _Hasharg, const allocator_type& _Al)
656
+ : _Mybase(_Key_compare(_Hasharg), _Al) {
657
+ _Mybase::rehash(_Buckets);
658
+ insert(_First, _Last);
659
+ }
660
+
661
+ template <class _Iter>
662
+ unordered_multimap(_Iter _First, _Iter _Last, size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg)
663
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), allocator_type()) {
664
+ _Mybase::rehash(_Buckets);
665
+ insert(_First, _Last);
666
+ }
667
+
668
+ template <class _Iter>
669
+ unordered_multimap(_Iter _First, _Iter _Last, size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg,
670
+ const allocator_type& _Al)
671
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), _Al) {
672
+ _Mybase::rehash(_Buckets);
673
+ insert(_First, _Last);
674
+ }
675
+
676
+ #if _HAS_CXX23
677
+ template <_Container_compatible_range<value_type> _Rng>
678
+ unordered_multimap(from_range_t, _Rng&& _Range) : _Mybase(_Key_compare(), allocator_type()) {
679
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
680
+ }
681
+
682
+ template <_Container_compatible_range<value_type> _Rng>
683
+ unordered_multimap(from_range_t, _Rng&& _Range, const allocator_type& _Al) : _Mybase(_Key_compare(), _Al) {
684
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
685
+ }
686
+
687
+ template <_Container_compatible_range<value_type> _Rng>
688
+ unordered_multimap(from_range_t, _Rng&& _Range, size_type _Buckets) : _Mybase(_Key_compare(), allocator_type()) {
689
+ _Mybase::rehash(_Buckets);
690
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
691
+ }
692
+
693
+ template <_Container_compatible_range<value_type> _Rng>
694
+ unordered_multimap(from_range_t, _Rng&& _Range, size_type _Buckets, const allocator_type& _Al)
695
+ : _Mybase(_Key_compare(), _Al) {
696
+ _Mybase::rehash(_Buckets);
697
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
698
+ }
699
+
700
+ template <_Container_compatible_range<value_type> _Rng>
701
+ unordered_multimap(from_range_t, _Rng&& _Range, size_type _Buckets, const hasher& _Hasharg)
702
+ : _Mybase(_Key_compare(_Hasharg), allocator_type()) {
703
+ _Mybase::rehash(_Buckets);
704
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
705
+ }
706
+
707
+ template <_Container_compatible_range<value_type> _Rng>
708
+ unordered_multimap(
709
+ from_range_t, _Rng&& _Range, size_type _Buckets, const hasher& _Hasharg, const allocator_type& _Al)
710
+ : _Mybase(_Key_compare(_Hasharg), _Al) {
711
+ _Mybase::rehash(_Buckets);
712
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
713
+ }
714
+
715
+ template <_Container_compatible_range<value_type> _Rng>
716
+ unordered_multimap(from_range_t, _Rng&& _Range, size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg)
717
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), allocator_type()) {
718
+ _Mybase::rehash(_Buckets);
719
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
720
+ }
721
+
722
+ template <_Container_compatible_range<value_type> _Rng>
723
+ unordered_multimap(from_range_t, _Rng&& _Range, size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg,
724
+ const allocator_type& _Al)
725
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), _Al) {
726
+ _Mybase::rehash(_Buckets);
727
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
728
+ }
729
+ #endif // _HAS_CXX23
730
+
731
+ unordered_multimap& operator=(const unordered_multimap& _Right) {
732
+ _Mybase::operator=(_Right);
733
+ return *this;
734
+ }
735
+
736
+ unordered_multimap(unordered_multimap&& _Right) : _Mybase(_STD move(_Right)) {}
737
+
738
+ unordered_multimap(unordered_multimap&& _Right, const allocator_type& _Al) : _Mybase(_STD move(_Right), _Al) {}
739
+
740
+ unordered_multimap& operator=(unordered_multimap&& _Right) noexcept(_Alnode_traits::is_always_equal::value
741
+ && is_nothrow_move_assignable_v<_Hasher>
742
+ && is_nothrow_move_assignable_v<_Keyeq>) {
743
+ _Mybase::operator=(_STD move(_Right));
744
+ return *this;
745
+ }
746
+
747
+ void swap(unordered_multimap& _Right) noexcept(noexcept(_Mybase::swap(_Right))) {
748
+ _Mybase::swap(_Right);
749
+ }
750
+
751
+ using _Mybase::insert;
752
+
753
+ template <class _Valty, enable_if_t<is_constructible_v<value_type, _Valty>, int> = 0>
754
+ iterator insert(_Valty&& _Val) {
755
+ return this->emplace(_STD forward<_Valty>(_Val));
756
+ }
757
+
758
+ template <class _Valty, enable_if_t<is_constructible_v<value_type, _Valty>, int> = 0>
759
+ iterator insert(const_iterator _Where, _Valty&& _Val) {
760
+ return this->emplace_hint(_Where, _STD forward<_Valty>(_Val));
761
+ }
762
+
763
+ unordered_multimap(initializer_list<value_type> _Ilist) : _Mybase(_Key_compare(), allocator_type()) {
764
+ insert(_Ilist);
765
+ }
766
+
767
+ unordered_multimap(initializer_list<value_type> _Ilist, const allocator_type& _Al) : _Mybase(_Key_compare(), _Al) {
768
+ insert(_Ilist);
769
+ }
770
+
771
+ unordered_multimap(initializer_list<value_type> _Ilist, size_type _Buckets)
772
+ : _Mybase(_Key_compare(), allocator_type()) {
773
+ _Mybase::rehash(_Buckets);
774
+ insert(_Ilist);
775
+ }
776
+
777
+ unordered_multimap(initializer_list<value_type> _Ilist, size_type _Buckets, const allocator_type& _Al)
778
+ : _Mybase(_Key_compare(), _Al) {
779
+ _Mybase::rehash(_Buckets);
780
+ insert(_Ilist);
781
+ }
782
+
783
+ unordered_multimap(initializer_list<value_type> _Ilist, size_type _Buckets, const hasher& _Hasharg)
784
+ : _Mybase(_Key_compare(_Hasharg), allocator_type()) {
785
+ _Mybase::rehash(_Buckets);
786
+ insert(_Ilist);
787
+ }
788
+
789
+ unordered_multimap(
790
+ initializer_list<value_type> _Ilist, size_type _Buckets, const hasher& _Hasharg, const allocator_type& _Al)
791
+ : _Mybase(_Key_compare(_Hasharg), _Al) {
792
+ _Mybase::rehash(_Buckets);
793
+ insert(_Ilist);
794
+ }
795
+
796
+ unordered_multimap(
797
+ initializer_list<value_type> _Ilist, size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg)
798
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), allocator_type()) {
799
+ _Mybase::rehash(_Buckets);
800
+ insert(_Ilist);
801
+ }
802
+
803
+ unordered_multimap(initializer_list<value_type> _Ilist, size_type _Buckets, const hasher& _Hasharg,
804
+ const _Keyeq& _Keyeqarg, const allocator_type& _Al)
805
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), _Al) {
806
+ _Mybase::rehash(_Buckets);
807
+ insert(_Ilist);
808
+ }
809
+
810
+ unordered_multimap& operator=(initializer_list<value_type> _Ilist) {
811
+ _Mybase::clear();
812
+ insert(_Ilist);
813
+ return *this;
814
+ }
815
+
816
+ _NODISCARD hasher hash_function() const {
817
+ return _Mybase::_Traitsobj._Mypair._Get_first();
818
+ }
819
+
820
+ _NODISCARD key_equal key_eq() const {
821
+ return _Mybase::_Traitsobj._Mypair._Myval2._Get_first();
822
+ }
823
+
824
+ using _Mybase::_Unchecked_begin;
825
+ using _Mybase::_Unchecked_end;
826
+ };
827
+
828
+ #if _HAS_CXX17
829
+ template <class _Iter, class _Hasher = hash<_Guide_key_t<_Iter>>, class _Keyeq = equal_to<_Guide_key_t<_Iter>>,
830
+ class _Alloc = allocator<_Guide_pair_t<_Iter>>,
831
+ enable_if_t<
832
+ conjunction_v<_Is_iterator<_Iter>, _Is_hasher<_Hasher>, negation<_Is_allocator<_Keyeq>>, _Is_allocator<_Alloc>>,
833
+ int> = 0>
834
+ unordered_multimap(_Iter, _Iter, _Guide_size_type_t<_Alloc> = 0, _Hasher = _Hasher(), _Keyeq = _Keyeq(),
835
+ _Alloc = _Alloc()) -> unordered_multimap<_Guide_key_t<_Iter>, _Guide_val_t<_Iter>, _Hasher, _Keyeq, _Alloc>;
836
+
837
+ template <class _Kty, class _Ty, class _Hasher = hash<_Kty>, class _Keyeq = equal_to<_Kty>,
838
+ class _Alloc = allocator<pair<const _Kty, _Ty>>,
839
+ enable_if_t<conjunction_v<_Is_hasher<_Hasher>, negation<_Is_allocator<_Keyeq>>, _Is_allocator<_Alloc>>, int> = 0>
840
+ unordered_multimap(initializer_list<pair<_Kty, _Ty>>, _Guide_size_type_t<_Alloc> = 0, _Hasher = _Hasher(),
841
+ _Keyeq = _Keyeq(), _Alloc = _Alloc()) -> unordered_multimap<_Kty, _Ty, _Hasher, _Keyeq, _Alloc>;
842
+
843
+ template <class _Iter, class _Alloc, enable_if_t<conjunction_v<_Is_iterator<_Iter>, _Is_allocator<_Alloc>>, int> = 0>
844
+ unordered_multimap(_Iter, _Iter, _Alloc) -> unordered_multimap<_Guide_key_t<_Iter>, _Guide_val_t<_Iter>,
845
+ hash<_Guide_key_t<_Iter>>, equal_to<_Guide_key_t<_Iter>>, _Alloc>;
846
+
847
+ template <class _Iter, class _Alloc, enable_if_t<conjunction_v<_Is_iterator<_Iter>, _Is_allocator<_Alloc>>, int> = 0>
848
+ unordered_multimap(_Iter, _Iter, _Guide_size_type_t<_Alloc>, _Alloc) -> unordered_multimap<_Guide_key_t<_Iter>,
849
+ _Guide_val_t<_Iter>, hash<_Guide_key_t<_Iter>>, equal_to<_Guide_key_t<_Iter>>, _Alloc>;
850
+
851
+ template <class _Iter, class _Hasher, class _Alloc,
852
+ enable_if_t<conjunction_v<_Is_iterator<_Iter>, _Is_hasher<_Hasher>, _Is_allocator<_Alloc>>, int> = 0>
853
+ unordered_multimap(_Iter, _Iter, _Guide_size_type_t<_Alloc>, _Hasher, _Alloc)
854
+ -> unordered_multimap<_Guide_key_t<_Iter>, _Guide_val_t<_Iter>, _Hasher, equal_to<_Guide_key_t<_Iter>>, _Alloc>;
855
+
856
+ template <class _Kty, class _Ty, class _Alloc, enable_if_t<_Is_allocator<_Alloc>::value, int> = 0>
857
+ unordered_multimap(initializer_list<pair<_Kty, _Ty>>, _Alloc)
858
+ -> unordered_multimap<_Kty, _Ty, hash<_Kty>, equal_to<_Kty>, _Alloc>;
859
+
860
+ template <class _Kty, class _Ty, class _Alloc, enable_if_t<_Is_allocator<_Alloc>::value, int> = 0>
861
+ unordered_multimap(initializer_list<pair<_Kty, _Ty>>, _Guide_size_type_t<_Alloc>, _Alloc)
862
+ -> unordered_multimap<_Kty, _Ty, hash<_Kty>, equal_to<_Kty>, _Alloc>;
863
+
864
+ template <class _Kty, class _Ty, class _Hasher, class _Alloc,
865
+ enable_if_t<conjunction_v<_Is_hasher<_Hasher>, _Is_allocator<_Alloc>>, int> = 0>
866
+ unordered_multimap(initializer_list<pair<_Kty, _Ty>>, _Guide_size_type_t<_Alloc>, _Hasher, _Alloc)
867
+ -> unordered_multimap<_Kty, _Ty, _Hasher, equal_to<_Kty>, _Alloc>;
868
+
869
+ #if _HAS_CXX23
870
+ template <_RANGES input_range _Rng, _Hasher_for_container _Hasher = hash<_Range_key_type<_Rng>>,
871
+ class _Keyeq = equal_to<_Range_key_type<_Rng>>,
872
+ _Allocator_for_container _Alloc = allocator<_Range_to_alloc_type<_Rng>>>
873
+ requires (!_Allocator_for_container<_Keyeq>)
874
+ unordered_multimap(from_range_t, _Rng&&, _Guide_size_type_t<_Alloc> = 0, _Hasher = _Hasher(), _Keyeq = _Keyeq(),
875
+ _Alloc = _Alloc()) -> unordered_multimap<_Range_key_type<_Rng>, _Range_mapped_type<_Rng>, _Hasher, _Keyeq, _Alloc>;
876
+
877
+ template <_RANGES input_range _Rng, _Allocator_for_container _Alloc>
878
+ unordered_multimap(from_range_t, _Rng&&, _Guide_size_type_t<_Alloc>, _Alloc)
879
+ -> unordered_multimap<_Range_key_type<_Rng>, _Range_mapped_type<_Rng>, hash<_Range_key_type<_Rng>>,
880
+ equal_to<_Range_key_type<_Rng>>, _Alloc>;
881
+
882
+ template <_RANGES input_range _Rng, _Allocator_for_container _Alloc>
883
+ unordered_multimap(from_range_t, _Rng&&, _Alloc) -> unordered_multimap<_Range_key_type<_Rng>, _Range_mapped_type<_Rng>,
884
+ hash<_Range_key_type<_Rng>>, equal_to<_Range_key_type<_Rng>>, _Alloc>;
885
+
886
+ template <_RANGES input_range _Rng, _Hasher_for_container _Hasher, _Allocator_for_container _Alloc>
887
+ unordered_multimap(from_range_t, _Rng&&, _Guide_size_type_t<_Alloc>, _Hasher, _Alloc)
888
+ -> unordered_multimap<_Range_key_type<_Rng>, _Range_mapped_type<_Rng>, _Hasher, equal_to<_Range_key_type<_Rng>>,
889
+ _Alloc>;
890
+ #endif // _HAS_CXX23
891
+ #endif // _HAS_CXX17
892
+
893
+ _EXPORT_STD template <class _Kty, class _Ty, class _Hasher, class _Keyeq, class _Alloc>
894
+ void swap(unordered_multimap<_Kty, _Ty, _Hasher, _Keyeq, _Alloc>& _Left,
895
+ unordered_multimap<_Kty, _Ty, _Hasher, _Keyeq, _Alloc>& _Right) noexcept(noexcept(_Left.swap(_Right))) {
896
+ _Left.swap(_Right);
897
+ }
898
+
899
+ #if _HAS_CXX20
900
+ _EXPORT_STD template <class _Kty, class _Ty, class _Hasher, class _Keyeq, class _Alloc, class _Pr>
901
+ unordered_multimap<_Kty, _Ty, _Hasher, _Keyeq, _Alloc>::size_type erase_if(
902
+ unordered_multimap<_Kty, _Ty, _Hasher, _Keyeq, _Alloc>& _Cont, _Pr _Pred) {
903
+ return _STD _Erase_nodes_if(_Cont, _STD _Pass_fn(_Pred));
904
+ }
905
+ #endif // _HAS_CXX20
906
+
907
+ _EXPORT_STD template <class _Kty, class _Ty, class _Hasher, class _Keyeq, class _Alloc>
908
+ _NODISCARD bool operator==(const unordered_multimap<_Kty, _Ty, _Hasher, _Keyeq, _Alloc>& _Left,
909
+ const unordered_multimap<_Kty, _Ty, _Hasher, _Keyeq, _Alloc>& _Right) {
910
+ return _Hash_equal(_Left, _Right);
911
+ }
912
+
913
+ #if !_HAS_CXX20
914
+ template <class _Kty, class _Ty, class _Hasher, class _Keyeq, class _Alloc>
915
+ _NODISCARD bool operator!=(const unordered_multimap<_Kty, _Ty, _Hasher, _Keyeq, _Alloc>& _Left,
916
+ const unordered_multimap<_Kty, _Ty, _Hasher, _Keyeq, _Alloc>& _Right) {
917
+ return !(_Left == _Right);
918
+ }
919
+ #endif // !_HAS_CXX20
920
+
921
+ #if _HAS_TR1_NAMESPACE
922
+ namespace _DEPRECATE_TR1_NAMESPACE tr1 {
923
+ using _STD unordered_map;
924
+ using _STD unordered_multimap;
925
+ } // namespace _DEPRECATE_TR1_NAMESPACE tr1
926
+ #endif // _HAS_TR1_NAMESPACE
927
+
928
+ #if _HAS_CXX17
929
+ namespace pmr {
930
+ _EXPORT_STD template <class _Kty, class _Ty, class _Hasher = hash<_Kty>, class _Keyeq = equal_to<_Kty>>
931
+ using unordered_map = _STD unordered_map<_Kty, _Ty, _Hasher, _Keyeq, polymorphic_allocator<pair<const _Kty, _Ty>>>;
932
+
933
+ _EXPORT_STD template <class _Kty, class _Ty, class _Hasher = hash<_Kty>, class _Keyeq = equal_to<_Kty>>
934
+ using unordered_multimap =
935
+ _STD unordered_multimap<_Kty, _Ty, _Hasher, _Keyeq, polymorphic_allocator<pair<const _Kty, _Ty>>>;
936
+ } // namespace pmr
937
+ #endif // _HAS_CXX17
938
+ _STD_END
939
+ #pragma pop_macro("new")
940
+ _STL_RESTORE_CLANG_WARNINGS
941
+ #pragma warning(pop)
942
+ #pragma pack(pop)
943
+ #endif // _STL_COMPILER_PREPROCESSOR
944
+ #endif // _UNORDERED_MAP_
miniMSVC/VC/Tools/MSVC/14.42.34433/include/unordered_set ADDED
@@ -0,0 +1,771 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // unordered_set standard header
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ #ifndef _UNORDERED_SET_
7
+ #define _UNORDERED_SET_
8
+ #include <yvals_core.h>
9
+ #if _STL_COMPILER_PREPROCESSOR
10
+ #include <xhash>
11
+
12
+ #if _HAS_CXX17
13
+ #include <xpolymorphic_allocator.h>
14
+ #endif // _HAS_CXX17
15
+
16
+ #pragma pack(push, _CRT_PACKING)
17
+ #pragma warning(push, _STL_WARNING_LEVEL)
18
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
19
+ _STL_DISABLE_CLANG_WARNINGS
20
+ #pragma push_macro("new")
21
+ #undef new
22
+
23
+ _STD_BEGIN
24
+ template <class _Kty, // key type (same as value type)
25
+ class _Tr, // comparator predicate type
26
+ class _Alloc, // actual allocator type (should be value allocator)
27
+ bool _Mfl> // true if multiple equivalent keys are permitted
28
+ class _Uset_traits : public _Tr { // traits required to make _Hash behave like a set
29
+ public:
30
+ using key_type = _Kty;
31
+ using value_type = _Kty;
32
+ using _Mutable_value_type = _Kty;
33
+ using key_compare = _Tr;
34
+ using allocator_type = _Alloc;
35
+ #if _HAS_CXX17
36
+ using node_type = _Node_handle<_List_node<value_type, typename allocator_traits<_Alloc>::void_pointer>, _Alloc,
37
+ _Node_handle_set_base, _Kty>;
38
+ #endif // _HAS_CXX17
39
+
40
+ static constexpr bool _Multi = _Mfl;
41
+ static constexpr bool _Standard = true;
42
+
43
+ template <class... _Args>
44
+ using _In_place_key_extractor = _In_place_key_extract_set<_Kty, _Args...>;
45
+
46
+ _Uset_traits() = default;
47
+
48
+ explicit _Uset_traits(const _Tr& _Traits) noexcept(is_nothrow_copy_constructible_v<_Tr>) : _Tr(_Traits) {}
49
+
50
+ using value_compare = void; // TRANSITION, remove when _Standard becomes unconditionally true
51
+
52
+ static const _Kty& _Kfn(const value_type& _Val) noexcept {
53
+ return _Val;
54
+ }
55
+ };
56
+
57
+ _EXPORT_STD template <class _Kty, class _Hasher = hash<_Kty>, class _Keyeq = equal_to<_Kty>,
58
+ class _Alloc = allocator<_Kty>>
59
+ class unordered_set : public _Hash<_Uset_traits<_Kty, _Uhash_compare<_Kty, _Hasher, _Keyeq>, _Alloc, false>> {
60
+ // hash table of key-values, unique keys
61
+ public:
62
+ static_assert(!_ENFORCE_MATCHING_ALLOCATORS || is_same_v<_Kty, typename _Alloc::value_type>,
63
+ _MISMATCHED_ALLOCATOR_MESSAGE("unordered_set<T, Hasher, Eq, Allocator>", "T"));
64
+ static_assert(is_object_v<_Kty>, "The C++ Standard forbids containers of non-object types "
65
+ "because of [container.requirements].");
66
+
67
+ private:
68
+ using _Mytraits = _Uhash_compare<_Kty, _Hasher, _Keyeq>;
69
+ using _Mybase = _Hash<_Uset_traits<_Kty, _Mytraits, _Alloc, false>>;
70
+ using _Alnode = typename _Mybase::_Alnode;
71
+ using _Alnode_traits = typename _Mybase::_Alnode_traits;
72
+ using _Key_compare = typename _Mybase::_Key_compare;
73
+
74
+ public:
75
+ using hasher = _Hasher;
76
+ using key_type = _Kty;
77
+ using key_equal = _Keyeq;
78
+
79
+ using value_type = typename _Mybase::value_type;
80
+ using allocator_type = typename _Mybase::allocator_type;
81
+ using size_type = typename _Mybase::size_type;
82
+ using difference_type = typename _Mybase::difference_type;
83
+ using pointer = typename _Mybase::pointer;
84
+ using const_pointer = typename _Mybase::const_pointer;
85
+ using reference = value_type&;
86
+ using const_reference = const value_type&;
87
+ using iterator = typename _Mybase::iterator;
88
+ using const_iterator = typename _Mybase::const_iterator;
89
+
90
+ using local_iterator = typename _Mybase::iterator;
91
+ using const_local_iterator = typename _Mybase::const_iterator;
92
+
93
+ #if _HAS_CXX17
94
+ using insert_return_type = _Insert_return_type<iterator, typename _Mybase::node_type>;
95
+ #endif // _HAS_CXX17
96
+
97
+ unordered_set() : _Mybase(_Key_compare(), allocator_type()) {}
98
+
99
+ explicit unordered_set(const allocator_type& _Al) : _Mybase(_Key_compare(), _Al) {}
100
+
101
+ unordered_set(const unordered_set& _Right)
102
+ : _Mybase(_Right, _Alnode_traits::select_on_container_copy_construction(_Right._Getal())) {}
103
+
104
+ unordered_set(const unordered_set& _Right, const allocator_type& _Al) : _Mybase(_Right, _Al) {}
105
+
106
+ explicit unordered_set(size_type _Buckets) : _Mybase(_Key_compare(), allocator_type()) {
107
+ this->rehash(_Buckets);
108
+ }
109
+
110
+ unordered_set(size_type _Buckets, const allocator_type& _Al) : _Mybase(_Key_compare(), _Al) {
111
+ this->rehash(_Buckets);
112
+ }
113
+
114
+ unordered_set(size_type _Buckets, const hasher& _Hasharg) : _Mybase(_Key_compare(_Hasharg), allocator_type()) {
115
+ this->rehash(_Buckets);
116
+ }
117
+
118
+ unordered_set(size_type _Buckets, const hasher& _Hasharg, const allocator_type& _Al)
119
+ : _Mybase(_Key_compare(_Hasharg), _Al) {
120
+ this->rehash(_Buckets);
121
+ }
122
+
123
+ unordered_set(size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg)
124
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), allocator_type()) {
125
+ this->rehash(_Buckets);
126
+ }
127
+
128
+ unordered_set(size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg, const allocator_type& _Al)
129
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), _Al) {
130
+ this->rehash(_Buckets);
131
+ }
132
+
133
+ template <class _Iter>
134
+ unordered_set(_Iter _First, _Iter _Last) : _Mybase(_Key_compare(), allocator_type()) {
135
+ this->insert(_First, _Last);
136
+ }
137
+
138
+ template <class _Iter>
139
+ unordered_set(_Iter _First, _Iter _Last, const allocator_type& _Al) : _Mybase(_Key_compare(), _Al) {
140
+ this->insert(_First, _Last);
141
+ }
142
+
143
+ template <class _Iter>
144
+ unordered_set(_Iter _First, _Iter _Last, size_type _Buckets) : _Mybase(_Key_compare(), allocator_type()) {
145
+ this->rehash(_Buckets);
146
+ this->insert(_First, _Last);
147
+ }
148
+
149
+ template <class _Iter>
150
+ unordered_set(_Iter _First, _Iter _Last, size_type _Buckets, const allocator_type& _Al)
151
+ : _Mybase(_Key_compare(), _Al) {
152
+ this->rehash(_Buckets);
153
+ this->insert(_First, _Last);
154
+ }
155
+
156
+ template <class _Iter>
157
+ unordered_set(_Iter _First, _Iter _Last, size_type _Buckets, const hasher& _Hasharg)
158
+ : _Mybase(_Key_compare(_Hasharg), allocator_type()) {
159
+ this->rehash(_Buckets);
160
+ this->insert(_First, _Last);
161
+ }
162
+
163
+ template <class _Iter>
164
+ unordered_set(_Iter _First, _Iter _Last, size_type _Buckets, const hasher& _Hasharg, const allocator_type& _Al)
165
+ : _Mybase(_Key_compare(_Hasharg), _Al) {
166
+ this->rehash(_Buckets);
167
+ this->insert(_First, _Last);
168
+ }
169
+
170
+ template <class _Iter>
171
+ unordered_set(_Iter _First, _Iter _Last, size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg)
172
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), allocator_type()) {
173
+ this->rehash(_Buckets);
174
+ this->insert(_First, _Last);
175
+ }
176
+
177
+ template <class _Iter>
178
+ unordered_set(_Iter _First, _Iter _Last, size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg,
179
+ const allocator_type& _Al)
180
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), _Al) {
181
+ this->rehash(_Buckets);
182
+ this->insert(_First, _Last);
183
+ }
184
+
185
+ #if _HAS_CXX23
186
+ template <_Container_compatible_range<value_type> _Rng>
187
+ unordered_set(from_range_t, _Rng&& _Range) : _Mybase(_Key_compare(), allocator_type()) {
188
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
189
+ }
190
+
191
+ template <_Container_compatible_range<value_type> _Rng>
192
+ unordered_set(from_range_t, _Rng&& _Range, const allocator_type& _Al) : _Mybase(_Key_compare(), _Al) {
193
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
194
+ }
195
+
196
+ template <_Container_compatible_range<value_type> _Rng>
197
+ unordered_set(from_range_t, _Rng&& _Range, size_type _Buckets) : _Mybase(_Key_compare(), allocator_type()) {
198
+ _Mybase::rehash(_Buckets);
199
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
200
+ }
201
+
202
+ template <_Container_compatible_range<value_type> _Rng>
203
+ unordered_set(from_range_t, _Rng&& _Range, size_type _Buckets, const allocator_type& _Al)
204
+ : _Mybase(_Key_compare(), _Al) {
205
+ _Mybase::rehash(_Buckets);
206
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
207
+ }
208
+
209
+ template <_Container_compatible_range<value_type> _Rng>
210
+ unordered_set(from_range_t, _Rng&& _Range, size_type _Buckets, const hasher& _Hasharg)
211
+ : _Mybase(_Key_compare(_Hasharg), allocator_type()) {
212
+ _Mybase::rehash(_Buckets);
213
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
214
+ }
215
+
216
+ template <_Container_compatible_range<value_type> _Rng>
217
+ unordered_set(from_range_t, _Rng&& _Range, size_type _Buckets, const hasher& _Hasharg, const allocator_type& _Al)
218
+ : _Mybase(_Key_compare(_Hasharg), _Al) {
219
+ _Mybase::rehash(_Buckets);
220
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
221
+ }
222
+
223
+ template <_Container_compatible_range<value_type> _Rng>
224
+ unordered_set(from_range_t, _Rng&& _Range, size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg)
225
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), allocator_type()) {
226
+ _Mybase::rehash(_Buckets);
227
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
228
+ }
229
+
230
+ template <_Container_compatible_range<value_type> _Rng>
231
+ unordered_set(from_range_t, _Rng&& _Range, size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg,
232
+ const allocator_type& _Al)
233
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), _Al) {
234
+ _Mybase::rehash(_Buckets);
235
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
236
+ }
237
+ #endif // _HAS_CXX23
238
+
239
+ unordered_set& operator=(const unordered_set& _Right) {
240
+ _Mybase::operator=(_Right);
241
+ return *this;
242
+ }
243
+
244
+ unordered_set(unordered_set&& _Right) : _Mybase(_STD move(_Right)) {}
245
+
246
+ unordered_set(unordered_set&& _Right, const allocator_type& _Al) : _Mybase(_STD move(_Right), _Al) {}
247
+
248
+ unordered_set& operator=(unordered_set&& _Right) noexcept(_Alnode_traits::is_always_equal::value
249
+ && is_nothrow_move_assignable_v<_Hasher>
250
+ && is_nothrow_move_assignable_v<_Keyeq>) {
251
+ _Mybase::operator=(_STD move(_Right));
252
+ return *this;
253
+ }
254
+
255
+ void swap(unordered_set& _Right) noexcept(noexcept(_Mybase::swap(_Right))) {
256
+ _Mybase::swap(_Right);
257
+ }
258
+
259
+ unordered_set(initializer_list<value_type> _Ilist) : _Mybase(_Key_compare(), allocator_type()) {
260
+ this->insert(_Ilist);
261
+ }
262
+
263
+ unordered_set(initializer_list<value_type> _Ilist, const allocator_type& _Al) : _Mybase(_Key_compare(), _Al) {
264
+ this->insert(_Ilist);
265
+ }
266
+
267
+ unordered_set(initializer_list<value_type> _Ilist, size_type _Buckets) : _Mybase(_Key_compare(), allocator_type()) {
268
+ this->rehash(_Buckets);
269
+ this->insert(_Ilist);
270
+ }
271
+
272
+ unordered_set(initializer_list<value_type> _Ilist, size_type _Buckets, const allocator_type& _Al)
273
+ : _Mybase(_Key_compare(), _Al) {
274
+ this->rehash(_Buckets);
275
+ this->insert(_Ilist);
276
+ }
277
+
278
+ unordered_set(initializer_list<value_type> _Ilist, size_type _Buckets, const hasher& _Hasharg)
279
+ : _Mybase(_Key_compare(_Hasharg), allocator_type()) {
280
+ this->rehash(_Buckets);
281
+ this->insert(_Ilist);
282
+ }
283
+
284
+ unordered_set(
285
+ initializer_list<value_type> _Ilist, size_type _Buckets, const hasher& _Hasharg, const allocator_type& _Al)
286
+ : _Mybase(_Key_compare(_Hasharg), _Al) {
287
+ this->rehash(_Buckets);
288
+ this->insert(_Ilist);
289
+ }
290
+
291
+ unordered_set(
292
+ initializer_list<value_type> _Ilist, size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg)
293
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), allocator_type()) {
294
+ this->rehash(_Buckets);
295
+ this->insert(_Ilist);
296
+ }
297
+
298
+ unordered_set(initializer_list<value_type> _Ilist, size_type _Buckets, const hasher& _Hasharg,
299
+ const _Keyeq& _Keyeqarg, const allocator_type& _Al)
300
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), _Al) {
301
+ this->rehash(_Buckets);
302
+ this->insert(_Ilist);
303
+ }
304
+
305
+ unordered_set& operator=(initializer_list<value_type> _Ilist) {
306
+ this->clear();
307
+ this->insert(_Ilist);
308
+ return *this;
309
+ }
310
+
311
+ _NODISCARD hasher hash_function() const {
312
+ return this->_Traitsobj._Mypair._Get_first();
313
+ }
314
+
315
+ _NODISCARD key_equal key_eq() const {
316
+ return this->_Traitsobj._Mypair._Myval2._Get_first();
317
+ }
318
+
319
+ using _Mybase::_Unchecked_begin;
320
+ using _Mybase::_Unchecked_end;
321
+ };
322
+
323
+ #if _HAS_CXX17
324
+ template <class _Iter, class _Hasher = hash<_Iter_value_t<_Iter>>, class _Keyeq = equal_to<_Iter_value_t<_Iter>>,
325
+ class _Alloc = allocator<_Iter_value_t<_Iter>>,
326
+ enable_if_t<
327
+ conjunction_v<_Is_iterator<_Iter>, _Is_hasher<_Hasher>, negation<_Is_allocator<_Keyeq>>, _Is_allocator<_Alloc>>,
328
+ int> = 0>
329
+ unordered_set(_Iter, _Iter, _Guide_size_type_t<_Alloc> = 0, _Hasher = _Hasher(), _Keyeq = _Keyeq(), _Alloc = _Alloc())
330
+ -> unordered_set<_Iter_value_t<_Iter>, _Hasher, _Keyeq, _Alloc>;
331
+
332
+ template <class _Kty, class _Hasher = hash<_Kty>, class _Keyeq = equal_to<_Kty>, class _Alloc = allocator<_Kty>,
333
+ enable_if_t<conjunction_v<_Is_hasher<_Hasher>, negation<_Is_allocator<_Keyeq>>, _Is_allocator<_Alloc>>, int> = 0>
334
+ unordered_set(initializer_list<_Kty>, _Guide_size_type_t<_Alloc> = 0, _Hasher = _Hasher(), _Keyeq = _Keyeq(),
335
+ _Alloc = _Alloc()) -> unordered_set<_Kty, _Hasher, _Keyeq, _Alloc>;
336
+
337
+ template <class _Iter, class _Alloc, enable_if_t<conjunction_v<_Is_iterator<_Iter>, _Is_allocator<_Alloc>>, int> = 0>
338
+ unordered_set(_Iter, _Iter, _Guide_size_type_t<_Alloc>, _Alloc)
339
+ -> unordered_set<_Iter_value_t<_Iter>, hash<_Iter_value_t<_Iter>>, equal_to<_Iter_value_t<_Iter>>, _Alloc>;
340
+
341
+ template <class _Iter, class _Hasher, class _Alloc,
342
+ enable_if_t<conjunction_v<_Is_iterator<_Iter>, _Is_hasher<_Hasher>, _Is_allocator<_Alloc>>, int> = 0>
343
+ unordered_set(_Iter, _Iter, _Guide_size_type_t<_Alloc>, _Hasher, _Alloc)
344
+ -> unordered_set<_Iter_value_t<_Iter>, _Hasher, equal_to<_Iter_value_t<_Iter>>, _Alloc>;
345
+
346
+ template <class _Kty, class _Alloc, enable_if_t<_Is_allocator<_Alloc>::value, int> = 0>
347
+ unordered_set(initializer_list<_Kty>, _Guide_size_type_t<_Alloc>, _Alloc)
348
+ -> unordered_set<_Kty, hash<_Kty>, equal_to<_Kty>, _Alloc>;
349
+
350
+ template <class _Kty, class _Hasher, class _Alloc,
351
+ enable_if_t<conjunction_v<_Is_hasher<_Hasher>, _Is_allocator<_Alloc>>, int> = 0>
352
+ unordered_set(initializer_list<_Kty>, _Guide_size_type_t<_Alloc>, _Hasher, _Alloc)
353
+ -> unordered_set<_Kty, _Hasher, equal_to<_Kty>, _Alloc>;
354
+
355
+ #if _HAS_CXX23
356
+ template <_RANGES input_range _Rng, _Hasher_for_container _Hasher = hash<_RANGES range_value_t<_Rng>>,
357
+ class _Keyeq = equal_to<_RANGES range_value_t<_Rng>>,
358
+ _Allocator_for_container _Alloc = allocator<_RANGES range_value_t<_Rng>>>
359
+ requires (!_Allocator_for_container<_Keyeq>)
360
+ unordered_set(from_range_t, _Rng&&, _Guide_size_type_t<_Alloc> = 0, _Hasher = _Hasher(), _Keyeq = _Keyeq(),
361
+ _Alloc = _Alloc()) -> unordered_set<_RANGES range_value_t<_Rng>, _Hasher, _Keyeq, _Alloc>;
362
+
363
+ template <_RANGES input_range _Rng, _Allocator_for_container _Alloc>
364
+ unordered_set(from_range_t, _Rng&&, _Guide_size_type_t<_Alloc>, _Alloc) -> unordered_set<_RANGES range_value_t<_Rng>,
365
+ hash<_RANGES range_value_t<_Rng>>, equal_to<_RANGES range_value_t<_Rng>>, _Alloc>;
366
+
367
+ template <_RANGES input_range _Rng, _Allocator_for_container _Alloc>
368
+ unordered_set(from_range_t, _Rng&&, _Alloc) -> unordered_set<_RANGES range_value_t<_Rng>,
369
+ hash<_RANGES range_value_t<_Rng>>, equal_to<_RANGES range_value_t<_Rng>>, _Alloc>;
370
+
371
+ template <_RANGES input_range _Rng, _Hasher_for_container _Hasher, _Allocator_for_container _Alloc>
372
+ unordered_set(from_range_t, _Rng&&, _Guide_size_type_t<_Alloc>, _Hasher, _Alloc)
373
+ -> unordered_set<_RANGES range_value_t<_Rng>, _Hasher, equal_to<_RANGES range_value_t<_Rng>>, _Alloc>;
374
+ #endif // _HAS_CXX23
375
+ #endif // _HAS_CXX17
376
+
377
+ _EXPORT_STD template <class _Kty, class _Hasher, class _Keyeq, class _Alloc>
378
+ void swap(unordered_set<_Kty, _Hasher, _Keyeq, _Alloc>& _Left,
379
+ unordered_set<_Kty, _Hasher, _Keyeq, _Alloc>& _Right) noexcept(noexcept(_Left.swap(_Right))) {
380
+ _Left.swap(_Right);
381
+ }
382
+
383
+ #if _HAS_CXX20
384
+ _EXPORT_STD template <class _Kty, class _Hasher, class _Keyeq, class _Alloc, class _Pr>
385
+ unordered_set<_Kty, _Hasher, _Keyeq, _Alloc>::size_type erase_if(
386
+ unordered_set<_Kty, _Hasher, _Keyeq, _Alloc>& _Cont, _Pr _Pred) {
387
+ return _STD _Erase_nodes_if(_Cont, _STD _Pass_fn(_Pred));
388
+ }
389
+ #endif // _HAS_CXX20
390
+
391
+ _EXPORT_STD template <class _Kty, class _Hasher, class _Keyeq, class _Alloc>
392
+ _NODISCARD bool operator==(const unordered_set<_Kty, _Hasher, _Keyeq, _Alloc>& _Left,
393
+ const unordered_set<_Kty, _Hasher, _Keyeq, _Alloc>& _Right) {
394
+ return _Hash_equal(_Left, _Right);
395
+ }
396
+
397
+ #if !_HAS_CXX20
398
+ template <class _Kty, class _Hasher, class _Keyeq, class _Alloc>
399
+ _NODISCARD bool operator!=(const unordered_set<_Kty, _Hasher, _Keyeq, _Alloc>& _Left,
400
+ const unordered_set<_Kty, _Hasher, _Keyeq, _Alloc>& _Right) {
401
+ return !(_Left == _Right);
402
+ }
403
+ #endif // !_HAS_CXX20
404
+
405
+ _EXPORT_STD template <class _Kty, class _Hasher = hash<_Kty>, class _Keyeq = equal_to<_Kty>,
406
+ class _Alloc = allocator<_Kty>>
407
+ class unordered_multiset : public _Hash<_Uset_traits<_Kty, _Uhash_compare<_Kty, _Hasher, _Keyeq>, _Alloc, true>> {
408
+ // hash table of key-values, non-unique keys
409
+ public:
410
+ static_assert(!_ENFORCE_MATCHING_ALLOCATORS || is_same_v<_Kty, typename _Alloc::value_type>,
411
+ _MISMATCHED_ALLOCATOR_MESSAGE("unordered_multiset<T, Hasher, Eq, Allocator>", "T"));
412
+ static_assert(is_object_v<_Kty>, "The C++ Standard forbids containers of non-object types "
413
+ "because of [container.requirements].");
414
+
415
+ private:
416
+ using _Mytraits = _Uhash_compare<_Kty, _Hasher, _Keyeq>;
417
+ using _Mybase = _Hash<_Uset_traits<_Kty, _Mytraits, _Alloc, true>>;
418
+ using _Alnode = typename _Mybase::_Alnode;
419
+ using _Alnode_traits = typename _Mybase::_Alnode_traits;
420
+ using _Key_compare = typename _Mybase::_Key_compare;
421
+
422
+ public:
423
+ using hasher = _Hasher;
424
+ using key_type = _Kty;
425
+ using key_equal = _Keyeq;
426
+
427
+ using value_type = typename _Mybase::value_type;
428
+ using allocator_type = typename _Mybase::allocator_type;
429
+ using size_type = typename _Mybase::size_type;
430
+ using difference_type = typename _Mybase::difference_type;
431
+ using pointer = typename _Mybase::pointer;
432
+ using const_pointer = typename _Mybase::const_pointer;
433
+ using reference = value_type&;
434
+ using const_reference = const value_type&;
435
+ using iterator = typename _Mybase::iterator;
436
+ using const_iterator = typename _Mybase::const_iterator;
437
+
438
+ unordered_multiset() : _Mybase(_Key_compare(), allocator_type()) {}
439
+
440
+ explicit unordered_multiset(const allocator_type& _Al) : _Mybase(_Key_compare(), _Al) {}
441
+
442
+ unordered_multiset(const unordered_multiset& _Right)
443
+ : _Mybase(_Right, _Alnode_traits::select_on_container_copy_construction(_Right._Getal())) {}
444
+
445
+ unordered_multiset(const unordered_multiset& _Right, const allocator_type& _Al) : _Mybase(_Right, _Al) {}
446
+
447
+ explicit unordered_multiset(size_type _Buckets) : _Mybase(_Key_compare(), allocator_type()) {
448
+ this->rehash(_Buckets);
449
+ }
450
+
451
+ unordered_multiset(size_type _Buckets, const allocator_type& _Al) : _Mybase(_Key_compare(), _Al) {
452
+ this->rehash(_Buckets);
453
+ }
454
+
455
+ unordered_multiset(size_type _Buckets, const hasher& _Hasharg) : _Mybase(_Key_compare(_Hasharg), allocator_type()) {
456
+ this->rehash(_Buckets);
457
+ }
458
+
459
+ unordered_multiset(size_type _Buckets, const hasher& _Hasharg, const allocator_type& _Al)
460
+ : _Mybase(_Key_compare(_Hasharg), _Al) {
461
+ this->rehash(_Buckets);
462
+ }
463
+
464
+ unordered_multiset(size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg)
465
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), allocator_type()) {
466
+ this->rehash(_Buckets);
467
+ }
468
+
469
+ unordered_multiset(size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg, const allocator_type& _Al)
470
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), _Al) {
471
+ this->rehash(_Buckets);
472
+ }
473
+
474
+ template <class _Iter>
475
+ unordered_multiset(_Iter _First, _Iter _Last) : _Mybase(_Key_compare(), allocator_type()) {
476
+ this->insert(_First, _Last);
477
+ }
478
+
479
+ template <class _Iter>
480
+ unordered_multiset(_Iter _First, _Iter _Last, const allocator_type& _Al) : _Mybase(_Key_compare(), _Al) {
481
+ this->insert(_First, _Last);
482
+ }
483
+
484
+ template <class _Iter>
485
+ unordered_multiset(_Iter _First, _Iter _Last, size_type _Buckets) : _Mybase(_Key_compare(), allocator_type()) {
486
+ this->rehash(_Buckets);
487
+ this->insert(_First, _Last);
488
+ }
489
+
490
+ template <class _Iter>
491
+ unordered_multiset(_Iter _First, _Iter _Last, size_type _Buckets, const allocator_type& _Al)
492
+ : _Mybase(_Key_compare(), _Al) {
493
+ this->rehash(_Buckets);
494
+ this->insert(_First, _Last);
495
+ }
496
+
497
+ template <class _Iter>
498
+ unordered_multiset(_Iter _First, _Iter _Last, size_type _Buckets, const hasher& _Hasharg)
499
+ : _Mybase(_Key_compare(_Hasharg), allocator_type()) {
500
+ this->rehash(_Buckets);
501
+ this->insert(_First, _Last);
502
+ }
503
+
504
+ template <class _Iter>
505
+ unordered_multiset(_Iter _First, _Iter _Last, size_type _Buckets, const hasher& _Hasharg, const allocator_type& _Al)
506
+ : _Mybase(_Key_compare(_Hasharg), _Al) {
507
+ this->rehash(_Buckets);
508
+ this->insert(_First, _Last);
509
+ }
510
+
511
+ template <class _Iter>
512
+ unordered_multiset(_Iter _First, _Iter _Last, size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg)
513
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), allocator_type()) {
514
+ this->rehash(_Buckets);
515
+ this->insert(_First, _Last);
516
+ }
517
+
518
+ template <class _Iter>
519
+ unordered_multiset(_Iter _First, _Iter _Last, size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg,
520
+ const allocator_type& _Al)
521
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), _Al) {
522
+ this->rehash(_Buckets);
523
+ this->insert(_First, _Last);
524
+ }
525
+
526
+ unordered_multiset& operator=(const unordered_multiset& _Right) {
527
+ _Mybase::operator=(_Right);
528
+ return *this;
529
+ }
530
+
531
+ unordered_multiset(unordered_multiset&& _Right) : _Mybase(_STD move(_Right)) {}
532
+
533
+ unordered_multiset(unordered_multiset&& _Right, const allocator_type& _Al) : _Mybase(_STD move(_Right), _Al) {}
534
+
535
+ #if _HAS_CXX23
536
+ template <_Container_compatible_range<value_type> _Rng>
537
+ unordered_multiset(from_range_t, _Rng&& _Range) : _Mybase(_Key_compare(), allocator_type()) {
538
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
539
+ }
540
+
541
+ template <_Container_compatible_range<value_type> _Rng>
542
+ unordered_multiset(from_range_t, _Rng&& _Range, const allocator_type& _Al) : _Mybase(_Key_compare(), _Al) {
543
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
544
+ }
545
+
546
+ template <_Container_compatible_range<value_type> _Rng>
547
+ unordered_multiset(from_range_t, _Rng&& _Range, size_type _Buckets) : _Mybase(_Key_compare(), allocator_type()) {
548
+ _Mybase::rehash(_Buckets);
549
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
550
+ }
551
+
552
+ template <_Container_compatible_range<value_type> _Rng>
553
+ unordered_multiset(from_range_t, _Rng&& _Range, size_type _Buckets, const allocator_type& _Al)
554
+ : _Mybase(_Key_compare(), _Al) {
555
+ _Mybase::rehash(_Buckets);
556
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
557
+ }
558
+
559
+ template <_Container_compatible_range<value_type> _Rng>
560
+ unordered_multiset(from_range_t, _Rng&& _Range, size_type _Buckets, const hasher& _Hasharg)
561
+ : _Mybase(_Key_compare(_Hasharg), allocator_type()) {
562
+ _Mybase::rehash(_Buckets);
563
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
564
+ }
565
+
566
+ template <_Container_compatible_range<value_type> _Rng>
567
+ unordered_multiset(
568
+ from_range_t, _Rng&& _Range, size_type _Buckets, const hasher& _Hasharg, const allocator_type& _Al)
569
+ : _Mybase(_Key_compare(_Hasharg), _Al) {
570
+ _Mybase::rehash(_Buckets);
571
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
572
+ }
573
+
574
+ template <_Container_compatible_range<value_type> _Rng>
575
+ unordered_multiset(from_range_t, _Rng&& _Range, size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg)
576
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), allocator_type()) {
577
+ _Mybase::rehash(_Buckets);
578
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
579
+ }
580
+
581
+ template <_Container_compatible_range<value_type> _Rng>
582
+ unordered_multiset(from_range_t, _Rng&& _Range, size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg,
583
+ const allocator_type& _Al)
584
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), _Al) {
585
+ _Mybase::rehash(_Buckets);
586
+ this->_Insert_range_unchecked(_RANGES _Ubegin(_Range), _RANGES _Uend(_Range));
587
+ }
588
+ #endif // _HAS_CXX23
589
+
590
+ unordered_multiset& operator=(unordered_multiset&& _Right) noexcept(_Alnode_traits::is_always_equal::value
591
+ && is_nothrow_move_assignable_v<_Hasher>
592
+ && is_nothrow_move_assignable_v<_Keyeq>) {
593
+ _Mybase::operator=(_STD move(_Right));
594
+ return *this;
595
+ }
596
+
597
+ void swap(unordered_multiset& _Right) noexcept(noexcept(_Mybase::swap(_Right))) {
598
+ _Mybase::swap(_Right);
599
+ }
600
+
601
+ unordered_multiset(initializer_list<value_type> _Ilist) : _Mybase(_Key_compare(), allocator_type()) {
602
+ this->insert(_Ilist);
603
+ }
604
+
605
+ unordered_multiset(initializer_list<value_type> _Ilist, const allocator_type& _Al) : _Mybase(_Key_compare(), _Al) {
606
+ this->insert(_Ilist);
607
+ }
608
+
609
+ unordered_multiset(initializer_list<value_type> _Ilist, size_type _Buckets)
610
+ : _Mybase(_Key_compare(), allocator_type()) {
611
+ this->rehash(_Buckets);
612
+ this->insert(_Ilist);
613
+ }
614
+
615
+ unordered_multiset(initializer_list<value_type> _Ilist, size_type _Buckets, const allocator_type& _Al)
616
+ : _Mybase(_Key_compare(), _Al) {
617
+ this->rehash(_Buckets);
618
+ this->insert(_Ilist);
619
+ }
620
+
621
+ unordered_multiset(initializer_list<value_type> _Ilist, size_type _Buckets, const hasher& _Hasharg)
622
+ : _Mybase(_Key_compare(_Hasharg), allocator_type()) {
623
+ this->rehash(_Buckets);
624
+ this->insert(_Ilist);
625
+ }
626
+
627
+ unordered_multiset(
628
+ initializer_list<value_type> _Ilist, size_type _Buckets, const hasher& _Hasharg, const allocator_type& _Al)
629
+ : _Mybase(_Key_compare(_Hasharg), _Al) {
630
+ this->rehash(_Buckets);
631
+ this->insert(_Ilist);
632
+ }
633
+
634
+ unordered_multiset(
635
+ initializer_list<value_type> _Ilist, size_type _Buckets, const hasher& _Hasharg, const _Keyeq& _Keyeqarg)
636
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), allocator_type()) {
637
+ this->rehash(_Buckets);
638
+ this->insert(_Ilist);
639
+ }
640
+
641
+ unordered_multiset(initializer_list<value_type> _Ilist, size_type _Buckets, const hasher& _Hasharg,
642
+ const _Keyeq& _Keyeqarg, const allocator_type& _Al)
643
+ : _Mybase(_Key_compare(_Hasharg, _Keyeqarg), _Al) {
644
+ this->rehash(_Buckets);
645
+ this->insert(_Ilist);
646
+ }
647
+
648
+ unordered_multiset& operator=(initializer_list<value_type> _Ilist) {
649
+ this->clear();
650
+ this->insert(_Ilist);
651
+ return *this;
652
+ }
653
+
654
+ _NODISCARD hasher hash_function() const {
655
+ return this->_Traitsobj._Mypair._Get_first();
656
+ }
657
+
658
+ _NODISCARD key_equal key_eq() const {
659
+ return this->_Traitsobj._Mypair._Myval2._Get_first();
660
+ }
661
+
662
+ using _Mybase::_Unchecked_begin;
663
+ using _Mybase::_Unchecked_end;
664
+ };
665
+
666
+ #if _HAS_CXX17
667
+ template <class _Iter, class _Hasher = hash<_Iter_value_t<_Iter>>, class _Keyeq = equal_to<_Iter_value_t<_Iter>>,
668
+ class _Alloc = allocator<_Iter_value_t<_Iter>>,
669
+ enable_if_t<
670
+ conjunction_v<_Is_iterator<_Iter>, _Is_hasher<_Hasher>, negation<_Is_allocator<_Keyeq>>, _Is_allocator<_Alloc>>,
671
+ int> = 0>
672
+ unordered_multiset(_Iter, _Iter, _Guide_size_type_t<_Alloc> = 0, _Hasher = _Hasher(), _Keyeq = _Keyeq(),
673
+ _Alloc = _Alloc()) -> unordered_multiset<_Iter_value_t<_Iter>, _Hasher, _Keyeq, _Alloc>;
674
+
675
+ template <class _Kty, class _Hasher = hash<_Kty>, class _Keyeq = equal_to<_Kty>, class _Alloc = allocator<_Kty>,
676
+ enable_if_t<conjunction_v<_Is_hasher<_Hasher>, negation<_Is_allocator<_Keyeq>>, _Is_allocator<_Alloc>>, int> = 0>
677
+ unordered_multiset(initializer_list<_Kty>, _Guide_size_type_t<_Alloc> = 0, _Hasher = _Hasher(), _Keyeq = _Keyeq(),
678
+ _Alloc = _Alloc()) -> unordered_multiset<_Kty, _Hasher, _Keyeq, _Alloc>;
679
+
680
+ template <class _Iter, class _Alloc, enable_if_t<conjunction_v<_Is_iterator<_Iter>, _Is_allocator<_Alloc>>, int> = 0>
681
+ unordered_multiset(_Iter, _Iter, _Guide_size_type_t<_Alloc>, _Alloc)
682
+ -> unordered_multiset<_Iter_value_t<_Iter>, hash<_Iter_value_t<_Iter>>, equal_to<_Iter_value_t<_Iter>>, _Alloc>;
683
+
684
+ template <class _Iter, class _Hasher, class _Alloc,
685
+ enable_if_t<conjunction_v<_Is_iterator<_Iter>, _Is_hasher<_Hasher>, _Is_allocator<_Alloc>>, int> = 0>
686
+ unordered_multiset(_Iter, _Iter, _Guide_size_type_t<_Alloc>, _Hasher, _Alloc)
687
+ -> unordered_multiset<_Iter_value_t<_Iter>, _Hasher, equal_to<_Iter_value_t<_Iter>>, _Alloc>;
688
+
689
+ template <class _Kty, class _Alloc, enable_if_t<_Is_allocator<_Alloc>::value, int> = 0>
690
+ unordered_multiset(initializer_list<_Kty>, _Guide_size_type_t<_Alloc>, _Alloc)
691
+ -> unordered_multiset<_Kty, hash<_Kty>, equal_to<_Kty>, _Alloc>;
692
+
693
+ template <class _Kty, class _Hasher, class _Alloc,
694
+ enable_if_t<conjunction_v<_Is_hasher<_Hasher>, _Is_allocator<_Alloc>>, int> = 0>
695
+ unordered_multiset(initializer_list<_Kty>, _Guide_size_type_t<_Alloc>, _Hasher, _Alloc)
696
+ -> unordered_multiset<_Kty, _Hasher, equal_to<_Kty>, _Alloc>;
697
+
698
+ #if _HAS_CXX23
699
+ template <_RANGES input_range _Rng, _Hasher_for_container _Hasher = hash<_RANGES range_value_t<_Rng>>,
700
+ class _Keyeq = equal_to<_RANGES range_value_t<_Rng>>,
701
+ _Allocator_for_container _Alloc = allocator<_RANGES range_value_t<_Rng>>>
702
+ requires (!_Allocator_for_container<_Keyeq>)
703
+ unordered_multiset(from_range_t, _Rng&&, _Guide_size_type_t<_Alloc> = 0, _Hasher = _Hasher(), _Keyeq = _Keyeq(),
704
+ _Alloc = _Alloc()) -> unordered_multiset<_RANGES range_value_t<_Rng>, _Hasher, _Keyeq, _Alloc>;
705
+
706
+ template <_RANGES input_range _Rng, _Allocator_for_container _Alloc>
707
+ unordered_multiset(from_range_t, _Rng&&, _Guide_size_type_t<_Alloc>, _Alloc)
708
+ -> unordered_multiset<_RANGES range_value_t<_Rng>, hash<_RANGES range_value_t<_Rng>>,
709
+ equal_to<_RANGES range_value_t<_Rng>>, _Alloc>;
710
+
711
+ template <_RANGES input_range _Rng, _Allocator_for_container _Alloc>
712
+ unordered_multiset(from_range_t, _Rng&&, _Alloc) -> unordered_multiset<_RANGES range_value_t<_Rng>,
713
+ hash<_RANGES range_value_t<_Rng>>, equal_to<_RANGES range_value_t<_Rng>>, _Alloc>;
714
+
715
+ template <_RANGES input_range _Rng, _Hasher_for_container _Hasher, _Allocator_for_container _Alloc>
716
+ unordered_multiset(from_range_t, _Rng&&, _Guide_size_type_t<_Alloc>, _Hasher, _Alloc)
717
+ -> unordered_multiset<_RANGES range_value_t<_Rng>, _Hasher, equal_to<_RANGES range_value_t<_Rng>>, _Alloc>;
718
+ #endif // _HAS_CXX23
719
+ #endif // _HAS_CXX17
720
+
721
+ _EXPORT_STD template <class _Kty, class _Hasher, class _Keyeq, class _Alloc>
722
+ void swap(unordered_multiset<_Kty, _Hasher, _Keyeq, _Alloc>& _Left,
723
+ unordered_multiset<_Kty, _Hasher, _Keyeq, _Alloc>& _Right) noexcept(noexcept(_Left.swap(_Right))) {
724
+ _Left.swap(_Right);
725
+ }
726
+
727
+ #if _HAS_CXX20
728
+ _EXPORT_STD template <class _Kty, class _Hasher, class _Keyeq, class _Alloc, class _Pr>
729
+ unordered_multiset<_Kty, _Hasher, _Keyeq, _Alloc>::size_type erase_if(
730
+ unordered_multiset<_Kty, _Hasher, _Keyeq, _Alloc>& _Cont, _Pr _Pred) {
731
+ return _STD _Erase_nodes_if(_Cont, _STD _Pass_fn(_Pred));
732
+ }
733
+ #endif // _HAS_CXX20
734
+
735
+ _EXPORT_STD template <class _Kty, class _Hasher, class _Keyeq, class _Alloc>
736
+ _NODISCARD bool operator==(const unordered_multiset<_Kty, _Hasher, _Keyeq, _Alloc>& _Left,
737
+ const unordered_multiset<_Kty, _Hasher, _Keyeq, _Alloc>& _Right) {
738
+ return _Hash_equal(_Left, _Right);
739
+ }
740
+
741
+ #if !_HAS_CXX20
742
+ template <class _Kty, class _Hasher, class _Keyeq, class _Alloc>
743
+ _NODISCARD bool operator!=(const unordered_multiset<_Kty, _Hasher, _Keyeq, _Alloc>& _Left,
744
+ const unordered_multiset<_Kty, _Hasher, _Keyeq, _Alloc>& _Right) {
745
+ return !(_Left == _Right);
746
+ }
747
+ #endif // !_HAS_CXX20
748
+
749
+ #if _HAS_TR1_NAMESPACE
750
+ namespace _DEPRECATE_TR1_NAMESPACE tr1 {
751
+ using _STD unordered_multiset;
752
+ using _STD unordered_set;
753
+ } // namespace _DEPRECATE_TR1_NAMESPACE tr1
754
+ #endif // _HAS_TR1_NAMESPACE
755
+
756
+ #if _HAS_CXX17
757
+ namespace pmr {
758
+ _EXPORT_STD template <class _Kty, class _Hasher = hash<_Kty>, class _Keyeq = equal_to<_Kty>>
759
+ using unordered_set = _STD unordered_set<_Kty, _Hasher, _Keyeq, polymorphic_allocator<_Kty>>;
760
+
761
+ _EXPORT_STD template <class _Kty, class _Hasher = hash<_Kty>, class _Keyeq = equal_to<_Kty>>
762
+ using unordered_multiset = _STD unordered_multiset<_Kty, _Hasher, _Keyeq, polymorphic_allocator<_Kty>>;
763
+ } // namespace pmr
764
+ #endif // _HAS_CXX17
765
+ _STD_END
766
+ #pragma pop_macro("new")
767
+ _STL_RESTORE_CLANG_WARNINGS
768
+ #pragma warning(pop)
769
+ #pragma pack(pop)
770
+ #endif // _STL_COMPILER_PREPROCESSOR
771
+ #endif // _UNORDERED_SET_
miniMSVC/VC/Tools/MSVC/14.42.34433/include/use_ansi.h ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // use_ansi.h internal header
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ #ifndef _USE_ANSI_CPP
7
+ #define _USE_ANSI_CPP
8
+
9
+ #ifdef _CRTBLD
10
+ #define _CRT_NOPRAGMA_LIBS
11
+ #else
12
+ #undef _CRT_NOPRAGMA_LIBS
13
+ #endif
14
+
15
+ #ifndef _CRT_NOPRAGMA_LIBS
16
+
17
+ #ifndef _M_CEE_PURE
18
+
19
+ #undef _DEBUG_AFFIX
20
+ #undef _IDL_AFFIX
21
+ #undef _IDL_DEFAULT
22
+ #undef _LIB_STEM
23
+
24
+ #ifdef _DEBUG
25
+ #define _DEBUG_AFFIX "d"
26
+ #define _IDL_DEFAULT 2
27
+ #else // ^^^ defined(_DEBUG) / !defined(_DEBUG) vvv
28
+ #define _DEBUG_AFFIX ""
29
+ #define _IDL_DEFAULT 0
30
+ #endif // ^^^ !defined(_DEBUG) ^^^
31
+
32
+ #if defined(_DLL) && !defined(_STATIC_CPPLIB)
33
+ #define _LIB_STEM "msvcprt"
34
+ #else // ^^^ defined(_DLL) && !defined(_STATIC_CPPLIB) / !defined(_DLL) || defined(_STATIC_CPPLIB) vvv
35
+ #define _LIB_STEM "libcpmt"
36
+
37
+ #if _ITERATOR_DEBUG_LEVEL != _IDL_DEFAULT
38
+ #define _IDL_AFFIX _STL_STRINGIZE(_ITERATOR_DEBUG_LEVEL)
39
+ #endif // _ITERATOR_DEBUG_LEVEL != _IDL_DEFAULT
40
+ #endif // ^^^ !defined(_DLL) || defined(_STATIC_CPPLIB) ^^^
41
+
42
+ #ifndef _IDL_AFFIX
43
+ #define _IDL_AFFIX ""
44
+ #endif
45
+
46
+ #pragma comment(lib, _LIB_STEM _DEBUG_AFFIX _IDL_AFFIX)
47
+
48
+ #undef _DEBUG_AFFIX
49
+ #undef _IDL_AFFIX
50
+ #undef _IDL_DEFAULT
51
+ #undef _LIB_STEM
52
+
53
+ #endif // !defined(_M_CEE_PURE)
54
+
55
+ #endif // !defined(_CRT_NOPRAGMA_LIBS)
56
+
57
+ #endif // _USE_ANSI_CPP
miniMSVC/VC/Tools/MSVC/14.42.34433/include/utility ADDED
@@ -0,0 +1,988 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // utility standard header (core)
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ #ifndef _UTILITY_
7
+ #define _UTILITY_
8
+ #include <yvals_core.h>
9
+ #if _STL_COMPILER_PREPROCESSOR
10
+ #include <initializer_list>
11
+ #include <type_traits>
12
+
13
+ #if _HAS_CXX20
14
+ #include <compare>
15
+ #include <concepts>
16
+ #endif // _HAS_CXX20
17
+
18
+ #if _HAS_CXX23
19
+ #include <cstdlib>
20
+ #endif // _HAS_CXX23
21
+
22
+ #pragma pack(push, _CRT_PACKING)
23
+ #pragma warning(push, _STL_WARNING_LEVEL)
24
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
25
+ _STL_DISABLE_CLANG_WARNINGS
26
+ #pragma push_macro("new")
27
+ #undef new
28
+
29
+ // TRANSITION, non-_Ugly attribute tokens
30
+ #pragma push_macro("msvc")
31
+ #pragma push_macro("intrinsic")
32
+ #pragma push_macro("known_semantics")
33
+ #pragma push_macro("lifetimebound")
34
+ #undef msvc
35
+ #undef intrinsic
36
+ #undef known_semantics
37
+ #undef lifetimebound
38
+
39
+ _STD_BEGIN
40
+ _EXPORT_STD template <class _Ty, _Ty... _Vals>
41
+ struct integer_sequence { // sequence of integer parameters
42
+ static_assert(is_integral_v<_Ty>, "integer_sequence<T, I...> requires T to be an integral type.");
43
+
44
+ using value_type = _Ty;
45
+
46
+ _NODISCARD static constexpr size_t size() noexcept {
47
+ return sizeof...(_Vals);
48
+ }
49
+ };
50
+
51
+ _EXPORT_STD template <class _Ty, _Ty _Size>
52
+ using make_integer_sequence = __make_integer_seq<integer_sequence, _Ty, _Size>;
53
+
54
+ _EXPORT_STD template <size_t... _Vals>
55
+ using index_sequence = integer_sequence<size_t, _Vals...>;
56
+
57
+ _EXPORT_STD template <size_t _Size>
58
+ using make_index_sequence = make_integer_sequence<size_t, _Size>;
59
+
60
+ _EXPORT_STD template <class... _Types>
61
+ using index_sequence_for = make_index_sequence<sizeof...(_Types)>;
62
+
63
+ _EXPORT_STD template <class _Ty, class _Pr>
64
+ _NODISCARD constexpr const _Ty&(max) (const _Ty& _Left _MSVC_LIFETIMEBOUND, const _Ty& _Right _MSVC_LIFETIMEBOUND,
65
+ _Pr _Pred) noexcept(noexcept(_Pred(_Left, _Right))) /* strengthened */ {
66
+ // return larger of _Left and _Right
67
+ return _Pred(_Left, _Right) ? _Right : _Left;
68
+ }
69
+
70
+ #pragma warning(push)
71
+ #pragma warning(disable : 28285) // (syntax error in SAL annotation, occurs when _Ty is not an integral type)
72
+ _EXPORT_STD template <class _Ty>
73
+ _NODISCARD _Post_equal_to_(_Left < _Right ? _Right : _Left) constexpr const _Ty& //
74
+ (max) (const _Ty& _Left _MSVC_LIFETIMEBOUND, const _Ty& _Right _MSVC_LIFETIMEBOUND) noexcept(
75
+ noexcept(_Left < _Right)) /* strengthened */ {
76
+ // return larger of _Left and _Right
77
+ return _Left < _Right ? _Right : _Left;
78
+ }
79
+ #pragma warning(pop)
80
+
81
+ _EXPORT_STD template <class _Ty, class _Pr>
82
+ _NODISCARD constexpr _Ty(max)(initializer_list<_Ty>, _Pr); // implemented in <algorithm>
83
+
84
+ _EXPORT_STD template <class _Ty>
85
+ _NODISCARD constexpr _Ty(max)(initializer_list<_Ty>); // implemented in <algorithm>
86
+
87
+ _EXPORT_STD template <class _Ty, class _Pr>
88
+ _NODISCARD constexpr const _Ty&(min) (const _Ty& _Left _MSVC_LIFETIMEBOUND, const _Ty& _Right _MSVC_LIFETIMEBOUND,
89
+ _Pr _Pred) noexcept(noexcept(_Pred(_Right, _Left))) /* strengthened */ {
90
+ // return smaller of _Left and _Right
91
+ return _Pred(_Right, _Left) ? _Right : _Left;
92
+ }
93
+
94
+ #pragma warning(push)
95
+ #pragma warning(disable : 28285) // (syntax error in SAL annotation, occurs when _Ty is not an integral type)
96
+ _EXPORT_STD template <class _Ty>
97
+ _NODISCARD _Post_equal_to_(_Right < _Left ? _Right : _Left) constexpr const _Ty& //
98
+ (min) (const _Ty& _Left _MSVC_LIFETIMEBOUND, const _Ty& _Right _MSVC_LIFETIMEBOUND) noexcept(
99
+ noexcept(_Right < _Left)) /* strengthened */ {
100
+ // return smaller of _Left and _Right
101
+ return _Right < _Left ? _Right : _Left;
102
+ }
103
+ #pragma warning(pop)
104
+
105
+ _EXPORT_STD template <class _Ty, class _Pr>
106
+ _NODISCARD constexpr _Ty(min)(initializer_list<_Ty>, _Pr); // implemented in <algorithm>
107
+
108
+ _EXPORT_STD template <class _Ty>
109
+ _NODISCARD constexpr _Ty(min)(initializer_list<_Ty>); // implemented in <algorithm>
110
+
111
+ _EXPORT_STD template <class _Ty, size_t _Size, enable_if_t<_Is_swappable<_Ty>::value, int> /* = 0 */>
112
+ _CONSTEXPR20 void swap(_Ty (&_Left)[_Size], _Ty (&_Right)[_Size]) noexcept(_Is_nothrow_swappable<_Ty>::value) {
113
+ if (&_Left != &_Right) {
114
+ _Ty* _First1 = _Left;
115
+ _Ty* _Last1 = _First1 + _Size;
116
+ _Ty* _First2 = _Right;
117
+ for (; _First1 != _Last1; ++_First1, ++_First2) {
118
+ swap(*_First1, *_First2); // intentional ADL
119
+ }
120
+ }
121
+ }
122
+
123
+ #if _HAS_CXX17
124
+ _EXPORT_STD template <class _Ty, enable_if_t<is_move_constructible_v<_Ty> && is_move_assignable_v<_Ty>, int> /* = 0 */>
125
+ #else // ^^^ _HAS_CXX17 / !_HAS_CXX17 vvv
126
+ template <class _Ty, int _Enabled /* = 0 */>
127
+ #endif // ^^^ !_HAS_CXX17 ^^^
128
+ _CONSTEXPR20 void swap(_Ty& _Left, _Ty& _Right) noexcept(
129
+ is_nothrow_move_constructible_v<_Ty> && is_nothrow_move_assignable_v<_Ty>) {
130
+ _Ty _Tmp = _STD move(_Left);
131
+ _Left = _STD move(_Right);
132
+ _Right = _STD move(_Tmp);
133
+ }
134
+
135
+ _EXPORT_STD struct piecewise_construct_t { // tag type for pair tuple arguments
136
+ explicit piecewise_construct_t() = default;
137
+ };
138
+
139
+ _EXPORT_STD _INLINE_VAR constexpr piecewise_construct_t piecewise_construct{};
140
+
141
+ struct _Ignore { // struct that ignores assignments
142
+ template <class _Ty>
143
+ constexpr const _Ignore& operator=(const _Ty&) const noexcept {
144
+ // do nothing
145
+ return *this;
146
+ }
147
+ };
148
+
149
+ _EXPORT_STD _INLINE_VAR constexpr _Ignore ignore{};
150
+
151
+ _EXPORT_STD template <class... _Types>
152
+ class tuple;
153
+
154
+ _EXPORT_STD template <class _Ty1, class _Ty2>
155
+ struct pair;
156
+
157
+ _EXPORT_STD template <class _Ty, size_t _Size>
158
+ class array;
159
+
160
+ _EXPORT_STD template <class _Tuple>
161
+ struct tuple_size;
162
+
163
+ _EXPORT_STD template <class _Ty>
164
+ constexpr size_t tuple_size_v = tuple_size<_Ty>::value;
165
+
166
+ _EXPORT_STD template <size_t _Index, class _Tuple>
167
+ struct tuple_element;
168
+
169
+ _EXPORT_STD template <size_t _Index, class _Tuple>
170
+ using tuple_element_t = typename tuple_element<_Index, _Tuple>::type;
171
+
172
+ template <size_t _Index, class... _Types>
173
+ _NODISCARD constexpr auto&& _Tuple_get(tuple<_Types...>&& _Tuple) noexcept;
174
+
175
+ _EXPORT_STD template <size_t _Index, class... _Types>
176
+ _NODISCARD constexpr tuple_element_t<_Index, tuple<_Types...>>& get(tuple<_Types...>& _Tuple) noexcept;
177
+
178
+ _EXPORT_STD template <size_t _Index, class... _Types>
179
+ _NODISCARD constexpr const tuple_element_t<_Index, tuple<_Types...>>& get(const tuple<_Types...>& _Tuple) noexcept;
180
+
181
+ _EXPORT_STD template <size_t _Index, class... _Types>
182
+ _NODISCARD constexpr tuple_element_t<_Index, tuple<_Types...>>&& get(tuple<_Types...>&& _Tuple) noexcept;
183
+
184
+ _EXPORT_STD template <size_t _Index, class... _Types>
185
+ _NODISCARD constexpr const tuple_element_t<_Index, tuple<_Types...>>&& get(const tuple<_Types...>&& _Tuple) noexcept;
186
+
187
+ _EXPORT_STD template <size_t _Idx, class _Ty, size_t _Size>
188
+ _NODISCARD constexpr _Ty& get(array<_Ty, _Size>& _Arr) noexcept;
189
+
190
+ _EXPORT_STD template <size_t _Idx, class _Ty, size_t _Size>
191
+ _NODISCARD constexpr const _Ty& get(const array<_Ty, _Size>& _Arr) noexcept;
192
+
193
+ _EXPORT_STD template <size_t _Idx, class _Ty, size_t _Size>
194
+ _NODISCARD constexpr _Ty&& get(array<_Ty, _Size>&& _Arr) noexcept;
195
+
196
+ _EXPORT_STD template <size_t _Idx, class _Ty, size_t _Size>
197
+ _NODISCARD constexpr const _Ty&& get(const array<_Ty, _Size>&& _Arr) noexcept;
198
+
199
+ #if _HAS_CXX20
200
+ template <class _Ty1, class _Ty2>
201
+ concept _Different_from = !same_as<remove_cvref_t<_Ty1>, remove_cvref_t<_Ty2>>;
202
+
203
+ template <class>
204
+ constexpr bool _Is_subrange_v = false;
205
+
206
+ #if _HAS_CXX23
207
+ template <class>
208
+ constexpr bool _Tuple_like_impl = false;
209
+
210
+ template <class... _Types>
211
+ constexpr bool _Tuple_like_impl<tuple<_Types...>> = true;
212
+
213
+ template <class _Ty1, class _Ty2>
214
+ constexpr bool _Tuple_like_impl<pair<_Ty1, _Ty2>> = true;
215
+
216
+ template <class _Ty, size_t _Size>
217
+ constexpr bool _Tuple_like_impl<array<_Ty, _Size>> = true;
218
+
219
+ template <class _Ty>
220
+ concept _Tuple_like = _Tuple_like_impl<remove_cvref_t<_Ty>>;
221
+
222
+ template <class _Ty>
223
+ concept _Pair_like = _Tuple_like<_Ty> && tuple_size_v<remove_cvref_t<_Ty>> == 2;
224
+
225
+ #if defined(__clang__) || defined(__EDG__) // TRANSITION, LLVM-59827 and VSO-1900279
226
+ template <class _PairLike, class _Ty1, class _Ty2>
227
+ concept _Can_construct_from_pair_like = _Pair_like<_PairLike> && !_Is_subrange_v<remove_cvref_t<_PairLike>>
228
+ && is_constructible_v<_Ty1, decltype(_STD get<0>(_STD declval<_PairLike>()))>
229
+ && is_constructible_v<_Ty2, decltype(_STD get<1>(_STD declval<_PairLike>()))>;
230
+ #endif // ^^^ workaround ^^^
231
+ #endif // _HAS_CXX23
232
+ #endif // _HAS_CXX20
233
+
234
+ _EXPORT_STD template <class _Ty1, class _Ty2>
235
+ struct pair { // store a pair of values
236
+ using first_type = _Ty1;
237
+ using second_type = _Ty2;
238
+
239
+ template <class _Uty1 = _Ty1, class _Uty2 = _Ty2,
240
+ enable_if_t<conjunction_v<is_default_constructible<_Uty1>, is_default_constructible<_Uty2>>, int> = 0>
241
+ constexpr explicit(
242
+ !conjunction_v<_Is_implicitly_default_constructible<_Uty1>, _Is_implicitly_default_constructible<_Uty2>>)
243
+ pair() noexcept(
244
+ is_nothrow_default_constructible_v<_Uty1> && is_nothrow_default_constructible_v<_Uty2>) // strengthened
245
+ : first(), second() {}
246
+
247
+ template <class _Uty1 = _Ty1, class _Uty2 = _Ty2,
248
+ enable_if_t<conjunction_v<is_copy_constructible<_Uty1>, is_copy_constructible<_Uty2>>, int> = 0>
249
+ constexpr explicit(!conjunction_v<is_convertible<const _Uty1&, _Uty1>, is_convertible<const _Uty2&, _Uty2>>)
250
+ pair(const _Ty1& _Val1, const _Ty2& _Val2) noexcept(
251
+ is_nothrow_copy_constructible_v<_Uty1> && is_nothrow_copy_constructible_v<_Uty2>) // strengthened
252
+ : first(_Val1), second(_Val2) {}
253
+
254
+ #if _HAS_CXX23
255
+ template <class _Other1 = _Ty1, class _Other2 = _Ty2,
256
+ #else // ^^^ _HAS_CXX23 / !_HAS_CXX23 vvv
257
+ template <class _Other1, class _Other2,
258
+ #endif // ^^^ !_HAS_CXX23 ^^^
259
+ enable_if_t<conjunction_v<is_constructible<_Ty1, _Other1>, is_constructible<_Ty2, _Other2>>, int> = 0>
260
+ constexpr explicit(!conjunction_v<is_convertible<_Other1, _Ty1>, is_convertible<_Other2, _Ty2>>)
261
+ pair(_Other1&& _Val1, _Other2&& _Val2) noexcept(
262
+ is_nothrow_constructible_v<_Ty1, _Other1> && is_nothrow_constructible_v<_Ty2, _Other2>) // strengthened
263
+ : first(_STD forward<_Other1>(_Val1)), second(_STD forward<_Other2>(_Val2)) {
264
+ }
265
+
266
+ pair(const pair&) = default;
267
+ pair(pair&&) = default;
268
+
269
+ #if _HAS_CXX23
270
+ template <class _Other1, class _Other2>
271
+ requires is_constructible_v<_Ty1, _Other1&> && is_constructible_v<_Ty2, _Other2&>
272
+ constexpr explicit(!conjunction_v<is_convertible<_Other1&, _Ty1>, is_convertible<_Other2&, _Ty2>>)
273
+ pair(pair<_Other1, _Other2>& _Right) noexcept(
274
+ is_nothrow_constructible_v<_Ty1, _Other1&> && is_nothrow_constructible_v<_Ty2, _Other2&>) // strengthened
275
+ : first(_Right.first), second(_Right.second) {}
276
+ #endif // _HAS_CXX23
277
+
278
+ template <class _Other1, class _Other2,
279
+ enable_if_t<conjunction_v<is_constructible<_Ty1, const _Other1&>, is_constructible<_Ty2, const _Other2&>>,
280
+ int> = 0>
281
+ constexpr explicit(!conjunction_v<is_convertible<const _Other1&, _Ty1>, is_convertible<const _Other2&, _Ty2>>)
282
+ pair(const pair<_Other1, _Other2>& _Right) noexcept(
283
+ is_nothrow_constructible_v<_Ty1, const _Other1&>
284
+ && is_nothrow_constructible_v<_Ty2, const _Other2&>) // strengthened
285
+ : first(_Right.first), second(_Right.second) {}
286
+
287
+ template <class _Other1, class _Other2,
288
+ enable_if_t<conjunction_v<is_constructible<_Ty1, _Other1>, is_constructible<_Ty2, _Other2>>, int> = 0>
289
+ constexpr explicit(!conjunction_v<is_convertible<_Other1, _Ty1>, is_convertible<_Other2, _Ty2>>)
290
+ pair(pair<_Other1, _Other2>&& _Right) noexcept(
291
+ is_nothrow_constructible_v<_Ty1, _Other1> && is_nothrow_constructible_v<_Ty2, _Other2>) // strengthened
292
+ : first(_STD forward<_Other1>(_Right.first)), second(_STD forward<_Other2>(_Right.second)) {}
293
+
294
+ #if _HAS_CXX23
295
+ template <class _Other1, class _Other2>
296
+ requires is_constructible_v<_Ty1, const _Other1> && is_constructible_v<_Ty2, const _Other2>
297
+ constexpr explicit(!conjunction_v<is_convertible<const _Other1, _Ty1>, is_convertible<const _Other2, _Ty2>>)
298
+ pair(const pair<_Other1, _Other2>&& _Right) noexcept(
299
+ is_nothrow_constructible_v<_Ty1, const _Other1>
300
+ && is_nothrow_constructible_v<_Ty2, const _Other2>) // strengthened
301
+ : first(_STD forward<const _Other1>(_Right.first)), second(_STD forward<const _Other2>(_Right.second)) {}
302
+
303
+ #if defined(__clang__) || defined(__EDG__) // TRANSITION, LLVM-59827 (Clang), VSO-1900279 (EDG)
304
+ template <class _Other, enable_if_t<_Can_construct_from_pair_like<_Other, _Ty1, _Ty2>, int> = 0>
305
+ #else // ^^^ workaround / no workaround vvv
306
+ template <_Pair_like _Other>
307
+ requires conjunction_v<bool_constant<!_Is_subrange_v<remove_cvref_t<_Other>>>,
308
+ is_constructible<_Ty1, decltype(_STD get<0>(_STD declval<_Other>()))>,
309
+ is_constructible<_Ty2, decltype(_STD get<1>(_STD declval<_Other>()))>>
310
+ #endif // ^^^ no workaround ^^^
311
+ constexpr explicit(!conjunction_v<is_convertible<decltype(_STD get<0>(_STD declval<_Other>())), _Ty1>,
312
+ is_convertible<decltype(_STD get<1>(_STD declval<_Other>())), _Ty2>>)
313
+ pair(_Other&& _Right) noexcept(
314
+ is_nothrow_constructible_v<_Ty1, decltype(_STD get<0>(_STD declval<_Other>()))>
315
+ && is_nothrow_constructible_v<_Ty2, decltype(_STD get<1>(_STD declval<_Other>()))>) // strengthened
316
+ : first(_STD get<0>(_STD forward<_Other>(_Right))), second(_STD get<1>(_STD forward<_Other>(_Right))) {
317
+ }
318
+ #endif // _HAS_CXX23
319
+
320
+ template <class _Tuple1, class _Tuple2, size_t... _Indices1, size_t... _Indices2>
321
+ constexpr pair(_Tuple1& _Val1, _Tuple2& _Val2, index_sequence<_Indices1...>, index_sequence<_Indices2...>)
322
+ : first(_STD _Tuple_get<_Indices1>(_STD move(_Val1))...),
323
+ second(_STD _Tuple_get<_Indices2>(_STD move(_Val2))...) {}
324
+
325
+ template <class... _Types1, class... _Types2>
326
+ _CONSTEXPR20 pair(piecewise_construct_t, tuple<_Types1...> _Val1, tuple<_Types2...> _Val2)
327
+ : pair(_Val1, _Val2, index_sequence_for<_Types1...>{}, index_sequence_for<_Types2...>{}) {}
328
+
329
+ pair& operator=(const volatile pair&) = delete;
330
+
331
+ template <class _Myself = pair,
332
+ enable_if_t<conjunction_v<_Is_copy_assignable_no_precondition_check<typename _Myself::first_type>,
333
+ _Is_copy_assignable_no_precondition_check<typename _Myself::second_type>>,
334
+ int> = 0>
335
+ _CONSTEXPR20 pair& operator=(_Identity_t<const _Myself&> _Right) noexcept(
336
+ conjunction_v<is_nothrow_copy_assignable<_Ty1>, is_nothrow_copy_assignable<_Ty2>>) /* strengthened */ {
337
+ first = _Right.first;
338
+ second = _Right.second;
339
+ return *this;
340
+ }
341
+
342
+ #if _HAS_CXX23
343
+ template <class _Myself = pair>
344
+ requires _Is_copy_assignable_unchecked_v<const typename _Myself::first_type>
345
+ && _Is_copy_assignable_unchecked_v<const typename _Myself::second_type>
346
+ constexpr const pair& operator=(_Identity_t<const _Myself&> _Right) const
347
+ noexcept(conjunction_v<is_nothrow_copy_assignable<const _Ty1>,
348
+ is_nothrow_copy_assignable<const _Ty2>>) /* strengthened */ {
349
+ first = _Right.first;
350
+ second = _Right.second;
351
+ return *this;
352
+ }
353
+ #endif // _HAS_CXX23
354
+
355
+ template <class _Myself = pair,
356
+ enable_if_t<conjunction_v<_Is_move_assignable_no_precondition_check<typename _Myself::first_type>,
357
+ _Is_move_assignable_no_precondition_check<typename _Myself::second_type>>,
358
+ int> = 0>
359
+ _CONSTEXPR20 pair& operator=(_Identity_t<_Myself&&> _Right) noexcept(
360
+ conjunction_v<is_nothrow_move_assignable<_Ty1>, is_nothrow_move_assignable<_Ty2>>) /* strengthened */ {
361
+ first = _STD forward<_Ty1>(_Right.first);
362
+ second = _STD forward<_Ty2>(_Right.second);
363
+ return *this;
364
+ }
365
+
366
+ #if _HAS_CXX23
367
+ template <class _Myself = pair>
368
+ requires _Is_assignable_no_precondition_check<const typename _Myself::first_type&, _Ty1>::value
369
+ && _Is_assignable_no_precondition_check<const typename _Myself::second_type&, _Ty2>::value
370
+ constexpr const pair& operator=(_Identity_t<_Myself&&> _Right) const
371
+ noexcept(conjunction_v<is_nothrow_assignable<const _Ty1&, _Ty1>,
372
+ is_nothrow_assignable<const _Ty2&, _Ty2>>) /* strengthened */ {
373
+ first = _STD forward<_Ty1>(_Right.first);
374
+ second = _STD forward<_Ty2>(_Right.second);
375
+ return *this;
376
+ }
377
+ #endif // _HAS_CXX23
378
+
379
+ template <class _Other1, class _Other2,
380
+ enable_if_t<conjunction_v<negation<is_same<pair, pair<_Other1, _Other2>>>, is_assignable<_Ty1&, const _Other1&>,
381
+ is_assignable<_Ty2&, const _Other2&>>,
382
+ int> = 0>
383
+ _CONSTEXPR20 pair& operator=(const pair<_Other1, _Other2>& _Right) noexcept(
384
+ is_nothrow_assignable_v<_Ty1&, const _Other1&>
385
+ && is_nothrow_assignable_v<_Ty2&, const _Other2&>) /* strengthened */ {
386
+ first = _Right.first;
387
+ second = _Right.second;
388
+ return *this;
389
+ }
390
+
391
+ #if _HAS_CXX23
392
+ template <class _Other1, class _Other2>
393
+ requires (!is_same_v<pair, pair<_Other1, _Other2>>)
394
+ && is_assignable_v<const _Ty1&, const _Other1&> && is_assignable_v<const _Ty2&, const _Other2&>
395
+ constexpr const pair& operator=(const pair<_Other1, _Other2>& _Right) const
396
+ noexcept(is_nothrow_assignable_v<const _Ty1&, const _Other1&>
397
+ && is_nothrow_assignable_v<const _Ty2&, const _Other2&>) /* strengthened */ {
398
+ first = _Right.first;
399
+ second = _Right.second;
400
+ return *this;
401
+ }
402
+ #endif // _HAS_CXX23
403
+
404
+ template <class _Other1, class _Other2,
405
+ enable_if_t<conjunction_v<negation<is_same<pair, pair<_Other1, _Other2>>>, is_assignable<_Ty1&, _Other1>,
406
+ is_assignable<_Ty2&, _Other2>>,
407
+ int> = 0>
408
+ _CONSTEXPR20 pair& operator=(pair<_Other1, _Other2>&& _Right) noexcept(
409
+ is_nothrow_assignable_v<_Ty1&, _Other1> && is_nothrow_assignable_v<_Ty2&, _Other2>) /* strengthened */ {
410
+ first = _STD forward<_Other1>(_Right.first);
411
+ second = _STD forward<_Other2>(_Right.second);
412
+ return *this;
413
+ }
414
+
415
+ #if _HAS_CXX23
416
+ template <class _Other1, class _Other2>
417
+ requires (!is_same_v<pair, pair<_Other1, _Other2>>)
418
+ && is_assignable_v<const _Ty1&, _Other1> && is_assignable_v<const _Ty2&, _Other2>
419
+ constexpr const pair& operator=(pair<_Other1, _Other2>&& _Right) const
420
+ noexcept(is_nothrow_assignable_v<const _Ty1&, _Other1>
421
+ && is_nothrow_assignable_v<const _Ty2&, _Other2>) /* strengthened */ {
422
+ first = _STD forward<_Other1>(_Right.first);
423
+ second = _STD forward<_Other2>(_Right.second);
424
+ return *this;
425
+ }
426
+
427
+ template <_Pair_like _Other>
428
+ requires _Different_from<_Other, pair> && (!_Is_subrange_v<remove_cvref_t<_Other>>)
429
+ && is_assignable_v<_Ty1&, decltype(_STD get<0>(_STD declval<_Other>()))>
430
+ && is_assignable_v<_Ty2&, decltype(_STD get<1>(_STD declval<_Other>()))>
431
+ constexpr pair& operator=(_Other&& _Right) noexcept(
432
+ is_nothrow_assignable_v<_Ty1&, decltype(_STD get<0>(_STD declval<_Other>()))>
433
+ && is_nothrow_assignable_v<_Ty2&, decltype(_STD get<1>(_STD declval<_Other>()))>) /* strengthened */ {
434
+ first = _STD get<0>(_STD forward<_Other>(_Right));
435
+ second = _STD get<1>(_STD forward<_Other>(_Right));
436
+ return *this;
437
+ }
438
+
439
+ template <_Pair_like _Other>
440
+ requires _Different_from<_Other, pair> && (!_Is_subrange_v<remove_cvref_t<_Other>>)
441
+ && is_assignable_v<const _Ty1&, decltype(_STD get<0>(_STD declval<_Other>()))>
442
+ && is_assignable_v<const _Ty2&, decltype(_STD get<1>(_STD declval<_Other>()))>
443
+ constexpr const pair& operator=(_Other&& _Right) const noexcept(
444
+ is_nothrow_assignable_v<const _Ty1&, decltype(_STD get<0>(_STD declval<_Other>()))>
445
+ && is_nothrow_assignable_v<const _Ty2&, decltype(_STD get<1>(_STD declval<_Other>()))>) /* strengthened */ {
446
+ first = _STD get<0>(_STD forward<_Other>(_Right));
447
+ second = _STD get<1>(_STD forward<_Other>(_Right));
448
+ return *this;
449
+ }
450
+ #endif // _HAS_CXX23
451
+
452
+ _CONSTEXPR20 void swap(pair& _Right) noexcept(
453
+ _Is_nothrow_swappable<_Ty1>::value && _Is_nothrow_swappable<_Ty2>::value) {
454
+ using _STD swap;
455
+ swap(first, _Right.first); // intentional ADL
456
+ swap(second, _Right.second); // intentional ADL
457
+ }
458
+
459
+ #if _HAS_CXX23
460
+ template <int = 0> // see GH-3013
461
+ constexpr void swap(const pair& _Right) const
462
+ noexcept(is_nothrow_swappable_v<const _Ty1> && is_nothrow_swappable_v<const _Ty2>) {
463
+ using _STD swap;
464
+ swap(first, _Right.first); // intentional ADL
465
+ swap(second, _Right.second); // intentional ADL
466
+ }
467
+ #endif // _HAS_CXX23
468
+
469
+ _Ty1 first; // the first stored value
470
+ _Ty2 second; // the second stored value
471
+ };
472
+
473
+ #if _HAS_CXX17
474
+ template <class _Ty1, class _Ty2>
475
+ pair(_Ty1, _Ty2) -> pair<_Ty1, _Ty2>;
476
+ #endif // _HAS_CXX17
477
+
478
+ _EXPORT_STD template <class _Ty1, class _Ty2,
479
+ enable_if_t<_Is_swappable<_Ty1>::value && _Is_swappable<_Ty2>::value, int> = 0>
480
+ _CONSTEXPR20 void swap(pair<_Ty1, _Ty2>& _Left, pair<_Ty1, _Ty2>& _Right) noexcept(noexcept(_Left.swap(_Right))) {
481
+ _Left.swap(_Right);
482
+ }
483
+
484
+ #if _HAS_CXX23
485
+ _EXPORT_STD template <class _Ty1, class _Ty2>
486
+ requires is_swappable<const _Ty1>::value && is_swappable<const _Ty2>::value // TRANSITION, /permissive needs ::value
487
+ constexpr void swap(const pair<_Ty1, _Ty2>& _Left, const pair<_Ty1, _Ty2>& _Right) noexcept(
488
+ noexcept(_Left.swap(_Right))) {
489
+ _Left.swap(_Right);
490
+ }
491
+ #endif // _HAS_CXX23
492
+
493
+ _EXPORT_STD template <class _Ty1, class _Ty2, class _Uty1, class _Uty2>
494
+ _NODISCARD constexpr bool operator==(const pair<_Ty1, _Ty2>& _Left, const pair<_Uty1, _Uty2>& _Right) {
495
+ return _Left.first == _Right.first && _Left.second == _Right.second;
496
+ }
497
+
498
+ #if _HAS_CXX20
499
+ _EXPORT_STD template <class _Ty1, class _Ty2, class _Uty1, class _Uty2>
500
+ _NODISCARD constexpr common_comparison_category_t<_Synth_three_way_result<_Ty1, _Uty1>,
501
+ _Synth_three_way_result<_Ty2, _Uty2>>
502
+ operator<=>(const pair<_Ty1, _Ty2>& _Left, const pair<_Uty1, _Uty2>& _Right) {
503
+ if (auto _Result = _Synth_three_way{}(_Left.first, _Right.first); _Result != 0) {
504
+ return _Result;
505
+ }
506
+ return _Synth_three_way{}(_Left.second, _Right.second);
507
+ }
508
+ #else // ^^^ _HAS_CXX20 / !_HAS_CXX20 vvv
509
+ template <class _Ty1, class _Ty2, class _Uty1, class _Uty2>
510
+ _NODISCARD constexpr bool operator!=(const pair<_Ty1, _Ty2>& _Left, const pair<_Uty1, _Uty2>& _Right) {
511
+ return !(_Left == _Right);
512
+ }
513
+
514
+ template <class _Ty1, class _Ty2, class _Uty1, class _Uty2>
515
+ _NODISCARD constexpr bool operator<(const pair<_Ty1, _Ty2>& _Left, const pair<_Uty1, _Uty2>& _Right) {
516
+ return _Left.first < _Right.first || (!(_Right.first < _Left.first) && _Left.second < _Right.second);
517
+ }
518
+
519
+ template <class _Ty1, class _Ty2, class _Uty1, class _Uty2>
520
+ _NODISCARD constexpr bool operator>(const pair<_Ty1, _Ty2>& _Left, const pair<_Uty1, _Uty2>& _Right) {
521
+ return _Right < _Left;
522
+ }
523
+
524
+ template <class _Ty1, class _Ty2, class _Uty1, class _Uty2>
525
+ _NODISCARD constexpr bool operator<=(const pair<_Ty1, _Ty2>& _Left, const pair<_Uty1, _Uty2>& _Right) {
526
+ return !(_Right < _Left);
527
+ }
528
+
529
+ template <class _Ty1, class _Ty2, class _Uty1, class _Uty2>
530
+ _NODISCARD constexpr bool operator>=(const pair<_Ty1, _Ty2>& _Left, const pair<_Uty1, _Uty2>& _Right) {
531
+ return !(_Left < _Right);
532
+ }
533
+ #endif // ^^^ !_HAS_CXX20 ^^^
534
+
535
+ #if _HAS_CXX23
536
+ template <class _Ty1, class _Ty2, class _Uty1, class _Uty2, template <class> class _TQual,
537
+ template <class> class _UQual>
538
+ requires requires {
539
+ typename pair<common_reference_t<_TQual<_Ty1>, _UQual<_Uty1>>, common_reference_t<_TQual<_Ty2>, _UQual<_Uty2>>>;
540
+ }
541
+ struct basic_common_reference<pair<_Ty1, _Ty2>, pair<_Uty1, _Uty2>, _TQual, _UQual> {
542
+ using type = pair<common_reference_t<_TQual<_Ty1>, _UQual<_Uty1>>, common_reference_t<_TQual<_Ty2>, _UQual<_Uty2>>>;
543
+ };
544
+
545
+ template <class _Ty1, class _Ty2, class _Uty1, class _Uty2>
546
+ requires requires { typename pair<common_type_t<_Ty1, _Uty1>, common_type_t<_Ty2, _Uty2>>; }
547
+ struct common_type<pair<_Ty1, _Ty2>, pair<_Uty1, _Uty2>> {
548
+ using type = pair<common_type_t<_Ty1, _Uty1>, common_type_t<_Ty2, _Uty2>>;
549
+ };
550
+ #endif // _HAS_CXX23
551
+
552
+ template <class _Ty>
553
+ struct _Unrefwrap_helper { // leave unchanged if not a reference_wrapper
554
+ using type = _Ty;
555
+ };
556
+
557
+ template <class _Ty>
558
+ struct _Unrefwrap_helper<reference_wrapper<_Ty>> { // make a reference from a reference_wrapper
559
+ using type = _Ty&;
560
+ };
561
+
562
+ // decay, then unwrap a reference_wrapper
563
+ template <class _Ty>
564
+ using _Unrefwrap_t = typename _Unrefwrap_helper<decay_t<_Ty>>::type;
565
+
566
+ _EXPORT_STD template <class _Ty1, class _Ty2>
567
+ _NODISCARD constexpr pair<_Unrefwrap_t<_Ty1>, _Unrefwrap_t<_Ty2>> make_pair(_Ty1&& _Val1, _Ty2&& _Val2) noexcept(
568
+ is_nothrow_constructible_v<_Unrefwrap_t<_Ty1>, _Ty1>
569
+ && is_nothrow_constructible_v<_Unrefwrap_t<_Ty2>, _Ty2>) /* strengthened */ {
570
+ // return pair composed from arguments
571
+ using _Mypair = pair<_Unrefwrap_t<_Ty1>, _Unrefwrap_t<_Ty2>>;
572
+ return _Mypair(_STD forward<_Ty1>(_Val1), _STD forward<_Ty2>(_Val2));
573
+ }
574
+
575
+ namespace _CXX20_DEPRECATE_REL_OPS rel_ops {
576
+ _EXPORT_STD template <class _Ty>
577
+ _CXX20_DEPRECATE_REL_OPS _NODISCARD bool operator!=(const _Ty& _Left, const _Ty& _Right) {
578
+ return !(_Left == _Right);
579
+ }
580
+
581
+ _EXPORT_STD template <class _Ty>
582
+ _CXX20_DEPRECATE_REL_OPS _NODISCARD bool operator>(const _Ty& _Left, const _Ty& _Right) {
583
+ return _Right < _Left;
584
+ }
585
+
586
+ _EXPORT_STD template <class _Ty>
587
+ _CXX20_DEPRECATE_REL_OPS _NODISCARD bool operator<=(const _Ty& _Left, const _Ty& _Right) {
588
+ return !(_Right < _Left);
589
+ }
590
+
591
+ _EXPORT_STD template <class _Ty>
592
+ _CXX20_DEPRECATE_REL_OPS _NODISCARD bool operator>=(const _Ty& _Left, const _Ty& _Right) {
593
+ return !(_Left < _Right);
594
+ }
595
+ } // namespace _CXX20_DEPRECATE_REL_OPS rel_ops
596
+
597
+ template <class _Tuple, class = void>
598
+ struct _Tuple_size_sfinae {}; // selected when tuple_size<_Tuple>::value isn't well-formed
599
+
600
+ template <class _Tuple>
601
+ struct _Tuple_size_sfinae<_Tuple, void_t<decltype(tuple_size<_Tuple>::value)>>
602
+ : integral_constant<size_t, tuple_size<_Tuple>::value> {}; // selected when tuple_size<_Tuple>::value is well-formed
603
+
604
+ template <class _Tuple>
605
+ struct tuple_size<const _Tuple> : _Tuple_size_sfinae<_Tuple> {}; // ignore cv
606
+
607
+ template <class _Tuple>
608
+ struct _CXX20_DEPRECATE_VOLATILE tuple_size<volatile _Tuple> : _Tuple_size_sfinae<_Tuple> {}; // ignore cv
609
+
610
+ template <class _Tuple>
611
+ struct _CXX20_DEPRECATE_VOLATILE tuple_size<const volatile _Tuple> : _Tuple_size_sfinae<_Tuple> {}; // ignore cv
612
+
613
+ template <size_t _Index, class _Tuple>
614
+ struct _MSVC_KNOWN_SEMANTICS tuple_element<_Index, const _Tuple> : tuple_element<_Index, _Tuple> {
615
+ using _Mybase = tuple_element<_Index, _Tuple>;
616
+ using type = add_const_t<typename _Mybase::type>;
617
+ };
618
+
619
+ template <size_t _Index, class _Tuple>
620
+ struct _CXX20_DEPRECATE_VOLATILE _MSVC_KNOWN_SEMANTICS tuple_element<_Index, volatile _Tuple>
621
+ : tuple_element<_Index, _Tuple> {
622
+ using _Mybase = tuple_element<_Index, _Tuple>;
623
+ using type = add_volatile_t<typename _Mybase::type>;
624
+ };
625
+
626
+ template <size_t _Index, class _Tuple>
627
+ struct _CXX20_DEPRECATE_VOLATILE _MSVC_KNOWN_SEMANTICS tuple_element<_Index, const volatile _Tuple>
628
+ : tuple_element<_Index, _Tuple> {
629
+ using _Mybase = tuple_element<_Index, _Tuple>;
630
+ using type = add_cv_t<typename _Mybase::type>;
631
+ };
632
+
633
+ template <class _Ty, size_t _Size>
634
+ struct tuple_size<array<_Ty, _Size>> : integral_constant<size_t, _Size> {}; // size of array
635
+
636
+ template <size_t _Idx, class _Ty, size_t _Size>
637
+ struct _MSVC_KNOWN_SEMANTICS tuple_element<_Idx, array<_Ty, _Size>> {
638
+ static_assert(_Idx < _Size, "array index out of bounds");
639
+
640
+ using type = _Ty;
641
+ };
642
+
643
+ template <class... _Types>
644
+ struct tuple_size<tuple<_Types...>> : integral_constant<size_t, sizeof...(_Types)> {}; // size of tuple
645
+
646
+ template <size_t _Index>
647
+ struct _MSVC_KNOWN_SEMANTICS tuple_element<_Index, tuple<>> { // enforce bounds checking
648
+ static_assert(_Always_false<integral_constant<size_t, _Index>>, "tuple index out of bounds");
649
+ };
650
+
651
+ template <class _This, class... _Rest>
652
+ struct _MSVC_KNOWN_SEMANTICS tuple_element<0, tuple<_This, _Rest...>> { // select first element
653
+ using type = _This;
654
+ // MSVC assumes the meaning of _Ttype; remove or rename, but do not change semantics
655
+ using _Ttype = tuple<_This, _Rest...>;
656
+ };
657
+
658
+ template <size_t _Index, class _This, class... _Rest>
659
+ struct _MSVC_KNOWN_SEMANTICS tuple_element<_Index, tuple<_This, _Rest...>>
660
+ : tuple_element<_Index - 1, tuple<_Rest...>> {}; // recursive tuple_element definition
661
+
662
+ template <class _Ty1, class _Ty2>
663
+ struct tuple_size<pair<_Ty1, _Ty2>> : integral_constant<size_t, 2> {}; // size of pair
664
+
665
+ template <size_t _Idx, class _Ty1, class _Ty2>
666
+ struct _MSVC_KNOWN_SEMANTICS tuple_element<_Idx, pair<_Ty1, _Ty2>> {
667
+ static_assert(_Idx < 2, "pair index out of bounds");
668
+
669
+ using type = conditional_t<_Idx == 0, _Ty1, _Ty2>;
670
+ };
671
+
672
+ _EXPORT_STD template <size_t _Idx, class _Ty1, class _Ty2>
673
+ _NODISCARD constexpr tuple_element_t<_Idx, pair<_Ty1, _Ty2>>& get(pair<_Ty1, _Ty2>& _Pr) noexcept {
674
+ // get reference to element at _Idx in pair _Pr
675
+ if constexpr (_Idx == 0) {
676
+ return _Pr.first;
677
+ } else {
678
+ return _Pr.second;
679
+ }
680
+ }
681
+
682
+ _EXPORT_STD template <class _Ty1, class _Ty2>
683
+ _NODISCARD constexpr _Ty1& get(pair<_Ty1, _Ty2>& _Pr) noexcept {
684
+ // get reference to element _Ty1 in pair _Pr
685
+ return _Pr.first;
686
+ }
687
+
688
+ _EXPORT_STD template <class _Ty2, class _Ty1>
689
+ _NODISCARD constexpr _Ty2& get(pair<_Ty1, _Ty2>& _Pr) noexcept {
690
+ // get reference to element _Ty2 in pair _Pr
691
+ return _Pr.second;
692
+ }
693
+
694
+ _EXPORT_STD template <size_t _Idx, class _Ty1, class _Ty2>
695
+ _NODISCARD constexpr const tuple_element_t<_Idx, pair<_Ty1, _Ty2>>& get(const pair<_Ty1, _Ty2>& _Pr) noexcept {
696
+ // get const reference to element at _Idx in pair _Pr
697
+ if constexpr (_Idx == 0) {
698
+ return _Pr.first;
699
+ } else {
700
+ return _Pr.second;
701
+ }
702
+ }
703
+
704
+ _EXPORT_STD template <class _Ty1, class _Ty2>
705
+ _NODISCARD constexpr const _Ty1& get(const pair<_Ty1, _Ty2>& _Pr) noexcept {
706
+ // get const reference to element _Ty1 in pair _Pr
707
+ return _Pr.first;
708
+ }
709
+
710
+ _EXPORT_STD template <class _Ty2, class _Ty1>
711
+ _NODISCARD constexpr const _Ty2& get(const pair<_Ty1, _Ty2>& _Pr) noexcept {
712
+ // get const reference to element _Ty2 in pair _Pr
713
+ return _Pr.second;
714
+ }
715
+
716
+ _EXPORT_STD template <size_t _Idx, class _Ty1, class _Ty2>
717
+ _NODISCARD constexpr tuple_element_t<_Idx, pair<_Ty1, _Ty2>>&& get(pair<_Ty1, _Ty2>&& _Pr) noexcept {
718
+ // get rvalue reference to element at _Idx in pair _Pr
719
+ if constexpr (_Idx == 0) {
720
+ return _STD forward<_Ty1>(_Pr.first);
721
+ } else {
722
+ return _STD forward<_Ty2>(_Pr.second);
723
+ }
724
+ }
725
+
726
+ _EXPORT_STD template <class _Ty1, class _Ty2>
727
+ _NODISCARD constexpr _Ty1&& get(pair<_Ty1, _Ty2>&& _Pr) noexcept {
728
+ // get rvalue reference to element _Ty1 in pair _Pr
729
+ return _STD forward<_Ty1>(_Pr.first);
730
+ }
731
+
732
+ _EXPORT_STD template <class _Ty2, class _Ty1>
733
+ _NODISCARD constexpr _Ty2&& get(pair<_Ty1, _Ty2>&& _Pr) noexcept {
734
+ // get rvalue reference to element _Ty2 in pair _Pr
735
+ return _STD forward<_Ty2>(_Pr.second);
736
+ }
737
+
738
+ _EXPORT_STD template <size_t _Idx, class _Ty1, class _Ty2>
739
+ _NODISCARD constexpr const tuple_element_t<_Idx, pair<_Ty1, _Ty2>>&& get(const pair<_Ty1, _Ty2>&& _Pr) noexcept {
740
+ // get const rvalue reference to element at _Idx in pair _Pr
741
+ if constexpr (_Idx == 0) {
742
+ return _STD forward<const _Ty1>(_Pr.first);
743
+ } else {
744
+ return _STD forward<const _Ty2>(_Pr.second);
745
+ }
746
+ }
747
+
748
+ _EXPORT_STD template <class _Ty1, class _Ty2>
749
+ _NODISCARD constexpr const _Ty1&& get(const pair<_Ty1, _Ty2>&& _Pr) noexcept {
750
+ // get const rvalue reference to element _Ty1 in pair _Pr
751
+ return _STD forward<const _Ty1>(_Pr.first);
752
+ }
753
+
754
+ _EXPORT_STD template <class _Ty2, class _Ty1>
755
+ _NODISCARD constexpr const _Ty2&& get(const pair<_Ty1, _Ty2>&& _Pr) noexcept {
756
+ // get const rvalue reference to element _Ty2 in pair _Pr
757
+ return _STD forward<const _Ty2>(_Pr.second);
758
+ }
759
+
760
+ _EXPORT_STD template <class _Ty, class _Other = _Ty>
761
+ _CONSTEXPR20 _Ty exchange(_Ty& _Val, _Other&& _New_val) noexcept(
762
+ conjunction_v<is_nothrow_move_constructible<_Ty>, is_nothrow_assignable<_Ty&, _Other>>) {
763
+ // assign _New_val to _Val, return previous _Val
764
+ _Ty _Old_val = static_cast<_Ty&&>(_Val);
765
+ _Val = static_cast<_Other&&>(_New_val);
766
+ return _Old_val;
767
+ }
768
+
769
+ _EXPORT_STD template <class _Ty>
770
+ _NODISCARD _MSVC_INTRINSIC constexpr add_const_t<_Ty>& as_const(_Ty& _Val) noexcept { // view _Val through const lenses
771
+ return _Val;
772
+ }
773
+
774
+ _EXPORT_STD template <class _Ty>
775
+ void as_const(const _Ty&&) = delete;
776
+
777
+ #if _HAS_CXX17
778
+ _EXPORT_STD struct in_place_t { // tag used to select a constructor which initializes a contained object in place
779
+ explicit in_place_t() = default;
780
+ };
781
+ _EXPORT_STD inline constexpr in_place_t in_place{};
782
+
783
+ _EXPORT_STD template <class>
784
+ struct in_place_type_t { // tag that selects a type to construct in place
785
+ explicit in_place_type_t() = default;
786
+ };
787
+ _EXPORT_STD template <class _Ty>
788
+ constexpr in_place_type_t<_Ty> in_place_type{};
789
+
790
+ _EXPORT_STD template <size_t>
791
+ struct in_place_index_t { // tag that selects the index of a type to construct in place
792
+ explicit in_place_index_t() = default;
793
+ };
794
+ _EXPORT_STD template <size_t _Idx>
795
+ constexpr in_place_index_t<_Idx> in_place_index{};
796
+ #endif // _HAS_CXX17
797
+
798
+ template <class _Ty1, class _Ty2>
799
+ _NODISCARD constexpr bool _Cmp_equal(const _Ty1 _Left, const _Ty2 _Right) noexcept {
800
+ _STL_INTERNAL_STATIC_ASSERT(_Is_nonbool_integral<_Ty1> && _Is_nonbool_integral<_Ty2>); // allows character types
801
+ if constexpr (is_signed_v<_Ty1> == is_signed_v<_Ty2>) {
802
+ return _Left == _Right;
803
+ } else if constexpr (is_signed_v<_Ty2>) {
804
+ return _Left == static_cast<make_unsigned_t<_Ty2>>(_Right) && _Right >= 0;
805
+ } else {
806
+ return static_cast<make_unsigned_t<_Ty1>>(_Left) == _Right && _Left >= 0;
807
+ }
808
+ }
809
+
810
+ template <class _Ty1, class _Ty2>
811
+ _NODISCARD constexpr bool _Cmp_not_equal(const _Ty1 _Left, const _Ty2 _Right) noexcept {
812
+ return !_STD _Cmp_equal(_Left, _Right);
813
+ }
814
+
815
+ template <class _Ty1, class _Ty2>
816
+ _NODISCARD constexpr bool _Cmp_less(const _Ty1 _Left, const _Ty2 _Right) noexcept {
817
+ _STL_INTERNAL_STATIC_ASSERT(_Is_nonbool_integral<_Ty1> && _Is_nonbool_integral<_Ty2>); // allows character types
818
+ if constexpr (is_signed_v<_Ty1> == is_signed_v<_Ty2>) {
819
+ return _Left < _Right;
820
+ } else if constexpr (is_signed_v<_Ty2>) {
821
+ return _Right > 0 && _Left < static_cast<make_unsigned_t<_Ty2>>(_Right);
822
+ } else {
823
+ return _Left < 0 || static_cast<make_unsigned_t<_Ty1>>(_Left) < _Right;
824
+ }
825
+ }
826
+
827
+ template <class _Ty1, class _Ty2>
828
+ _NODISCARD constexpr bool _Cmp_greater(const _Ty1 _Left, const _Ty2 _Right) noexcept {
829
+ return _STD _Cmp_less(_Right, _Left);
830
+ }
831
+
832
+ template <class _Ty1, class _Ty2>
833
+ _NODISCARD constexpr bool _Cmp_less_equal(const _Ty1 _Left, const _Ty2 _Right) noexcept {
834
+ return !_STD _Cmp_less(_Right, _Left);
835
+ }
836
+
837
+ template <class _Ty1, class _Ty2>
838
+ _NODISCARD constexpr bool _Cmp_greater_equal(const _Ty1 _Left, const _Ty2 _Right) noexcept {
839
+ return !_STD _Cmp_less(_Left, _Right);
840
+ }
841
+
842
+ template <class _Ty>
843
+ _NODISCARD constexpr _Ty _Min_limit() noexcept { // same as (numeric_limits<_Ty>::min)(), less throughput cost
844
+ _STL_INTERNAL_STATIC_ASSERT(is_integral_v<_Ty>); // doesn't attempt to handle all types
845
+ if constexpr (is_signed_v<_Ty>) {
846
+ constexpr auto _Unsigned_max = static_cast<make_unsigned_t<_Ty>>(-1);
847
+ return static_cast<_Ty>((_Unsigned_max >> 1) + 1); // well-defined, N4950 [conv.integral]/3
848
+ } else {
849
+ return 0;
850
+ }
851
+ }
852
+
853
+ template <class _Ty>
854
+ _NODISCARD constexpr _Ty _Max_limit() noexcept { // same as (numeric_limits<_Ty>::max)(), less throughput cost
855
+ _STL_INTERNAL_STATIC_ASSERT(is_integral_v<_Ty>); // doesn't attempt to handle all types
856
+ if constexpr (is_signed_v<_Ty>) {
857
+ constexpr auto _Unsigned_max = static_cast<make_unsigned_t<_Ty>>(-1);
858
+ return static_cast<_Ty>(_Unsigned_max >> 1);
859
+ } else {
860
+ return static_cast<_Ty>(-1);
861
+ }
862
+ }
863
+
864
+ template <class _Rx, class _Ty>
865
+ _NODISCARD constexpr bool _In_range(const _Ty _Value) noexcept {
866
+ _STL_INTERNAL_STATIC_ASSERT(_Is_nonbool_integral<_Rx> && _Is_nonbool_integral<_Ty>); // allows character types
867
+
868
+ constexpr auto _Ty_min = _Min_limit<_Ty>();
869
+ constexpr auto _Rx_min = _Min_limit<_Rx>();
870
+
871
+ if constexpr (_STD _Cmp_less(_Ty_min, _Rx_min)) {
872
+ if (_Value < _Ty{_Rx_min}) {
873
+ return false;
874
+ }
875
+ }
876
+
877
+ constexpr auto _Ty_max = _Max_limit<_Ty>();
878
+ constexpr auto _Rx_max = _Max_limit<_Rx>();
879
+
880
+ if constexpr (_STD _Cmp_greater(_Ty_max, _Rx_max)) {
881
+ if (_Value > _Ty{_Rx_max}) {
882
+ return false;
883
+ }
884
+ }
885
+
886
+ return true;
887
+ }
888
+
889
+ #if _HAS_CXX20
890
+ template <class _Ty>
891
+ constexpr bool _Is_standard_integer = _Is_any_of_v<remove_cv_t<_Ty>, signed char, short, int, long, long long,
892
+ unsigned char, unsigned short, unsigned int, unsigned long, unsigned long long>;
893
+
894
+ _EXPORT_STD template <class _Ty1, class _Ty2>
895
+ _NODISCARD constexpr bool cmp_equal(const _Ty1 _Left, const _Ty2 _Right) noexcept {
896
+ static_assert(_Is_standard_integer<_Ty1> && _Is_standard_integer<_Ty2>,
897
+ "The integer comparison functions only accept standard and extended integer types.");
898
+ return _STD _Cmp_equal(_Left, _Right);
899
+ }
900
+
901
+ _EXPORT_STD template <class _Ty1, class _Ty2>
902
+ _NODISCARD constexpr bool cmp_not_equal(const _Ty1 _Left, const _Ty2 _Right) noexcept {
903
+ static_assert(_Is_standard_integer<_Ty1> && _Is_standard_integer<_Ty2>,
904
+ "The integer comparison functions only accept standard and extended integer types.");
905
+ return _STD _Cmp_not_equal(_Left, _Right);
906
+ }
907
+
908
+ _EXPORT_STD template <class _Ty1, class _Ty2>
909
+ _NODISCARD constexpr bool cmp_less(const _Ty1 _Left, const _Ty2 _Right) noexcept {
910
+ static_assert(_Is_standard_integer<_Ty1> && _Is_standard_integer<_Ty2>,
911
+ "The integer comparison functions only accept standard and extended integer types.");
912
+ return _STD _Cmp_less(_Left, _Right);
913
+ }
914
+
915
+ _EXPORT_STD template <class _Ty1, class _Ty2>
916
+ _NODISCARD constexpr bool cmp_greater(const _Ty1 _Left, const _Ty2 _Right) noexcept {
917
+ static_assert(_Is_standard_integer<_Ty1> && _Is_standard_integer<_Ty2>,
918
+ "The integer comparison functions only accept standard and extended integer types.");
919
+ return _STD _Cmp_greater(_Left, _Right);
920
+ }
921
+
922
+ _EXPORT_STD template <class _Ty1, class _Ty2>
923
+ _NODISCARD constexpr bool cmp_less_equal(const _Ty1 _Left, const _Ty2 _Right) noexcept {
924
+ static_assert(_Is_standard_integer<_Ty1> && _Is_standard_integer<_Ty2>,
925
+ "The integer comparison functions only accept standard and extended integer types.");
926
+ return _STD _Cmp_less_equal(_Left, _Right);
927
+ }
928
+
929
+ _EXPORT_STD template <class _Ty1, class _Ty2>
930
+ _NODISCARD constexpr bool cmp_greater_equal(const _Ty1 _Left, const _Ty2 _Right) noexcept {
931
+ static_assert(_Is_standard_integer<_Ty1> && _Is_standard_integer<_Ty2>,
932
+ "The integer comparison functions only accept standard and extended integer types.");
933
+ return _STD _Cmp_greater_equal(_Left, _Right);
934
+ }
935
+
936
+ _EXPORT_STD template <class _Rx, class _Ty>
937
+ _NODISCARD constexpr bool in_range(const _Ty _Value) noexcept {
938
+ static_assert(_Is_standard_integer<_Rx> && _Is_standard_integer<_Ty>,
939
+ "The integer comparison functions only accept standard and extended integer types.");
940
+ return _STD _In_range<_Rx>(_Value);
941
+ }
942
+ #endif // _HAS_CXX20
943
+
944
+ #if _HAS_CXX23
945
+ _EXPORT_STD template <class _Ty>
946
+ _NODISCARD _MSVC_INTRINSIC constexpr underlying_type_t<_Ty> to_underlying(_Ty _Value) noexcept {
947
+ return static_cast<underlying_type_t<_Ty>>(_Value);
948
+ }
949
+
950
+ _EXPORT_STD [[noreturn]] __forceinline void unreachable() noexcept /* strengthened */ {
951
+ _STL_UNREACHABLE;
952
+ #ifdef _DEBUG
953
+ _CSTD abort(); // likely to be called in debug mode, but can't be relied upon - already entered the UB territory
954
+ #endif // defined(_DEBUG)
955
+ }
956
+
957
+ template <class _Ty, class _Uty,
958
+ class _Tmp = _Maybe_const<is_const_v<remove_reference_t<_Ty>>, remove_reference_t<_Uty>>>
959
+ using _Forward_like_t = conditional_t<is_rvalue_reference_v<_Ty&&>, _Tmp&&, _Tmp&>;
960
+
961
+ _EXPORT_STD template <class _Ty, class _Uty>
962
+ _NODISCARD _MSVC_INTRINSIC constexpr _Forward_like_t<_Ty, _Uty> forward_like(_Uty&& _Ux) noexcept {
963
+ return static_cast<_Forward_like_t<_Ty, _Uty>>(_Ux);
964
+ }
965
+ #endif // _HAS_CXX23
966
+
967
+ #if _HAS_TR1_NAMESPACE
968
+ namespace _DEPRECATE_TR1_NAMESPACE tr1 {
969
+ using _STD get;
970
+ using _STD tuple_element;
971
+ using _STD tuple_size;
972
+ } // namespace _DEPRECATE_TR1_NAMESPACE tr1
973
+ #endif // _HAS_TR1_NAMESPACE
974
+
975
+ _STD_END
976
+
977
+ // TRANSITION, non-_Ugly attribute tokens
978
+ #pragma pop_macro("lifetimebound")
979
+ #pragma pop_macro("known_semantics")
980
+ #pragma pop_macro("intrinsic")
981
+ #pragma pop_macro("msvc")
982
+
983
+ #pragma pop_macro("new")
984
+ _STL_RESTORE_CLANG_WARNINGS
985
+ #pragma warning(pop)
986
+ #pragma pack(pop)
987
+ #endif // _STL_COMPILER_PREPROCESSOR
988
+ #endif // _UTILITY_
miniMSVC/VC/Tools/MSVC/14.42.34433/include/vadefs.h ADDED
@@ -0,0 +1,208 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //
2
+ // vadefs.h
3
+ //
4
+ // Copyright (c) Microsoft Corporation. All rights reserved.
5
+ //
6
+ // Definitions of macro helpers used by <stdarg.h>. This is the topmost header
7
+ // in the CRT header lattice, and is always the first CRT header to be included,
8
+ // explicitly or implicitly. Therefore, this header also has several definitions
9
+ // that are used throughout the CRT.
10
+ //
11
+ #pragma once
12
+ #define _INC_VADEFS
13
+
14
+ #define _CRT_PACKING 8
15
+ #pragma pack(push, _CRT_PACKING)
16
+
17
+ // C4339: '__type_info_node': use of undefined type detected in CLR meta-data (/Wall)
18
+ #ifndef _VCRUNTIME_DISABLED_WARNING_4339
19
+ #ifdef _M_CEE_PURE
20
+ #define _VCRUNTIME_DISABLED_WARNING_4339 4339
21
+ #else
22
+ #define _VCRUNTIME_DISABLED_WARNING_4339
23
+ #endif
24
+ #endif
25
+
26
+ // C4412: function signature contains type '<typename>';
27
+ // C++ objects are unsafe to pass between pure code and mixed or native. (/Wall)
28
+ #ifndef _VCRUNTIME_DISABLED_WARNING_4412
29
+ #ifdef _M_CEE_PURE
30
+ #define _VCRUNTIME_DISABLED_WARNING_4412 4412
31
+ #else
32
+ #define _VCRUNTIME_DISABLED_WARNING_4412
33
+ #endif
34
+ #endif
35
+
36
+ // Use _VCRUNTIME_EXTRA_DISABLED_WARNINGS to add additional warning suppressions to VCRuntime headers.
37
+ #ifndef _VCRUNTIME_EXTRA_DISABLED_WARNINGS
38
+ #define _VCRUNTIME_EXTRA_DISABLED_WARNINGS
39
+ #endif
40
+
41
+ // C4514: unreferenced inline function has been removed (/Wall)
42
+ // C4820: '<typename>' : 'N' bytes padding added after data member (/Wall)
43
+ #ifndef _VCRUNTIME_DISABLED_WARNINGS
44
+ #define _VCRUNTIME_DISABLED_WARNINGS _VCRUNTIME_DISABLED_WARNING_4339 _VCRUNTIME_DISABLED_WARNING_4412 4514 4820 _VCRUNTIME_EXTRA_DISABLED_WARNINGS
45
+ #endif
46
+
47
+ #pragma warning(push)
48
+ #pragma warning(disable: _VCRUNTIME_DISABLED_WARNINGS)
49
+
50
+ #ifdef __cplusplus
51
+ extern "C" {
52
+ #endif
53
+
54
+ #if !defined _W64
55
+ #define _W64
56
+ #endif
57
+
58
+ #ifndef _UINTPTR_T_DEFINED
59
+ #define _UINTPTR_T_DEFINED
60
+ #ifdef _WIN64
61
+ typedef unsigned __int64 uintptr_t;
62
+ #else
63
+ typedef unsigned int uintptr_t;
64
+ #endif
65
+ #endif
66
+
67
+ #ifndef _VA_LIST_DEFINED
68
+ #define _VA_LIST_DEFINED
69
+ #ifdef _M_CEE_PURE
70
+ typedef System::ArgIterator va_list;
71
+ #else
72
+ typedef char* va_list;
73
+ #endif
74
+ #endif
75
+
76
+ #ifdef __cplusplus
77
+ #define _ADDRESSOF(v) (&const_cast<char&>(reinterpret_cast<const volatile char&>(v)))
78
+ #else
79
+ #define _ADDRESSOF(v) (&(v))
80
+ #endif
81
+
82
+ #if (defined _M_ARM || defined _M_HYBRID_X86_ARM64) && !defined _M_CEE_PURE
83
+ #define _VA_ALIGN 4
84
+ #define _SLOTSIZEOF(t) ((sizeof(t) + _VA_ALIGN - 1) & ~(_VA_ALIGN - 1))
85
+ #define _APALIGN(t,ap) (((va_list)0 - (ap)) & (__alignof(t) - 1))
86
+ #elif (defined _M_ARM64 || defined _M_ARM64EC) && !defined _M_CEE_PURE
87
+ #define _VA_ALIGN 8
88
+ #define _SLOTSIZEOF(t) ((sizeof(t) + _VA_ALIGN - 1) & ~(_VA_ALIGN - 1))
89
+ #define _APALIGN(t,ap) (((va_list)0 - (ap)) & (__alignof(t) - 1))
90
+ #else
91
+ #define _SLOTSIZEOF(t) (sizeof(t))
92
+ #define _APALIGN(t,ap) (__alignof(t))
93
+ #endif
94
+
95
+ #if defined _M_CEE_PURE || (defined _M_CEE && !defined _M_ARM && !defined _M_ARM64)
96
+
97
+ void __cdecl __va_start(va_list*, ...);
98
+ void* __cdecl __va_arg(va_list*, ...);
99
+ void __cdecl __va_end(va_list*);
100
+
101
+ #define __crt_va_start_a(ap, v) ((void)(__va_start(&ap, _ADDRESSOF(v), _SLOTSIZEOF(v), __alignof(v), _ADDRESSOF(v))))
102
+ #define __crt_va_arg(ap, t) (*(t *)__va_arg(&ap, _SLOTSIZEOF(t), _APALIGN(t,ap), (t*)0))
103
+ #define __crt_va_end(ap) ((void)(__va_end(&ap)))
104
+
105
+ #elif defined _M_IX86 && !defined _M_HYBRID_X86_ARM64
106
+
107
+ #define _INTSIZEOF(n) ((sizeof(n) + sizeof(int) - 1) & ~(sizeof(int) - 1))
108
+
109
+ #define __crt_va_start_a(ap, v) ((void)(ap = (va_list)_ADDRESSOF(v) + _INTSIZEOF(v)))
110
+ #define __crt_va_arg(ap, t) (*(t*)((ap += _INTSIZEOF(t)) - _INTSIZEOF(t)))
111
+ #define __crt_va_end(ap) ((void)(ap = (va_list)0))
112
+
113
+ #elif defined _M_ARM
114
+
115
+ #ifdef __cplusplus
116
+ void __cdecl __va_start(va_list*, ...);
117
+ #define __crt_va_start_a(ap, v) ((void)(__va_start(&ap, _ADDRESSOF(v), _SLOTSIZEOF(v), _ADDRESSOF(v))))
118
+ #else
119
+ #define __crt_va_start_a(ap, v) ((void)(ap = (va_list)_ADDRESSOF(v) + _SLOTSIZEOF(v)))
120
+ #endif
121
+
122
+ #define __crt_va_arg(ap, t) (*(t*)((ap += _SLOTSIZEOF(t) + _APALIGN(t,ap)) - _SLOTSIZEOF(t)))
123
+ #define __crt_va_end(ap) ((void)(ap = (va_list)0))
124
+
125
+ #elif defined _M_HYBRID_X86_ARM64
126
+ void __cdecl __va_start(va_list*, ...);
127
+ #define __crt_va_start_a(ap,v) ((void)(__va_start(&ap, _ADDRESSOF(v), _SLOTSIZEOF(v), __alignof(v), _ADDRESSOF(v))))
128
+ #define __crt_va_arg(ap, t) (*(t*)((ap += _SLOTSIZEOF(t)) - _SLOTSIZEOF(t)))
129
+ #define __crt_va_end(ap) ((void)(ap = (va_list)0))
130
+
131
+ #elif defined _M_ARM64
132
+
133
+ void __cdecl __va_start(va_list*, ...);
134
+
135
+ #define __crt_va_start_a(ap,v) ((void)(__va_start(&ap, _ADDRESSOF(v), _SLOTSIZEOF(v), __alignof(v), _ADDRESSOF(v))))
136
+ #define __crt_va_arg(ap, t) \
137
+ ((sizeof(t) > (2 * sizeof(__int64))) \
138
+ ? **(t**)((ap += sizeof(__int64)) - sizeof(__int64)) \
139
+ : *(t*)((ap += _SLOTSIZEOF(t) + _APALIGN(t,ap)) - _SLOTSIZEOF(t)))
140
+ #define __crt_va_end(ap) ((void)(ap = (va_list)0))
141
+
142
+ #elif defined _M_ARM64EC
143
+ void __cdecl __va_start(va_list*, ...);
144
+ //take the ARM64 va_start (for now)
145
+ #define __crt_va_start_a(ap,v) ((void)(__va_start(&ap, _ADDRESSOF(v), _SLOTSIZEOF(v), __alignof(v), _ADDRESSOF(v))))
146
+ //a hybrid va arg, to account for the shift in calling convention, with the alignment of ARM64
147
+ #define __crt_va_arg(ap, t) \
148
+ ((sizeof(t) > sizeof(__int64) || (sizeof(t) & (sizeof(t) - 1)) != 0) \
149
+ ? **(t**)((ap += sizeof(__int64)) - sizeof(__int64)) \
150
+ : *(t*)((ap += _SLOTSIZEOF(t) + _APALIGN(t,ap)) - _SLOTSIZEOF(t)))
151
+ #define __crt_va_end(ap) ((void)(ap = (va_list)0))
152
+
153
+ #elif defined _M_X64
154
+
155
+ void __cdecl __va_start(va_list* , ...);
156
+
157
+ #define __crt_va_start_a(ap, x) ((void)(__va_start(&ap, x)))
158
+ #define __crt_va_arg(ap, t) \
159
+ ((sizeof(t) > sizeof(__int64) || (sizeof(t) & (sizeof(t) - 1)) != 0) \
160
+ ? **(t**)((ap += sizeof(__int64)) - sizeof(__int64)) \
161
+ : *(t* )((ap += sizeof(__int64)) - sizeof(__int64)))
162
+ #define __crt_va_end(ap) ((void)(ap = (va_list)0))
163
+
164
+ #endif
165
+
166
+ #ifdef __cplusplus
167
+ } // extern "C"
168
+ #endif
169
+
170
+ #if defined __cplusplus && !defined _CRT_NO_VA_START_VALIDATION
171
+ extern "C++"
172
+ {
173
+ template <typename _Ty>
174
+ struct __vcrt_va_list_is_reference
175
+ {
176
+ enum : bool { __the_value = false };
177
+ };
178
+
179
+ template <typename _Ty>
180
+ struct __vcrt_va_list_is_reference<_Ty&>
181
+ {
182
+ enum : bool { __the_value = true };
183
+ };
184
+
185
+ template <typename _Ty>
186
+ struct __vcrt_va_list_is_reference<_Ty&&>
187
+ {
188
+ enum : bool { __the_value = true };
189
+ };
190
+
191
+ template <typename _Ty>
192
+ struct __vcrt_assert_va_start_is_not_reference
193
+ {
194
+ static_assert(!__vcrt_va_list_is_reference<_Ty>::__the_value,
195
+ "va_start argument must not have reference type and must not be parenthesized");
196
+ };
197
+ } // extern "C++"
198
+
199
+ #define __crt_va_start(ap, x) ((void)(__vcrt_assert_va_start_is_not_reference<decltype(x)>(), __crt_va_start_a(ap, x)))
200
+
201
+ #else // ^^^ __cplusplus ^^^ // vvv !__cplusplus vvv //
202
+
203
+ #define __crt_va_start(ap, x) __crt_va_start_a(ap, x)
204
+
205
+ #endif
206
+
207
+ #pragma warning(pop) // _VCRUNTIME_DISABLED_WARNINGS
208
+ #pragma pack(pop)
miniMSVC/VC/Tools/MSVC/14.42.34433/include/valarray ADDED
@@ -0,0 +1,2120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // valarray standard header
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ #ifndef _VALARRAY_
7
+ #define _VALARRAY_
8
+ #include <yvals_core.h>
9
+ #if _STL_COMPILER_PREPROCESSOR
10
+ #include <cmath>
11
+ #include <xmemory>
12
+
13
+ #pragma pack(push, _CRT_PACKING)
14
+ #pragma warning(push, _STL_WARNING_LEVEL)
15
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
16
+ _STL_DISABLE_CLANG_WARNINGS
17
+ #pragma push_macro("new")
18
+ #undef new
19
+
20
+ _STD_BEGIN
21
+ _EXPORT_STD class gslice;
22
+ _EXPORT_STD class slice;
23
+
24
+ _EXPORT_STD template <class _Ty>
25
+ class gslice_array;
26
+ _EXPORT_STD template <class _Ty>
27
+ class indirect_array;
28
+ _EXPORT_STD template <class _Ty>
29
+ class mask_array;
30
+ _EXPORT_STD template <class _Ty>
31
+ class slice_array;
32
+ _EXPORT_STD template <class _Ty>
33
+ class valarray;
34
+
35
+ template <class _Ty>
36
+ _Ty* _Allocate_for_op_delete(size_t _Count) {
37
+ // allocates space for _Count objects of type _Ty
38
+ if (_Count == 0) {
39
+ return nullptr;
40
+ }
41
+
42
+ const size_t _Bytes = _Get_size_of_n<sizeof(_Ty)>(_Count);
43
+ #ifdef __cpp_aligned_new
44
+ constexpr bool _Extended_alignment = alignof(_Ty) > __STDCPP_DEFAULT_NEW_ALIGNMENT__;
45
+ if constexpr (_Extended_alignment) {
46
+ return static_cast<_Ty*>(::operator new(_Bytes, align_val_t{alignof(_Ty)}));
47
+ } else
48
+ #endif // defined(__cpp_aligned_new)
49
+ {
50
+ return static_cast<_Ty*>(::operator new(_Bytes));
51
+ }
52
+ }
53
+
54
+ using _Boolarray = valarray<bool>;
55
+ using _Sizarray = valarray<size_t>;
56
+
57
+ _EXPORT_STD template <class _Ty>
58
+ class valarray { // store array with various indexing options
59
+ public:
60
+ friend _Tidy_deallocate_guard<valarray>;
61
+
62
+ template <class _Ty2>
63
+ friend _Ty2* begin(valarray<_Ty2>& _Array) noexcept /* strengthened */;
64
+
65
+ template <class _Ty2>
66
+ friend const _Ty2* begin(const valarray<_Ty2>& _Array) noexcept /* strengthened */;
67
+
68
+ template <class _Ty2>
69
+ friend _Ty2* end(valarray<_Ty2>& _Array) noexcept /* strengthened */;
70
+
71
+ template <class _Ty2>
72
+ friend const _Ty2* end(const valarray<_Ty2>& _Array) noexcept /* strengthened */;
73
+
74
+ using value_type = _Ty;
75
+
76
+ valarray() = default; // construct empty valarray
77
+
78
+ explicit valarray(size_t _Count) { // construct with _Count * _Ty()
79
+ _Grow(_Count);
80
+ }
81
+
82
+ valarray(const _Ty& _Val, size_t _Count) { // construct with _Count * _Val
83
+ _Grow(_Count, &_Val);
84
+ }
85
+
86
+ valarray(const _Ty* _Ptr, size_t _Count) { // construct with [_Ptr, _Ptr + _Count)
87
+ _Grow(_Count, _Ptr, 1);
88
+ }
89
+
90
+ valarray(const valarray& _Right) {
91
+ _Grow(_Right.size(), _Right._Myptr, 1);
92
+ }
93
+
94
+ valarray(const slice_array<_Ty>& _Slicearr) {
95
+ *this = _Slicearr;
96
+ }
97
+
98
+ valarray(const gslice_array<_Ty>& _Gslicearr) {
99
+ *this = _Gslicearr;
100
+ }
101
+
102
+ valarray(const mask_array<_Ty>& _Maskarr) {
103
+ *this = _Maskarr;
104
+ }
105
+
106
+ valarray(const indirect_array<_Ty>& _Indarr) {
107
+ *this = _Indarr;
108
+ }
109
+
110
+ valarray(valarray&& _Right) noexcept
111
+ : _Myptr(_STD exchange(_Right._Myptr, nullptr)), _Mysize(_STD exchange(_Right._Mysize, size_t{})) {}
112
+
113
+ valarray& operator=(valarray&& _Right) noexcept {
114
+ if (this != _STD addressof(_Right)) { // clear this and steal from _Right
115
+ _Tidy_deallocate();
116
+ _Myptr = _STD exchange(_Right._Myptr, nullptr);
117
+ _Mysize = _STD exchange(_Right._Mysize, size_t{});
118
+ }
119
+ return *this;
120
+ }
121
+
122
+ valarray(initializer_list<_Ty> _Ilist) {
123
+ _Grow(_Ilist.size(), _Ilist.begin(), 1);
124
+ }
125
+
126
+ valarray& operator=(initializer_list<_Ty> _Ilist) {
127
+ _Assign(_Ilist.size(), _Ilist.begin());
128
+ return *this;
129
+ }
130
+
131
+ void swap(valarray& _Right) noexcept {
132
+ if (this != _STD addressof(_Right)) {
133
+ _STD swap(_Myptr, _Right._Myptr);
134
+ _STD swap(_Mysize, _Right._Mysize);
135
+ }
136
+ }
137
+
138
+ ~valarray() noexcept {
139
+ _Tidy_deallocate();
140
+ }
141
+
142
+ valarray& operator=(const valarray& _Right) {
143
+ if (this != _STD addressof(_Right)) {
144
+ _Assign(_Right.size(), _Right._Myptr);
145
+ }
146
+
147
+ return *this;
148
+ }
149
+
150
+ valarray& operator=(const _Ty& _Val) {
151
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
152
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
153
+ _Myptr[_Idx] = _Val;
154
+ }
155
+ return *this;
156
+ }
157
+
158
+ void resize(size_t _Newsize) { // determine new length, filling with _Ty() elements
159
+ _Tidy_deallocate();
160
+ _Grow(_Newsize);
161
+ }
162
+
163
+ void resize(size_t _Newsize, _Ty _Val) { // determine new length, filling with _Val elements
164
+ _Tidy_deallocate();
165
+ _Grow(_Newsize, &_Val, 0);
166
+ }
167
+
168
+ valarray& operator=(const slice_array<_Ty>& _Slicearr); // defined below
169
+
170
+ valarray& operator=(const gslice_array<_Ty>& _Gslicearr); // defined below
171
+
172
+ valarray& operator=(const mask_array<_Ty>& _Maskarr); // defined below
173
+
174
+ valarray& operator=(const indirect_array<_Ty>& _Indarr); // defined below
175
+
176
+ _NODISCARD valarray operator+() const {
177
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
178
+ valarray<_Ty> _Ans(_Size);
179
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
180
+ _Ans[_Idx] = +_Myptr[_Idx];
181
+ }
182
+ return _Ans;
183
+ }
184
+
185
+ _NODISCARD valarray operator-() const {
186
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
187
+ valarray<_Ty> _Ans(_Size);
188
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
189
+ _Ans[_Idx] = -_Myptr[_Idx];
190
+ }
191
+ return _Ans;
192
+ }
193
+
194
+ _NODISCARD valarray operator~() const {
195
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
196
+ valarray<_Ty> _Ans(_Size);
197
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
198
+ _Ans[_Idx] = ~_Myptr[_Idx];
199
+ }
200
+ return _Ans;
201
+ }
202
+
203
+ _NODISCARD _Boolarray operator!() const {
204
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
205
+ valarray<bool> _Ans(_Size);
206
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
207
+ _Ans[_Idx] = !_Myptr[_Idx];
208
+ }
209
+ return _Ans;
210
+ }
211
+
212
+ valarray& operator*=(const _Ty& _Right) {
213
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
214
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
215
+ _Myptr[_Idx] *= _Right;
216
+ }
217
+ return *this;
218
+ }
219
+
220
+ valarray& operator/=(const _Ty& _Right) {
221
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
222
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
223
+ _Myptr[_Idx] /= _Right;
224
+ }
225
+ return *this;
226
+ }
227
+
228
+ valarray& operator%=(const _Ty& _Right) {
229
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
230
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
231
+ _Myptr[_Idx] %= _Right;
232
+ }
233
+ return *this;
234
+ }
235
+
236
+ valarray& operator+=(const _Ty& _Right) {
237
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
238
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
239
+ _Myptr[_Idx] += _Right;
240
+ }
241
+ return *this;
242
+ }
243
+
244
+ valarray& operator-=(const _Ty& _Right) {
245
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
246
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
247
+ _Myptr[_Idx] -= _Right;
248
+ }
249
+ return *this;
250
+ }
251
+
252
+ valarray& operator^=(const _Ty& _Right) {
253
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
254
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
255
+ _Myptr[_Idx] ^= _Right;
256
+ }
257
+ return *this;
258
+ }
259
+
260
+ valarray& operator&=(const _Ty& _Right) {
261
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
262
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
263
+ _Myptr[_Idx] &= _Right;
264
+ }
265
+ return *this;
266
+ }
267
+
268
+ valarray& operator|=(const _Ty& _Right) {
269
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
270
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
271
+ _Myptr[_Idx] |= _Right;
272
+ }
273
+ return *this;
274
+ }
275
+
276
+ valarray& operator<<=(const _Ty& _Right) {
277
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
278
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
279
+ _Myptr[_Idx] <<= _Right;
280
+ }
281
+ return *this;
282
+ }
283
+
284
+ valarray& operator>>=(const _Ty& _Right) {
285
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
286
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
287
+ _Myptr[_Idx] >>= _Right;
288
+ }
289
+ return *this;
290
+ }
291
+
292
+ valarray& operator*=(const valarray& _Right) {
293
+ #if _CONTAINER_DEBUG_LEVEL > 0
294
+ _STL_VERIFY(_Mysize == _Right._Mysize, "valarrays of different lengths");
295
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
296
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
297
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
298
+ _Myptr[_Idx] *= _Right[_Idx];
299
+ }
300
+ return *this;
301
+ }
302
+
303
+ valarray& operator/=(const valarray& _Right) {
304
+ #if _CONTAINER_DEBUG_LEVEL > 0
305
+ _STL_VERIFY(_Mysize == _Right._Mysize, "valarrays of different lengths");
306
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
307
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
308
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
309
+ _Myptr[_Idx] /= _Right[_Idx];
310
+ }
311
+ return *this;
312
+ }
313
+
314
+ valarray& operator%=(const valarray& _Right) {
315
+ #if _CONTAINER_DEBUG_LEVEL > 0
316
+ _STL_VERIFY(_Mysize == _Right._Mysize, "valarrays of different lengths");
317
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
318
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
319
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
320
+ _Myptr[_Idx] %= _Right[_Idx];
321
+ }
322
+ return *this;
323
+ }
324
+
325
+ valarray& operator+=(const valarray& _Right) {
326
+ #if _CONTAINER_DEBUG_LEVEL > 0
327
+ _STL_VERIFY(_Mysize == _Right._Mysize, "valarrays of different lengths");
328
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
329
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
330
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
331
+ _Myptr[_Idx] += _Right[_Idx];
332
+ }
333
+ return *this;
334
+ }
335
+
336
+ valarray& operator-=(const valarray& _Right) {
337
+ #if _CONTAINER_DEBUG_LEVEL > 0
338
+ _STL_VERIFY(_Mysize == _Right._Mysize, "valarrays of different lengths");
339
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
340
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
341
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
342
+ _Myptr[_Idx] -= _Right[_Idx];
343
+ }
344
+ return *this;
345
+ }
346
+
347
+ valarray& operator^=(const valarray& _Right) {
348
+ #if _CONTAINER_DEBUG_LEVEL > 0
349
+ _STL_VERIFY(_Mysize == _Right._Mysize, "valarrays of different lengths");
350
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
351
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
352
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
353
+ _Myptr[_Idx] ^= _Right[_Idx];
354
+ }
355
+ return *this;
356
+ }
357
+
358
+ valarray& operator|=(const valarray& _Right) {
359
+ #if _CONTAINER_DEBUG_LEVEL > 0
360
+ _STL_VERIFY(_Mysize == _Right._Mysize, "valarrays of different lengths");
361
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
362
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
363
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
364
+ _Myptr[_Idx] |= _Right[_Idx];
365
+ }
366
+ return *this;
367
+ }
368
+
369
+ valarray& operator&=(const valarray& _Right) {
370
+ #if _CONTAINER_DEBUG_LEVEL > 0
371
+ _STL_VERIFY(_Mysize == _Right._Mysize, "valarrays of different lengths");
372
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
373
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
374
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
375
+ _Myptr[_Idx] &= _Right[_Idx];
376
+ }
377
+ return *this;
378
+ }
379
+
380
+ valarray& operator<<=(const valarray& _Right) {
381
+ #if _CONTAINER_DEBUG_LEVEL > 0
382
+ _STL_VERIFY(_Mysize == _Right._Mysize, "valarrays of different lengths");
383
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
384
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
385
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
386
+ _Myptr[_Idx] <<= _Right[_Idx];
387
+ }
388
+ return *this;
389
+ }
390
+
391
+ valarray& operator>>=(const valarray& _Right) {
392
+ #if _CONTAINER_DEBUG_LEVEL > 0
393
+ _STL_VERIFY(_Mysize == _Right._Mysize, "valarrays of different lengths");
394
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
395
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
396
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
397
+ _Myptr[_Idx] >>= _Right[_Idx];
398
+ }
399
+ return *this;
400
+ }
401
+
402
+ _NODISCARD size_t size() const noexcept /* strengthened */ {
403
+ return _Mysize;
404
+ }
405
+
406
+ _NODISCARD const _Ty& operator[](size_t _Off) const noexcept /* strengthened */ {
407
+ #if _CONTAINER_DEBUG_LEVEL > 0
408
+ _STL_VERIFY(_Off < _Mysize, "valarray subscript out of range");
409
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
410
+
411
+ return _Myptr[_Off];
412
+ }
413
+
414
+ _NODISCARD _Ty& operator[](size_t _Off) noexcept /* strengthened */ {
415
+ #if _CONTAINER_DEBUG_LEVEL > 0
416
+ _STL_VERIFY(_Off < _Mysize, "valarray subscript out of range");
417
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
418
+
419
+ return _Myptr[_Off];
420
+ }
421
+
422
+ _NODISCARD valarray operator[](slice _Slicearr) const; // defined below
423
+
424
+ _NODISCARD slice_array<_Ty> operator[](slice _Slicearr) noexcept /* strengthened */; // defined below
425
+
426
+ _NODISCARD valarray operator[](const gslice& _Gslicearr) const; // defined below
427
+
428
+ _NODISCARD gslice_array<_Ty> operator[](const gslice& _Gslicearr); // defined below
429
+
430
+ _NODISCARD valarray operator[](const _Boolarray& _Boolarr) const; // defined below
431
+
432
+ _NODISCARD mask_array<_Ty> operator[](const _Boolarray& _Boolarr); // defined below
433
+
434
+ _NODISCARD valarray operator[](const _Sizarray& _Indarr) const; // defined below
435
+
436
+ _NODISCARD indirect_array<_Ty> operator[](const _Sizarray& _Indarr); // defined below
437
+
438
+ _NODISCARD _Ty sum() const {
439
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
440
+ _Ty _Sum = _Myptr[0];
441
+ for (size_t _Idx = 1; _Idx < _Size; ++_Idx) {
442
+ _Sum += _Myptr[_Idx];
443
+ }
444
+
445
+ return _Sum;
446
+ }
447
+
448
+ _NODISCARD _Ty(min)() const {
449
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
450
+ _Ty _Min = _Myptr[0];
451
+ for (size_t _Idx = 1; _Idx < _Size; ++_Idx) {
452
+ if (_Myptr[_Idx] < _Min) {
453
+ _Min = _Myptr[_Idx];
454
+ }
455
+ }
456
+
457
+ return _Min;
458
+ }
459
+
460
+ _NODISCARD _Ty(max)() const {
461
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
462
+ _Ty _Max = _Myptr[0];
463
+ for (size_t _Idx = 1; _Idx < _Size; ++_Idx) {
464
+ if (_Max < _Myptr[_Idx]) {
465
+ _Max = _Myptr[_Idx];
466
+ }
467
+ }
468
+
469
+ return _Max;
470
+ }
471
+
472
+ _NODISCARD valarray shift(int _Count) const {
473
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
474
+ valarray<_Ty> _Ans(_Size);
475
+ size_t _Min = 0;
476
+ size_t _Max = _Size;
477
+ if (_Count < 0) {
478
+ const size_t _Skip = static_cast<size_t>(-_Count);
479
+ _Min += _Skip;
480
+ } else {
481
+ const size_t _Skip = static_cast<size_t>(_Count);
482
+ if (_Skip < _Size) {
483
+ _Max -= _Skip;
484
+ } else {
485
+ _Max = 0;
486
+ }
487
+ }
488
+ for (size_t _Idx = _Min; _Idx < _Max; ++_Idx) {
489
+ _Ans[_Idx] = _Myptr[_Idx + _Count];
490
+ }
491
+ return _Ans;
492
+ }
493
+
494
+ _NODISCARD valarray cshift(int _Count) const {
495
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
496
+ if (_Size != 0) {
497
+ if (_Count < 0) { // right shift
498
+ if (_Size < size_t{0} - _Count) {
499
+ _Count = static_cast<int>(_Size - (size_t{0} - _Count - _Size) % _Size);
500
+ } else {
501
+ _Count = static_cast<int>(_Size + _Count);
502
+ }
503
+ } else if (_Size <= static_cast<size_t>(_Count)) {
504
+ _Count %= _Size;
505
+ }
506
+ }
507
+
508
+ valarray<_Ty> _Ans(_Size);
509
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
510
+ _Ans[_Idx] =
511
+ _Size - _Idx <= static_cast<size_t>(_Count) ? _Myptr[_Idx - _Size + _Count] : _Myptr[_Idx + _Count];
512
+ }
513
+ return _Ans;
514
+ }
515
+
516
+ _NODISCARD valarray apply(_Ty _Func(_Ty)) const {
517
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
518
+ valarray<_Ty> _Ans(_Size);
519
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
520
+ _Ans[_Idx] = _Func(_Myptr[_Idx]);
521
+ }
522
+ return _Ans;
523
+ }
524
+
525
+ _NODISCARD valarray apply(_Ty _Func(const _Ty&)) const {
526
+ // return valarray transformed by _Func, nonmutable argument
527
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
528
+ valarray<_Ty> _Ans(_Size);
529
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
530
+ _Ans[_Idx] = _Func(_Myptr[_Idx]);
531
+ }
532
+ return _Ans;
533
+ }
534
+
535
+ private:
536
+ void _Grow(size_t _Newsize) { // allocate space for _Count elements and fill with default values
537
+ if (0 < _Newsize) { // worth doing, allocate
538
+ _Myptr = _Allocate_for_op_delete<_Ty>(_Newsize);
539
+ _Tidy_deallocate_guard<valarray> _Guard{this};
540
+ for (size_t _Idx = 0; _Idx < _Newsize; ++_Idx) {
541
+ _Construct_in_place(_Myptr[_Idx]);
542
+ }
543
+
544
+ _Guard._Target = nullptr;
545
+ _Mysize = _Newsize;
546
+ }
547
+ }
548
+
549
+ void _Grow(size_t _Newsize, const _Ty* _Ptr, size_t _Inc = 0) {
550
+ // allocate space for _Count elements and fill with *_Ptr
551
+ if (0 < _Newsize) { // worth doing, allocate
552
+ _Myptr = _Allocate_for_op_delete<_Ty>(_Newsize);
553
+ _Tidy_deallocate_guard<valarray> _Guard{this};
554
+ for (size_t _Idx = 0; _Idx < _Newsize; ++_Idx, _Ptr += _Inc) {
555
+ _Construct_in_place(_Myptr[_Idx], *_Ptr);
556
+ }
557
+
558
+ _Guard._Target = nullptr;
559
+ _Mysize = _Newsize;
560
+ }
561
+ }
562
+
563
+ void _Tidy_deallocate() noexcept {
564
+ if (_Myptr) { // destroy elements
565
+ _Destroy_range(_Myptr, _Myptr + _Mysize);
566
+ #ifdef __cpp_aligned_new
567
+ constexpr bool _Extended_alignment = alignof(_Ty) > __STDCPP_DEFAULT_NEW_ALIGNMENT__;
568
+ if constexpr (_Extended_alignment) {
569
+ ::operator delete(static_cast<void*>(_Myptr), align_val_t{alignof(_Ty)});
570
+ } else
571
+ #endif // defined(__cpp_aligned_new)
572
+ {
573
+ ::operator delete(static_cast<void*>(_Myptr));
574
+ }
575
+ }
576
+
577
+ _Mysize = 0;
578
+ _Myptr = nullptr;
579
+ }
580
+
581
+ void _Assign(size_t _Newsize, const _Ty* _Ptr) {
582
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
583
+ if (_Size == _Newsize) {
584
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
585
+ _Myptr[_Idx] = _Ptr[_Idx];
586
+ }
587
+ } else { // resize and copy
588
+ _Tidy_deallocate();
589
+ _Grow(_Newsize, _Ptr, 1);
590
+ }
591
+ }
592
+
593
+ _Ty* _Myptr = nullptr; // current storage reserved for array
594
+ size_t _Mysize = 0; // current length of sequence
595
+ };
596
+
597
+ #if _HAS_CXX17
598
+ template <class _Ty, size_t _Nx>
599
+ valarray(const _Ty (&)[_Nx], size_t) -> valarray<_Ty>;
600
+ #endif // _HAS_CXX17
601
+
602
+ _EXPORT_STD template <class _Ty>
603
+ void swap(valarray<_Ty>& _Left, valarray<_Ty>& _Right) noexcept {
604
+ _Left.swap(_Right);
605
+ }
606
+
607
+ _EXPORT_STD template <class _Ty>
608
+ _NODISCARD _Ty* begin(valarray<_Ty>& _Array) noexcept /* strengthened */ {
609
+ return _Array._Myptr;
610
+ }
611
+
612
+ _EXPORT_STD template <class _Ty>
613
+ _NODISCARD const _Ty* begin(const valarray<_Ty>& _Array) noexcept /* strengthened */ {
614
+ return _Array._Myptr;
615
+ }
616
+
617
+ _EXPORT_STD template <class _Ty>
618
+ _NODISCARD _Ty* end(valarray<_Ty>& _Array) noexcept /* strengthened */ {
619
+ return _Array._Myptr + _Array.size();
620
+ }
621
+
622
+ _EXPORT_STD template <class _Ty>
623
+ _NODISCARD const _Ty* end(const valarray<_Ty>& _Array) noexcept /* strengthened */ {
624
+ return _Array._Myptr + _Array.size();
625
+ }
626
+
627
+ _EXPORT_STD template <class _Ty>
628
+ _NODISCARD valarray<_Ty> operator*(const valarray<_Ty>& _Left, const typename valarray<_Ty>::value_type& _Right) {
629
+ const size_t _Size = _Left.size();
630
+ valarray<_Ty> _Ans(_Size);
631
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
632
+ _Ans[_Idx] = _Left[_Idx] * _Right;
633
+ }
634
+ return _Ans;
635
+ }
636
+
637
+ _EXPORT_STD template <class _Ty>
638
+ _NODISCARD valarray<_Ty> operator*(const typename valarray<_Ty>::value_type& _Left, const valarray<_Ty>& _Right) {
639
+ const size_t _Size = _Right.size();
640
+ valarray<_Ty> _Ans(_Size);
641
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
642
+ _Ans[_Idx] = _Left * _Right[_Idx];
643
+ }
644
+ return _Ans;
645
+ }
646
+
647
+ _EXPORT_STD template <class _Ty>
648
+ _NODISCARD valarray<_Ty> operator/(const valarray<_Ty>& _Left, const typename valarray<_Ty>::value_type& _Right) {
649
+ const size_t _Size = _Left.size();
650
+ valarray<_Ty> _Ans(_Size);
651
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
652
+ _Ans[_Idx] = _Left[_Idx] / _Right;
653
+ }
654
+ return _Ans;
655
+ }
656
+
657
+ _EXPORT_STD template <class _Ty>
658
+ _NODISCARD valarray<_Ty> operator/(const typename valarray<_Ty>::value_type& _Left, const valarray<_Ty>& _Right) {
659
+ const size_t _Size = _Right.size();
660
+ valarray<_Ty> _Ans(_Size);
661
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
662
+ _Ans[_Idx] = _Left / _Right[_Idx];
663
+ }
664
+ return _Ans;
665
+ }
666
+
667
+ _EXPORT_STD template <class _Ty>
668
+ _NODISCARD valarray<_Ty> operator%(const valarray<_Ty>& _Left, const typename valarray<_Ty>::value_type& _Right) {
669
+ const size_t _Size = _Left.size();
670
+ valarray<_Ty> _Ans(_Size);
671
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
672
+ _Ans[_Idx] = _Left[_Idx] % _Right;
673
+ }
674
+ return _Ans;
675
+ }
676
+
677
+ _EXPORT_STD template <class _Ty>
678
+ _NODISCARD valarray<_Ty> operator%(const typename valarray<_Ty>::value_type& _Left, const valarray<_Ty>& _Right) {
679
+ const size_t _Size = _Right.size();
680
+ valarray<_Ty> _Ans(_Size);
681
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
682
+ _Ans[_Idx] = _Left % _Right[_Idx];
683
+ }
684
+ return _Ans;
685
+ }
686
+
687
+ _EXPORT_STD template <class _Ty>
688
+ _NODISCARD valarray<_Ty> operator+(const valarray<_Ty>& _Left, const typename valarray<_Ty>::value_type& _Right) {
689
+ const size_t _Size = _Left.size();
690
+ valarray<_Ty> _Ans(_Size);
691
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
692
+ _Ans[_Idx] = _Left[_Idx] + _Right;
693
+ }
694
+ return _Ans;
695
+ }
696
+
697
+ _EXPORT_STD template <class _Ty>
698
+ _NODISCARD valarray<_Ty> operator+(const typename valarray<_Ty>::value_type& _Left, const valarray<_Ty>& _Right) {
699
+ const size_t _Size = _Right.size();
700
+ valarray<_Ty> _Ans(_Size);
701
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
702
+ _Ans[_Idx] = _Left + _Right[_Idx];
703
+ }
704
+ return _Ans;
705
+ }
706
+
707
+ _EXPORT_STD template <class _Ty>
708
+ _NODISCARD valarray<_Ty> operator-(const valarray<_Ty>& _Left, const typename valarray<_Ty>::value_type& _Right) {
709
+ const size_t _Size = _Left.size();
710
+ valarray<_Ty> _Ans(_Size);
711
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
712
+ _Ans[_Idx] = _Left[_Idx] - _Right;
713
+ }
714
+ return _Ans;
715
+ }
716
+
717
+ _EXPORT_STD template <class _Ty>
718
+ _NODISCARD valarray<_Ty> operator-(const typename valarray<_Ty>::value_type& _Left, const valarray<_Ty>& _Right) {
719
+ const size_t _Size = _Right.size();
720
+ valarray<_Ty> _Ans(_Size);
721
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
722
+ _Ans[_Idx] = _Left - _Right[_Idx];
723
+ }
724
+ return _Ans;
725
+ }
726
+
727
+ _EXPORT_STD template <class _Ty>
728
+ _NODISCARD valarray<_Ty> operator^(const valarray<_Ty>& _Left, const typename valarray<_Ty>::value_type& _Right) {
729
+ const size_t _Size = _Left.size();
730
+ valarray<_Ty> _Ans(_Size);
731
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
732
+ _Ans[_Idx] = _Left[_Idx] ^ _Right;
733
+ }
734
+ return _Ans;
735
+ }
736
+
737
+ _EXPORT_STD template <class _Ty>
738
+ _NODISCARD valarray<_Ty> operator^(const typename valarray<_Ty>::value_type& _Left, const valarray<_Ty>& _Right) {
739
+ const size_t _Size = _Right.size();
740
+ valarray<_Ty> _Ans(_Size);
741
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
742
+ _Ans[_Idx] = _Left ^ _Right[_Idx];
743
+ }
744
+ return _Ans;
745
+ }
746
+
747
+ _EXPORT_STD template <class _Ty>
748
+ _NODISCARD valarray<_Ty> operator&(const valarray<_Ty>& _Left, const typename valarray<_Ty>::value_type& _Right) {
749
+ const size_t _Size = _Left.size();
750
+ valarray<_Ty> _Ans(_Size);
751
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
752
+ _Ans[_Idx] = _Left[_Idx] & _Right;
753
+ }
754
+ return _Ans;
755
+ }
756
+
757
+ _EXPORT_STD template <class _Ty>
758
+ _NODISCARD valarray<_Ty> operator&(const typename valarray<_Ty>::value_type& _Left, const valarray<_Ty>& _Right) {
759
+ const size_t _Size = _Right.size();
760
+ valarray<_Ty> _Ans(_Size);
761
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
762
+ _Ans[_Idx] = _Left & _Right[_Idx];
763
+ }
764
+ return _Ans;
765
+ }
766
+
767
+ _EXPORT_STD template <class _Ty>
768
+ _NODISCARD valarray<_Ty> operator|(const valarray<_Ty>& _Left, const typename valarray<_Ty>::value_type& _Right) {
769
+ const size_t _Size = _Left.size();
770
+ valarray<_Ty> _Ans(_Size);
771
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
772
+ _Ans[_Idx] = _Left[_Idx] | _Right;
773
+ }
774
+ return _Ans;
775
+ }
776
+
777
+ _EXPORT_STD template <class _Ty>
778
+ _NODISCARD valarray<_Ty> operator|(const typename valarray<_Ty>::value_type& _Left, const valarray<_Ty>& _Right) {
779
+ const size_t _Size = _Right.size();
780
+ valarray<_Ty> _Ans(_Size);
781
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
782
+ _Ans[_Idx] = _Left | _Right[_Idx];
783
+ }
784
+ return _Ans;
785
+ }
786
+
787
+ _EXPORT_STD template <class _Ty>
788
+ _NODISCARD valarray<_Ty> operator<<(const valarray<_Ty>& _Left, const typename valarray<_Ty>::value_type& _Right) {
789
+ const size_t _Size = _Left.size();
790
+ valarray<_Ty> _Ans(_Size);
791
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
792
+ _Ans[_Idx] = _Left[_Idx] << _Right;
793
+ }
794
+ return _Ans;
795
+ }
796
+
797
+ _EXPORT_STD template <class _Ty>
798
+ _NODISCARD valarray<_Ty> operator<<(const typename valarray<_Ty>::value_type& _Left, const valarray<_Ty>& _Right) {
799
+ const size_t _Size = _Right.size();
800
+ valarray<_Ty> _Ans(_Size);
801
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
802
+ _Ans[_Idx] = _Left << _Right[_Idx];
803
+ }
804
+ return _Ans;
805
+ }
806
+
807
+ _EXPORT_STD template <class _Ty>
808
+ _NODISCARD valarray<_Ty> operator>>(const valarray<_Ty>& _Left, const typename valarray<_Ty>::value_type& _Right) {
809
+ const size_t _Size = _Left.size();
810
+ valarray<_Ty> _Ans(_Size);
811
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
812
+ _Ans[_Idx] = _Left[_Idx] >> _Right;
813
+ }
814
+ return _Ans;
815
+ }
816
+
817
+ _EXPORT_STD template <class _Ty>
818
+ _NODISCARD valarray<_Ty> operator>>(const typename valarray<_Ty>::value_type& _Left, const valarray<_Ty>& _Right) {
819
+ const size_t _Size = _Right.size();
820
+ valarray<_Ty> _Ans(_Size);
821
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
822
+ _Ans[_Idx] = _Left >> _Right[_Idx];
823
+ }
824
+ return _Ans;
825
+ }
826
+
827
+ _EXPORT_STD template <class _Ty>
828
+ _NODISCARD _Boolarray operator&&(const valarray<_Ty>& _Left, const typename valarray<_Ty>::value_type& _Right) {
829
+ const size_t _Size = _Left.size();
830
+ valarray<bool> _Ans(_Size);
831
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
832
+ _Ans[_Idx] = _Left[_Idx] && _Right;
833
+ }
834
+ return _Ans;
835
+ }
836
+
837
+ _EXPORT_STD template <class _Ty>
838
+ _NODISCARD _Boolarray operator&&(const typename valarray<_Ty>::value_type& _Left, const valarray<_Ty>& _Right) {
839
+ const size_t _Size = _Right.size();
840
+ valarray<bool> _Ans(_Size);
841
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
842
+ _Ans[_Idx] = _Left && _Right[_Idx];
843
+ }
844
+ return _Ans;
845
+ }
846
+
847
+ _EXPORT_STD template <class _Ty>
848
+ _NODISCARD _Boolarray operator||(const valarray<_Ty>& _Left, const typename valarray<_Ty>::value_type& _Right) {
849
+ const size_t _Size = _Left.size();
850
+ valarray<bool> _Ans(_Size);
851
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
852
+ _Ans[_Idx] = _Left[_Idx] || _Right;
853
+ }
854
+ return _Ans;
855
+ }
856
+
857
+ _EXPORT_STD template <class _Ty>
858
+ _NODISCARD _Boolarray operator||(const typename valarray<_Ty>::value_type& _Left, const valarray<_Ty>& _Right) {
859
+ const size_t _Size = _Right.size();
860
+ valarray<bool> _Ans(_Size);
861
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
862
+ _Ans[_Idx] = _Left || _Right[_Idx];
863
+ }
864
+ return _Ans;
865
+ }
866
+
867
+ _EXPORT_STD template <class _Ty>
868
+ _NODISCARD valarray<_Ty> operator*(const valarray<_Ty>& _Left, const valarray<_Ty>& _Right) {
869
+ #if _CONTAINER_DEBUG_LEVEL > 0
870
+ _STL_VERIFY(_Left.size() == _Right.size(), "valarrays of different lengths");
871
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
872
+ const size_t _Size = _Left.size();
873
+ valarray<_Ty> _Ans(_Size);
874
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
875
+ _Ans[_Idx] = _Left[_Idx] * _Right[_Idx];
876
+ }
877
+ return _Ans;
878
+ }
879
+
880
+ _EXPORT_STD template <class _Ty>
881
+ _NODISCARD valarray<_Ty> operator/(const valarray<_Ty>& _Left, const valarray<_Ty>& _Right) {
882
+ #if _CONTAINER_DEBUG_LEVEL > 0
883
+ _STL_VERIFY(_Left.size() == _Right.size(), "valarrays of different lengths");
884
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
885
+ const size_t _Size = _Left.size();
886
+ valarray<_Ty> _Ans(_Size);
887
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
888
+ _Ans[_Idx] = _Left[_Idx] / _Right[_Idx];
889
+ }
890
+ return _Ans;
891
+ }
892
+
893
+ _EXPORT_STD template <class _Ty>
894
+ _NODISCARD valarray<_Ty> operator%(const valarray<_Ty>& _Left, const valarray<_Ty>& _Right) {
895
+ #if _CONTAINER_DEBUG_LEVEL > 0
896
+ _STL_VERIFY(_Left.size() == _Right.size(), "valarrays of different lengths");
897
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
898
+ const size_t _Size = _Left.size();
899
+ valarray<_Ty> _Ans(_Size);
900
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
901
+ _Ans[_Idx] = _Left[_Idx] % _Right[_Idx];
902
+ }
903
+ return _Ans;
904
+ }
905
+
906
+ _EXPORT_STD template <class _Ty>
907
+ _NODISCARD valarray<_Ty> operator+(const valarray<_Ty>& _Left, const valarray<_Ty>& _Right) {
908
+ #if _CONTAINER_DEBUG_LEVEL > 0
909
+ _STL_VERIFY(_Left.size() == _Right.size(), "valarrays of different lengths");
910
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
911
+ const size_t _Size = _Left.size();
912
+ valarray<_Ty> _Ans(_Size);
913
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
914
+ _Ans[_Idx] = _Left[_Idx] + _Right[_Idx];
915
+ }
916
+ return _Ans;
917
+ }
918
+
919
+ _EXPORT_STD template <class _Ty>
920
+ _NODISCARD valarray<_Ty> operator-(const valarray<_Ty>& _Left, const valarray<_Ty>& _Right) {
921
+ #if _CONTAINER_DEBUG_LEVEL > 0
922
+ _STL_VERIFY(_Left.size() == _Right.size(), "valarrays of different lengths");
923
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
924
+ const size_t _Size = _Left.size();
925
+ valarray<_Ty> _Ans(_Size);
926
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
927
+ _Ans[_Idx] = _Left[_Idx] - _Right[_Idx];
928
+ }
929
+ return _Ans;
930
+ }
931
+
932
+ _EXPORT_STD template <class _Ty>
933
+ _NODISCARD valarray<_Ty> operator^(const valarray<_Ty>& _Left, const valarray<_Ty>& _Right) {
934
+ #if _CONTAINER_DEBUG_LEVEL > 0
935
+ _STL_VERIFY(_Left.size() == _Right.size(), "valarrays of different lengths");
936
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
937
+ const size_t _Size = _Left.size();
938
+ valarray<_Ty> _Ans(_Size);
939
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
940
+ _Ans[_Idx] = _Left[_Idx] ^ _Right[_Idx];
941
+ }
942
+ return _Ans;
943
+ }
944
+
945
+ _EXPORT_STD template <class _Ty>
946
+ _NODISCARD valarray<_Ty> operator&(const valarray<_Ty>& _Left, const valarray<_Ty>& _Right) {
947
+ #if _CONTAINER_DEBUG_LEVEL > 0
948
+ _STL_VERIFY(_Left.size() == _Right.size(), "valarrays of different lengths");
949
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
950
+ const size_t _Size = _Left.size();
951
+ valarray<_Ty> _Ans(_Size);
952
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
953
+ _Ans[_Idx] = _Left[_Idx] & _Right[_Idx];
954
+ }
955
+ return _Ans;
956
+ }
957
+
958
+ _EXPORT_STD template <class _Ty>
959
+ _NODISCARD valarray<_Ty> operator|(const valarray<_Ty>& _Left, const valarray<_Ty>& _Right) {
960
+ #if _CONTAINER_DEBUG_LEVEL > 0
961
+ _STL_VERIFY(_Left.size() == _Right.size(), "valarrays of different lengths");
962
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
963
+ const size_t _Size = _Left.size();
964
+ valarray<_Ty> _Ans(_Size);
965
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
966
+ _Ans[_Idx] = _Left[_Idx] | _Right[_Idx];
967
+ }
968
+ return _Ans;
969
+ }
970
+
971
+ _EXPORT_STD template <class _Ty>
972
+ _NODISCARD valarray<_Ty> operator<<(const valarray<_Ty>& _Left, const valarray<_Ty>& _Right) {
973
+ #if _CONTAINER_DEBUG_LEVEL > 0
974
+ _STL_VERIFY(_Left.size() == _Right.size(), "valarrays of different lengths");
975
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
976
+ const size_t _Size = _Left.size();
977
+ valarray<_Ty> _Ans(_Size);
978
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
979
+ _Ans[_Idx] = _Left[_Idx] << _Right[_Idx];
980
+ }
981
+ return _Ans;
982
+ }
983
+
984
+ _EXPORT_STD template <class _Ty>
985
+ _NODISCARD valarray<_Ty> operator>>(const valarray<_Ty>& _Left, const valarray<_Ty>& _Right) {
986
+ #if _CONTAINER_DEBUG_LEVEL > 0
987
+ _STL_VERIFY(_Left.size() == _Right.size(), "valarrays of different lengths");
988
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
989
+ const size_t _Size = _Left.size();
990
+ valarray<_Ty> _Ans(_Size);
991
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
992
+ _Ans[_Idx] = _Left[_Idx] >> _Right[_Idx];
993
+ }
994
+ return _Ans;
995
+ }
996
+
997
+ _EXPORT_STD template <class _Ty>
998
+ _NODISCARD _Boolarray operator&&(const valarray<_Ty>& _Left, const valarray<_Ty>& _Right) {
999
+ #if _CONTAINER_DEBUG_LEVEL > 0
1000
+ _STL_VERIFY(_Left.size() == _Right.size(), "valarrays of different lengths");
1001
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
1002
+ const size_t _Size = _Left.size();
1003
+ valarray<bool> _Ans(_Size);
1004
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1005
+ _Ans[_Idx] = _Left[_Idx] && _Right[_Idx];
1006
+ }
1007
+ return _Ans;
1008
+ }
1009
+
1010
+ _EXPORT_STD template <class _Ty>
1011
+ _NODISCARD _Boolarray operator||(const valarray<_Ty>& _Left, const valarray<_Ty>& _Right) {
1012
+ #if _CONTAINER_DEBUG_LEVEL > 0
1013
+ _STL_VERIFY(_Left.size() == _Right.size(), "valarrays of different lengths");
1014
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
1015
+ const size_t _Size = _Left.size();
1016
+ valarray<bool> _Ans(_Size);
1017
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1018
+ _Ans[_Idx] = _Left[_Idx] || _Right[_Idx];
1019
+ }
1020
+ return _Ans;
1021
+ }
1022
+
1023
+ _EXPORT_STD template <class _Ty>
1024
+ _NODISCARD _Boolarray operator==(const valarray<_Ty>& _Left, const typename valarray<_Ty>::value_type& _Right) {
1025
+ const size_t _Size = _Left.size();
1026
+ valarray<bool> _Ans(_Size);
1027
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1028
+ _Ans[_Idx] = _Left[_Idx] == _Right;
1029
+ }
1030
+ return _Ans;
1031
+ }
1032
+
1033
+ _EXPORT_STD template <class _Ty>
1034
+ _NODISCARD _Boolarray operator==(const typename valarray<_Ty>::value_type& _Left, const valarray<_Ty>& _Right) {
1035
+ const size_t _Size = _Right.size();
1036
+ valarray<bool> _Ans(_Size);
1037
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1038
+ _Ans[_Idx] = _Left == _Right[_Idx];
1039
+ }
1040
+ return _Ans;
1041
+ }
1042
+
1043
+ _EXPORT_STD template <class _Ty>
1044
+ _NODISCARD _Boolarray operator==(const valarray<_Ty>& _Left, const valarray<_Ty>& _Right) {
1045
+ #if _CONTAINER_DEBUG_LEVEL > 0
1046
+ _STL_VERIFY(_Left.size() == _Right.size(), "valarrays of different lengths");
1047
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
1048
+ const size_t _Size = _Left.size();
1049
+ valarray<bool> _Ans(_Size);
1050
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1051
+ _Ans[_Idx] = _Left[_Idx] == _Right[_Idx];
1052
+ }
1053
+ return _Ans;
1054
+ }
1055
+
1056
+ _EXPORT_STD template <class _Ty>
1057
+ _NODISCARD _Boolarray operator!=(const valarray<_Ty>& _Left, const typename valarray<_Ty>::value_type& _Right) {
1058
+ const size_t _Size = _Left.size();
1059
+ valarray<bool> _Ans(_Size);
1060
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1061
+ _Ans[_Idx] = _Left[_Idx] != _Right;
1062
+ }
1063
+ return _Ans;
1064
+ }
1065
+
1066
+ _EXPORT_STD template <class _Ty>
1067
+ _NODISCARD _Boolarray operator!=(const typename valarray<_Ty>::value_type& _Left, const valarray<_Ty>& _Right) {
1068
+ const size_t _Size = _Right.size();
1069
+ valarray<bool> _Ans(_Size);
1070
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1071
+ _Ans[_Idx] = _Left != _Right[_Idx];
1072
+ }
1073
+ return _Ans;
1074
+ }
1075
+
1076
+ _EXPORT_STD template <class _Ty>
1077
+ _NODISCARD _Boolarray operator!=(const valarray<_Ty>& _Left, const valarray<_Ty>& _Right) {
1078
+ #if _CONTAINER_DEBUG_LEVEL > 0
1079
+ _STL_VERIFY(_Left.size() == _Right.size(), "valarrays of different lengths");
1080
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
1081
+ const size_t _Size = _Left.size();
1082
+ valarray<bool> _Ans(_Size);
1083
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1084
+ _Ans[_Idx] = _Left[_Idx] != _Right[_Idx];
1085
+ }
1086
+ return _Ans;
1087
+ }
1088
+
1089
+ _EXPORT_STD template <class _Ty>
1090
+ _NODISCARD _Boolarray operator<(const valarray<_Ty>& _Left, const typename valarray<_Ty>::value_type& _Right) {
1091
+ const size_t _Size = _Left.size();
1092
+ valarray<bool> _Ans(_Size);
1093
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1094
+ _Ans[_Idx] = _Left[_Idx] < _Right;
1095
+ }
1096
+ return _Ans;
1097
+ }
1098
+
1099
+ _EXPORT_STD template <class _Ty>
1100
+ _NODISCARD _Boolarray operator<(const typename valarray<_Ty>::value_type& _Left, const valarray<_Ty>& _Right) {
1101
+ const size_t _Size = _Right.size();
1102
+ valarray<bool> _Ans(_Size);
1103
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1104
+ _Ans[_Idx] = _Left < _Right[_Idx];
1105
+ }
1106
+ return _Ans;
1107
+ }
1108
+
1109
+ _EXPORT_STD template <class _Ty>
1110
+ _NODISCARD _Boolarray operator<(const valarray<_Ty>& _Left, const valarray<_Ty>& _Right) {
1111
+ #if _CONTAINER_DEBUG_LEVEL > 0
1112
+ _STL_VERIFY(_Left.size() == _Right.size(), "valarrays of different lengths");
1113
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
1114
+ const size_t _Size = _Left.size();
1115
+ valarray<bool> _Ans(_Size);
1116
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1117
+ _Ans[_Idx] = _Left[_Idx] < _Right[_Idx];
1118
+ }
1119
+ return _Ans;
1120
+ }
1121
+
1122
+ _EXPORT_STD template <class _Ty>
1123
+ _NODISCARD _Boolarray operator>(const valarray<_Ty>& _Left, const typename valarray<_Ty>::value_type& _Right) {
1124
+ const size_t _Size = _Left.size();
1125
+ valarray<bool> _Ans(_Size);
1126
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1127
+ _Ans[_Idx] = _Left[_Idx] > _Right;
1128
+ }
1129
+ return _Ans;
1130
+ }
1131
+
1132
+ _EXPORT_STD template <class _Ty>
1133
+ _NODISCARD _Boolarray operator>(const typename valarray<_Ty>::value_type& _Left, const valarray<_Ty>& _Right) {
1134
+ const size_t _Size = _Right.size();
1135
+ valarray<bool> _Ans(_Size);
1136
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1137
+ _Ans[_Idx] = _Left > _Right[_Idx];
1138
+ }
1139
+ return _Ans;
1140
+ }
1141
+
1142
+ _EXPORT_STD template <class _Ty>
1143
+ _NODISCARD _Boolarray operator>(const valarray<_Ty>& _Left, const valarray<_Ty>& _Right) {
1144
+ #if _CONTAINER_DEBUG_LEVEL > 0
1145
+ _STL_VERIFY(_Left.size() == _Right.size(), "valarrays of different lengths");
1146
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
1147
+ const size_t _Size = _Left.size();
1148
+ valarray<bool> _Ans(_Size);
1149
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1150
+ _Ans[_Idx] = _Left[_Idx] > _Right[_Idx];
1151
+ }
1152
+ return _Ans;
1153
+ }
1154
+
1155
+ _EXPORT_STD template <class _Ty>
1156
+ _NODISCARD _Boolarray operator<=(const valarray<_Ty>& _Left, const typename valarray<_Ty>::value_type& _Right) {
1157
+ const size_t _Size = _Left.size();
1158
+ valarray<bool> _Ans(_Size);
1159
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1160
+ _Ans[_Idx] = _Left[_Idx] <= _Right;
1161
+ }
1162
+ return _Ans;
1163
+ }
1164
+
1165
+ _EXPORT_STD template <class _Ty>
1166
+ _NODISCARD _Boolarray operator<=(const typename valarray<_Ty>::value_type& _Left, const valarray<_Ty>& _Right) {
1167
+ const size_t _Size = _Right.size();
1168
+ valarray<bool> _Ans(_Size);
1169
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1170
+ _Ans[_Idx] = _Left <= _Right[_Idx];
1171
+ }
1172
+ return _Ans;
1173
+ }
1174
+
1175
+ _EXPORT_STD template <class _Ty>
1176
+ _NODISCARD _Boolarray operator<=(const valarray<_Ty>& _Left, const valarray<_Ty>& _Right) {
1177
+ #if _CONTAINER_DEBUG_LEVEL > 0
1178
+ _STL_VERIFY(_Left.size() == _Right.size(), "valarrays of different lengths");
1179
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
1180
+ const size_t _Size = _Left.size();
1181
+ valarray<bool> _Ans(_Size);
1182
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1183
+ _Ans[_Idx] = _Left[_Idx] <= _Right[_Idx];
1184
+ }
1185
+ return _Ans;
1186
+ }
1187
+
1188
+ _EXPORT_STD template <class _Ty>
1189
+ _NODISCARD _Boolarray operator>=(const valarray<_Ty>& _Left, const typename valarray<_Ty>::value_type& _Right) {
1190
+ const size_t _Size = _Left.size();
1191
+ valarray<bool> _Ans(_Size);
1192
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1193
+ _Ans[_Idx] = _Left[_Idx] >= _Right;
1194
+ }
1195
+ return _Ans;
1196
+ }
1197
+
1198
+ _EXPORT_STD template <class _Ty>
1199
+ _NODISCARD _Boolarray operator>=(const typename valarray<_Ty>::value_type& _Left, const valarray<_Ty>& _Right) {
1200
+ const size_t _Size = _Right.size();
1201
+ valarray<bool> _Ans(_Size);
1202
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1203
+ _Ans[_Idx] = _Left >= _Right[_Idx];
1204
+ }
1205
+ return _Ans;
1206
+ }
1207
+
1208
+ _EXPORT_STD template <class _Ty>
1209
+ _NODISCARD _Boolarray operator>=(const valarray<_Ty>& _Left, const valarray<_Ty>& _Right) {
1210
+ #if _CONTAINER_DEBUG_LEVEL > 0
1211
+ _STL_VERIFY(_Left.size() == _Right.size(), "valarrays of different lengths");
1212
+ #endif // _CONTAINER_DEBUG_LEVEL > 0
1213
+ const size_t _Size = _Left.size();
1214
+ valarray<bool> _Ans(_Size);
1215
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1216
+ _Ans[_Idx] = _Left[_Idx] >= _Right[_Idx];
1217
+ }
1218
+ return _Ans;
1219
+ }
1220
+
1221
+ // [valarray.transcend] Transcendentals
1222
+ _EXPORT_STD template <class _Ty>
1223
+ _NODISCARD valarray<_Ty> abs(const valarray<_Ty>& _Left) {
1224
+ const size_t _Size = _Left.size();
1225
+ valarray<_Ty> _Ans(_Size);
1226
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1227
+ _Ans[_Idx] = abs(_Left[_Idx]); // intentional ADL, N4950 [valarray.transcend]/1
1228
+ }
1229
+ return _Ans;
1230
+ }
1231
+
1232
+ _EXPORT_STD template <class _Ty>
1233
+ _NODISCARD valarray<_Ty> acos(const valarray<_Ty>& _Left) {
1234
+ const size_t _Size = _Left.size();
1235
+ valarray<_Ty> _Ans(_Size);
1236
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1237
+ _Ans[_Idx] = acos(_Left[_Idx]); // intentional ADL, N4950 [valarray.transcend]/1
1238
+ }
1239
+ return _Ans;
1240
+ }
1241
+
1242
+ _EXPORT_STD template <class _Ty>
1243
+ _NODISCARD valarray<_Ty> asin(const valarray<_Ty>& _Left) {
1244
+ const size_t _Size = _Left.size();
1245
+ valarray<_Ty> _Ans(_Size);
1246
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1247
+ _Ans[_Idx] = asin(_Left[_Idx]); // intentional ADL, N4950 [valarray.transcend]/1
1248
+ }
1249
+ return _Ans;
1250
+ }
1251
+
1252
+ _EXPORT_STD template <class _Ty>
1253
+ _NODISCARD valarray<_Ty> atan(const valarray<_Ty>& _Left) {
1254
+ const size_t _Size = _Left.size();
1255
+ valarray<_Ty> _Ans(_Size);
1256
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1257
+ _Ans[_Idx] = atan(_Left[_Idx]); // intentional ADL, N4950 [valarray.transcend]/1
1258
+ }
1259
+ return _Ans;
1260
+ }
1261
+
1262
+ _EXPORT_STD template <class _Ty>
1263
+ _NODISCARD valarray<_Ty> atan2(const valarray<_Ty>& _Left, const valarray<_Ty>& _Right) {
1264
+ const size_t _Size = _Left.size();
1265
+ valarray<_Ty> _Ans(_Size);
1266
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1267
+ _Ans[_Idx] = atan2(_Left[_Idx], _Right[_Idx]); // intentional ADL, N4950 [valarray.transcend]/1
1268
+ }
1269
+ return _Ans;
1270
+ }
1271
+
1272
+ _EXPORT_STD template <class _Ty>
1273
+ _NODISCARD valarray<_Ty> atan2(const valarray<_Ty>& _Left, const typename valarray<_Ty>::value_type& _Right) {
1274
+ const size_t _Size = _Left.size();
1275
+ valarray<_Ty> _Ans(_Size);
1276
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1277
+ _Ans[_Idx] = atan2(_Left[_Idx], _Right); // intentional ADL, N4950 [valarray.transcend]/1
1278
+ }
1279
+ return _Ans;
1280
+ }
1281
+
1282
+ _EXPORT_STD template <class _Ty>
1283
+ _NODISCARD valarray<_Ty> atan2(const typename valarray<_Ty>::value_type& _Left, const valarray<_Ty>& _Right) {
1284
+ const size_t _Size = _Right.size();
1285
+ valarray<_Ty> _Ans(_Size);
1286
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1287
+ _Ans[_Idx] = atan2(_Left, _Right[_Idx]); // intentional ADL, N4950 [valarray.transcend]/1
1288
+ }
1289
+ return _Ans;
1290
+ }
1291
+
1292
+ _EXPORT_STD template <class _Ty>
1293
+ _NODISCARD valarray<_Ty> cos(const valarray<_Ty>& _Left) {
1294
+ const size_t _Size = _Left.size();
1295
+ valarray<_Ty> _Ans(_Size);
1296
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1297
+ _Ans[_Idx] = cos(_Left[_Idx]); // intentional ADL, N4950 [valarray.transcend]/1
1298
+ }
1299
+ return _Ans;
1300
+ }
1301
+
1302
+ _EXPORT_STD template <class _Ty>
1303
+ _NODISCARD valarray<_Ty> cosh(const valarray<_Ty>& _Left) {
1304
+ const size_t _Size = _Left.size();
1305
+ valarray<_Ty> _Ans(_Size);
1306
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1307
+ _Ans[_Idx] = cosh(_Left[_Idx]); // intentional ADL, N4950 [valarray.transcend]/1
1308
+ }
1309
+ return _Ans;
1310
+ }
1311
+
1312
+ _EXPORT_STD template <class _Ty>
1313
+ _NODISCARD valarray<_Ty> exp(const valarray<_Ty>& _Left) {
1314
+ const size_t _Size = _Left.size();
1315
+ valarray<_Ty> _Ans(_Size);
1316
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1317
+ _Ans[_Idx] = exp(_Left[_Idx]); // intentional ADL, N4950 [valarray.transcend]/1
1318
+ }
1319
+ return _Ans;
1320
+ }
1321
+
1322
+ _EXPORT_STD template <class _Ty>
1323
+ _NODISCARD valarray<_Ty> log(const valarray<_Ty>& _Left) {
1324
+ const size_t _Size = _Left.size();
1325
+ valarray<_Ty> _Ans(_Size);
1326
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1327
+ _Ans[_Idx] = log(_Left[_Idx]); // intentional ADL, N4950 [valarray.transcend]/1
1328
+ }
1329
+ return _Ans;
1330
+ }
1331
+
1332
+ _EXPORT_STD template <class _Ty>
1333
+ _NODISCARD valarray<_Ty> log10(const valarray<_Ty>& _Left) {
1334
+ const size_t _Size = _Left.size();
1335
+ valarray<_Ty> _Ans(_Size);
1336
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1337
+ _Ans[_Idx] = log10(_Left[_Idx]); // intentional ADL, N4950 [valarray.transcend]/1
1338
+ }
1339
+ return _Ans;
1340
+ }
1341
+
1342
+ _EXPORT_STD template <class _Ty>
1343
+ _NODISCARD valarray<_Ty> pow(const valarray<_Ty>& _Left, const valarray<_Ty>& _Right) {
1344
+ const size_t _Size = _Left.size();
1345
+ valarray<_Ty> _Ans(_Size);
1346
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1347
+ _Ans[_Idx] = pow(_Left[_Idx], _Right[_Idx]); // intentional ADL, N4950 [valarray.transcend]/1
1348
+ }
1349
+ return _Ans;
1350
+ }
1351
+
1352
+ _EXPORT_STD template <class _Ty>
1353
+ _NODISCARD valarray<_Ty> pow(const valarray<_Ty>& _Left, const typename valarray<_Ty>::value_type& _Right) {
1354
+ const size_t _Size = _Left.size();
1355
+ valarray<_Ty> _Ans(_Size);
1356
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1357
+ _Ans[_Idx] = pow(_Left[_Idx], _Right); // intentional ADL, N4950 [valarray.transcend]/1
1358
+ }
1359
+ return _Ans;
1360
+ }
1361
+
1362
+ _EXPORT_STD template <class _Ty>
1363
+ _NODISCARD valarray<_Ty> pow(const typename valarray<_Ty>::value_type& _Left, const valarray<_Ty>& _Right) {
1364
+ const size_t _Size = _Right.size();
1365
+ valarray<_Ty> _Ans(_Size);
1366
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1367
+ _Ans[_Idx] = pow(_Left, _Right[_Idx]); // intentional ADL, N4950 [valarray.transcend]/1
1368
+ }
1369
+ return _Ans;
1370
+ }
1371
+
1372
+ _EXPORT_STD template <class _Ty>
1373
+ _NODISCARD valarray<_Ty> sin(const valarray<_Ty>& _Left) {
1374
+ const size_t _Size = _Left.size();
1375
+ valarray<_Ty> _Ans(_Size);
1376
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1377
+ _Ans[_Idx] = sin(_Left[_Idx]); // intentional ADL, N4950 [valarray.transcend]/1
1378
+ }
1379
+ return _Ans;
1380
+ }
1381
+
1382
+ _EXPORT_STD template <class _Ty>
1383
+ _NODISCARD valarray<_Ty> sinh(const valarray<_Ty>& _Left) {
1384
+ const size_t _Size = _Left.size();
1385
+ valarray<_Ty> _Ans(_Size);
1386
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1387
+ _Ans[_Idx] = sinh(_Left[_Idx]); // intentional ADL, N4950 [valarray.transcend]/1
1388
+ }
1389
+ return _Ans;
1390
+ }
1391
+
1392
+ _EXPORT_STD template <class _Ty>
1393
+ _NODISCARD valarray<_Ty> sqrt(const valarray<_Ty>& _Left) {
1394
+ const size_t _Size = _Left.size();
1395
+ valarray<_Ty> _Ans(_Size);
1396
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1397
+ _Ans[_Idx] = sqrt(_Left[_Idx]); // intentional ADL, N4950 [valarray.transcend]/1
1398
+ }
1399
+ return _Ans;
1400
+ }
1401
+
1402
+ _EXPORT_STD template <class _Ty>
1403
+ _NODISCARD valarray<_Ty> tan(const valarray<_Ty>& _Left) {
1404
+ const size_t _Size = _Left.size();
1405
+ valarray<_Ty> _Ans(_Size);
1406
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1407
+ _Ans[_Idx] = tan(_Left[_Idx]); // intentional ADL, N4950 [valarray.transcend]/1
1408
+ }
1409
+ return _Ans;
1410
+ }
1411
+
1412
+ _EXPORT_STD template <class _Ty>
1413
+ _NODISCARD valarray<_Ty> tanh(const valarray<_Ty>& _Left) {
1414
+ const size_t _Size = _Left.size();
1415
+ valarray<_Ty> _Ans(_Size);
1416
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1417
+ _Ans[_Idx] = tanh(_Left[_Idx]); // intentional ADL, N4950 [valarray.transcend]/1
1418
+ }
1419
+ return _Ans;
1420
+ }
1421
+
1422
+ _EXPORT_STD class slice { // define a slice of a valarray
1423
+ public:
1424
+ slice() = default;
1425
+
1426
+ slice(size_t _Off, size_t _Count, size_t _Inc) noexcept // strengthened
1427
+ : _Start(_Off), _Len(_Count), _Stride(_Inc) {}
1428
+
1429
+ _NODISCARD size_t start() const noexcept /* strengthened */ {
1430
+ return _Start;
1431
+ }
1432
+
1433
+ _NODISCARD size_t size() const noexcept /* strengthened */ {
1434
+ return _Len;
1435
+ }
1436
+
1437
+ _NODISCARD size_t stride() const noexcept /* strengthened */ {
1438
+ return _Stride;
1439
+ }
1440
+
1441
+ #if _HAS_CXX20
1442
+ _NODISCARD friend bool operator==(const slice& _Left, const slice& _Right) noexcept /* strengthened */ {
1443
+ return _Left.start() == _Right.start() && _Left.size() == _Right.size() && _Left.stride() == _Right.stride();
1444
+ }
1445
+ #endif // _HAS_CXX20
1446
+
1447
+ protected:
1448
+ size_t _Start = 0; // the starting offset
1449
+ size_t _Len = 0; // the number of elements
1450
+ size_t _Stride = 0; // the distance between elements
1451
+ };
1452
+
1453
+ _EXPORT_STD template <class _Ty>
1454
+ class slice_array : public slice { // define a slice of a valarray
1455
+ public:
1456
+ using value_type = _Ty;
1457
+
1458
+ void operator=(const valarray<_Ty>& _Right) const {
1459
+ size_t _Off = _Start;
1460
+ for (size_t _Idx = 0; _Idx < _Len; ++_Idx, _Off += _Stride) {
1461
+ _Myptr[_Off] = _Right[_Idx];
1462
+ }
1463
+ }
1464
+
1465
+ void operator=(const _Ty& _Right) const {
1466
+ size_t _Off = _Start;
1467
+ for (size_t _Idx = 0; _Idx < _Len; ++_Idx, _Off += _Stride) {
1468
+ _Myptr[_Off] = _Right;
1469
+ }
1470
+ }
1471
+
1472
+ void operator*=(const valarray<_Ty>& _Right) const {
1473
+ size_t _Off = _Start;
1474
+ for (size_t _Idx = 0; _Idx < _Len; ++_Idx, _Off += _Stride) {
1475
+ _Myptr[_Off] *= _Right[_Idx];
1476
+ }
1477
+ }
1478
+
1479
+ void operator/=(const valarray<_Ty>& _Right) const {
1480
+ size_t _Off = _Start;
1481
+ for (size_t _Idx = 0; _Idx < _Len; ++_Idx, _Off += _Stride) {
1482
+ _Myptr[_Off] /= _Right[_Idx];
1483
+ }
1484
+ }
1485
+
1486
+ void operator%=(const valarray<_Ty>& _Right) const {
1487
+ size_t _Off = _Start;
1488
+ for (size_t _Idx = 0; _Idx < _Len; ++_Idx, _Off += _Stride) {
1489
+ _Myptr[_Off] %= _Right[_Idx];
1490
+ }
1491
+ }
1492
+
1493
+ void operator+=(const valarray<_Ty>& _Right) const {
1494
+ size_t _Off = _Start;
1495
+ for (size_t _Idx = 0; _Idx < _Len; ++_Idx, _Off += _Stride) {
1496
+ _Myptr[_Off] += _Right[_Idx];
1497
+ }
1498
+ }
1499
+
1500
+ void operator-=(const valarray<_Ty>& _Right) const {
1501
+ size_t _Off = _Start;
1502
+ for (size_t _Idx = 0; _Idx < _Len; ++_Idx, _Off += _Stride) {
1503
+ _Myptr[_Off] -= _Right[_Idx];
1504
+ }
1505
+ }
1506
+
1507
+ void operator^=(const valarray<_Ty>& _Right) const {
1508
+ size_t _Off = _Start;
1509
+ for (size_t _Idx = 0; _Idx < _Len; ++_Idx, _Off += _Stride) {
1510
+ _Myptr[_Off] ^= _Right[_Idx];
1511
+ }
1512
+ }
1513
+
1514
+ void operator&=(const valarray<_Ty>& _Right) const {
1515
+ size_t _Off = _Start;
1516
+ for (size_t _Idx = 0; _Idx < _Len; ++_Idx, _Off += _Stride) {
1517
+ _Myptr[_Off] &= _Right[_Idx];
1518
+ }
1519
+ }
1520
+
1521
+ void operator|=(const valarray<_Ty>& _Right) const {
1522
+ size_t _Off = _Start;
1523
+ for (size_t _Idx = 0; _Idx < _Len; ++_Idx, _Off += _Stride) {
1524
+ _Myptr[_Off] |= _Right[_Idx];
1525
+ }
1526
+ }
1527
+
1528
+ void operator<<=(const valarray<_Ty>& _Right) const {
1529
+ size_t _Off = _Start;
1530
+ for (size_t _Idx = 0; _Idx < _Len; ++_Idx, _Off += _Stride) {
1531
+ _Myptr[_Off] <<= _Right[_Idx];
1532
+ }
1533
+ }
1534
+
1535
+ void operator>>=(const valarray<_Ty>& _Right) const {
1536
+ size_t _Off = _Start;
1537
+ for (size_t _Idx = 0; _Idx < _Len; ++_Idx, _Off += _Stride) {
1538
+ _Myptr[_Off] >>= _Right[_Idx];
1539
+ }
1540
+ }
1541
+
1542
+ _Ty& _Data(size_t _Idx) const noexcept {
1543
+ return _Myptr[_Idx];
1544
+ }
1545
+
1546
+ slice_array() = delete;
1547
+
1548
+ slice_array(const slice_array&) = default;
1549
+
1550
+ const slice_array& operator=(const slice_array& _Right) const noexcept /* strengthened */ {
1551
+ size_t _Dst_off = _Start;
1552
+ size_t _Src_off = _Right._Start;
1553
+ for (size_t _Idx = 0; _Idx < _Len; ++_Idx, _Dst_off += _Stride, _Src_off += _Right._Stride) {
1554
+ _Myptr[_Dst_off] = _Right._Myptr[_Src_off];
1555
+ }
1556
+ return *this;
1557
+ }
1558
+
1559
+ private:
1560
+ friend valarray<_Ty>;
1561
+
1562
+ slice_array(const slice& _Slice, _Ty* _Pdata) noexcept : slice(_Slice), _Myptr(_Pdata) {}
1563
+
1564
+ _Ty* _Myptr; // pointer to valarray contents
1565
+ };
1566
+
1567
+ _EXPORT_STD class gslice { // define a generalized (multidimensional) slice of a valarray
1568
+ public:
1569
+ gslice() = default; // construct with all zeros
1570
+
1571
+ gslice(size_t _Off, const _Sizarray& _Lenarr, const _Sizarray& _Incarr)
1572
+ : _Start(_Off), _Len(_Lenarr), _Stride(_Incarr) {}
1573
+
1574
+ _NODISCARD size_t start() const noexcept /* strengthened */ {
1575
+ return _Start;
1576
+ }
1577
+
1578
+ _NODISCARD _Sizarray size() const {
1579
+ return _Len;
1580
+ }
1581
+
1582
+ _NODISCARD _Sizarray stride() const {
1583
+ return _Stride;
1584
+ }
1585
+
1586
+ size_t _Nslice() const noexcept {
1587
+ return _Len.size();
1588
+ }
1589
+
1590
+ size_t _Off(_Sizarray& _Indexarr) const noexcept {
1591
+ size_t _Idx;
1592
+ size_t _Ans = _Start;
1593
+ const size_t _Size = _Indexarr.size();
1594
+ for (_Idx = 0; _Idx < _Size; ++_Idx) {
1595
+ _Ans += _Indexarr[_Idx] * _Stride[_Idx]; // compute offset
1596
+ }
1597
+
1598
+ while (0 < _Idx) {
1599
+ --_Idx;
1600
+ if (++_Indexarr[_Idx] < _Len[_Idx]) {
1601
+ break; // increment done, quit
1602
+ } else {
1603
+ _Indexarr[_Idx] = 0; // carry to more-significant index
1604
+ }
1605
+ }
1606
+
1607
+ return _Ans;
1608
+ }
1609
+
1610
+ _NODISCARD size_t _Totlen() const noexcept {
1611
+ const size_t _Size = _Len.size();
1612
+ if (_Size == 0) {
1613
+ return 0;
1614
+ }
1615
+
1616
+ size_t _Count = _Len[0];
1617
+ for (size_t _Idx = 1; _Idx < _Size; ++_Idx) {
1618
+ _Count *= _Len[_Idx];
1619
+ }
1620
+
1621
+ return _Count;
1622
+ }
1623
+
1624
+ private:
1625
+ size_t _Start = 0; // the starting offset
1626
+ _Sizarray _Len; // array of numbers of elements
1627
+ _Sizarray _Stride; // array of distances between elements
1628
+ };
1629
+
1630
+ _EXPORT_STD template <class _Ty>
1631
+ class gslice_array : public gslice { // define a generalized slice of a valarray
1632
+ public:
1633
+ using value_type = _Ty;
1634
+
1635
+ void operator=(const valarray<_Ty>& _Right) const {
1636
+ _Sizarray _Indexarray(size_t{0}, _Nslice());
1637
+ const size_t _Size = _Totlen();
1638
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1639
+ _Myptr[_Off(_Indexarray)] = _Right[_Idx];
1640
+ }
1641
+ }
1642
+
1643
+ void operator=(const _Ty& _Right) const {
1644
+ _Sizarray _Indexarray(size_t{0}, _Nslice());
1645
+ const size_t _Size = _Totlen();
1646
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1647
+ _Myptr[_Off(_Indexarray)] = _Right;
1648
+ }
1649
+ }
1650
+
1651
+ void operator*=(const valarray<_Ty>& _Right) const { // multiply generalized slice by valarray
1652
+ _Sizarray _Indexarray(size_t{0}, _Nslice());
1653
+ const size_t _Size = _Totlen();
1654
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1655
+ _Myptr[_Off(_Indexarray)] *= _Right[_Idx];
1656
+ }
1657
+ }
1658
+
1659
+ void operator/=(const valarray<_Ty>& _Right) const { // divide generalized slice by valarray
1660
+ _Sizarray _Indexarray(size_t{0}, _Nslice());
1661
+ const size_t _Size = _Totlen();
1662
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1663
+ _Myptr[_Off(_Indexarray)] /= _Right[_Idx];
1664
+ }
1665
+ }
1666
+
1667
+ void operator%=(const valarray<_Ty>& _Right) const { // remainder generalized slice by valarray
1668
+ _Sizarray _Indexarray(size_t{0}, _Nslice());
1669
+ const size_t _Size = _Totlen();
1670
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1671
+ _Myptr[_Off(_Indexarray)] %= _Right[_Idx];
1672
+ }
1673
+ }
1674
+
1675
+ void operator+=(const valarray<_Ty>& _Right) const { // add valarray to generalized slice
1676
+ _Sizarray _Indexarray(size_t{0}, _Nslice());
1677
+ const size_t _Size = _Totlen();
1678
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1679
+ _Myptr[_Off(_Indexarray)] += _Right[_Idx];
1680
+ }
1681
+ }
1682
+
1683
+ void operator-=(const valarray<_Ty>& _Right) const { // subtract valarray from generalized slice
1684
+ _Sizarray _Indexarray(size_t{0}, _Nslice());
1685
+ const size_t _Size = _Totlen();
1686
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1687
+ _Myptr[_Off(_Indexarray)] -= _Right[_Idx];
1688
+ }
1689
+ }
1690
+
1691
+ void operator^=(const valarray<_Ty>& _Right) const { // XOR valarray into generalized slice
1692
+ _Sizarray _Indexarray(size_t{0}, _Nslice());
1693
+ const size_t _Size = _Totlen();
1694
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1695
+ _Myptr[_Off(_Indexarray)] ^= _Right[_Idx];
1696
+ }
1697
+ }
1698
+
1699
+ void operator&=(const valarray<_Ty>& _Right) const { // AND valarray into generalized slice
1700
+ _Sizarray _Indexarray(size_t{0}, _Nslice());
1701
+ const size_t _Size = _Totlen();
1702
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1703
+ _Myptr[_Off(_Indexarray)] &= _Right[_Idx];
1704
+ }
1705
+ }
1706
+
1707
+ void operator|=(const valarray<_Ty>& _Right) const { // OR valarray into generalized slice
1708
+ _Sizarray _Indexarray(size_t{0}, _Nslice());
1709
+ const size_t _Size = _Totlen();
1710
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1711
+ _Myptr[_Off(_Indexarray)] |= _Right[_Idx];
1712
+ }
1713
+ }
1714
+
1715
+ void operator<<=(const valarray<_Ty>& _Right) const { // left shift generalized slice by valarray
1716
+ _Sizarray _Indexarray(size_t{0}, _Nslice());
1717
+ const size_t _Size = _Totlen();
1718
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1719
+ _Myptr[_Off(_Indexarray)] <<= _Right[_Idx];
1720
+ }
1721
+ }
1722
+
1723
+ void operator>>=(const valarray<_Ty>& _Right) const { // right shift generalized slice by valarray
1724
+ _Sizarray _Indexarray(size_t{0}, _Nslice());
1725
+ const size_t _Size = _Totlen();
1726
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1727
+ _Myptr[_Off(_Indexarray)] >>= _Right[_Idx];
1728
+ }
1729
+ }
1730
+
1731
+ _NODISCARD _Ty& _Data(size_t _Idx) const noexcept {
1732
+ return _Myptr[_Idx];
1733
+ }
1734
+
1735
+ gslice_array() = delete;
1736
+
1737
+ gslice_array(const gslice_array&) = default;
1738
+
1739
+ const gslice_array& operator=(const gslice_array& _Right) const {
1740
+ _Sizarray _Dst_indexarray(size_t{0}, _Nslice());
1741
+ _Sizarray _Src_indexarray(size_t{0}, _Right._Nslice());
1742
+ const size_t _Size = _Totlen();
1743
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1744
+ _Myptr[_Off(_Dst_indexarray)] = _Right._Myptr[_Right._Off(_Src_indexarray)];
1745
+ }
1746
+ return *this;
1747
+ }
1748
+
1749
+ private:
1750
+ friend valarray<_Ty>;
1751
+
1752
+ gslice_array(const gslice& _Gslice, _Ty* _Ptr) : gslice(_Gslice), _Myptr(_Ptr) {}
1753
+
1754
+ _Ty* _Myptr; // pointer to valarray contents
1755
+ };
1756
+
1757
+ _EXPORT_STD template <class _Ty>
1758
+ class mask_array { // define a subset of a valarray with an array of mask bits
1759
+ public:
1760
+ using value_type = _Ty;
1761
+
1762
+ void operator=(const valarray<_Ty>& _Right) const {
1763
+ const size_t _Size = _Mybool.size();
1764
+ for (size_t _Idx = 0, _Off = _Start_off(); _Off < _Size; _Off = _Next_off(_Off), ++_Idx) {
1765
+ _Myptr[_Off] = _Right[_Idx];
1766
+ }
1767
+ }
1768
+
1769
+ void operator=(const _Ty& _Right) const {
1770
+ const size_t _Size = _Mybool.size();
1771
+ for (size_t _Off = _Start_off(); _Off < _Size; _Off = _Next_off(_Off)) {
1772
+ _Myptr[_Off] = _Right;
1773
+ }
1774
+ }
1775
+
1776
+ void operator*=(const valarray<_Ty>& _Right) const { // multiply masked array by valarray
1777
+ const size_t _Size = _Mybool.size();
1778
+ for (size_t _Idx = 0, _Off = _Start_off(); _Off < _Size; _Off = _Next_off(_Off), ++_Idx) {
1779
+ _Myptr[_Off] *= _Right[_Idx];
1780
+ }
1781
+ }
1782
+
1783
+ void operator/=(const valarray<_Ty>& _Right) const { // divide masked array by valarray
1784
+ const size_t _Size = _Mybool.size();
1785
+ for (size_t _Idx = 0, _Off = _Start_off(); _Off < _Size; _Off = _Next_off(_Off), ++_Idx) {
1786
+ _Myptr[_Off] /= _Right[_Idx];
1787
+ }
1788
+ }
1789
+
1790
+ void operator%=(const valarray<_Ty>& _Right) const { // remainder masked array by valarray
1791
+ const size_t _Size = _Mybool.size();
1792
+ for (size_t _Idx = 0, _Off = _Start_off(); _Off < _Size; _Off = _Next_off(_Off), ++_Idx) {
1793
+ _Myptr[_Off] %= _Right[_Idx];
1794
+ }
1795
+ }
1796
+
1797
+ void operator+=(const valarray<_Ty>& _Right) const { // add valarray to masked array
1798
+ const size_t _Size = _Mybool.size();
1799
+ for (size_t _Idx = 0, _Off = _Start_off(); _Off < _Size; _Off = _Next_off(_Off), ++_Idx) {
1800
+ _Myptr[_Off] += _Right[_Idx];
1801
+ }
1802
+ }
1803
+
1804
+ void operator-=(const valarray<_Ty>& _Right) const { // subtract valarray from masked array
1805
+ const size_t _Size = _Mybool.size();
1806
+ for (size_t _Idx = 0, _Off = _Start_off(); _Off < _Size; _Off = _Next_off(_Off), ++_Idx) {
1807
+ _Myptr[_Off] -= _Right[_Idx];
1808
+ }
1809
+ }
1810
+
1811
+ void operator^=(const valarray<_Ty>& _Right) const { // XOR valarray into masked array
1812
+ const size_t _Size = _Mybool.size();
1813
+ for (size_t _Idx = 0, _Off = _Start_off(); _Off < _Size; _Off = _Next_off(_Off), ++_Idx) {
1814
+ _Myptr[_Off] ^= _Right[_Idx];
1815
+ }
1816
+ }
1817
+
1818
+ void operator&=(const valarray<_Ty>& _Right) const { // OR valarray into masked array
1819
+ const size_t _Size = _Mybool.size();
1820
+ for (size_t _Idx = 0, _Off = _Start_off(); _Off < _Size; _Off = _Next_off(_Off), ++_Idx) {
1821
+ _Myptr[_Off] &= _Right[_Idx];
1822
+ }
1823
+ }
1824
+
1825
+ void operator|=(const valarray<_Ty>& _Right) const { // OR valarray into masked array
1826
+ const size_t _Size = _Mybool.size();
1827
+ for (size_t _Idx = 0, _Off = _Start_off(); _Off < _Size; _Off = _Next_off(_Off), ++_Idx) {
1828
+ _Myptr[_Off] |= _Right[_Idx];
1829
+ }
1830
+ }
1831
+
1832
+ void operator<<=(const valarray<_Ty>& _Right) const { // left shift masked array by valarray
1833
+ const size_t _Size = _Mybool.size();
1834
+ for (size_t _Idx = 0, _Off = _Start_off(); _Off < _Size; _Off = _Next_off(_Off), ++_Idx) {
1835
+ _Myptr[_Off] <<= _Right[_Idx];
1836
+ }
1837
+ }
1838
+
1839
+ void operator>>=(const valarray<_Ty>& _Right) const { // right shift masked array by valarray
1840
+ const size_t _Size = _Mybool.size();
1841
+ for (size_t _Idx = 0, _Off = _Start_off(); _Off < _Size; _Off = _Next_off(_Off), ++_Idx) {
1842
+ _Myptr[_Off] >>= _Right[_Idx];
1843
+ }
1844
+ }
1845
+
1846
+ _NODISCARD _Ty& _Data(size_t _Idx) const noexcept {
1847
+ return _Myptr[_Idx];
1848
+ }
1849
+
1850
+ _NODISCARD bool _Mask(size_t _Idx) const noexcept {
1851
+ return _Mybool[_Idx];
1852
+ }
1853
+
1854
+ _NODISCARD size_t _Start_off() const noexcept {
1855
+ size_t _Off = 0;
1856
+ const size_t _Size = _Mybool.size();
1857
+ while (_Off < _Size && !_Mybool[_Off]) {
1858
+ ++_Off;
1859
+ }
1860
+ return _Off;
1861
+ }
1862
+
1863
+ _NODISCARD size_t _Next_off(size_t _Off) const noexcept {
1864
+ const size_t _Size = _Mybool.size();
1865
+ do {
1866
+ ++_Off;
1867
+ } while (_Off < _Size && !_Mybool[_Off]);
1868
+ return _Off;
1869
+ }
1870
+
1871
+ _NODISCARD size_t _Totlen() const noexcept {
1872
+ size_t _Count = 0;
1873
+ const size_t _Size = _Mybool.size();
1874
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1875
+ if (_Mybool[_Idx]) {
1876
+ ++_Count;
1877
+ }
1878
+ }
1879
+
1880
+ return _Count;
1881
+ }
1882
+
1883
+ mask_array() = delete;
1884
+
1885
+ mask_array(const mask_array&) = default;
1886
+
1887
+ const mask_array& operator=(const mask_array& _Right) const noexcept /* strengthened */ {
1888
+ const size_t _Size = _Mybool.size();
1889
+ size_t _Dst_off = _Start_off();
1890
+ size_t _Src_off = _Right._Start_off();
1891
+ for (; _Dst_off < _Size; _Src_off = _Right._Next_off(_Src_off), _Dst_off = _Next_off(_Dst_off)) {
1892
+ _Myptr[_Dst_off] = _Right._Myptr[_Src_off];
1893
+ }
1894
+ return *this;
1895
+ }
1896
+
1897
+ private:
1898
+ friend valarray<_Ty>;
1899
+
1900
+ mask_array(const _Boolarray& _Maskarr, _Ty* _Pdata) : _Mybool(_Maskarr), _Myptr(_Pdata) {}
1901
+
1902
+ _Boolarray _Mybool; // array of mask bits
1903
+ _Ty* _Myptr; // pointer to valarray contents
1904
+ };
1905
+
1906
+ _EXPORT_STD template <class _Ty>
1907
+ class indirect_array { // define a subset of a valarray with an array of indexes
1908
+ public:
1909
+ using value_type = _Ty;
1910
+
1911
+ void operator=(const valarray<_Ty>& _Right) const {
1912
+ const size_t _Size = _Totlen();
1913
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1914
+ _Myptr[_Indir(_Idx)] = _Right[_Idx];
1915
+ }
1916
+ }
1917
+
1918
+ void operator=(const _Ty& _Right) const {
1919
+ const size_t _Size = _Totlen();
1920
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1921
+ _Myptr[_Indir(_Idx)] = _Right;
1922
+ }
1923
+ }
1924
+
1925
+ void operator*=(const valarray<_Ty>& _Right) const { // multiply indirect array by valarray
1926
+ const size_t _Size = _Totlen();
1927
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1928
+ _Myptr[_Indir(_Idx)] *= _Right[_Idx];
1929
+ }
1930
+ }
1931
+
1932
+ void operator/=(const valarray<_Ty>& _Right) const { // divide indirect array by valarray
1933
+ const size_t _Size = _Totlen();
1934
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1935
+ _Myptr[_Indir(_Idx)] /= _Right[_Idx];
1936
+ }
1937
+ }
1938
+
1939
+ void operator%=(const valarray<_Ty>& _Right) const { // remainder indirect array by valarray
1940
+ const size_t _Size = _Totlen();
1941
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1942
+ _Myptr[_Indir(_Idx)] %= _Right[_Idx];
1943
+ }
1944
+ }
1945
+
1946
+ void operator+=(const valarray<_Ty>& _Right) const { // add valarray to indirect array
1947
+ const size_t _Size = _Totlen();
1948
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1949
+ _Myptr[_Indir(_Idx)] += _Right[_Idx];
1950
+ }
1951
+ }
1952
+
1953
+ void operator-=(const valarray<_Ty>& _Right) const { // subtract valarray from indirect array
1954
+ const size_t _Size = _Totlen();
1955
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1956
+ _Myptr[_Indir(_Idx)] -= _Right[_Idx];
1957
+ }
1958
+ }
1959
+
1960
+ void operator^=(const valarray<_Ty>& _Right) const { // XOR valarray into indirect array
1961
+ const size_t _Size = _Totlen();
1962
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1963
+ _Myptr[_Indir(_Idx)] ^= _Right[_Idx];
1964
+ }
1965
+ }
1966
+
1967
+ void operator&=(const valarray<_Ty>& _Right) const { // AND valarray into indirect array
1968
+ const size_t _Size = _Totlen();
1969
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1970
+ _Myptr[_Indir(_Idx)] &= _Right[_Idx];
1971
+ }
1972
+ }
1973
+
1974
+ void operator|=(const valarray<_Ty>& _Right) const { // OR valarray into indirect array
1975
+ const size_t _Size = _Totlen();
1976
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1977
+ _Myptr[_Indir(_Idx)] |= _Right[_Idx];
1978
+ }
1979
+ }
1980
+
1981
+ void operator<<=(const valarray<_Ty>& _Right) const { // left shift indirect array by valarray
1982
+ const size_t _Size = _Totlen();
1983
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1984
+ _Myptr[_Indir(_Idx)] <<= _Right[_Idx];
1985
+ }
1986
+ }
1987
+
1988
+ void operator>>=(const valarray<_Ty>& _Right) const { // right shift indirect array by valarray
1989
+ const size_t _Size = _Totlen();
1990
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
1991
+ _Myptr[_Indir(_Idx)] >>= _Right[_Idx];
1992
+ }
1993
+ }
1994
+
1995
+ _NODISCARD _Ty& _Data(size_t _Idx) const noexcept {
1996
+ return _Myptr[_Idx];
1997
+ }
1998
+
1999
+ _NODISCARD size_t _Indir(size_t _Idx) const noexcept {
2000
+ return _Myindarr[_Idx];
2001
+ }
2002
+
2003
+ _NODISCARD size_t _Totlen() const noexcept {
2004
+ return _Myindarr.size();
2005
+ }
2006
+
2007
+ indirect_array() = delete;
2008
+
2009
+ indirect_array(const indirect_array&) = default;
2010
+
2011
+ const indirect_array& operator=(const indirect_array& _Right) const noexcept /* strengthened */ {
2012
+ const size_t _Size = _Totlen();
2013
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
2014
+ _Myptr[_Indir(_Idx)] = _Right._Myptr[_Right._Indir(_Idx)];
2015
+ }
2016
+ return *this;
2017
+ }
2018
+
2019
+ private:
2020
+ friend valarray<_Ty>;
2021
+
2022
+ indirect_array(const _Sizarray& _Indarr, _Ty* _Ptr) : _Myindarr(_Indarr), _Myptr(_Ptr) {}
2023
+
2024
+ _Sizarray _Myindarr; // array of indirect indexes
2025
+ _Ty* _Myptr; // pointer to valarray contents
2026
+ };
2027
+
2028
+ template <class _Ty>
2029
+ valarray<_Ty>& valarray<_Ty>::operator=(const slice_array<_Ty>& _Slicearr) {
2030
+ _Tidy_deallocate();
2031
+ _Grow(_Slicearr.size(), &_Slicearr._Data(_Slicearr.start()), _Slicearr.stride());
2032
+ return *this;
2033
+ }
2034
+
2035
+ template <class _Ty>
2036
+ _NODISCARD valarray<_Ty> valarray<_Ty>::operator[](slice _Slice) const {
2037
+ return valarray<_Ty>(slice_array<_Ty>(_Slice, _Myptr));
2038
+ }
2039
+
2040
+ template <class _Ty>
2041
+ _NODISCARD slice_array<_Ty> valarray<_Ty>::operator[](slice _Slice) noexcept /* strengthened */ {
2042
+ return slice_array<_Ty>(_Slice, _Myptr);
2043
+ }
2044
+
2045
+ template <class _Ty>
2046
+ valarray<_Ty>& valarray<_Ty>::operator=(const gslice_array<_Ty>& _Gslicearr) {
2047
+ _Tidy_deallocate();
2048
+ _Grow(_Gslicearr._Totlen());
2049
+ _Sizarray _Indexarray(size_t{0}, _Gslicearr._Nslice());
2050
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
2051
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
2052
+ _Myptr[_Idx] = _Gslicearr._Data(_Gslicearr._Off(_Indexarray));
2053
+ }
2054
+ return *this;
2055
+ }
2056
+
2057
+ template <class _Ty>
2058
+ _NODISCARD valarray<_Ty> valarray<_Ty>::operator[](const gslice& _Gslice) const {
2059
+ return valarray<_Ty>(gslice_array<_Ty>(_Gslice, _Myptr));
2060
+ }
2061
+
2062
+ template <class _Ty>
2063
+ _NODISCARD gslice_array<_Ty> valarray<_Ty>::operator[](const gslice& _Gslicearr) {
2064
+ return gslice_array<_Ty>(_Gslicearr, _Myptr);
2065
+ }
2066
+
2067
+ template <class _Ty>
2068
+ valarray<_Ty>& valarray<_Ty>::operator=(const mask_array<_Ty>& _Maskarr) {
2069
+ _Tidy_deallocate();
2070
+ _Grow(_Maskarr._Totlen());
2071
+ size_t _Count = 0;
2072
+
2073
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
2074
+ for (size_t _Idx = 0; _Idx < _Size; ++_Count) {
2075
+ if (_Maskarr._Mask(_Count)) {
2076
+ _Myptr[_Idx++] = _Maskarr._Data(_Count);
2077
+ }
2078
+ }
2079
+
2080
+ return *this;
2081
+ }
2082
+
2083
+ template <class _Ty>
2084
+ _NODISCARD valarray<_Ty> valarray<_Ty>::operator[](const _Boolarray& _Boolarr) const {
2085
+ return valarray<_Ty>(mask_array<_Ty>(_Boolarr, _Myptr));
2086
+ }
2087
+
2088
+ template <class _Ty>
2089
+ _NODISCARD mask_array<_Ty> valarray<_Ty>::operator[](const _Boolarray& _Boolarr) {
2090
+ return mask_array<_Ty>(_Boolarr, _Myptr);
2091
+ }
2092
+
2093
+ template <class _Ty>
2094
+ valarray<_Ty>& valarray<_Ty>::operator=(const indirect_array<_Ty>& _Indarr) {
2095
+ _Tidy_deallocate();
2096
+ _Grow(_Indarr._Totlen());
2097
+ const size_t _Size = _Mysize; // eliminating indirection helps vectorization
2098
+ for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
2099
+ _Myptr[_Idx] = _Indarr._Data(_Indarr._Indir(_Idx));
2100
+ }
2101
+ return *this;
2102
+ }
2103
+
2104
+ template <class _Ty>
2105
+ _NODISCARD valarray<_Ty> valarray<_Ty>::operator[](const _Sizarray& _Indarr) const {
2106
+ return valarray<_Ty>(indirect_array<_Ty>(_Indarr, _Myptr));
2107
+ }
2108
+
2109
+ template <class _Ty>
2110
+ _NODISCARD indirect_array<_Ty> valarray<_Ty>::operator[](const _Sizarray& _Indarr) {
2111
+ return indirect_array<_Ty>(_Indarr, _Myptr);
2112
+ }
2113
+ _STD_END
2114
+
2115
+ #pragma pop_macro("new")
2116
+ _STL_RESTORE_CLANG_WARNINGS
2117
+ #pragma warning(pop)
2118
+ #pragma pack(pop)
2119
+ #endif // _STL_COMPILER_PREPROCESSOR
2120
+ #endif // _VALARRAY_
miniMSVC/VC/Tools/MSVC/14.42.34433/include/varargs.h ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //
2
+ // varargs.h
3
+ //
4
+ // Copyright (c) Microsoft Corporation. All rights reserved.
5
+ //
6
+ // XENIX-style macros for accessing arguments of a function which takes a
7
+ // variable number of arguments. Use the C Standard <stdarg.h> instead.
8
+ //
9
+ #pragma once
10
+ #define _INC_VARARGS
11
+
12
+ #include <vcruntime.h>
13
+
14
+ #pragma warning(push)
15
+ #pragma warning(disable: _VCRUNTIME_DISABLED_WARNINGS)
16
+
17
+ _CRT_BEGIN_C_HEADER
18
+
19
+
20
+
21
+ #if __STDC__
22
+ #error varargs.h incompatible with __STDC__ (use stdarg.h)
23
+ #endif
24
+
25
+ #ifndef va_arg
26
+ #if defined _M_CEE
27
+ #error varargs.h not supported when targeting _M_CEE (use stdarg.h)
28
+ #endif
29
+
30
+ #define va_dcl va_list va_alist;
31
+ #define va_arg __crt_va_arg
32
+ #define va_end __crt_va_end
33
+
34
+ #if defined _M_IX86 || defined _M_ARM
35
+ #define va_start(ap) ((void)(ap = (va_list)&va_alist))
36
+ #elif defined _M_X64 && !defined(_M_ARM64EC)
37
+ #define va_start(ap) ((void)(__va_start(&ap, 0)))
38
+ #elif defined _M_ARM64 || defined _M_ARM64EC
39
+ #define va_start(ap) ((void)(__va_start(&ap, 0, 0, 0, 0)))
40
+ #else
41
+ #error Unsupported architecture
42
+ #endif
43
+ #endif // va_arg
44
+
45
+
46
+
47
+ _CRT_END_C_HEADER
48
+
49
+ #pragma warning(pop) // _VCRUNTIME_DISABLED_WARNINGS
miniMSVC/VC/Tools/MSVC/14.42.34433/include/variant ADDED
@@ -0,0 +1,1718 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // variant standard header
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ #ifndef _VARIANT_
7
+ #define _VARIANT_
8
+ #include <yvals.h>
9
+ #if _STL_COMPILER_PREPROCESSOR
10
+
11
+ #if !_HAS_CXX17
12
+ _EMIT_STL_WARNING(STL4038, "The contents of <variant> are available only with C++17 or later.");
13
+ #else // ^^^ !_HAS_CXX17 / _HAS_CXX17 vvv
14
+ #if _HAS_CXX20
15
+ #include <compare>
16
+ #endif // _HAS_CXX20
17
+ #include <exception>
18
+ #include <initializer_list>
19
+ #include <type_traits>
20
+ #include <utility>
21
+ #include <xsmf_control.h>
22
+ #include <xutility>
23
+
24
+ #pragma pack(push, _CRT_PACKING)
25
+ #pragma warning(push, _STL_WARNING_LEVEL)
26
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
27
+ _STL_DISABLE_CLANG_WARNINGS
28
+ #pragma push_macro("new")
29
+ #undef new
30
+
31
+ _STD_BEGIN
32
+
33
+ template <class...>
34
+ struct _Meta_list; // a sequence of types (not defined)
35
+
36
+ template <class _List>
37
+ struct _Meta_front_;
38
+ template <class _List>
39
+ using _Meta_front =
40
+ // extract the first type in a sequence (head of list)
41
+ typename _Meta_front_<_List>::type;
42
+
43
+ template <template <class...> class _List, class _First, class... _Rest>
44
+ struct _Meta_front_<_List<_First, _Rest...>> {
45
+ using type = _First;
46
+ };
47
+
48
+ template <class _List>
49
+ struct _Meta_pop_front_;
50
+ template <class _List>
51
+ using _Meta_pop_front =
52
+ // subsequence including all but the first type (tail of list)
53
+ typename _Meta_pop_front_<_List>::type;
54
+
55
+ template <template <class...> class _List, class _First, class... _Rest>
56
+ struct _Meta_pop_front_<_List<_First, _Rest...>> {
57
+ using type = _List<_Rest...>;
58
+ };
59
+
60
+ template <class _List, class _Ty>
61
+ struct _Meta_push_front_;
62
+ template <class _List, class _Ty>
63
+ using _Meta_push_front =
64
+ // prepend a new type onto a sequence
65
+ typename _Meta_push_front_<_List, _Ty>::type;
66
+
67
+ template <template <class...> class _List, class... _Types, class _Ty>
68
+ struct _Meta_push_front_<_List<_Types...>, _Ty> {
69
+ using type = _List<_Ty, _Types...>;
70
+ };
71
+
72
+ template <class _Void, template <class...> class _Fn, class... _Args>
73
+ struct _Meta_quote_helper_;
74
+ template <template <class...> class _Fn, class... _Args>
75
+ struct _Meta_quote_helper_<void_t<_Fn<_Args...>>, _Fn, _Args...> {
76
+ using type = _Fn<_Args...>;
77
+ };
78
+ template <template <class...> class _Fn>
79
+ struct _Meta_quote { // encapsulate a template into a meta-callable type
80
+ template <class... _Types>
81
+ using _Invoke = typename _Meta_quote_helper_<void, _Fn, _Types...>::type;
82
+ };
83
+
84
+ template <class _Fn, class... _Args>
85
+ using _Meta_invoke = // invoke meta-callable _Fn with _Args
86
+ typename _Fn::template _Invoke<_Args...>;
87
+
88
+ template <class _Fn, class... _Args>
89
+ struct _Meta_bind_back { // construct a meta-callable that passes its arguments and _Args to _Fn
90
+ template <class... _Types>
91
+ using _Invoke = _Meta_invoke<_Fn, _Types..., _Args...>;
92
+ };
93
+
94
+ template <class _Fn, class _List>
95
+ struct _Meta_apply_;
96
+ template <class _Fn, class _List>
97
+ using _Meta_apply =
98
+ // unpack _List into the parameters of meta-callable _Fn
99
+ typename _Meta_apply_<_Fn, _List>::type;
100
+
101
+ template <class _Fn, template <class...> class _List, class... _Types>
102
+ struct _Meta_apply_<_Fn, _List<_Types...>> {
103
+ // invoke meta-callable _Fn with the parameters of a template specialization
104
+ using type = _Meta_invoke<_Fn, _Types...>;
105
+ };
106
+
107
+ template <class _Fn, class _Ty, _Ty... _Idxs>
108
+ struct _Meta_apply_<_Fn, integer_sequence<_Ty, _Idxs...>> {
109
+ // invoke meta-callable _Fn with the elements of an integer_sequence
110
+ using type = _Meta_invoke<_Fn, integral_constant<_Ty, _Idxs>...>;
111
+ };
112
+
113
+ template <class _Fn, class _List>
114
+ struct _Meta_transform_;
115
+ template <class _Fn, class _List>
116
+ using _Meta_transform =
117
+ // transform sequence of _Types... into sequence of _Fn<_Types...>
118
+ typename _Meta_transform_<_Fn, _List>::type;
119
+
120
+ template <template <class...> class _List, class _Fn, class... _Types>
121
+ struct _Meta_transform_<_Fn, _List<_Types...>> {
122
+ using type = _List<_Meta_invoke<_Fn, _Types>...>;
123
+ };
124
+
125
+ template <class, class, template <class...> class>
126
+ struct _Meta_repeat_n_c_;
127
+ template <size_t _Count, class _Ty, template <class...> class _Continue>
128
+ using _Meta_repeat_n_c =
129
+ // construct a sequence consisting of repetitions of _Ty
130
+ typename _Meta_repeat_n_c_<_Ty, make_index_sequence<_Count>, _Continue>::type;
131
+
132
+ template <class _Ty, size_t>
133
+ using _Meta_repeat_first_helper = _Ty;
134
+
135
+ template <class _Ty, size_t... _Idxs, template <class...> class _Continue>
136
+ struct _Meta_repeat_n_c_<_Ty, index_sequence<_Idxs...>, _Continue> {
137
+ using type = _Continue<_Meta_repeat_first_helper<_Ty, _Idxs>...>;
138
+ };
139
+
140
+ template <class _List, size_t _Idx, class = void>
141
+ struct _Meta_at_;
142
+ template <class _List, size_t _Idx>
143
+ using _Meta_at_c =
144
+ // Extract the _Idx-th type from _List
145
+ typename _Meta_at_<_List, _Idx>::type;
146
+
147
+ #ifdef __clang__
148
+ template <template <class...> class _List, class... _Types, size_t _Idx>
149
+ struct _Meta_at_<_List<_Types...>, _Idx, void_t<__type_pack_element<_Idx, _Types...>>> {
150
+ using type = __type_pack_element<_Idx, _Types...>;
151
+ };
152
+ #else // ^^^ defined(__clang__) / !defined(__clang__) vvv
153
+ template <class... _VoidPtrs>
154
+ struct _Meta_at_impl {
155
+ template <class _Ty, class... _Types>
156
+ static _Ty _Eval(_VoidPtrs..., _Ty*, _Types*...); // undefined
157
+ };
158
+
159
+ template <class _Ty>
160
+ constexpr _Identity<_Ty>* _Type_as_pointer() {
161
+ return nullptr;
162
+ }
163
+
164
+ template <template <class...> class _List, class... _Types, size_t _Idx>
165
+ struct _Meta_at_<_List<_Types...>, _Idx, enable_if_t<(_Idx < sizeof...(_Types))>> {
166
+ using type =
167
+ typename decltype(_Meta_repeat_n_c<_Idx, void*, _Meta_at_impl>::_Eval(_Type_as_pointer<_Types>()...))::type;
168
+ };
169
+ #endif // ^^^ !defined(__clang__) ^^^
170
+
171
+ template <class>
172
+ struct _Meta_as_list_;
173
+ template <class _Ty>
174
+ using _Meta_as_list =
175
+ // convert _Ty to a _Meta_list
176
+ typename _Meta_as_list_<_Ty>::type;
177
+
178
+ template <template <class...> class _List, class... _Types>
179
+ struct _Meta_as_list_<_List<_Types...>> {
180
+ // convert the parameters of an arbitrary template specialization to a _Meta_list of types
181
+ using type = _Meta_list<_Types...>;
182
+ };
183
+
184
+ template <class _Ty, _Ty... _Idxs>
185
+ struct _Meta_as_list_<integer_sequence<_Ty, _Idxs...>> {
186
+ // convert an integer_sequence to a _Meta_list of integral_constants
187
+ using type = _Meta_list<integral_constant<_Ty, _Idxs>...>;
188
+ };
189
+
190
+ template <class _List>
191
+ struct _Meta_as_integer_sequence_;
192
+ template <class _List>
193
+ using _Meta_as_integer_sequence =
194
+ // convert a list of integral_constants to an integer_sequence
195
+ typename _Meta_as_integer_sequence_<_List>::type;
196
+
197
+ template <template <class...> class _List, class _Ty, _Ty... _Idxs>
198
+ struct _Meta_as_integer_sequence_<_List<integral_constant<_Ty, _Idxs>...>> {
199
+ using type = integer_sequence<_Ty, _Idxs...>;
200
+ };
201
+
202
+ template <class...>
203
+ struct _Meta_concat_;
204
+ template <class... _Types>
205
+ using _Meta_concat =
206
+ // merge several lists into one
207
+ typename _Meta_concat_<_Types...>::type;
208
+
209
+ template <template <class...> class _List>
210
+ struct _Meta_concat_<_List<>> {
211
+ using type = _List<>;
212
+ };
213
+
214
+ template <template <class...> class _List, class... _Items1>
215
+ struct _Meta_concat_<_List<_Items1...>> {
216
+ using type = _List<_Items1...>;
217
+ };
218
+
219
+ template <template <class...> class _List, class... _Items1, class... _Items2>
220
+ struct _Meta_concat_<_List<_Items1...>, _List<_Items2...>> {
221
+ using type = _List<_Items1..., _Items2...>;
222
+ };
223
+
224
+ template <template <class...> class _List, class... _Items1, class... _Items2, class... _Items3>
225
+ struct _Meta_concat_<_List<_Items1...>, _List<_Items2...>, _List<_Items3...>> {
226
+ using type = _List<_Items1..., _Items2..., _Items3...>;
227
+ };
228
+
229
+ template <template <class...> class _List, class... _Items1, class... _Items2, class... _Items3, class... _Rest>
230
+ struct _Meta_concat_<_List<_Items1...>, _List<_Items2...>, _List<_Items3...>, _Rest...> {
231
+ using type = _Meta_concat<_List<_Items1..., _Items2..., _Items3...>, _Rest...>;
232
+ };
233
+
234
+ template <class _ListOfLists>
235
+ using _Meta_join =
236
+ // transform a list of lists of elements into a single list containing those elements
237
+ _Meta_apply<_Meta_quote<_Meta_concat>, _ListOfLists>;
238
+
239
+ template <class>
240
+ struct _Meta_cartesian_product_;
241
+ template <class _ListOfLists>
242
+ using _Meta_cartesian_product =
243
+ // find the n-ary Cartesian Product of the lists in the input list
244
+ typename _Meta_cartesian_product_<_ListOfLists>::type;
245
+
246
+ template <template <class...> class _List>
247
+ struct _Meta_cartesian_product_<_List<>> {
248
+ using type = _List<>;
249
+ };
250
+
251
+ template <template <class...> class _List1, template <class...> class _List2, class... _Items>
252
+ struct _Meta_cartesian_product_<_List1<_List2<_Items...>>> {
253
+ using type = _List1<_List2<_Items>...>;
254
+ };
255
+
256
+ template <template <class...> class _List1, class... _Items, template <class...> class _List2, class... _Lists>
257
+ struct _Meta_cartesian_product_<_List1<_List2<_Items...>, _Lists...>> {
258
+ using type = _Meta_join<_List1<_Meta_transform<_Meta_bind_back<_Meta_quote<_Meta_push_front>, _Items>,
259
+ _Meta_cartesian_product<_List1<_Lists...>>>...>>;
260
+ };
261
+
262
+ #define _STL_STAMP4(n, x) \
263
+ x(n); \
264
+ x(n + 1); \
265
+ x(n + 2); \
266
+ x(n + 3)
267
+ #define _STL_STAMP16(n, x) \
268
+ _STL_STAMP4(n, x); \
269
+ _STL_STAMP4(n + 4, x); \
270
+ _STL_STAMP4(n + 8, x); \
271
+ _STL_STAMP4(n + 12, x)
272
+ #define _STL_STAMP64(n, x) \
273
+ _STL_STAMP16(n, x); \
274
+ _STL_STAMP16(n + 16, x); \
275
+ _STL_STAMP16(n + 32, x); \
276
+ _STL_STAMP16(n + 48, x)
277
+ #define _STL_STAMP256(n, x) \
278
+ _STL_STAMP64(n, x); \
279
+ _STL_STAMP64(n + 64, x); \
280
+ _STL_STAMP64(n + 128, x); \
281
+ _STL_STAMP64(n + 192, x)
282
+
283
+ #define _STL_STAMP(n, x) x(_STL_STAMP##n, n)
284
+
285
+ _EXPORT_STD template <class... _Types>
286
+ class variant;
287
+
288
+ _EXPORT_STD template <class _Ty>
289
+ struct variant_size; // undefined
290
+ template <class _Ty>
291
+ struct variant_size<const _Ty> : variant_size<_Ty>::type {};
292
+ template <class _Ty>
293
+ struct _CXX20_DEPRECATE_VOLATILE variant_size<volatile _Ty> : variant_size<_Ty>::type {};
294
+ template <class _Ty>
295
+ struct _CXX20_DEPRECATE_VOLATILE variant_size<const volatile _Ty> : variant_size<_Ty>::type {};
296
+ _EXPORT_STD template <class _Ty>
297
+ constexpr size_t variant_size_v = variant_size<_Ty>::value;
298
+
299
+ template <class... _Types>
300
+ struct variant_size<variant<_Types...>> : integral_constant<size_t, sizeof...(_Types)> {};
301
+
302
+ _EXPORT_STD template <size_t _Idx, class _Ty>
303
+ struct variant_alternative; // undefined
304
+ _EXPORT_STD template <size_t _Idx, class _Ty>
305
+ using variant_alternative_t = typename variant_alternative<_Idx, _Ty>::type;
306
+ template <size_t _Idx, class _Ty>
307
+ struct variant_alternative<_Idx, const _Ty> {
308
+ using type = add_const_t<variant_alternative_t<_Idx, _Ty>>;
309
+ };
310
+ template <size_t _Idx, class _Ty>
311
+ struct _CXX20_DEPRECATE_VOLATILE variant_alternative<_Idx, volatile _Ty> {
312
+ using type = add_volatile_t<variant_alternative_t<_Idx, _Ty>>;
313
+ };
314
+ template <size_t _Idx, class _Ty>
315
+ struct _CXX20_DEPRECATE_VOLATILE variant_alternative<_Idx, const volatile _Ty> {
316
+ using type = add_cv_t<variant_alternative_t<_Idx, _Ty>>;
317
+ };
318
+ template <size_t _Idx, class... _Types>
319
+ struct variant_alternative<_Idx, variant<_Types...>> {
320
+ static_assert(_Idx < sizeof...(_Types), "variant index out of bounds");
321
+
322
+ #ifdef __clang__
323
+ using type = __type_pack_element<_Idx, _Types...>;
324
+ #else // ^^^ defined(__clang__) / !defined(__clang__) vvv
325
+ using type = _Meta_at_c<variant<_Types...>, _Idx>;
326
+ #endif // ^^^ !defined(__clang__) ^^^
327
+ };
328
+
329
+ _EXPORT_STD inline constexpr size_t variant_npos = _Meta_npos;
330
+
331
+ template <bool _TrivialDestruction, class... _Types>
332
+ class _Variant_storage_ {}; // empty storage (empty "_Types" case)
333
+
334
+ template <class... _Types>
335
+ using _Variant_storage = _Variant_storage_<conjunction_v<is_trivially_destructible<_Types>...>, _Types...>;
336
+
337
+ template <class _First, class... _Rest>
338
+ class _Variant_storage_<true, _First, _Rest...> { // Storage for variant alternatives (trivially destructible case)
339
+ public:
340
+ static constexpr size_t _Size = 1 + sizeof...(_Rest);
341
+ union {
342
+ remove_cv_t<_First> _Head;
343
+ _Variant_storage<_Rest...> _Tail;
344
+ };
345
+
346
+ _CONSTEXPR20 _Variant_storage_() noexcept {} // no initialization (no active member)
347
+
348
+ template <class... _Types>
349
+ constexpr explicit _Variant_storage_(integral_constant<size_t, 0>, _Types&&... _Args) noexcept(
350
+ is_nothrow_constructible_v<_First, _Types...>)
351
+ : _Head(static_cast<_Types&&>(_Args)...) {} // initialize _Head with _Args...
352
+
353
+ template <size_t _Idx, class... _Types, enable_if_t<(_Idx > 0), int> = 0>
354
+ constexpr explicit _Variant_storage_(integral_constant<size_t, _Idx>, _Types&&... _Args) noexcept(
355
+ is_nothrow_constructible_v<_Variant_storage<_Rest...>, integral_constant<size_t, _Idx - 1>, _Types...>)
356
+ : _Tail(integral_constant<size_t, _Idx - 1>{}, static_cast<_Types&&>(_Args)...) {} // initialize _Tail (recurse)
357
+
358
+ _NODISCARD constexpr _First& _Get() & noexcept {
359
+ return _Head;
360
+ }
361
+ _NODISCARD constexpr const _First& _Get() const& noexcept {
362
+ return _Head;
363
+ }
364
+ _NODISCARD constexpr _First&& _Get() && noexcept {
365
+ return _STD move(_Head);
366
+ }
367
+ _NODISCARD constexpr const _First&& _Get() const&& noexcept {
368
+ return _STD move(_Head);
369
+ }
370
+ };
371
+
372
+ template <class _First, class... _Rest>
373
+ class _Variant_storage_<false, _First, _Rest...> { // Storage for variant alternatives (non-trivially destructible case)
374
+ public:
375
+ static constexpr size_t _Size = 1 + sizeof...(_Rest);
376
+ union {
377
+ remove_cv_t<_First> _Head;
378
+ _Variant_storage<_Rest...> _Tail;
379
+ };
380
+
381
+ _CONSTEXPR20 ~_Variant_storage_()
382
+ #ifndef __clang__ // TRANSITION, LLVM-59854
383
+ noexcept
384
+ #endif // ^^^ no workaround ^^^
385
+ {
386
+ // explicitly non-trivial destructor (which would otherwise be defined as deleted
387
+ // since the class has a variant member with a non-trivial destructor)
388
+ }
389
+
390
+ _CONSTEXPR20 _Variant_storage_() noexcept {} // no initialization (no active member)
391
+
392
+ template <class... _Types>
393
+ constexpr explicit _Variant_storage_(integral_constant<size_t, 0>, _Types&&... _Args) noexcept(
394
+ is_nothrow_constructible_v<_First, _Types...>)
395
+ : _Head(static_cast<_Types&&>(_Args)...) {} // initialize _Head with _Args...
396
+
397
+ template <size_t _Idx, class... _Types, enable_if_t<(_Idx > 0), int> = 0>
398
+ constexpr explicit _Variant_storage_(integral_constant<size_t, _Idx>, _Types&&... _Args) noexcept(
399
+ is_nothrow_constructible_v<_Variant_storage<_Rest...>, integral_constant<size_t, _Idx - 1>, _Types...>)
400
+ : _Tail(integral_constant<size_t, _Idx - 1>{}, static_cast<_Types&&>(_Args)...) {} // initialize _Tail (recurse)
401
+
402
+ _Variant_storage_(_Variant_storage_&&) = default;
403
+ _Variant_storage_(const _Variant_storage_&) = default;
404
+ _Variant_storage_& operator=(_Variant_storage_&&) = default;
405
+ _Variant_storage_& operator=(const _Variant_storage_&) = default;
406
+
407
+ _NODISCARD constexpr _First& _Get() & noexcept {
408
+ return _Head;
409
+ }
410
+ _NODISCARD constexpr const _First& _Get() const& noexcept {
411
+ return _Head;
412
+ }
413
+ _NODISCARD constexpr _First&& _Get() && noexcept {
414
+ return _STD move(_Head);
415
+ }
416
+ _NODISCARD constexpr const _First&& _Get() const&& noexcept {
417
+ return _STD move(_Head);
418
+ }
419
+ };
420
+
421
+ #ifdef __cplusplus_winrt // TRANSITION, VSO-586813
422
+ // C++/CX is unable to store hats in unions. We instead store them inside a
423
+ // wrapper to enable minimal hats-in-variants support.
424
+ template <class _Ty>
425
+ struct _Variant_item {
426
+ remove_cv_t<_Ty> _Item;
427
+
428
+ template <class... _Types>
429
+ constexpr _Variant_item(_Types&&... _Args) noexcept(is_nothrow_constructible_v<_Ty, _Types...>)
430
+ : _Item(static_cast<_Types&&>(_Args)...) {}
431
+ };
432
+
433
+ template <class _First, class... _Rest>
434
+ class _Variant_storage_<false, _First ^, _Rest...> { // Storage for variant alternatives (^ case)
435
+ public:
436
+ static constexpr size_t _Size = 1 + sizeof...(_Rest);
437
+ union {
438
+ _Variant_item<_First ^> _Head;
439
+ _Variant_storage<_Rest...> _Tail;
440
+ };
441
+
442
+ _CONSTEXPR20 ~_Variant_storage_() noexcept {
443
+ // explicitly non-trivial destructor (which would otherwise be defined as deleted
444
+ // since the class has a variant member with a non-trivial destructor)
445
+ }
446
+
447
+ _CONSTEXPR20 _Variant_storage_() noexcept {} // no initialization (no active member)
448
+
449
+ template <class... _Types>
450
+ constexpr explicit _Variant_storage_(integral_constant<size_t, 0>, _Types&&... _Args) noexcept(
451
+ is_nothrow_constructible_v<_First ^, _Types...>)
452
+ : _Head(static_cast<_Types&&>(_Args)...) {} // initialize _Head with _Args...
453
+
454
+ template <size_t _Idx, class... _Types, enable_if_t<(_Idx > 0), int> = 0>
455
+ constexpr explicit _Variant_storage_(integral_constant<size_t, _Idx>, _Types&&... _Args) noexcept(
456
+ is_nothrow_constructible_v<_Variant_storage<_Rest...>, integral_constant<size_t, _Idx - 1>, _Types...>)
457
+ : _Tail(integral_constant<size_t, _Idx - 1>{}, static_cast<_Types&&>(_Args)...) {} // initialize _Tail (recurse)
458
+
459
+ _Variant_storage_(_Variant_storage_&&) = default;
460
+ _Variant_storage_(const _Variant_storage_&) = default;
461
+ _Variant_storage_& operator=(_Variant_storage_&&) = default;
462
+ _Variant_storage_& operator=(const _Variant_storage_&) = default;
463
+
464
+ _NODISCARD constexpr _First ^ &_Get() & noexcept {
465
+ return _Head._Item;
466
+ }
467
+ _NODISCARD constexpr _First ^ const& _Get() const& noexcept {
468
+ return _Head._Item;
469
+ }
470
+ _NODISCARD constexpr _First ^ &&_Get() && noexcept {
471
+ return _STD move(_Head)._Item;
472
+ }
473
+ _NODISCARD constexpr _First ^ const&& _Get() const&& noexcept {
474
+ return _STD move(_Head)._Item;
475
+ }
476
+ };
477
+ #endif // ^^^ workaround ^^^
478
+
479
+ template <size_t _Idx, class _Storage>
480
+ _NODISCARD constexpr decltype(auto) _Variant_raw_get(_Storage&& _Obj) noexcept {
481
+ // access the _Idx-th element of a _Variant_storage
482
+ if constexpr (_Idx == 0) {
483
+ return static_cast<_Storage&&>(_Obj)._Get();
484
+ } else if constexpr (_Idx == 1) {
485
+ return static_cast<_Storage&&>(_Obj)._Tail._Get();
486
+ } else if constexpr (_Idx == 2) {
487
+ return static_cast<_Storage&&>(_Obj)._Tail._Tail._Get();
488
+ } else if constexpr (_Idx == 3) {
489
+ return static_cast<_Storage&&>(_Obj)._Tail._Tail._Tail._Get();
490
+ } else if constexpr (_Idx == 4) {
491
+ return static_cast<_Storage&&>(_Obj)._Tail._Tail._Tail._Tail._Get();
492
+ } else if constexpr (_Idx == 5) {
493
+ return static_cast<_Storage&&>(_Obj)._Tail._Tail._Tail._Tail._Tail._Get();
494
+ } else if constexpr (_Idx == 6) {
495
+ return static_cast<_Storage&&>(_Obj)._Tail._Tail._Tail._Tail._Tail._Tail._Get();
496
+ } else if constexpr (_Idx == 7) {
497
+ return static_cast<_Storage&&>(_Obj)._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Get();
498
+ } else if constexpr (_Idx < 16) {
499
+ return _STD _Variant_raw_get<_Idx - 8>(
500
+ static_cast<_Storage&&>(_Obj)._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail);
501
+ } else if constexpr (_Idx < 32) {
502
+ return _STD _Variant_raw_get<_Idx - 16>(
503
+ static_cast<_Storage&&>(_Obj)
504
+ ._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail);
505
+ } else if constexpr (_Idx < 64) {
506
+ return _STD _Variant_raw_get<_Idx - 32>(
507
+ static_cast<_Storage&&>(_Obj)
508
+ ._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail
509
+ ._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail);
510
+ } else { // _Idx >= 64
511
+ return _STD _Variant_raw_get<_Idx - 64>(
512
+ static_cast<_Storage&&>(_Obj)
513
+ ._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail
514
+ ._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail
515
+ ._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail
516
+ ._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail._Tail);
517
+ }
518
+ }
519
+
520
+ template <class _Ty, size_t _Tag>
521
+ struct _Tagged { // aggregate a runtime value and a compile-time tag value
522
+ static constexpr size_t _Idx = _Tag;
523
+ _Ty _Val;
524
+ };
525
+
526
+ template <class _Storage, size_t _Idx>
527
+ using _Variant_tagged_ref_t = _Tagged<decltype(_STD _Variant_raw_get<_Idx>(_STD declval<_Storage>()))&&, _Idx>;
528
+
529
+ template <class _Fn, class _Storage>
530
+ using _Variant_raw_visit_t = decltype(_STD declval<_Fn>()(_STD declval<_Variant_tagged_ref_t<_Storage, 0>>()));
531
+
532
+ template <size_t _Idx, class _Fn, class _Storage>
533
+ _NODISCARD constexpr _Variant_raw_visit_t<_Fn, _Storage> _Variant_raw_visit_dispatch(
534
+ _Fn&& _Func, _Storage&& _Var) noexcept(is_nothrow_invocable_v<_Fn, _Variant_tagged_ref_t<_Storage, _Idx>>) {
535
+ // call _Func with the _Idx-th element in _Storage (tagged with _Idx)
536
+ return static_cast<_Fn&&>(_Func)(
537
+ _Variant_tagged_ref_t<_Storage, _Idx>{_STD _Variant_raw_get<_Idx>(static_cast<_Storage&&>(_Var))});
538
+ }
539
+
540
+ template <class _Fn, class _Storage>
541
+ _NODISCARD constexpr _Variant_raw_visit_t<_Fn, _Storage> _Variant_raw_visit_valueless(
542
+ _Fn&& _Func, _Storage&& _Obj) noexcept(is_nothrow_invocable_v<_Fn, _Tagged<_Storage&&, variant_npos>>) {
543
+ // call _Func with _Storage (tagged with variant_npos)
544
+ return static_cast<_Fn&&>(_Func)(_Tagged<_Storage&&, variant_npos>{static_cast<_Storage&&>(_Obj)});
545
+ }
546
+
547
+ template <class _Fn, class _Storage, class _Indices = make_index_sequence<remove_reference_t<_Storage>::_Size>>
548
+ constexpr bool _Variant_raw_visit_noexcept = false;
549
+
550
+ template <class _Fn, class _Storage, size_t... _Idxs>
551
+ constexpr bool _Variant_raw_visit_noexcept<_Fn, _Storage, index_sequence<_Idxs...>> =
552
+ conjunction_v<is_nothrow_invocable<_Fn, _Tagged<_Storage&&, variant_npos>>,
553
+ is_nothrow_invocable<_Fn, _Variant_tagged_ref_t<_Storage, _Idxs>>...>;
554
+
555
+ template <class _Fn, class _Storage,
556
+ class _Indices = make_index_sequence<remove_reference_t<_Storage>::_Size>>
557
+ struct _Variant_raw_dispatch_table1; // undefined
558
+
559
+ template <class _Fn, class _Storage, size_t... _Idxs>
560
+ struct _Variant_raw_dispatch_table1<_Fn, _Storage, index_sequence<_Idxs...>> {
561
+ // map from canonical index to visitation target
562
+ using _Dispatch_t = _Variant_raw_visit_t<_Fn, _Storage> (*)(_Fn&&, _Storage&&) noexcept(
563
+ _Variant_raw_visit_noexcept<_Fn, _Storage>);
564
+ static constexpr _Dispatch_t _Array[] = {
565
+ &_STD _Variant_raw_visit_valueless<_Fn, _Storage>, &_STD _Variant_raw_visit_dispatch<_Idxs, _Fn, _Storage>...};
566
+ };
567
+
568
+ template <int _Strategy>
569
+ struct _Variant_raw_visit1;
570
+
571
+ template <>
572
+ struct _Variant_raw_visit1<-1> { // Fallback case for variants too large for any of the following "switch" strategies
573
+ template <class _Fn, class _Storage>
574
+ _NODISCARD static constexpr _Variant_raw_visit_t<_Fn, _Storage> _Visit(
575
+ size_t _Idx, _Fn&& _Func, _Storage&& _Obj) noexcept(_Variant_raw_visit_noexcept<_Fn, _Storage>) {
576
+ // dispatch a visitor for a _Variant_storage with many states
577
+ constexpr size_t _Size = remove_reference_t<_Storage>::_Size;
578
+ static_assert(_Size > 256);
579
+ constexpr auto& _Array = _Variant_raw_dispatch_table1<_Fn, _Storage>::_Array;
580
+ return _Array[_Idx](static_cast<_Fn&&>(_Func), static_cast<_Storage&&>(_Obj));
581
+ }
582
+ };
583
+
584
+ #define _STL_CASE(n) \
585
+ case (n) + 1: \
586
+ if constexpr ((n) < _Size) { \
587
+ return static_cast<_Fn&&>(_Func)( \
588
+ _Variant_tagged_ref_t<_Storage, (n)>{_STD _Variant_raw_get<(n)>(static_cast<_Storage&&>(_Obj))}); \
589
+ } \
590
+ _STL_UNREACHABLE; \
591
+ [[fallthrough]]
592
+
593
+ #define _STL_VISIT_STAMP(stamper, n) \
594
+ constexpr size_t _Size = remove_reference_t<_Storage>::_Size; \
595
+ static_assert(((n) == 4 || _Size > (n) / 4) && _Size <= (n)); \
596
+ switch (_Idx) { \
597
+ case 0: \
598
+ return static_cast<_Fn&&>(_Func)(_Tagged<_Storage&&, variant_npos>{static_cast<_Storage&&>(_Obj)}); \
599
+ \
600
+ stamper(0, _STL_CASE); \
601
+ default: \
602
+ _STL_UNREACHABLE; \
603
+ }
604
+
605
+ template <>
606
+ struct _Variant_raw_visit1<1> {
607
+ template <class _Fn, class _Storage>
608
+ _NODISCARD static constexpr _Variant_raw_visit_t<_Fn, _Storage> _Visit(
609
+ size_t _Idx, _Fn&& _Func, _Storage&& _Obj) noexcept(_Variant_raw_visit_noexcept<_Fn, _Storage>) {
610
+ // dispatch a visitor for a _Variant_storage with at most 4^1 states
611
+ _STL_STAMP(4, _STL_VISIT_STAMP);
612
+ }
613
+ };
614
+
615
+ template <>
616
+ struct _Variant_raw_visit1<2> {
617
+ template <class _Fn, class _Storage>
618
+ _NODISCARD static constexpr _Variant_raw_visit_t<_Fn, _Storage> _Visit(
619
+ size_t _Idx, _Fn&& _Func, _Storage&& _Obj) noexcept(_Variant_raw_visit_noexcept<_Fn, _Storage>) {
620
+ // dispatch a visitor for a _Variant_storage with at most 4^2 states
621
+ _STL_STAMP(16, _STL_VISIT_STAMP);
622
+ }
623
+ };
624
+
625
+ template <>
626
+ struct _Variant_raw_visit1<3> {
627
+ template <class _Fn, class _Storage>
628
+ _NODISCARD static constexpr _Variant_raw_visit_t<_Fn, _Storage> _Visit(
629
+ size_t _Idx, _Fn&& _Func, _Storage&& _Obj) noexcept(_Variant_raw_visit_noexcept<_Fn, _Storage>) {
630
+ // dispatch a visitor for a _Variant_storage with at most 4^3 states
631
+ _STL_STAMP(64, _STL_VISIT_STAMP);
632
+ }
633
+ };
634
+
635
+ template <>
636
+ struct _Variant_raw_visit1<4> {
637
+ template <class _Fn, class _Storage>
638
+ _NODISCARD static constexpr _Variant_raw_visit_t<_Fn, _Storage> _Visit(
639
+ size_t _Idx, _Fn&& _Func, _Storage&& _Obj) noexcept(_Variant_raw_visit_noexcept<_Fn, _Storage>) {
640
+ // dispatch a visitor for a _Variant_storage with at most 4^4 states
641
+ _STL_STAMP(256, _STL_VISIT_STAMP);
642
+ }
643
+ };
644
+
645
+ #undef _STL_VISIT_STAMP
646
+ #undef _STL_CASE
647
+
648
+ template <class _Storage, class _Fn>
649
+ _NODISCARD constexpr _Variant_raw_visit_t<_Fn, _Storage> _Variant_raw_visit(
650
+ size_t _Idx, _Storage&& _Obj, _Fn&& _Func) noexcept(_Variant_raw_visit_noexcept<_Fn, _Storage>) {
651
+ // Call _Func with _Storage if _Idx is variant_npos, and otherwise the _Idx-th element in _Storage.
652
+ // pre: _Idx + 1 <= remove_reference_t<_Storage>::_Size
653
+ constexpr size_t _Size = remove_reference_t<_Storage>::_Size;
654
+ constexpr int _Strategy = _Size <= 4 ? 1 : _Size <= 16 ? 2 : _Size <= 64 ? 3 : _Size <= 256 ? 4 : -1;
655
+ ++_Idx; // bias index by +1 to map {variant_npos} U [0, _Size) to the contiguous range [0, _Size]
656
+ return _Variant_raw_visit1<_Strategy>::_Visit(_Idx, static_cast<_Fn&&>(_Func), static_cast<_Storage&&>(_Obj));
657
+ }
658
+
659
+ template <class...>
660
+ class _Variant_base;
661
+
662
+ inline constexpr size_t _Schar_max_as_size = static_cast<unsigned char>(-1) / 2;
663
+ inline constexpr size_t _Short_max_as_size = static_cast<unsigned short>(-1) / 2;
664
+
665
+ template <size_t _Count>
666
+ using _Variant_index_t = // signed so that conversion of -1 to size_t can cheaply sign extend
667
+ conditional_t<(_Count < _Schar_max_as_size), signed char, conditional_t<(_Count < _Short_max_as_size), short, int>>;
668
+
669
+ template <class... _Types>
670
+ struct _Variant_construct_visitor { // visitor that constructs the same alternative in a target _Variant_base as is
671
+ // currently active in a source _Variant_base from the source's contained value
672
+ _Variant_base<_Types...>& _Self;
673
+
674
+ template <class _Ty, size_t _Idx>
675
+ _CONSTEXPR20 void operator()(_Tagged<_Ty, _Idx> _Source) const noexcept(
676
+ disjunction_v<bool_constant<_Idx == variant_npos>, is_nothrow_constructible<remove_reference_t<_Ty>, _Ty>>) {
677
+ // initialize _Idx-th item in _Self from _Source
678
+ _STL_INTERNAL_CHECK(_Self.valueless_by_exception());
679
+ if constexpr (_Idx != variant_npos) {
680
+ _STD _Construct_in_place(
681
+ _Self._Storage(), integral_constant<size_t, _Idx>{}, static_cast<_Ty&&>(_Source._Val));
682
+ _Self._Set_index(_Idx);
683
+ }
684
+ }
685
+ };
686
+
687
+ template <class _Target, class... _Types>
688
+ constexpr bool _Variant_should_directly_construct_v =
689
+ disjunction_v<is_nothrow_constructible<_Target, _Types...>, negation<is_nothrow_move_constructible<_Target>>>;
690
+
691
+ template <class... _Types>
692
+ struct _Variant_assign_visitor { // visitor that implements assignment for variants with non-trivial alternatives
693
+ _Variant_base<_Types...>& _Self;
694
+
695
+ template <class _Ty, size_t _Idx>
696
+ _CONSTEXPR20 void operator()(_Tagged<_Ty, _Idx> _Source) const
697
+ noexcept(disjunction_v<bool_constant<_Idx == variant_npos>,
698
+ conjunction<is_nothrow_assignable<_Remove_cvref_t<_Ty>&, _Ty>,
699
+ is_nothrow_constructible<_Remove_cvref_t<_Ty>, _Ty>>>) {
700
+ // assign the _Idx-th alternative of _Self from _Source
701
+ if constexpr (_Idx == variant_npos) { // assign from valueless _Source
702
+ _Self._Reset();
703
+ } else {
704
+ if (_Self._Which == _Idx) { // same alternative: assign directly
705
+ auto& _Target = _STD _Variant_raw_get<_Idx>(_Self._Storage());
706
+ _Target = static_cast<_Ty&&>(_Source._Val);
707
+ } else { // different alternative
708
+ if constexpr (is_lvalue_reference_v<_Ty>) { // RHS is an lvalue: copy
709
+ if constexpr (_Variant_should_directly_construct_v<_Remove_cvref_t<_Ty>, _Ty>) {
710
+ // copy is nothrow or move throws; construct in place
711
+ _Self._Reset();
712
+ _STD _Construct_in_place(_Self._Storage(), integral_constant<size_t, _Idx>{}, _Source._Val);
713
+ } else { // copy throws and move does not; move from a temporary copy
714
+ auto _Temp = _Source._Val;
715
+ _Self._Reset();
716
+ _STD _Construct_in_place(_Self._Storage(), integral_constant<size_t, _Idx>{}, _STD move(_Temp));
717
+ }
718
+ } else { // RHS is an rvalue: move
719
+ _Self._Reset();
720
+ _STD _Construct_in_place(
721
+ _Self._Storage(), integral_constant<size_t, _Idx>{}, static_cast<_Ty&&>(_Source._Val));
722
+ }
723
+
724
+ _Self._Set_index(_Idx);
725
+ }
726
+ }
727
+ }
728
+ };
729
+
730
+ template <class... _Types> // Associate an integral discriminator with a _Variant_storage
731
+ class _Variant_base : private _Variant_storage<_Types...> {
732
+ public:
733
+ using _Index_t = _Variant_index_t<sizeof...(_Types)>;
734
+ static constexpr auto _Invalid_index = static_cast<_Index_t>(-1);
735
+ _Index_t _Which;
736
+
737
+ using _Storage_t = _Variant_storage<_Types...>;
738
+ _NODISCARD constexpr _Storage_t& _Storage() & noexcept { // access this variant's storage
739
+ return *this;
740
+ }
741
+ _NODISCARD constexpr const _Storage_t& _Storage() const& noexcept { // access this variant's storage
742
+ return *this;
743
+ }
744
+ _NODISCARD constexpr _Storage_t&& _Storage() && noexcept { // access this variant's storage
745
+ return _STD move(*this);
746
+ }
747
+ _NODISCARD constexpr const _Storage_t&& _Storage() const&& noexcept { // access this variant's storage
748
+ return _STD move(*this);
749
+ }
750
+
751
+ // initialize to the value-less state
752
+ _CONSTEXPR20 _Variant_base() noexcept : _Storage_t{}, _Which{_Invalid_index} {}
753
+
754
+ template <size_t _Idx, class... _UTypes,
755
+ enable_if_t<is_constructible_v<_Meta_at_c<variant<_Types...>, _Idx>, _UTypes...>, int> = 0>
756
+ constexpr explicit _Variant_base(in_place_index_t<_Idx>, _UTypes&&... _Args) noexcept(
757
+ is_nothrow_constructible_v<_Meta_at_c<variant<_Types...>, _Idx>, _UTypes...>)
758
+ : _Storage_t(integral_constant<size_t, _Idx>{}, static_cast<_UTypes&&>(_Args)...),
759
+ _Which{static_cast<_Index_t>(_Idx)} { // initialize alternative _Idx from _Args...
760
+ }
761
+
762
+ _NODISCARD constexpr bool valueless_by_exception() const noexcept { // does this variant NOT hold a value?
763
+ return _Which < 0;
764
+ }
765
+ _NODISCARD constexpr size_t index() const noexcept {
766
+ // index of the contained alternative or variant_npos if valueless_by_exception
767
+ return static_cast<size_t>(_Which);
768
+ }
769
+ _CONSTEXPR20 void _Set_index(const size_t _Idx) noexcept {
770
+ // record _Idx as the active alternative
771
+ // pre: the active alternative of *this is _Idx
772
+ _Which = static_cast<_Index_t>(_Idx);
773
+ }
774
+
775
+ template <size_t _Idx>
776
+ _CONSTEXPR20 void _Destroy() noexcept {
777
+ // destroy the contained value
778
+ // pre: _Idx == index()
779
+ using _Indexed_value_type = remove_cv_t<_Meta_at_c<variant<_Types...>, _Idx>>;
780
+ if constexpr (_Idx != variant_npos && !is_trivially_destructible_v<_Indexed_value_type>) {
781
+ _STD _Variant_raw_get<_Idx>(_Storage()).~_Indexed_value_type();
782
+ }
783
+ }
784
+
785
+ _CONSTEXPR20 void _Destroy() noexcept { // destroy the contained value, if any
786
+ if constexpr (!conjunction_v<is_trivially_destructible<_Types>...>) {
787
+ _STD _Variant_raw_visit(index(), _Storage(), [](auto _Ref) noexcept {
788
+ if constexpr (decltype(_Ref)::_Idx != variant_npos) {
789
+ using _Indexed_value_type = _Remove_cvref_t<decltype(_Ref._Val)>;
790
+ _Ref._Val.~_Indexed_value_type();
791
+ }
792
+ });
793
+ }
794
+ }
795
+
796
+ _CONSTEXPR20 void _Reset() noexcept { // transition to the valueless_by_exception state
797
+ _Destroy();
798
+ _Set_index(variant_npos);
799
+ }
800
+
801
    template <size_t _Idx>
    _CONSTEXPR20 void _Reset() noexcept {
        // transition to the valueless_by_exception state
        // pre: _Idx == index()
        // Compile-time-indexed variant of _Reset(); a no-op when *this is already valueless.
        if constexpr (_Idx != variant_npos) {
            _Destroy<_Idx>();
            _Set_index(variant_npos);
        }
    }
810
+
811
    _CONSTEXPR20 void _Construct_from(const _Variant_base& _That) noexcept(
        conjunction_v<is_nothrow_copy_constructible<_Types>...>) {
        // copy _That's contained value into *this
        // pre: valueless_by_exception()
        // The construct visitor also sets *this's index to match _That's.
        _STD _Variant_raw_visit(_That.index(), _That._Storage(), _Variant_construct_visitor<_Types...>{*this});
    }
817
+
818
    _CONSTEXPR20 void _Construct_from(_Variant_base&& _That) noexcept(
        conjunction_v<is_nothrow_move_constructible<_Types>...>) {
        // move _That's contained value into *this
        // pre: valueless_by_exception()
        // _STD move(_That)._Storage() yields an rvalue storage so the visitor move-constructs.
        _STD _Variant_raw_visit(
            _That.index(), _STD move(_That)._Storage(), _Variant_construct_visitor<_Types...>{*this});
    }
825
+
826
    _CONSTEXPR20 void _Assign_from(const _Variant_base& _That) noexcept(
        conjunction_v<is_nothrow_copy_constructible<_Types>..., is_nothrow_copy_assignable<_Types>...>) {
        // copy assign _That's contained value (if any) into *this
        // The assign visitor handles all index combinations (same, different, valueless).
        _STD _Variant_raw_visit(_That.index(), _That._Storage(), _Variant_assign_visitor<_Types...>{*this});
    }
831
+
832
    _CONSTEXPR20 void _Assign_from(_Variant_base&& _That) noexcept(
        conjunction_v<is_nothrow_move_constructible<_Types>..., is_nothrow_move_assignable<_Types>...>) {
        // move assign _That's contained value (if any) into *this
        _STD _Variant_raw_visit(_That.index(), _STD move(_That)._Storage(), _Variant_assign_visitor<_Types...>{*this});
    }
837
+ };
838
+
839
template <class... _Types>
struct _Variant_destroy_layer_ : _Variant_base<_Types...> { // destruction behavior facade (non-trivial case)
    // Adds a user-provided destructor that tears down the active alternative. Only selected
    // (via the _Variant_destroy_layer alias below) when some alternative is not trivially
    // destructible; otherwise the destructor would needlessly be non-trivial.
    using _Variant_base<_Types...>::_Variant_base;

    _CONSTEXPR20 ~_Variant_destroy_layer_() noexcept { // Destroy contained value, if any
        this->_Destroy();
    }

    // Declaring the destructor suppresses the other special members; re-default them all
    // so copy/move behavior is still governed by the _SMF_control layers above.
    _Variant_destroy_layer_() = default;
    _Variant_destroy_layer_(const _Variant_destroy_layer_&) = default;
    _Variant_destroy_layer_(_Variant_destroy_layer_&&) = default;
    _Variant_destroy_layer_& operator=(const _Variant_destroy_layer_&) = default;
    _Variant_destroy_layer_& operator=(_Variant_destroy_layer_&&) = default;
};
853
+
854
// Select the plain base (trivial destructor) when all alternatives are trivially destructible,
// so variant's destructor stays trivial; otherwise use the destroying facade.
template <class... _Types>
using _Variant_destroy_layer = conditional_t<conjunction_v<is_trivially_destructible<_Types>...>,
    _Variant_base<_Types...>, _Variant_destroy_layer_<_Types...>>;
857
+
858
+ #ifdef __clang__
859
+ #pragma clang diagnostic push
860
+ #pragma clang diagnostic ignored "-Wdeprecated-volatile"
861
+ #else // ^^^ Clang / not Clang vvv
862
+ #pragma warning(push)
863
+ #pragma warning(disable : 4242) // '%s': conversion from '%s' to '%s', possible loss of data
864
+ #pragma warning(disable : 4244) // '%s': conversion from '%s' to '%s', possible loss of data (Yes, duplicated message.)
865
+ #pragma warning(disable : 4365) // '%s': conversion from '%s' to '%s', signed/unsigned mismatch
866
+ #pragma warning(disable : 5215) // '%s' a function parameter with volatile qualified type is deprecated in C++20
867
+ #endif // ^^^ not Clang ^^^
868
+
869
// build Ti x[] = {std::forward<T>(t)};
// The "imaginary function" overload set of N4950 [variant.ctor]/14: an alternative _TargetType
// is a candidate only if a one-element array of it can be list-initialized from the argument,
// which excludes narrowing conversions.
template <size_t _Idx, class _TargetType>
auto _Construct_array(_TargetType (&&)[1]) -> _Meta_list<integral_constant<size_t, _Idx>, _TargetType>;

// SFINAEs to _Meta_list<index, target> iff {argument} can initialize _TargetType[1]
template <size_t _Idx, class _TargetType, class _InitializerType>
using _Variant_type_resolver = decltype(_STD _Construct_array<_Idx, _TargetType>({_STD declval<_InitializerType>()}));

// one call operator per (index, alternative) pair; removed from the set when not viable
template <size_t _Idx, class _TargetType>
struct _Variant_init_single_overload {
    template <class _InitializerType>
    auto operator()(_TargetType, _InitializerType&&) -> _Variant_type_resolver<_Idx, _TargetType, _InitializerType>;
};

template <class _Indices, class... _Types>
struct _Variant_init_overload_set_;

// merge every per-alternative overload into one overload set so ordinary overload
// resolution picks the unique best alternative (or fails if ambiguous)
template <size_t... _Indices, class... _Types>
struct _Variant_init_overload_set_<index_sequence<_Indices...>, _Types...>
    : _Variant_init_single_overload<_Indices, _Types>... {
    using _Variant_init_single_overload<_Indices, _Types>::operator()...;
};

template <class... _Types>
using _Variant_init_overload_set = _Variant_init_overload_set_<index_sequence_for<_Types...>, _Types...>;
893
+
894
template <class Enable, class _Ty, class... _Types>
struct _Variant_init_helper {}; // failure case (has no member "type")

template <class _Ty, class... _Types>
struct _Variant_init_helper<
    void_t<decltype(_Variant_init_overload_set<_Types...>{}(_STD declval<_Ty>(), _STD declval<_Ty>()))>, _Ty,
    _Types...> {
    // perform overload resolution to determine the unique alternative that should be initialized in
    // variant<_Types...> from an argument expression with type and value category _Ty
    using type = decltype(_Variant_init_overload_set<_Types...>{}(_STD declval<_Ty>(), _STD declval<_Ty>()));
};

// _Variant_init_helper<...>::type is _Meta_list<integral_constant<size_t, Index>, TargetType>;
// the two aliases below project out the chosen alternative type and its index.
template <class _Ty, class... _Types> // extract the type from _Variant_init_helper
using _Variant_init_type = _Meta_front<_Meta_pop_front<typename _Variant_init_helper<void, _Ty, _Types...>::type>>;

template <class _Ty, class... _Types> // extract the index from _Variant_init_helper
using _Variant_init_index = _Meta_front<typename _Variant_init_helper<void, _Ty, _Types...>::type>;
911
+ #ifdef __clang__
912
+ #pragma clang diagnostic pop
913
+ #else // ^^^ Clang / not Clang vvv
914
+ #pragma warning(pop)
915
+ #endif // ^^^ not Clang ^^^
916
+
917
// true iff a type is some in_place_index_t<I>; used to exclude the in-place tags from
// variant's converting constructor (N4950 [variant.ctor]/15.3)
template <class>
constexpr bool _Is_in_place_index_specialization = false;
template <size_t _Idx>
constexpr bool _Is_in_place_index_specialization<in_place_index_t<_Idx>> = true;
921
+
922
+ _EXPORT_STD template <class... _Types>
923
+ class variant : private _SMF_control<_Variant_destroy_layer<_Types...>, _Types...> { // discriminated union
924
+ public:
925
+ static_assert(conjunction_v<is_object<_Types>..., negation<is_array<_Types>>..., is_destructible<_Types>...>,
926
+ "variant<Types...> requires all of the Types to meet the Cpp17Destructible requirements "
927
+ "(N4950 [variant.variant.general]/2).");
928
+ static_assert(sizeof...(_Types) > 0,
929
+ "variant<> (with no template arguments) may not be instantiated (N4950 [variant.variant.general]/3).");
930
+ using _Mybase = _SMF_control<_Variant_destroy_layer<_Types...>, _Types...>;
931
+
932
+ template <class _First = _Meta_front<variant>, enable_if_t<is_default_constructible_v<_First>, int> = 0>
933
+ constexpr variant() noexcept(is_nothrow_default_constructible_v<_First>)
934
+ : _Mybase(in_place_index<0>) {} // value-initialize alternative 0
935
+
936
+ template <class _Ty,
937
+ enable_if_t<sizeof...(_Types) != 0 //
938
+ && !is_same_v<_Remove_cvref_t<_Ty>, variant> //
939
+ && !_Is_specialization_v<_Remove_cvref_t<_Ty>, in_place_type_t> //
940
+ && !_Is_in_place_index_specialization<_Remove_cvref_t<_Ty>> //
941
+ && is_constructible_v<_Variant_init_type<_Ty, _Types...>, _Ty>, //
942
+ int> = 0>
943
+ constexpr variant(_Ty&& _Obj) noexcept(is_nothrow_constructible_v<_Variant_init_type<_Ty, _Types...>, _Ty>)
944
+ : _Mybase(in_place_index<_Variant_init_index<_Ty, _Types...>::value>, static_cast<_Ty&&>(_Obj)) {
945
+ // initialize to the type selected by passing _Obj to the overload set f(Types)...
946
+ }
947
+
948
+ template <class _Ty, class... _UTypes, class _Idx = _Meta_find_unique_index<variant, _Ty>,
949
+ enable_if_t<_Idx::value != _Meta_npos && is_constructible_v<_Ty, _UTypes...>, int> = 0>
950
+ constexpr explicit variant(in_place_type_t<_Ty>, _UTypes&&... _Args) noexcept(
951
+ is_nothrow_constructible_v<_Ty, _UTypes...>) // strengthened
952
+ : _Mybase(in_place_index<_Idx::value>, static_cast<_UTypes&&>(_Args)...) {
953
+ // initialize alternative _Ty from _Args...
954
+ }
955
+ template <class _Ty, class _Elem, class... _UTypes, class _Idx = _Meta_find_unique_index<variant, _Ty>,
956
+ enable_if_t<_Idx::value != _Meta_npos && is_constructible_v<_Ty, initializer_list<_Elem>&, _UTypes...>, int> =
957
+ 0>
958
+ constexpr explicit variant(in_place_type_t<_Ty>, initializer_list<_Elem> _Ilist, _UTypes&&... _Args) noexcept(
959
+ is_nothrow_constructible_v<_Ty, initializer_list<_Elem>&, _UTypes...>) // strengthened
960
+ : _Mybase(in_place_index<_Idx::value>, _Ilist, static_cast<_UTypes&&>(_Args)...) {
961
+ // initialize alternative _Ty from _Ilist and _Args...
962
+ }
963
+
964
+ template <size_t _Idx, class... _UTypes,
965
+ enable_if_t<is_constructible_v<_Meta_at_c<variant, _Idx>, _UTypes...>, int> = 0>
966
+ constexpr explicit variant(in_place_index_t<_Idx>, _UTypes&&... _Args) noexcept(
967
+ is_nothrow_constructible_v<_Meta_at_c<variant, _Idx>, _UTypes...>) // strengthened
968
+ : _Mybase(in_place_index<_Idx>, static_cast<_UTypes&&>(_Args)...) {
969
+ // initialize alternative _Idx from _Args...
970
+ }
971
+ template <size_t _Idx, class _Elem, class... _UTypes,
972
+ enable_if_t<is_constructible_v<_Meta_at_c<variant, _Idx>, initializer_list<_Elem>&, _UTypes...>, int> = 0>
973
+ constexpr explicit variant(in_place_index_t<_Idx>, initializer_list<_Elem> _Ilist, _UTypes&&... _Args) noexcept(
974
+ is_constructible_v<_Meta_at_c<variant, _Idx>, initializer_list<_Elem>&, _UTypes...>) // strengthened
975
+ : _Mybase(in_place_index<_Idx>, _Ilist, static_cast<_UTypes&&>(_Args)...) {
976
+ // initialize alternative _Idx from _Ilist and _Args...
977
+ }
978
+
979
+ template <class _Ty, enable_if_t<!is_same_v<_Remove_cvref_t<_Ty>, variant>
980
+ && is_constructible_v<_Variant_init_type<_Ty, _Types...>, _Ty>
981
+ && is_assignable_v<_Variant_init_type<_Ty, _Types...>&, _Ty>,
982
+ int> = 0>
983
+ _CONSTEXPR20 variant& operator=(_Ty&& _Obj) noexcept(
984
+ is_nothrow_assignable_v<_Variant_init_type<_Ty, _Types...>&, _Ty>
985
+ && is_nothrow_constructible_v<_Variant_init_type<_Ty, _Types...>, _Ty>) {
986
+ // assign/emplace the alternative chosen by overload resolution of _Obj with f(_Types)...
987
+ constexpr size_t _TargetIdx = _Variant_init_index<_Ty, _Types...>::value;
988
+ if (index() == _TargetIdx) {
989
+ auto& _Target = _STD _Variant_raw_get<_TargetIdx>(_Storage());
990
+ _Target = static_cast<_Ty&&>(_Obj);
991
+ } else {
992
+ using _TargetTy = _Variant_init_type<_Ty, _Types...>;
993
+ if constexpr (_Variant_should_directly_construct_v<_TargetTy, _Ty>) {
994
+ this->_Reset();
995
+ _Emplace_valueless<_TargetIdx>(static_cast<_Ty&&>(_Obj));
996
+ } else {
997
+ _TargetTy _Temp(static_cast<_Ty&&>(_Obj));
998
+ this->_Reset();
999
+ _Emplace_valueless<_TargetIdx>(_STD move(_Temp));
1000
+ }
1001
+ }
1002
+
1003
+ return *this;
1004
+ }
1005
+
1006
+ using _Mybase::_Storage;
1007
+
1008
+ template <class _Ty, class... _ArgTypes, size_t _Idx = _Meta_find_unique_index<variant, _Ty>::value,
1009
+ enable_if_t<_Idx != _Meta_npos && is_constructible_v<_Ty, _ArgTypes...>, int> = 0>
1010
+ _CONSTEXPR20 _Ty& emplace(_ArgTypes&&... _Args) noexcept(
1011
+ is_nothrow_constructible_v<_Ty, _ArgTypes...>) /* strengthened */ {
1012
+ // emplace alternative _Ty from _Args...
1013
+ this->_Reset();
1014
+ return _Emplace_valueless<_Idx>(static_cast<_ArgTypes&&>(_Args)...);
1015
+ }
1016
+ template <class _Ty, class _Elem, class... _ArgTypes, size_t _Idx = _Meta_find_unique_index<variant, _Ty>::value,
1017
+ enable_if_t<_Idx != _Meta_npos && is_constructible_v<_Ty, initializer_list<_Elem>&, _ArgTypes...>, int> = 0>
1018
+ _CONSTEXPR20 _Ty& emplace(initializer_list<_Elem> _Ilist, _ArgTypes&&... _Args) noexcept(
1019
+ is_nothrow_constructible_v<_Ty, initializer_list<_Elem>&, _ArgTypes...>) /* strengthened */ {
1020
+ // emplace alternative _Ty from _Ilist and _Args...
1021
+ this->_Reset();
1022
+ return _Emplace_valueless<_Idx>(_Ilist, static_cast<_ArgTypes&&>(_Args)...);
1023
+ }
1024
+
1025
+ template <size_t _Idx, class... _ArgTypes,
1026
+ enable_if_t<is_constructible_v<_Meta_at_c<variant, _Idx>, _ArgTypes...>, int> = 0>
1027
+ _CONSTEXPR20 _Meta_at_c<variant, _Idx>& emplace(_ArgTypes&&... _Args) noexcept(
1028
+ is_nothrow_constructible_v<_Meta_at_c<variant, _Idx>, _ArgTypes...>) /* strengthened */ {
1029
+ // emplace alternative _Idx from _Args...
1030
+ this->_Reset();
1031
+ return _Emplace_valueless<_Idx>(static_cast<_ArgTypes&&>(_Args)...);
1032
+ }
1033
+ template <size_t _Idx, class _Elem, class... _ArgTypes,
1034
+ enable_if_t<is_constructible_v<_Meta_at_c<variant, _Idx>, initializer_list<_Elem>&, _ArgTypes...>, int> = 0>
1035
+ _CONSTEXPR20 _Meta_at_c<variant, _Idx>& emplace(initializer_list<_Elem> _Ilist, _ArgTypes&&... _Args) noexcept(
1036
+ is_nothrow_constructible_v<_Meta_at_c<variant, _Idx>, initializer_list<_Elem>&,
1037
+ _ArgTypes...>) /* strengthened */ {
1038
+ // emplace alternative _Idx from _Ilist and _Args...
1039
+ this->_Reset();
1040
+ return _Emplace_valueless<_Idx>(_Ilist, static_cast<_ArgTypes&&>(_Args)...);
1041
+ }
1042
+
1043
+ using _Mybase::index;
1044
+ using _Mybase::valueless_by_exception;
1045
+
1046
+ #ifdef __clang__ // TRANSITION, LLVM-35450
1047
+ #pragma clang diagnostic push
1048
+ #pragma clang diagnostic ignored "-Wunused-lambda-capture"
1049
+ #endif // ^^^ workaround ^^^
1050
+ _CONSTEXPR20 void swap(variant& _That) noexcept(
1051
+ conjunction_v<is_nothrow_move_constructible<_Types>..., is_nothrow_swappable<_Types>...>) {
1052
+ // exchange the contained values if *this and _That hold the same alternative, otherwise exchange the values of
1053
+ // the variants themselves
1054
+ static_assert(conjunction_v<is_move_constructible<_Types>...>,
1055
+ "variant<Types...>::swap requires all of the Types... to be move constructible. (N4950 [variant.swap]/1)");
1056
+ static_assert(disjunction_v<negation<is_move_constructible<_Types>>..., conjunction<is_swappable<_Types>...>>,
1057
+ "variant<Types...>::swap requires all of the Types... to be swappable. (N4950 [variant.swap]/2)");
1058
+ if constexpr (conjunction_v<_Is_trivially_swappable<_Types>...>) {
1059
+ using _BaseTy = _Variant_base<_Types...>;
1060
+ _STD swap(static_cast<_BaseTy&>(*this), static_cast<_BaseTy&>(_That));
1061
+ } else if constexpr (sizeof...(_Types) < 32) {
1062
+ // Limit the size of variants that use this quadratic code size implementation of swap.
1063
+ _STD _Variant_raw_visit(index(), _Storage(),
1064
+ [this, &_That](auto _My_ref) noexcept(
1065
+ conjunction_v<is_nothrow_move_constructible<_Types>..., is_nothrow_swappable<_Types>...>) {
1066
+ _STD _Variant_raw_visit(_That.index(), _That._Storage(),
1067
+ [this, &_That, _My_ref](auto _That_ref) noexcept(
1068
+ conjunction_v<is_nothrow_move_constructible<_Types>..., is_nothrow_swappable<_Types>...>) {
1069
+ constexpr size_t _That_idx = decltype(_That_ref)::_Idx;
1070
+ constexpr size_t _My_idx = decltype(_My_ref)::_Idx;
1071
+ if constexpr (_My_idx == _That_idx) { // Same alternatives...
1072
+ if constexpr (_My_idx != variant_npos) { // ...and not valueless, swap directly
1073
+ using _STD swap;
1074
+ swap(_My_ref._Val, _That_ref._Val); // intentional ADL
1075
+ }
1076
+ } else if constexpr (_My_idx == variant_npos) { // *this is valueless, _That is not
1077
+ this->_Emplace_valueless<_That_idx>(_STD move(_That_ref._Val));
1078
+ _That.template _Reset<_That_idx>();
1079
+ } else if constexpr (_That_idx == variant_npos) { // _That is valueless, *this is not
1080
+ _That._Emplace_valueless<_My_idx>(_STD move(_My_ref._Val));
1081
+ this->template _Reset<_My_idx>();
1082
+ } else { // different non-valueless alternatives
1083
+ auto _Tmp = _STD move(_My_ref._Val);
1084
+ this->template _Reset<_My_idx>();
1085
+ this->_Emplace_valueless<_That_idx>(_STD move(_That_ref._Val));
1086
+ _That.template _Reset<_That_idx>();
1087
+ _That._Emplace_valueless<_My_idx>(_STD move(_Tmp));
1088
+ }
1089
+ });
1090
+ });
1091
+ } else {
1092
+ if (this->_Which == _That._Which) {
1093
+ _STD _Variant_raw_visit(static_cast<size_t>(this->_Which), _That._Storage(),
1094
+ [this](auto _Ref) noexcept(conjunction_v<is_nothrow_swappable<_Types>...>) {
1095
+ constexpr size_t _Idx = decltype(_Ref)::_Idx;
1096
+ if constexpr (_Idx != variant_npos) {
1097
+ using _STD swap;
1098
+ swap(_Variant_raw_get<_Idx>(this->_Storage()), _Ref._Val); // intentional ADL
1099
+ }
1100
+ });
1101
+ } else {
1102
+ variant _Tmp = _STD move(*this);
1103
+ this->_Emplace_from(_STD move(_That));
1104
+ _That._Emplace_from(_STD move(_Tmp));
1105
+ }
1106
+ }
1107
+ }
1108
+ #ifdef __clang__ // TRANSITION, LLVM-35450
1109
+ #pragma clang diagnostic pop
1110
+ #endif // ^^^ workaround ^^^
1111
+
1112
+ private:
1113
+ template <size_t _Idx, class... _ArgTypes>
1114
+ _CONSTEXPR20 _Meta_at_c<variant, _Idx>& _Emplace_valueless(_ArgTypes&&... _Args) noexcept(
1115
+ is_nothrow_constructible_v<_Meta_at_c<variant, _Idx>, _ArgTypes...>) {
1116
+ // initialize alternative _Idx from _Args...
1117
+ _STL_INTERNAL_CHECK(valueless_by_exception());
1118
+ _STD _Construct_in_place(_Storage(), integral_constant<size_t, _Idx>{}, static_cast<_ArgTypes&&>(_Args)...);
1119
+ this->_Set_index(_Idx);
1120
+ return _STD _Variant_raw_get<_Idx>(_Storage());
1121
+ }
1122
+
1123
+ _CONSTEXPR20 void _Emplace_from(variant&& _That) noexcept(conjunction_v<is_nothrow_move_constructible<_Types>...>) {
1124
+ // steal the contained value from _That
1125
+ this->_Reset();
1126
+ _STD _Variant_raw_visit(_That.index(), _That._Storage(),
1127
+ [this](auto _Ref) noexcept(conjunction_v<is_nothrow_move_constructible<_Types>...>) {
1128
+ constexpr size_t _Idx = decltype(_Ref)::_Idx;
1129
+ if constexpr (_Idx != variant_npos) {
1130
+ this->_Emplace_valueless<_Idx>(_STD move(_Ref._Val));
1131
+ }
1132
+ });
1133
+ }
1134
+ };
1135
+
1136
_EXPORT_STD template <class _Ty, class... _Types>
_NODISCARD constexpr bool holds_alternative(const variant<_Types...>& _Var) noexcept {
    // true iff _Var holds alternative _Ty
    // _Ty must occur exactly once in _Types...; otherwise the ill-formed branch fires.
    constexpr size_t _Idx = _Meta_find_unique_index<variant<_Types...>, _Ty>::value;
    if constexpr (_Idx != _Meta_npos) {
        return _Var.index() == _Idx;
    } else {
        static_assert(false, "holds_alternative<T>(const variant<Types...>&) requires T to occur exactly "
                             "once in Types. (N4971 [variant.get]/1)");
    }
}
1147
+
1148
// get<_Idx>: the four reference-category overloads; each returns a reference to the
// contained value, throwing bad_variant_access when alternative _Idx is not active.
_EXPORT_STD template <size_t _Idx, class... _Types>
_NODISCARD constexpr decltype(auto) get(variant<_Types...>& _Var) {
    // access the contained value of _Var if its _Idx-th alternative is active
    static_assert(_Idx < sizeof...(_Types), "variant index out of bounds");
    if (_Var.index() == _Idx) {
        return _STD _Variant_raw_get<_Idx>(_Var._Storage());
    }

    _STD _Throw_bad_variant_access();
}
_EXPORT_STD template <size_t _Idx, class... _Types>
_NODISCARD constexpr decltype(auto) get(variant<_Types...>&& _Var) {
    // access the contained value of _Var if its _Idx-th alternative is active
    static_assert(_Idx < sizeof...(_Types), "variant index out of bounds")�;
    if (_Var.index() == _Idx) {
        return _STD _Variant_raw_get<_Idx>(_STD move(_Var)._Storage());
    }

    _STD _Throw_bad_variant_access();
}
_EXPORT_STD template <size_t _Idx, class... _Types>
_NODISCARD constexpr decltype(auto) get(const variant<_Types...>& _Var) {
    // access the contained value of _Var if its _Idx-th alternative is active
    static_assert(_Idx < sizeof...(_Types), "variant index out of bounds");
    if (_Var.index() == _Idx) {
        return _STD _Variant_raw_get<_Idx>(_Var._Storage());
    }

    _STD _Throw_bad_variant_access();
}
_EXPORT_STD template <size_t _Idx, class... _Types>
_NODISCARD constexpr decltype(auto) get(const variant<_Types...>&& _Var) {
    // access the contained value of _Var if its _Idx-th alternative is active
    static_assert(_Idx < sizeof...(_Types), "variant index out of bounds");
    if (_Var.index() == _Idx) {
        return _STD _Variant_raw_get<_Idx>(_STD move(_Var)._Storage());
    }

    _STD _Throw_bad_variant_access();
}
1188
+
1189
// get<_Ty>: type-based accessors; map _Ty to its unique index and delegate to get<_Idx>.
// Ill-formed when _Ty does not occur exactly once in _Types....
_EXPORT_STD template <class _Ty, class... _Types>
_NODISCARD constexpr decltype(auto) get(variant<_Types...>& _Var) {
    // access the contained value of _Var if its alternative _Ty is active
    constexpr size_t _Idx = _Meta_find_unique_index<variant<_Types...>, _Ty>::value;
    if constexpr (_Idx < sizeof...(_Types)) {
        return _STD get<_Idx>(_Var);
    } else {
        static_assert(false, "get<T>(variant<Types...>&) "
                             "requires T to occur exactly once in Types. (N4971 [variant.get]/8)");
    }
}
_EXPORT_STD template <class _Ty, class... _Types>
_NODISCARD constexpr decltype(auto) get(variant<_Types...>&& _Var) {
    // access the contained value of _Var if its alternative _Ty is active
    constexpr size_t _Idx = _Meta_find_unique_index<variant<_Types...>, _Ty>::value;
    if constexpr (_Idx < sizeof...(_Types)) {
        return _STD get<_Idx>(_STD move(_Var));
    } else {
        static_assert(false, "get<T>(variant<Types...>&&) "
                             "requires T to occur exactly once in Types. (N4971 [variant.get]/8)");
    }
}
_EXPORT_STD template <class _Ty, class... _Types>
_NODISCARD constexpr decltype(auto) get(const variant<_Types...>& _Var) {
    // access the contained value of _Var if its alternative _Ty is active
    constexpr size_t _Idx = _Meta_find_unique_index<variant<_Types...>, _Ty>::value;
    if constexpr (_Idx < sizeof...(_Types)) {
        return _STD get<_Idx>(_Var);
    } else {
        static_assert(false, "get<T>(const variant<Types...>&) "
                             "requires T to occur exactly once in Types. (N4971 [variant.get]/8)");
    }
}
_EXPORT_STD template <class _Ty, class... _Types>
_NODISCARD constexpr decltype(auto) get(const variant<_Types...>&& _Var) {
    // access the contained value of _Var if its alternative _Ty is active
    constexpr size_t _Idx = _Meta_find_unique_index<variant<_Types...>, _Ty>::value;
    if constexpr (_Idx < sizeof...(_Types)) {
        return _STD get<_Idx>(_STD move(_Var));
    } else {
        static_assert(false, "get<T>(const variant<Types...>&&) "
                             "requires T to occur exactly once in Types. (N4971 [variant.get]/8)");
    }
}
1233
+
1234
// get_if<_Idx>: non-throwing accessors; return a pointer to the contained value,
// or nullptr when _Ptr is null or alternative _Idx is not active.
_EXPORT_STD template <size_t _Idx, class... _Types>
_NODISCARD constexpr auto get_if(variant<_Types...>* _Ptr) noexcept {
    // get the address of *_Ptr's contained value if it holds alternative _Idx
    static_assert(_Idx < sizeof...(_Types), "variant index out of bounds");
    return _Ptr && _Ptr->index() == _Idx ? _STD addressof(_STD _Variant_raw_get<_Idx>(_Ptr->_Storage())) : nullptr;
}
_EXPORT_STD template <size_t _Idx, class... _Types>
_NODISCARD constexpr auto get_if(const variant<_Types...>* _Ptr) noexcept {
    // get the address of *_Ptr's contained value if it holds alternative _Idx
    static_assert(_Idx < sizeof...(_Types), "variant index out of bounds");
    return _Ptr && _Ptr->index() == _Idx ? _STD addressof(_STD _Variant_raw_get<_Idx>(_Ptr->_Storage())) : nullptr;
}
1246
+
1247
// get_if<_Ty>: type-based pointer accessors; ill-formed unless _Ty occurs exactly once.
_EXPORT_STD template <class _Ty, class... _Types>
_NODISCARD constexpr add_pointer_t<_Ty> get_if(variant<_Types...>* _Ptr) noexcept {
    // get the address of *_Ptr's contained value if it holds alternative _Ty
    constexpr size_t _Idx = _Meta_find_unique_index<variant<_Types...>, _Ty>::value;
    if constexpr (_Idx != _Meta_npos) {
        return _STD get_if<_Idx>(_Ptr);
    } else {
        static_assert(false,
            "get_if<T>(variant<Types...> *) requires T to occur exactly once in Types. (N4971 [variant.get]/12)");
    }
}
_EXPORT_STD template <class _Ty, class... _Types>
_NODISCARD constexpr add_pointer_t<const _Ty> get_if(const variant<_Types...>* _Ptr) noexcept {
    // get the address of *_Ptr's contained value if it holds alternative _Ty
    constexpr size_t _Idx = _Meta_find_unique_index<variant<_Types...>, _Ty>::value;
    if constexpr (_Idx != _Meta_npos) {
        return _STD get_if<_Idx>(_Ptr);
    } else {
        static_assert(false,
            "get_if<T>(const variant<Types...> *) requires T to occur exactly once in Types. (N4971 [variant.get]/12)");
    }
}
1269
+
1270
template <class _Op, class _Result, class... _Types>
struct _Variant_relop_visitor2 { // evaluate _Op with the contained value of two variants that hold the same alternative
    const _Variant_storage<_Types...>& _Left;

    template <class _Ty, size_t _Idx>
    _NODISCARD constexpr _Result operator()(_Tagged<const _Ty&, _Idx> _Right) const
        noexcept(disjunction_v<bool_constant<_Idx == variant_npos>,
            is_nothrow_invocable_r<_Result, _Op, const _Ty&, const _Ty&>>) {
        // determine the relationship between the stored values of _Left and _Right
        // pre: _Left.index() == _Idx && _Right.index() == _Idx
        if constexpr (_Idx != variant_npos) {
            return _Op{}(_STD _Variant_raw_get<_Idx>(_Left), _Right._Val);
        } else { // return whatever _Op returns for equal values
            // both sides are valueless; compare two equal dummies so ==/<=/>= yield true, </> false
            return _Op{}(0, 0);
        }
    }
};
1287
+
1288
_EXPORT_STD template <class... _Types>
_NODISCARD constexpr bool operator==(const variant<_Types...>& _Left, const variant<_Types...>& _Right) noexcept(
    conjunction_v<is_nothrow_invocable_r<bool, equal_to<>, const _Types&, const _Types&>...>) /* strengthened */ {
    // determine if the arguments are both valueless or contain equal values
    using _Visitor            = _Variant_relop_visitor2<equal_to<>, bool, _Types...>;
    const size_t _Right_index = _Right.index();
    return _Left.index() == _Right_index
        && _STD _Variant_raw_visit(_Right_index, _Right._Storage(), _Visitor{_Left._Storage()});
}

_EXPORT_STD template <class... _Types>
_NODISCARD constexpr bool operator!=(const variant<_Types...>& _Left, const variant<_Types...>& _Right) noexcept(
    conjunction_v<is_nothrow_invocable_r<bool, not_equal_to<>, const _Types&, const _Types&>...>) /* strengthened */ {
    // determine if the arguments have different active alternatives or contain unequal values
    using _Visitor            = _Variant_relop_visitor2<not_equal_to<>, bool, _Types...>;
    const size_t _Right_index = _Right.index();
    return _Left.index() != _Right_index
        || _STD _Variant_raw_visit(_Right_index, _Right._Storage(), _Visitor{_Left._Storage()});
}

// For the orderings below, indices are biased by +1 so that variant_npos (size_t(-1))
// becomes 0, making a valueless variant order before every non-valueless one
// (N4950 [variant.relops]).

_EXPORT_STD template <class... _Types>
_NODISCARD constexpr bool operator<(const variant<_Types...>& _Left, const variant<_Types...>& _Right) noexcept(
    conjunction_v<is_nothrow_invocable_r<bool, less<>, const _Types&, const _Types&>...>) /* strengthened */ {
    // determine if _Left has a lesser index(), or equal index() and lesser
    // contained value than _Right
    using _Visitor             = _Variant_relop_visitor2<less<>, bool, _Types...>;
    const size_t _Left_offset  = _Left.index() + 1;
    const size_t _Right_offset = _Right.index() + 1;
    return _Left_offset < _Right_offset
        || (_Left_offset == _Right_offset
            && _STD _Variant_raw_visit(_Right_offset - 1, _Right._Storage(), _Visitor{_Left._Storage()}));
}

_EXPORT_STD template <class... _Types>
_NODISCARD constexpr bool operator>(const variant<_Types...>& _Left, const variant<_Types...>& _Right) noexcept(
    conjunction_v<is_nothrow_invocable_r<bool, greater<>, const _Types&, const _Types&>...>) /* strengthened */ {
    // determine if _Left has a greater index(), or equal index() and
    // greater contained value than _Right
    using _Visitor             = _Variant_relop_visitor2<greater<>, bool, _Types...>;
    const size_t _Left_offset  = _Left.index() + 1;
    const size_t _Right_offset = _Right.index() + 1;
    return _Left_offset > _Right_offset
        || (_Left_offset == _Right_offset
            && _STD _Variant_raw_visit(_Right_offset - 1, _Right._Storage(), _Visitor{_Left._Storage()}));
}

_EXPORT_STD template <class... _Types>
_NODISCARD constexpr bool operator<=(const variant<_Types...>& _Left, const variant<_Types...>& _Right) noexcept(
    conjunction_v<is_nothrow_invocable_r<bool, less_equal<>, const _Types&, const _Types&>...>) /* strengthened */ {
    // determine if _Left's index() is less than _Right's, or equal and
    // _Left contains a value less than or equal to _Right
    using _Visitor             = _Variant_relop_visitor2<less_equal<>, bool, _Types...>;
    const size_t _Left_offset  = _Left.index() + 1;
    const size_t _Right_offset = _Right.index() + 1;
    return _Left_offset < _Right_offset
        || (_Left_offset == _Right_offset
            && _STD _Variant_raw_visit(_Right_offset - 1, _Right._Storage(), _Visitor{_Left._Storage()}));
}

_EXPORT_STD template <class... _Types>
_NODISCARD constexpr bool operator>=(const variant<_Types...>& _Left, const variant<_Types...>& _Right) noexcept(
    conjunction_v<is_nothrow_invocable_r<bool, greater_equal<>, const _Types&, const _Types&>...>) /* strengthened */ {
    // determine if _Left's index() is greater than _Right's, or equal and
    // _Left contains a value greater than or equal to _Right
    using _Visitor             = _Variant_relop_visitor2<greater_equal<>, bool, _Types...>;
    const size_t _Left_offset  = _Left.index() + 1;
    const size_t _Right_offset = _Right.index() + 1;
    return _Left_offset > _Right_offset
        || (_Left_offset == _Right_offset
            && _STD _Variant_raw_visit(_Right_offset - 1, _Right._Storage(), _Visitor{_Left._Storage()}));
}
1359
+
1360
#if _HAS_CXX20
_EXPORT_STD template <class... _Types>
    requires (three_way_comparable<_Types> && ...)
_NODISCARD constexpr common_comparison_category_t<compare_three_way_result_t<_Types>...>
    operator<=>(const variant<_Types...>& _Left, const variant<_Types...>& _Right) noexcept(
        conjunction_v<is_nothrow_invocable_r<common_comparison_category_t<compare_three_way_result_t<_Types>...>,
            compare_three_way, const _Types&, const _Types&>...>) /* strengthened */ {
    // determine the three-way comparison of _Left's and _Right's index, if equal
    // return the three-way comparison of the contained values of _Left and _Right
    // (indices are biased by +1 so a valueless variant compares less than any other)
    using _Visitor = _Variant_relop_visitor2<compare_three_way,
        common_comparison_category_t<compare_three_way_result_t<_Types>...>, _Types...>;
    const size_t _Left_offset  = _Left.index() + 1;
    const size_t _Right_offset = _Right.index() + 1;
    const auto _Offset_order   = _Left_offset <=> _Right_offset;
    return _Offset_order != 0
             ? _Offset_order
             : _STD _Variant_raw_visit(_Right_offset - 1, _Right._Storage(), _Visitor{_Left._Storage()});
}
#endif // _HAS_CXX20
1379
+
1380
// number of distinct states across a pack of variants (product of per-variant state counts)
template <class... _Variants>
constexpr size_t _Variant_total_states =
    (size_t{1} * ... * (variant_size_v<_Variants> + 1)); // +1 to account for the valueless state

_NODISCARD constexpr size_t _Variant_visit_index1(const size_t _Acc) noexcept {
    // base case: all variants consumed, _Acc is the canonical (mixed-radix) index
    return _Acc;
}
template <class _FirstTy, class... _RestTys>
_NODISCARD constexpr size_t _Variant_visit_index1(
    size_t _Acc, const _FirstTy& _First, const _RestTys&... _Rest) noexcept {
    // calculate a canonical index from the biased indices of the variants _First and _Rest...
    // (mixed-radix encoding: each variant's biased index is scaled by the number of states
    // of the variants that follow it)
    _Acc += (_First.index() + 1) * _Variant_total_states<_RestTys...>;
    return _STD _Variant_visit_index1(_Acc, _Rest...);
}
1394
+
1395
// result type of invoking _Callable with the first alternative of each variant;
// [variant.visit] requires every combination to yield this same type
template <class _Callable, class... _Types>
using _Variant_visit_result_t =
    decltype(_STD invoke(_STD declval<_Callable>(), _STD _Variant_raw_get<0>(_STD declval<_Types>()._Storage())...));

template <class>
struct _Variant_dispatcher;

template <size_t... _Is>
struct _Variant_dispatcher<index_sequence<_Is...>> {
    // _Is... are biased indices (0 means valueless); one _Dispatch2 instantiation exists
    // per combination of alternatives and is the target of a dispatch-table entry.
    template <class _Ret, class _Callable, class... _Types, bool _Any_valueless = ((_Is == 0) || ...)>
    _NODISCARD static constexpr _Ret _Dispatch2(_Callable&& _Obj, _Types&&... _Args) {
        if constexpr (_Any_valueless) {
            // any valueless operand makes the whole visitation throw
#if !defined(__clang__) && !defined(__EDG__) // TRANSITION, VSO-1513409
            ((void) _Args, ...);
#endif // ^^^ workaround ^^^
            _STD _Throw_bad_variant_access();
        }
#if _HAS_CXX20
        else if constexpr (is_void_v<_Ret>) {
            // visit<void>: discard whatever the callable returns
            static_cast<void>(_STD invoke(static_cast<_Callable&&>(_Obj),
                _STD _Variant_raw_get<_Is - 1>(static_cast<_Types&&>(_Args)._Storage())...));
        }
#endif // _HAS_CXX20
        else {
            return _STD invoke(static_cast<_Callable&&>(_Obj),
                _STD _Variant_raw_get<_Is - 1>(static_cast<_Types&&>(_Args)._Storage())...);
        }
    }
};

template <class _Ret, class _Ordinals, class _Callable, class _Variants>
struct _Variant_dispatch_table; // undefined

template <class _Ret, class... _Ordinals, class _Callable, class... _Variants>
struct _Variant_dispatch_table<_Ret, _Meta_list<_Ordinals...>, _Callable, _Meta_list<_Variants...>> {
    // map from canonical index to visitation target
    // (one function pointer per combination of biased alternative indices)
    using _Dispatch_t              = _Ret (*)(_Callable&&, _Variants&&...);
    static constexpr _Dispatch_t _Array[] = {
        &_Variant_dispatcher<_Ordinals>::template _Dispatch2<_Ret, _Callable, _Variants...>...};
};
1435
+
1436
+ template <int _Strategy>
1437
+ struct _Visit_strategy;
1438
+
1439
+ template <>
1440
+ struct _Visit_strategy<-1> {
1441
+ // Fallback strategy for visitations with too many total states for the following "switch" strategies.
1442
+ template <class _Ret, class _ListOfIndexVectors, class _Callable, class... _Variants>
1443
+ static constexpr _Ret _Visit2(
1444
+ size_t _Idx, _Callable&& _Obj, _Variants&&... _Args) { // dispatch a visitation with many potential states
1445
+ constexpr size_t _Size = _Variant_total_states<_Remove_cvref_t<_Variants>...>;
1446
+ static_assert(_Size > 256);
1447
+ constexpr auto& _Array =
1448
+ _Variant_dispatch_table<_Ret, _ListOfIndexVectors, _Callable, _Meta_list<_Variants...>>::_Array;
1449
+ return _Array[_Idx](static_cast<_Callable&&>(_Obj), static_cast<_Variants&&>(_Args)...);
1450
+ }
1451
+ };
1452
+
1453
+ template <>
1454
+ struct _Visit_strategy<0> {
1455
+ template <class _Ret, class, class _Callable>
1456
+ static constexpr _Ret _Visit2(size_t, _Callable&& _Obj) { // dispatch a visitation with 4^0 potential states
1457
+ if constexpr (is_void_v<_Ret>) {
1458
+ return static_cast<void>(static_cast<_Callable&&>(_Obj)());
1459
+ } else {
1460
+ return static_cast<_Callable&&>(_Obj)();
1461
+ }
1462
+ }
1463
+ };
1464
+
1465
+ #define _STL_CASE(n) \
1466
+ case (n): \
1467
+ if constexpr ((n) < _Size) { \
1468
+ using _Indices = _Meta_at_c<_ListOfIndexVectors, (n)>; \
1469
+ return _Variant_dispatcher<_Indices>::template _Dispatch2<_Ret, _Callable, _Variants...>( \
1470
+ static_cast<_Callable&&>(_Obj), static_cast<_Variants&&>(_Args)...); \
1471
+ } \
1472
+ _STL_UNREACHABLE; \
1473
+ [[fallthrough]]
1474
+
1475
+ #define _STL_VISIT_STAMP(stamper, n) \
1476
+ constexpr size_t _Size = _Variant_total_states<_Remove_cvref_t<_Variants>...>; \
1477
+ static_assert(_Size > (n) / 4 && _Size <= (n)); \
1478
+ switch (_Idx) { \
1479
+ stamper(0, _STL_CASE); \
1480
+ default: \
1481
+ _STL_UNREACHABLE; \
1482
+ }
1483
+
1484
+ template <>
1485
+ struct _Visit_strategy<1> {
1486
+ template <class _Ret, class _ListOfIndexVectors, class _Callable, class... _Variants>
1487
+ static constexpr _Ret _Visit2(size_t _Idx, _Callable&& _Obj, _Variants&&... _Args) {
1488
+ // dispatch a visitation with 4^1 potential states
1489
+ _STL_STAMP(4, _STL_VISIT_STAMP);
1490
+ }
1491
+ };
1492
+
1493
+ template <>
1494
+ struct _Visit_strategy<2> {
1495
+ template <class _Ret, class _ListOfIndexVectors, class _Callable, class... _Variants>
1496
+ static constexpr _Ret _Visit2(size_t _Idx, _Callable&& _Obj, _Variants&&... _Args) {
1497
+ // dispatch a visitation with 4^2 potential states
1498
+ _STL_STAMP(16, _STL_VISIT_STAMP);
1499
+ }
1500
+ };
1501
+
1502
+ template <>
1503
+ struct _Visit_strategy<3> {
1504
+ template <class _Ret, class _ListOfIndexVectors, class _Callable, class... _Variants>
1505
+ static constexpr _Ret _Visit2(size_t _Idx, _Callable&& _Obj, _Variants&&... _Args) {
1506
+ // dispatch a visitation with 4^3 potential states
1507
+ _STL_STAMP(64, _STL_VISIT_STAMP);
1508
+ }
1509
+ };
1510
+
1511
+ template <>
1512
+ struct _Visit_strategy<4> {
1513
+ template <class _Ret, class _ListOfIndexVectors, class _Callable, class... _Variants>
1514
+ static constexpr _Ret _Visit2(size_t _Idx, _Callable&& _Obj, _Variants&&... _Args) {
1515
+ // dispatch a visitation with 4^4 potential states
1516
+ _STL_STAMP(256, _STL_VISIT_STAMP);
1517
+ }
1518
+ };
1519
+
1520
+ #undef _STL_VISIT_STAMP
1521
+ #undef _STL_CASE
1522
+
1523
+ template <class... _Types>
1524
+ variant<_Types...>& _As_variant_impl(variant<_Types...>&);
1525
+ template <class... _Types>
1526
+ const variant<_Types...>& _As_variant_impl(const variant<_Types...>&);
1527
+ template <class... _Types>
1528
+ variant<_Types...>&& _As_variant_impl(variant<_Types...>&&);
1529
+ template <class... _Types>
1530
+ const variant<_Types...>&& _As_variant_impl(const variant<_Types...>&&);
1531
+ template <class _Ty>
1532
+ using _As_variant = // Deduce variant specialization from a derived type
1533
+ decltype(_STD _As_variant_impl(_STD declval<_Ty>()));
1534
+
1535
+ template <size_t _Size, class _Ret, class _ListOfIndexVectors, class _Callable, class... _Variants>
1536
+ constexpr _Ret _Visit_impl(_Callable&& _Obj, _Variants&&... _Args) {
1537
+ constexpr int _Strategy = _Size == 1 ? 0
1538
+ : _Size <= 4 ? 1
1539
+ : _Size <= 16 ? 2
1540
+ : _Size <= 64 ? 3
1541
+ : _Size <= 256 ? 4
1542
+ : -1;
1543
+ return _Visit_strategy<_Strategy>::template _Visit2<_Ret, _ListOfIndexVectors>(
1544
+ _STD _Variant_visit_index1(0, static_cast<_As_variant<_Variants>&>(_Args)...), static_cast<_Callable&&>(_Obj),
1545
+ static_cast<_As_variant<_Variants>&&>(_Args)...);
1546
+ }
1547
+
1548
+ template <class _Expected, class _Callable, class _ArgList, class... _Variants>
1549
+ constexpr bool _Variant_all_visit_results_same = false;
1550
+
1551
+ template <class _Expected, class _Callable, class... _Args>
1552
+ constexpr bool _Variant_all_visit_results_same<_Expected, _Callable, _Meta_list<_Args...>> =
1553
+ is_same_v<decltype(_STD invoke(_STD declval<_Callable>(), _STD declval<_Args>()...)), _Expected>;
1554
+
1555
+ template <class _Expected, class _Callable, class... _Args, class... _Types, class... _Rest>
1556
+ constexpr bool
1557
+ _Variant_all_visit_results_same<_Expected, _Callable, _Meta_list<_Args...>, variant<_Types...>&, _Rest...> =
1558
+ (_Variant_all_visit_results_same<_Expected, _Callable, _Meta_list<_Args..., _Types&>, _Rest...> && ...);
1559
+
1560
+ template <class _Expected, class _Callable, class... _Args, class... _Types, class... _Rest>
1561
+ constexpr bool
1562
+ _Variant_all_visit_results_same<_Expected, _Callable, _Meta_list<_Args...>, const variant<_Types...>&, _Rest...> =
1563
+ (_Variant_all_visit_results_same<_Expected, _Callable, _Meta_list<_Args..., const _Types&>, _Rest...> && ...);
1564
+
1565
+ template <class _Expected, class _Callable, class... _Args, class... _Types, class... _Rest>
1566
+ constexpr bool
1567
+ _Variant_all_visit_results_same<_Expected, _Callable, _Meta_list<_Args...>, variant<_Types...>&&, _Rest...> =
1568
+ (_Variant_all_visit_results_same<_Expected, _Callable, _Meta_list<_Args..., _Types>, _Rest...> && ...);
1569
+
1570
+ template <class _Expected, class _Callable, class... _Args, class... _Types, class... _Rest>
1571
+ constexpr bool
1572
+ _Variant_all_visit_results_same<_Expected, _Callable, _Meta_list<_Args...>, const variant<_Types...>&&, _Rest...> =
1573
+ (_Variant_all_visit_results_same<_Expected, _Callable, _Meta_list<_Args..., const _Types>, _Rest...> && ...);
1574
+
1575
+ _EXPORT_STD template <class _Callable, class... _Variants, class = void_t<_As_variant<_Variants>...>>
1576
+ constexpr _Variant_visit_result_t<_Callable, _As_variant<_Variants>...> visit(_Callable&& _Obj, _Variants&&... _Args) {
1577
+ // Invoke _Obj with the contained values of _Args...
1578
+ constexpr auto _Size = _Variant_total_states<_Remove_cvref_t<_As_variant<_Variants>>...>;
1579
+ using _ListOfIndexLists =
1580
+ _Meta_list<_Meta_as_list<make_index_sequence<1 + variant_size_v<_Remove_cvref_t<_As_variant<_Variants>>>>>...>;
1581
+ using _ListOfIndexVectors =
1582
+ _Meta_transform<_Meta_quote<_Meta_as_integer_sequence>, _Meta_cartesian_product<_ListOfIndexLists>>;
1583
+ using _Ret = _Variant_visit_result_t<_Callable, _As_variant<_Variants>...>;
1584
+ static_assert(_Variant_all_visit_results_same<_Ret, _Callable, _Meta_list<>, _As_variant<_Variants>...>,
1585
+ "visit() requires the result of all potential invocations to have the same type and value category "
1586
+ "(N4950 [variant.visit]/5).");
1587
+
1588
+ return _STD _Visit_impl<_Size, _Ret, _ListOfIndexVectors>(
1589
+ static_cast<_Callable&&>(_Obj), static_cast<_Variants&&>(_Args)...);
1590
+ }
1591
+
1592
+ #if _HAS_CXX20
1593
+ template <class _Expected, class _Callable, class _ArgList, class... _Variants>
1594
+ constexpr bool _Variant_all_visit_results_convertible = false;
1595
+
1596
+ template <class _Expected, class _Callable, class... _Args>
1597
+ constexpr bool _Variant_all_visit_results_convertible<_Expected, _Callable, _Meta_list<_Args...>> =
1598
+ _Invoke_convertible<decltype(_STD invoke(_STD declval<_Callable>(), _STD declval<_Args>()...)), _Expected>::value;
1599
+
1600
+ template <class _Expected, class _Callable, class... _Args, class... _Types, class... _Rest>
1601
+ constexpr bool
1602
+ _Variant_all_visit_results_convertible<_Expected, _Callable, _Meta_list<_Args...>, variant<_Types...>&, _Rest...> =
1603
+ (_Variant_all_visit_results_convertible<_Expected, _Callable, _Meta_list<_Args..., _Types&>, _Rest...> && ...);
1604
+
1605
+ template <class _Expected, class _Callable, class... _Args, class... _Types, class... _Rest>
1606
+ constexpr bool _Variant_all_visit_results_convertible<_Expected, _Callable, _Meta_list<_Args...>,
1607
+ const variant<_Types...>&, _Rest...> =
1608
+ (_Variant_all_visit_results_convertible<_Expected, _Callable, _Meta_list<_Args..., const _Types&>, _Rest...>
1609
+ && ...);
1610
+
1611
+ template <class _Expected, class _Callable, class... _Args, class... _Types, class... _Rest>
1612
+ constexpr bool
1613
+ _Variant_all_visit_results_convertible<_Expected, _Callable, _Meta_list<_Args...>, variant<_Types...>&&, _Rest...> =
1614
+ (_Variant_all_visit_results_convertible<_Expected, _Callable, _Meta_list<_Args..., _Types>, _Rest...> && ...);
1615
+
1616
+ template <class _Expected, class _Callable, class... _Args, class... _Types, class... _Rest>
1617
+ constexpr bool _Variant_all_visit_results_convertible<_Expected, _Callable, _Meta_list<_Args...>,
1618
+ const variant<_Types...>&&, _Rest...> =
1619
+ (_Variant_all_visit_results_convertible<_Expected, _Callable, _Meta_list<_Args..., const _Types>, _Rest...> && ...);
1620
+
1621
+ _EXPORT_STD template <class _Ret, class _Callable, class... _Variants, class = void_t<_As_variant<_Variants>...>>
1622
+ constexpr _Ret visit(_Callable&& _Obj, _Variants&&... _Args) {
1623
+ constexpr auto _Size = _Variant_total_states<_Remove_cvref_t<_As_variant<_Variants>>...>;
1624
+ using _ListOfIndexLists =
1625
+ _Meta_list<_Meta_as_list<make_index_sequence<1 + variant_size_v<_Remove_cvref_t<_As_variant<_Variants>>>>>...>;
1626
+ using _ListOfIndexVectors =
1627
+ _Meta_transform<_Meta_quote<_Meta_as_integer_sequence>, _Meta_cartesian_product<_ListOfIndexLists>>;
1628
+ if constexpr (!is_void_v<_Ret>) {
1629
+ static_assert(_Variant_all_visit_results_convertible<_Ret, _Callable, _Meta_list<>, _As_variant<_Variants>...>,
1630
+ "visit<R>() requires the result of all potential invocations to be implicitly convertible to R "
1631
+ "(N4950 [variant.visit]/5).");
1632
+ }
1633
+
1634
+ return _STD _Visit_impl<_Size, _Ret, _ListOfIndexVectors>(
1635
+ static_cast<_Callable&&>(_Obj), static_cast<_Variants&&>(_Args)...);
1636
+ }
1637
+ #endif // _HAS_CXX20
1638
+
1639
+ _EXPORT_STD _NODISCARD constexpr bool operator==(monostate, monostate) noexcept {
1640
+ return true;
1641
+ }
1642
+
1643
+ #if _HAS_CXX20
1644
+ _EXPORT_STD _NODISCARD constexpr strong_ordering operator<=>(monostate, monostate) noexcept {
1645
+ return strong_ordering::equal;
1646
+ }
1647
+ #else // ^^^ _HAS_CXX20 / !_HAS_CXX20 vvv
1648
+ _NODISCARD constexpr bool operator!=(monostate, monostate) noexcept {
1649
+ return false;
1650
+ }
1651
+ _NODISCARD constexpr bool operator<(monostate, monostate) noexcept {
1652
+ return false;
1653
+ }
1654
+ _NODISCARD constexpr bool operator>(monostate, monostate) noexcept {
1655
+ return false;
1656
+ }
1657
+ _NODISCARD constexpr bool operator<=(monostate, monostate) noexcept {
1658
+ return true;
1659
+ }
1660
+ _NODISCARD constexpr bool operator>=(monostate, monostate) noexcept {
1661
+ return true;
1662
+ }
1663
+ #endif // ^^^ !_HAS_CXX20 ^^^
1664
+
1665
+ _EXPORT_STD template <class... _Types,
1666
+ enable_if_t<conjunction_v<is_move_constructible<_Types>..., is_swappable<_Types>...>, int> = 0>
1667
+ _CONSTEXPR20 void swap(variant<_Types...>& _Left, variant<_Types...>& _Right) noexcept(noexcept(_Left.swap(_Right))) {
1668
+ _Left.swap(_Right);
1669
+ }
1670
+
1671
+ struct _Variant_hash_visitor { // visitation function for hashing variants
1672
+ template <class _Ty, size_t _Idx>
1673
+ _NODISCARD _STATIC_CALL_OPERATOR size_t operator()(_Tagged<const _Ty&, _Idx> _Obj) _CONST_CALL_OPERATOR
1674
+ noexcept(disjunction_v<bool_constant<_Idx == variant_npos>,
1675
+ is_nothrow_invocable<hash<_Ty>, const _Ty&>>) { // hash contained value _Obj
1676
+ if constexpr (_Idx == variant_npos) { // hash a valueless variant
1677
+ return 0;
1678
+ } else { // hash the contained value
1679
+ return hash<_Ty>{}(_Obj._Val);
1680
+ }
1681
+ }
1682
+ };
1683
+
1684
+ template <class... _Types>
1685
+ struct hash<variant<_Types...>> : _Conditionally_enabled_hash<variant<_Types...>,
1686
+ conjunction_v<is_default_constructible<hash<remove_const_t<_Types>>>...>> {
1687
+ _NODISCARD static size_t _Do_hash(const variant<_Types...>& _Var) noexcept(
1688
+ conjunction_v<_Is_nothrow_hashable<remove_const_t<_Types>>...>) {
1689
+ // called from the CRTP base to hash _Var iff the hash is enabled
1690
+ return _STD _Variant_raw_visit(_Var.index(), _Var._Storage(), _Variant_hash_visitor{});
1691
+ }
1692
+ };
1693
+
1694
+ template <>
1695
+ struct hash<monostate> {
1696
+ using _ARGUMENT_TYPE_NAME _CXX17_DEPRECATE_ADAPTOR_TYPEDEFS = monostate;
1697
+ using _RESULT_TYPE_NAME _CXX17_DEPRECATE_ADAPTOR_TYPEDEFS = size_t;
1698
+
1699
+ _NODISCARD _STATIC_CALL_OPERATOR size_t operator()(monostate) _CONST_CALL_OPERATOR noexcept {
1700
+ return 1729; // Arbitrary value
1701
+ }
1702
+ };
1703
+
1704
+ _STD_END
1705
+
1706
+ #undef _STL_STAMP
1707
+ #undef _STL_STAMP256
1708
+ #undef _STL_STAMP64
1709
+ #undef _STL_STAMP16
1710
+ #undef _STL_STAMP4
1711
+
1712
+ #pragma pop_macro("new")
1713
+ _STL_RESTORE_CLANG_WARNINGS
1714
+ #pragma warning(pop)
1715
+ #pragma pack(pop)
1716
+ #endif // ^^^ _HAS_CXX17 ^^^
1717
+ #endif // _STL_COMPILER_PREPROCESSOR
1718
+ #endif // _VARIANT_
miniMSVC/VC/Tools/MSVC/14.42.34433/include/vcclr.h ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //
2
+ // vcclr.h - helper code for using the managed extensions to C++
3
+ //
4
+ // Copyright (C) Microsoft Corporation
5
+ // All rights reserved.
6
+ //
7
+
8
+ #if _MSC_VER > 1000
9
+ #pragma once
10
+ #endif
11
+
12
+ #if !defined(_INC_VCCLR)
13
+ #define _INC_VCCLR
14
+ #ifndef RC_INVOKED
15
+
16
+ #include <gcroot.h>
17
+
18
+ #pragma warning(push)
19
+ #pragma warning(disable:4400)
20
+
21
+ #ifdef __cplusplus_cli
22
+ typedef cli::interior_ptr<const System::Char> __const_Char_ptr;
23
+ typedef cli::interior_ptr<const System::Byte> __const_Byte_ptr;
24
+ typedef cli::interior_ptr<System::Byte> _Byte_ptr;
25
+ typedef const System::String^ __const_String_handle;
26
+ #define _NULLPTR nullptr
27
+ #else
28
+ typedef const System::Char* __const_Char_ptr;
29
+ typedef const System::Byte* __const_Byte_ptr;
30
+ typedef System::Byte* _Byte_ptr;
31
+ typedef const System::String* __const_String_handle;
32
+ #define _NULLPTR 0
33
+ #endif
34
+
35
+
36
+ //
37
+ // get an interior gc pointer to the first character contained in a System::String object
38
+ //
39
+ inline __const_Char_ptr PtrToStringChars(__const_String_handle s) {
40
+
41
+ _Byte_ptr bp = const_cast<_Byte_ptr>(reinterpret_cast<__const_Byte_ptr>(s));
42
+ if( bp != _NULLPTR ) {
43
+ bp += System::Runtime::CompilerServices::RuntimeHelpers::OffsetToStringData;
44
+ }
45
+ return reinterpret_cast<__const_Char_ptr>(bp);
46
+ }
47
+
48
+ #pragma warning(pop)
49
+
50
+ #undef _NULLPTR
51
+
52
+ #endif /* RC_INVOKED */
53
+ #endif //_INC_VCCLR
miniMSVC/VC/Tools/MSVC/14.42.34433/include/vccorlib.h ADDED
The diff for this file is too large to render. See raw diff
 
miniMSVC/VC/Tools/MSVC/14.42.34433/include/vcruntime.h ADDED
@@ -0,0 +1,404 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //
2
+ // vcruntime.h
3
+ //
4
+ // Copyright (c) Microsoft Corporation. All rights reserved.
5
+ //
6
+ // Declarations used throughout the VCRuntime library.
7
+ //
8
+ #pragma once
9
+ //
10
+ // Note on use of "deprecate":
11
+ //
12
+ // Various places in this header and other headers use
13
+ // __declspec(deprecate) or macros that have the term DEPRECATE in them.
14
+ // We use "deprecate" here ONLY to signal the compiler to emit a warning
15
+ // about these items. The use of "deprecate" should NOT be taken to imply
16
+ // that any standard committee has deprecated these functions from the
17
+ // relevant standards. In fact, these functions are NOT deprecated from
18
+ // the standard.
19
+ //
20
+ // Full details can be found in our documentation by searching for
21
+ // "Security Enhancements in the CRT".
22
+ //
23
+ #ifndef _VCRUNTIME_H
24
+ #define _VCRUNTIME_H
25
+
26
+ #ifndef _VCRT_COMPILER_PREPROCESSOR
27
+ // Many VCRuntime headers avoid exposing their contents to non-compilers like
28
+ // the Windows resource compiler and Qt's meta-object compiler (moc).
29
+ #if defined(RC_INVOKED) || defined(Q_MOC_RUN)
30
+ #define _VCRT_COMPILER_PREPROCESSOR 0
31
+ #else
32
+ #define _VCRT_COMPILER_PREPROCESSOR 1
33
+ #endif
34
+ #endif // _VCRT_COMPILER_PREPROCESSOR
35
+
36
+ #ifndef _UCRT
37
+ #define _UCRT
38
+ #endif
39
+
40
+ // The _CRTIMP macro is not used in the VCRuntime or the CoreCRT anymore, but
41
+ // there is a lot of existing code that declares CRT functions using this macro,
42
+ // and if we remove its definition, we break that existing code. It is thus
43
+ // defined here only for compatibility.
44
+ #ifndef _CRTIMP
45
+ #define _VCRT_DEFINED_CRTIMP
46
+ #if defined CRTDLL && defined _CRTBLD
47
+ #define _CRTIMP __declspec(dllexport)
48
+ #else
49
+ #ifdef _DLL
50
+ #define _CRTIMP __declspec(dllimport)
51
+ #else
52
+ #define _CRTIMP
53
+ #endif
54
+ #endif
55
+ #endif
56
+
57
+ #include <sal.h>
58
+ #include <vadefs.h>
59
+
60
+ #pragma warning(push)
61
+ #pragma warning(disable: _VCRUNTIME_DISABLED_WARNINGS)
62
+
63
+ // All C headers have a common prologue and epilogue, to enclose the header in
64
+ // an extern "C" declaration when the header is #included in a C++ translation
65
+ // unit and to push/pop the packing.
66
+ #if defined __cplusplus
67
+
68
+ #define _CRT_BEGIN_C_HEADER \
69
+ __pragma(pack(push, _CRT_PACKING)) \
70
+ extern "C" {
71
+
72
+ #define _CRT_END_C_HEADER \
73
+ } \
74
+ __pragma(pack(pop))
75
+
76
+ #elif defined __midl
77
+
78
+ #define _CRT_BEGIN_C_HEADER \
79
+ cpp_quote("__pragma(pack(push, _CRT_PACKING))") \
80
+ cpp_quote("extern \"C\" {")
81
+
82
+ #define _CRT_END_C_HEADER \
83
+ cpp_quote("}") \
84
+ cpp_quote("__pragma(pack(pop))")
85
+
86
+ #else
87
+
88
+ #define _CRT_BEGIN_C_HEADER \
89
+ __pragma(pack(push, _CRT_PACKING))
90
+
91
+ #define _CRT_END_C_HEADER \
92
+ __pragma(pack(pop))
93
+
94
+ #endif
95
+
96
+ _CRT_BEGIN_C_HEADER
97
+
98
+
99
+
100
+ #ifndef _HAS_EXCEPTIONS // Predefine as 0 to disable exceptions
101
+ #ifdef _KERNEL_MODE
102
+ #define _HAS_EXCEPTIONS 0
103
+ #else
104
+ #define _HAS_EXCEPTIONS 1
105
+ #endif /* _KERNEL_MODE */
106
+ #endif /* _HAS_EXCEPTIONS */
107
+
108
+
109
+
110
+ #define _CRT_STRINGIZE_(x) #x
111
+ #define _CRT_STRINGIZE(x) _CRT_STRINGIZE_(x)
112
+
113
+ #define _CRT_WIDE_(s) L ## s
114
+ #define _CRT_WIDE(s) _CRT_WIDE_(s)
115
+
116
+ #define _CRT_CONCATENATE_(a, b) a ## b
117
+ #define _CRT_CONCATENATE(a, b) _CRT_CONCATENATE_(a, b)
118
+
119
+ #define _CRT_UNPARENTHESIZE_(...) __VA_ARGS__
120
+ #define _CRT_UNPARENTHESIZE(...) _CRT_UNPARENTHESIZE_ __VA_ARGS__
121
+
122
+ #ifndef _VCRTIMP
123
+ #if defined _CRTIMP && !defined _VCRT_DEFINED_CRTIMP
124
+ #define _VCRTIMP _CRTIMP
125
+ #elif defined _VCRT_BUILD && defined CRTDLL && !defined _VCRT_SAT_1
126
+ #define _VCRTIMP __declspec(dllexport)
127
+ #else
128
+ #define _VCRTIMP
129
+ #endif
130
+ #endif
131
+
132
+ #ifndef _MRTIMP
133
+ #if defined MRTDLL && defined _CRTBLD && !defined _M_CEE_PURE
134
+ #define _MRTIMP __declspec(dllexport)
135
+ #else
136
+ #define _MRTIMP
137
+ #endif
138
+ #endif
139
+
140
+ // Definitions of calling conventions used code sometimes compiled as managed
141
+ #if defined _M_CEE_PURE || defined MRTDLL
142
+ #define __CLRCALL_OR_CDECL __clrcall
143
+ #define __CLR_OR_THIS_CALL __clrcall
144
+ #else
145
+ #define __CLRCALL_OR_CDECL __cdecl
146
+ #define __CLR_OR_THIS_CALL
147
+ #endif
148
+
149
+ #ifdef _M_CEE_PURE
150
+ #define __CLRCALL_PURE_OR_CDECL __clrcall
151
+ #else
152
+ #define __CLRCALL_PURE_OR_CDECL __cdecl
153
+ #endif
154
+
155
+ #define __CRTDECL __CLRCALL_PURE_OR_CDECL
156
+
157
+ // Definitions of common __declspecs
158
+ #define _VCRT_NOALIAS __declspec(noalias)
159
+ #define _VCRT_RESTRICT __declspec(restrict)
160
+ #define _VCRT_ALLOCATOR __declspec(allocator)
161
+
162
+ #if defined _M_CEE && defined _M_X64
163
+ #define _VCRT_JIT_INTRINSIC __declspec(jitintrinsic)
164
+ #else
165
+ #define _VCRT_JIT_INTRINSIC
166
+ #endif
167
+
168
+ #ifdef __midl
169
+ #define _VCRT_ALIGN(x)
170
+ #else
171
+ #define _VCRT_ALIGN(x) __declspec(align(x))
172
+ #endif
173
+
174
+ #ifndef _CONST_RETURN
175
+ #ifdef __cplusplus
176
+ #define _CRT_CONST_CORRECT_OVERLOADS
177
+ #define _CONST_RETURN const
178
+ #else
179
+ #define _CONST_RETURN
180
+ #endif
181
+ #endif
182
+
183
+ // For backwards compatibility
184
+ #define _WConst_return _CONST_RETURN
185
+
186
+ // Definitions of common types
187
+ #ifdef _WIN64
188
+ typedef unsigned __int64 size_t;
189
+ typedef __int64 ptrdiff_t;
190
+ typedef __int64 intptr_t;
191
+ #else
192
+ typedef unsigned int size_t;
193
+ typedef int ptrdiff_t;
194
+ typedef int intptr_t;
195
+ #endif
196
+
197
+ #if defined __cplusplus
198
+ typedef bool __vcrt_bool;
199
+ #elif defined __midl
200
+ // MIDL understands neither bool nor _Bool. Use char as a best-fit
201
+ // replacement (the differences won't matter in practice).
202
+ typedef char __vcrt_bool;
203
+ #else
204
+ typedef _Bool __vcrt_bool;
205
+ #endif
206
+
207
+ // Indicate that these common types are defined
208
+ #ifndef _SIZE_T_DEFINED
209
+ #define _SIZE_T_DEFINED
210
+ #endif
211
+
212
+ #ifndef _PTRDIFF_T_DEFINED
213
+ #define _PTRDIFF_T_DEFINED
214
+ #endif
215
+
216
+ #ifndef _INTPTR_T_DEFINED
217
+ #define _INTPTR_T_DEFINED
218
+ #endif
219
+
220
+ // Provide a typedef for wchar_t for use under /Zc:wchar_t-
221
+ #ifndef _WCHAR_T_DEFINED
222
+ #define _WCHAR_T_DEFINED
223
+ typedef unsigned short wchar_t;
224
+ #endif
225
+
226
+ #ifndef NULL
227
+ #ifdef __cplusplus
228
+ #define NULL 0
229
+ #else
230
+ #define NULL ((void *)0)
231
+ #endif
232
+ #endif
233
+
234
+ #if defined _M_X64 || defined _M_ARM || defined _M_ARM64
235
+ #define _UNALIGNED __unaligned
236
+ #else
237
+ #define _UNALIGNED
238
+ #endif
239
+
240
+ #if defined _M_ARM64EC
241
+ #define __security_check_cookie __security_check_cookie_arm64ec
242
+ #endif
243
+
244
+ #ifdef __cplusplus
245
+ extern "C++"
246
+ {
247
+ template <typename _CountofType, size_t _SizeOfArray>
248
+ char (*__countof_helper(_UNALIGNED _CountofType (&_Array)[_SizeOfArray]))[_SizeOfArray];
249
+
250
+ #define __crt_countof(_Array) (sizeof(*__countof_helper(_Array)) + 0)
251
+ }
252
+ #else
253
+ #define __crt_countof(_Array) (sizeof(_Array) / sizeof(_Array[0]))
254
+ #endif
255
+
256
+ #if defined(_M_IX86) && defined(_CRT_LEGACY_X86_FLT_EXCEPTIONS) && !defined(_M_CEE_PURE)
257
+ #pragma comment(lib, "legacy_x86_flt_exceptions")
258
+ #endif
259
+
260
+ #ifdef __cplusplus
261
+ #if defined(_MSVC_LANG) && _MSVC_LANG > __cplusplus
262
+ #define _STL_LANG _MSVC_LANG
263
+ #else // ^^^ language mode is _MSVC_LANG / language mode is __cplusplus vvv
264
+ #define _STL_LANG __cplusplus
265
+ #endif // ^^^ language mode is larger of _MSVC_LANG and __cplusplus ^^^
266
+ #else // ^^^ determine compiler's C++ mode / no C++ support vvv
267
+ #define _STL_LANG 0L
268
+ #endif // ^^^ no C++ support ^^^
269
+
270
+ #ifndef _HAS_CXX17
271
+ #if _STL_LANG > 201402L
272
+ #define _HAS_CXX17 1
273
+ #else
274
+ #define _HAS_CXX17 0
275
+ #endif
276
+ #endif // _HAS_CXX17
277
+
278
+ #ifndef _HAS_CXX20
279
+ #if _HAS_CXX17 && _STL_LANG > 201703L
280
+ #define _HAS_CXX20 1
281
+ #else
282
+ #define _HAS_CXX20 0
283
+ #endif
284
+ #endif // _HAS_CXX20
285
+
286
+ #ifndef _HAS_CXX23
287
+ #if _HAS_CXX20 && _STL_LANG > 202002L
288
+ #define _HAS_CXX23 1
289
+ #else
290
+ #define _HAS_CXX23 0
291
+ #endif
292
+ #endif // _HAS_CXX23
293
+
294
+ #undef _STL_LANG
295
+
296
+ #if _HAS_CXX20 && !_HAS_CXX17
297
+ #error _HAS_CXX20 must imply _HAS_CXX17.
298
+ #endif
299
+
300
+ #if _HAS_CXX23 && !_HAS_CXX20
301
+ #error _HAS_CXX23 must imply _HAS_CXX20.
302
+ #endif
303
+
304
+ // [[nodiscard]] attributes on STL functions
305
+ #ifndef _HAS_NODISCARD
306
+ #ifndef __has_cpp_attribute
307
+ #define _HAS_NODISCARD 0
308
+ #elif __has_cpp_attribute(nodiscard) >= 201603L // TRANSITION, VSO#939899 (need toolset update)
309
+ #define _HAS_NODISCARD 1
310
+ #else
311
+ #define _HAS_NODISCARD 0
312
+ #endif
313
+ #endif // _HAS_NODISCARD
314
+
315
+ #if _HAS_NODISCARD
316
+ #define _NODISCARD [[nodiscard]]
317
+ #else // ^^^ CAN HAZ [[nodiscard]] / NO CAN HAZ [[nodiscard]] vvv
318
+ #define _NODISCARD
319
+ #endif // _HAS_NODISCARD
320
+
321
+ #pragma push_macro("msvc")
322
+ #pragma push_macro("constexpr")
323
+ #undef msvc
324
+ #undef constexpr
325
+
326
+ // Determine if we should use [[msvc::constexpr]] to allow for "extended constexpr"
327
+ // in Visual C++.
328
+ #ifndef _MSVC_CONSTEXPR
329
+ #ifdef _MSVC_CONSTEXPR_ATTRIBUTE
330
+ #define _MSVC_CONSTEXPR [[msvc::constexpr]]
331
+ #else
332
+ #define _MSVC_CONSTEXPR
333
+ #endif
334
+ #endif
335
+
336
+ #pragma pop_macro("constexpr")
337
+ #pragma pop_macro("msvc")
338
+
339
+ #ifdef _BUILD_STD_MODULE
340
+ #define _VCRT_EXPORT_STD export
341
+ #else // ^^^ defined(_BUILD_STD_MODULE) / !defined(_BUILD_STD_MODULE) vvv
342
+ #define _VCRT_EXPORT_STD
343
+ #endif // ^^^ !defined(_BUILD_STD_MODULE) ^^^
344
+
345
+ // See note on use of "deprecate" at the top of this file
346
+ #define _CRT_DEPRECATE_TEXT(_Text) __declspec(deprecated(_Text))
347
+
348
+ #if defined _CRT_SECURE_NO_DEPRECATE && !defined _CRT_SECURE_NO_WARNINGS
349
+ #define _CRT_SECURE_NO_WARNINGS
350
+ #endif
351
+
352
+ #ifndef _CRT_INSECURE_DEPRECATE
353
+ #ifdef _CRT_SECURE_NO_WARNINGS
354
+ #define _CRT_INSECURE_DEPRECATE(_Replacement)
355
+ #else
356
+ #define _CRT_INSECURE_DEPRECATE(_Replacement) _CRT_DEPRECATE_TEXT( \
357
+ "This function or variable may be unsafe. Consider using " \
358
+ #_Replacement \
359
+ " instead. To disable deprecation, use _CRT_SECURE_NO_WARNINGS. " \
360
+ "See online help for details.")
361
+ #endif
362
+ #endif
363
+
364
+ #if defined _CRT_SECURE_DEPRECATE_MEMORY && !defined _CRT_SECURE_WARNINGS_MEMORY
365
+ #define _CRT_SECURE_WARNINGS_MEMORY
366
+ #endif
367
+
368
+ #ifndef _CRT_INSECURE_DEPRECATE_MEMORY
369
+ #ifndef _CRT_SECURE_WARNINGS_MEMORY
370
+ #define _CRT_INSECURE_DEPRECATE_MEMORY(_Replacement)
371
+ #else
372
+ #define _CRT_INSECURE_DEPRECATE_MEMORY(_Replacement) \
373
+ _CRT_INSECURE_DEPRECATE(_Replacement)
374
+ #endif
375
+ #endif
376
+
377
+ #if !defined _M_CEE && !defined __midl
378
+ void __cdecl __security_init_cookie(void);
379
+
380
+ #if defined(_M_IX86)
381
+ void __fastcall __security_check_cookie(_In_ uintptr_t _StackCookie);
382
+ __declspec(noreturn) void __cdecl __report_gsfailure(void);
383
+ #elif defined(_M_ARM64EC)
384
+ void __cdecl __security_check_cookie_arm64ec(_In_ uintptr_t _StackCookie);
385
+ __declspec(noreturn) void __cdecl __report_gsfailure(_In_ uintptr_t _StackCookie);
386
+ #else
387
+ void __cdecl __security_check_cookie(_In_ uintptr_t _StackCookie);
388
+ __declspec(noreturn) void __cdecl __report_gsfailure(_In_ uintptr_t _StackCookie);
389
+ #endif
390
+ #endif
391
+
392
+ extern uintptr_t __security_cookie;
393
+
394
+ #ifndef _VCRT_BUILD
395
+ #define __vcrt_malloc_normal(_Size) malloc(_Size)
396
+ #define __vcrt_calloc_normal(_Count, _Size) calloc(_Count, _Size)
397
+ #define __vcrt_free_normal(_Memory) free(_Memory)
398
+ #endif
399
+
400
+ _CRT_END_C_HEADER
401
+
402
+ #pragma warning(pop) // _VCRUNTIME_DISABLED_WARNINGS
403
+
404
+ #endif // _VCRUNTIME_H
miniMSVC/VC/Tools/MSVC/14.42.34433/include/vcruntime_c11_atomic_support.h ADDED
@@ -0,0 +1,1127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation. All rights reserved.
2
+ //
3
+ // C11 atomic support routines
4
+ #pragma once
5
+
6
+ #ifdef __cplusplus
7
+ // this header should never be included in c++ mode, but if it is
8
+ // we need to catch it because the content of this header is provided by
9
+ // the STL's <atomic> header in C++
10
+ #error "vcruntime_c11_atomic_support.h is a C-only header"
11
+ #endif // __cplusplus
12
+
13
+ #include <crtdbg.h>
14
+ #include <intrin0.h>
15
+ #include <stdint.h>
16
+ #include <vcruntime_string.h>
17
+
18
+ // code from xatomic.h
19
+ #define _CONCATX(x, y) x##y
20
+ #define _CONCAT(x, y) _CONCATX(x, y)
21
+
22
+ // Interlocked intrinsic mapping for _nf/_acq/_rel
23
+ #if defined(_M_CEE_PURE) || (defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)) || (defined(_M_X64) && !defined(_M_ARM64EC))
24
+ #define _INTRIN_RELAXED(x) x
25
+ #define _INTRIN_ACQUIRE(x) x
26
+ #define _INTRIN_RELEASE(x) x
27
+ #define _INTRIN_ACQ_REL(x) x
28
+ #ifdef _M_CEE_PURE
29
+ #define _YIELD_PROCESSOR()
30
+ #else // ^^^ _M_CEE_PURE / !_M_CEE_PURE vvv
31
+ #define _YIELD_PROCESSOR() _mm_pause()
32
+ #endif // ^^^ !_M_CEE_PURE ^^^
33
+
34
+ #elif defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
35
+ #define _INTRIN_RELAXED(x) _CONCAT(x, _nf)
36
+ #define _INTRIN_ACQUIRE(x) _CONCAT(x, _acq)
37
+ #define _INTRIN_RELEASE(x) _CONCAT(x, _rel)
38
+ // We don't have interlocked intrinsics for acquire-release ordering, even on
39
+ // ARM32/ARM64, so fall back to sequentially consistent.
40
+ #define _INTRIN_ACQ_REL(x) x
41
+ #define _YIELD_PROCESSOR() __yield()
42
+
43
+ #else // ^^^ ARM32/ARM64/ARM64EC/HYBRID_X86_ARM64 / unsupported hardware vvv
44
+ #error Unsupported hardware
45
+ #endif // hardware
46
+ // end code from xatomic.h
47
+
48
+
49
+ // The following is modified from the _CRT_SECURE_INVALID_PARAMETER macro in
50
+ // corecrt.h. We need to do this because this header must be C, not C++, but we
51
+ // still want to report invalid parameters in the same way as C++ does. The
52
+ // macro in the CRT expands to C++ code because it contains global namespace
53
+ // qualification. This can be fixed in the ucrt by using a mechanism that
54
+ // defines something like _GLOBAL_NAMESPACE to :: in c++ mode and nothing in C
55
+ // mode.
56
+ #ifndef _ATOMIC_INVALID_PARAMETER
57
+ #ifdef _DEBUG
58
+ #define _ATOMIC_INVALID_PARAMETER(expr) _invalid_parameter(_CRT_WIDE(#expr), L"", __FILEW__, __LINE__, 0)
59
+ #else
60
+ // By default, _ATOMIC_INVALID_PARAMETER in retail invokes
61
+ // _invalid_parameter_noinfo_noreturn(), which is marked
62
+ // __declspec(noreturn) and does not return control to the application.
63
+ // Even if _set_invalid_parameter_handler() is used to set a new invalid
64
+ // parameter handler which does return control to the application,
65
+ // _invalid_parameter_noinfo_noreturn() will terminate the application
66
+ // and invoke Watson. You can overwrite the definition of
67
+ // _ATOMIC_INVALID_PARAMETER if you need.
68
+ #define _ATOMIC_INVALID_PARAMETER(expr) _invalid_parameter_noinfo_noreturn()
69
+ #endif
70
+ #endif
71
+
72
+ // The following code is SHARED between the STL's <atomic> header and vcruntime's
73
+ // vcruntime_c11_atomic_support.h header. Any updates should be mirrored.
74
+ // Also: if any macros are added they should be #undefed in both headers
75
+
76
+ #if defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
77
+ #define _STD_ATOMIC_USE_ARM64_LDAR_STLR 1
78
+ #ifdef __clang__
79
+ #define __LOAD_ACQUIRE_ARM64(_Width, _Ptr) \
80
+ (__int##_Width)(__atomic_load_n((const volatile unsigned __int##_Width*)(_Ptr), 2))
81
+ #define __STORE_RELEASE(_Width, _Ptr, _Desired) \
82
+ _Compiler_barrier(); \
83
+ __atomic_store_n((volatile unsigned __int##_Width*)(_Ptr), (unsigned __int##_Width)(_Desired), 3)
84
+ #else // ^^^ Clang / MSVC vvv
85
+ #define __LOAD_ACQUIRE_ARM64(_Width, _Ptr) \
86
+ (__int##_Width)(__load_acquire##_Width((const volatile unsigned __int##_Width*)(_Ptr)))
87
+ #define __STORE_RELEASE(_Width, _Ptr, _Desired) \
88
+ _Compiler_barrier(); \
89
+ __stlr##_Width( \
90
+ (volatile unsigned __int##_Width*)(_Ptr), (unsigned __int##_Width)(_Desired))
91
+ #endif // ^^^ MSVC ^^^
92
+ #else // ^^^ ARM64/ARM64EC/HYBRID_X86_ARM64 / Other architectures vvv
93
+ #define _STD_ATOMIC_USE_ARM64_LDAR_STLR 0
94
+ #define __STORE_RELEASE(_Width, _Ptr, _Desired) \
95
+ _Compiler_or_memory_barrier(); \
96
+ __iso_volatile_store##_Width((_Ptr), (_Desired))
97
+ #endif // ^^^ Other architectures ^^^
98
+
99
+ enum {
100
+ _Atomic_memory_order_relaxed,
101
+ _Atomic_memory_order_consume,
102
+ _Atomic_memory_order_acquire,
103
+ _Atomic_memory_order_release,
104
+ _Atomic_memory_order_acq_rel,
105
+ _Atomic_memory_order_seq_cst,
106
+ };
107
+
108
+ #ifndef _INVALID_MEMORY_ORDER
109
+ #ifdef _DEBUG
110
+ #define _INVALID_MEMORY_ORDER \
111
+ do { \
112
+ _RPTF0(_CRT_ASSERT, "Invalid memory order"); \
113
+ _ATOMIC_INVALID_PARAMETER("Invalid memory order"); \
114
+ } while (0)
115
+ #else // ^^^ _DEBUG / !_DEBUG vvv
116
+ #define _INVALID_MEMORY_ORDER
117
+ #endif // _DEBUG
118
+ #endif // _INVALID_MEMORY_ORDER
119
+
120
+ #if defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
121
+ #define _Memory_barrier() __dmb(0xB) // inner shared data memory barrier
122
+ #define _Compiler_or_memory_barrier() _Memory_barrier()
123
+ #if defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
124
+ #define _Memory_load_acquire_barrier() __dmb(0x9) // inner shared data memory load barrier
125
+ #else // ^^^ ARM64/ARM64EC/HYBRID_X86_ARM64 / ARM32 vvv
126
+ #define _Memory_load_acquire_barrier() _Memory_barrier()
127
+ #endif // ^^^ ARM32 ^^^
128
+ #elif defined(_M_IX86) || defined(_M_X64)
129
+ // x86/x64 hardware only emits memory barriers inside _Interlocked intrinsics
130
+ #define _Compiler_or_memory_barrier() _Compiler_barrier()
131
+ #else // ^^^ x86/x64 / unsupported hardware vvv
132
+ #error Unsupported hardware
133
+ #endif // hardware
134
+
135
+ inline void _Check_memory_order(const unsigned int _Order) {
136
+ if (_Order > _Atomic_memory_order_seq_cst) {
137
+ _INVALID_MEMORY_ORDER;
138
+ }
139
+ }
140
+
141
+ // this is different from the STL
142
+ // we are the MSVC runtime so we need not support clang here
143
+ #define _Compiler_barrier() \
144
+ _Pragma("warning(push)") _Pragma("warning(disable : 4996)") /* was declared deprecated */ \
145
+ _ReadWriteBarrier() _Pragma("warning(pop)")
146
+
147
+ // note: these macros are _not_ always safe to use with a trailing semicolon,
148
+ // we avoid wrapping them in do {} while (0) because MSVC generates code for such loops
149
+ // in debug mode.
150
+
151
+ #if (defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)) || (defined(_M_X64) && !defined(_M_ARM64EC))
152
+ #define _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _Intrinsic, ...) \
153
+ _Check_memory_order(_Order); \
154
+ _Result = _Intrinsic(__VA_ARGS__)
155
+ #elif defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
156
+ #define _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _Intrinsic, ...) \
157
+ switch (_Order) { \
158
+ case _Atomic_memory_order_relaxed: \
159
+ _Result = _INTRIN_RELAXED(_Intrinsic)(__VA_ARGS__); \
160
+ break; \
161
+ case _Atomic_memory_order_consume: \
162
+ case _Atomic_memory_order_acquire: \
163
+ _Result = _INTRIN_ACQUIRE(_Intrinsic)(__VA_ARGS__); \
164
+ break; \
165
+ case _Atomic_memory_order_release: \
166
+ _Result = _INTRIN_RELEASE(_Intrinsic)(__VA_ARGS__); \
167
+ break; \
168
+ default: \
169
+ _INVALID_MEMORY_ORDER; \
170
+ /* [[fallthrough]]; */ \
171
+ case _Atomic_memory_order_acq_rel: \
172
+ case _Atomic_memory_order_seq_cst: \
173
+ _Result = _Intrinsic(__VA_ARGS__); \
174
+ break; \
175
+ }
176
+ #endif // hardware
177
+
178
+ #if _STD_ATOMIC_USE_ARM64_LDAR_STLR == 1
179
+
180
+ #define _ATOMIC_LOAD_ARM64(_Result, _Width, _Ptr, _Order_var) \
181
+ switch (_Order_var) { \
182
+ case _Atomic_memory_order_relaxed: \
183
+ _Result = __iso_volatile_load##_Width(_Ptr); \
184
+ break; \
185
+ case _Atomic_memory_order_consume: \
186
+ case _Atomic_memory_order_acquire: \
187
+ case _Atomic_memory_order_seq_cst: \
188
+ _Result = __LOAD_ACQUIRE_ARM64(_Width, _Ptr); \
189
+ _Compiler_barrier(); \
190
+ break; \
191
+ case _Atomic_memory_order_release: \
192
+ case _Atomic_memory_order_acq_rel: \
193
+ default: \
194
+ _Result = __iso_volatile_load##_Width(_Ptr); \
195
+ _INVALID_MEMORY_ORDER; \
196
+ break; \
197
+ }
198
+
199
+ #endif // _STD_ATOMIC_USE_ARM64_LDAR_STLR == 1
200
+
201
+ #define _ATOMIC_POST_LOAD_BARRIER_AS_NEEDED(_Order_var) \
202
+ switch (_Order_var) { \
203
+ case _Atomic_memory_order_relaxed: \
204
+ break; \
205
+ case _Atomic_memory_order_consume: \
206
+ case _Atomic_memory_order_acquire: \
207
+ case _Atomic_memory_order_seq_cst: \
208
+ _Compiler_or_memory_barrier(); \
209
+ break; \
210
+ case _Atomic_memory_order_release: \
211
+ case _Atomic_memory_order_acq_rel: \
212
+ default: \
213
+ _INVALID_MEMORY_ORDER; \
214
+ break; \
215
+ }
216
+
217
+ #define _ATOMIC_STORE_PREFIX(_Width, _Ptr, _Desired) \
218
+ case _Atomic_memory_order_relaxed: \
219
+ __iso_volatile_store##_Width((_Ptr), (_Desired)); \
220
+ return; \
221
+ case _Atomic_memory_order_release: \
222
+ __STORE_RELEASE(_Width, _Ptr, _Desired); \
223
+ return; \
224
+ default: \
225
+ case _Atomic_memory_order_consume: \
226
+ case _Atomic_memory_order_acquire: \
227
+ case _Atomic_memory_order_acq_rel: \
228
+ _INVALID_MEMORY_ORDER; \
229
+ /* [[fallthrough]]; */
230
+
231
+ #define _ATOMIC_STORE_SEQ_CST_ARM(_Width, _Ptr, _Desired) \
232
+ _Memory_barrier(); \
233
+ __iso_volatile_store##_Width((_Ptr), (_Desired)); \
234
+ _Memory_barrier();
235
+
236
+ #define _ATOMIC_STORE_SEQ_CST_ARM64(_Width, _Ptr, _Desired) \
237
+ __STORE_RELEASE(_Width, _Ptr, _Desired); \
238
+ _Memory_barrier();
239
+
240
+ #define _ATOMIC_STORE_SEQ_CST_X86_X64(_Width, _Ptr, _Desired) (void) _InterlockedExchange##_Width((_Ptr), (_Desired));
241
+ #define _ATOMIC_STORE_32_SEQ_CST_X86_X64(_Ptr, _Desired) \
242
+ (void) _InterlockedExchange((volatile long*)(_Ptr), (long)(_Desired));
243
+
244
+ #define _ATOMIC_STORE_64_SEQ_CST_IX86(_Ptr, _Desired) \
245
+ _Compiler_barrier(); \
246
+ __iso_volatile_store64((_Ptr), (_Desired)); \
247
+ _Atomic_thread_fence(_Atomic_memory_order_seq_cst);
248
+
249
+ #if defined(_M_ARM)
250
+ #define _ATOMIC_STORE_SEQ_CST(_Width, _Ptr, _Desired) _ATOMIC_STORE_SEQ_CST_ARM(_Width, (_Ptr), (_Desired))
251
+ #define _ATOMIC_STORE_32_SEQ_CST(_Ptr, _Desired) _ATOMIC_STORE_SEQ_CST_ARM(32, (_Ptr), (_Desired))
252
+ #define _ATOMIC_STORE_64_SEQ_CST(_Ptr, _Desired) _ATOMIC_STORE_SEQ_CST_ARM(64, (_Ptr), (_Desired))
253
+ #elif defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64) // ^^^ ARM32 / ARM64/ARM64EC/HYBRID_X86_ARM64 vvv
254
+ #define _ATOMIC_STORE_SEQ_CST(_Width, _Ptr, _Desired) _ATOMIC_STORE_SEQ_CST_ARM64(_Width, (_Ptr), (_Desired))
255
+ #define _ATOMIC_STORE_32_SEQ_CST(_Ptr, _Desired) _ATOMIC_STORE_SEQ_CST_ARM64(32, (_Ptr), (_Desired))
256
+ #define _ATOMIC_STORE_64_SEQ_CST(_Ptr, _Desired) _ATOMIC_STORE_SEQ_CST_ARM64(64, (_Ptr), (_Desired))
257
+ #elif defined(_M_IX86) || defined(_M_X64) // ^^^ ARM64/ARM64EC/HYBRID_X86_ARM64 / x86/x64 vvv
258
+ #define _ATOMIC_STORE_SEQ_CST(_Width, _Ptr, _Desired) _ATOMIC_STORE_SEQ_CST_X86_X64(_Width, (_Ptr), (_Desired))
259
+ #define _ATOMIC_STORE_32_SEQ_CST(_Ptr, _Desired) _ATOMIC_STORE_32_SEQ_CST_X86_X64((_Ptr), (_Desired))
260
+ #ifdef _M_IX86
261
+ #define _ATOMIC_STORE_64_SEQ_CST(_Ptr, _Desired) _ATOMIC_STORE_64_SEQ_CST_IX86((_Ptr), (_Desired))
262
+ #else // ^^^ x86 / x64 vvv
263
+ #define _ATOMIC_STORE_64_SEQ_CST(_Ptr, _Desired) _ATOMIC_STORE_SEQ_CST_X86_X64(64, (_Ptr), (_Desired))
264
+ #endif // ^^^ x64 ^^^
265
+ #else // ^^^ x86/x64 / Unsupported hardware vvv
266
+ #error "Unsupported hardware"
267
+ #endif
268
+
269
+ #pragma warning(push)
270
+ #pragma warning(disable : 6001) // "Using uninitialized memory '_Guard'"
271
+ #pragma warning(disable : 28113) // "Accessing a local variable _Guard via an Interlocked function: This is an unusual
272
+ // usage which could be reconsidered."
273
+ inline void _Atomic_thread_fence(const unsigned int _Order) {
274
+ if (_Order == _Atomic_memory_order_relaxed) {
275
+ return;
276
+ }
277
+
278
+ #if (defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)) || (defined(_M_X64) && !defined(_M_ARM64EC))
279
+ _Compiler_barrier();
280
+ if (_Order == _Atomic_memory_order_seq_cst) {
281
+ volatile long _Guard; // Not initialized to avoid an unnecessary operation; the value does not matter
282
+
283
+ // _mm_mfence could have been used, but it is not supported on older x86 CPUs and is slower on some recent CPUs.
284
+ // The memory fence provided by interlocked operations has some exceptions, but this is fine:
285
+ // std::atomic_thread_fence works with respect to other atomics only; it may not be a full fence for all ops.
286
+ (void) _InterlockedIncrement(&_Guard);
287
+ _Compiler_barrier();
288
+ }
289
+ #elif defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
290
+ if (_Order == _Atomic_memory_order_acquire || _Order == _Atomic_memory_order_consume) {
291
+ _Memory_load_acquire_barrier();
292
+ } else {
293
+ _Memory_barrier();
294
+ }
295
+ #else // ^^^ ARM32/ARM64/ARM64EC/HYBRID_X86_ARM64 / unsupported hardware vvv
296
+ #error Unsupported hardware
297
+ #endif // unsupported hardware
298
+ }
299
+ #pragma warning(pop)
300
+
301
+ // End of code shared with STL <atomic>
302
+
303
+ inline void _Atomic_lock_acquire(volatile long* _Spinlock) {
304
+ #if (defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)) || (defined(_M_X64) && !defined(_M_ARM64EC))
305
+ // Algorithm from Intel(R) 64 and IA-32 Architectures Optimization Reference Manual, May 2020
306
+ // Example 2-4. Contended Locks with Increasing Back-off Example - Improved Version, page 2-22
307
+ // The code in mentioned manual is covered by the 0BSD license.
308
+ int _Current_backoff = 1;
309
+ const int _Max_backoff = 64;
310
+ while (_InterlockedExchange(_Spinlock, 1) != 0) {
311
+ while (__iso_volatile_load32((int*) _Spinlock) != 0) {
312
+ for (int _Count_down = _Current_backoff; _Count_down != 0; --_Count_down) {
313
+ _mm_pause();
314
+ }
315
+ _Current_backoff = _Current_backoff < _Max_backoff ? _Current_backoff << 1 : _Max_backoff;
316
+ }
317
+ }
318
+ #elif defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
319
+ while (_InterlockedExchange_acq(_Spinlock, 1) != 0) {
320
+ while (__iso_volatile_load32((int*) _Spinlock) != 0) {
321
+ __yield();
322
+ }
323
+ }
324
+ #else // ^^^ defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64) ^^^
325
+ #error Unsupported hardware
326
+ #endif
327
+ }
328
+
329
+ inline void _Atomic_lock_release(volatile long* _Spinlock) {
330
+ __STORE_RELEASE(32, (int*) _Spinlock, 0);
331
+ }
332
+
333
+ // End of code shared with vcruntime
334
+
335
+ inline void _Atomic_signal_fence(int _Order) {
336
+ if (_Order != _Atomic_memory_order_relaxed) {
337
+ _Compiler_barrier();
338
+ }
339
+ }
340
+
341
+ inline _Bool _Atomic_is_lock_free(size_t _Sz) {
342
+ return _Sz <= 8 && (_Sz & _Sz - 1) == 0;
343
+ }
344
+
345
+ inline void _Atomic_store8(volatile char* _Ptr, char _Desired, int _Order) {
346
+ switch (_Order) {
347
+ _ATOMIC_STORE_PREFIX(8, _Ptr, _Desired)
348
+ case _Atomic_memory_order_seq_cst:
349
+ _ATOMIC_STORE_SEQ_CST(8, _Ptr, _Desired)
350
+ return;
351
+ }
352
+ }
353
+
354
+ inline void _Atomic_store16(volatile short* _Ptr, short _Desired, int _Order) {
355
+ switch (_Order) {
356
+ _ATOMIC_STORE_PREFIX(16, _Ptr, _Desired)
357
+ case _Atomic_memory_order_seq_cst:
358
+ _ATOMIC_STORE_SEQ_CST(16, _Ptr, _Desired)
359
+ return;
360
+ }
361
+ }
362
+
363
+ inline void _Atomic_store32(volatile int* _Ptr, int _Desired, int _Order) {
364
+ switch (_Order) {
365
+ _ATOMIC_STORE_PREFIX(32, _Ptr, _Desired)
366
+ case _Atomic_memory_order_seq_cst:
367
+ _ATOMIC_STORE_32_SEQ_CST(_Ptr, _Desired)
368
+ return;
369
+ }
370
+ }
371
+
372
+ inline void _Atomic_store64(volatile long long* _Ptr, long long _Desired, int _Order) {
373
+ switch (_Order) {
374
+ _ATOMIC_STORE_PREFIX(64, _Ptr, _Desired)
375
+ case _Atomic_memory_order_seq_cst:
376
+ _ATOMIC_STORE_64_SEQ_CST(_Ptr, _Desired)
377
+ return;
378
+ }
379
+ }
380
+
381
+ inline void _Atomic_storef(volatile float* _Ptr, float _Desired, int _Order) {
382
+ _Atomic_store32((volatile int*)_Ptr, *(int*)&_Desired, _Order);
383
+ }
384
+
385
+ inline void _Atomic_stored(volatile double* _Ptr, double _Desired, int _Order) {
386
+ _Atomic_store64((volatile long long*)_Ptr, *(long long*)&_Desired, _Order);
387
+ }
388
+
389
+ inline char _Atomic_load8(const volatile char* _Ptr, int _Order) {
390
+ char _As_bytes;
391
+ #if _STD_ATOMIC_USE_ARM64_LDAR_STLR == 1
392
+ _ATOMIC_LOAD_ARM64(_As_bytes, 8, _Ptr, _Order)
393
+ #else
394
+ _As_bytes = __iso_volatile_load8(_Ptr);
395
+ _ATOMIC_POST_LOAD_BARRIER_AS_NEEDED(_Order)
396
+ #endif
397
+ return _As_bytes;
398
+ }
399
+ inline short _Atomic_load16(const volatile short* _Ptr, int _Order) {
400
+ short _As_bytes;
401
+ #if _STD_ATOMIC_USE_ARM64_LDAR_STLR == 1
402
+ _ATOMIC_LOAD_ARM64(_As_bytes, 16, _Ptr, _Order)
403
+ #else
404
+ _As_bytes = __iso_volatile_load16(_Ptr);
405
+ _ATOMIC_POST_LOAD_BARRIER_AS_NEEDED(_Order)
406
+ #endif
407
+ return _As_bytes;
408
+ }
409
+ inline int _Atomic_load32(const volatile int* _Ptr, int _Order) {
410
+ int _As_bytes;
411
+ #if _STD_ATOMIC_USE_ARM64_LDAR_STLR == 1
412
+ _ATOMIC_LOAD_ARM64(_As_bytes, 32, _Ptr, _Order)
413
+ #else
414
+ _As_bytes = __iso_volatile_load32(_Ptr);
415
+ _ATOMIC_POST_LOAD_BARRIER_AS_NEEDED(_Order)
416
+ #endif
417
+ return _As_bytes;
418
+ }
419
+ inline long long _Atomic_load64(const volatile long long* _Ptr, int _Order) {
420
+ long long _As_bytes;
421
+ #if _STD_ATOMIC_USE_ARM64_LDAR_STLR == 1
422
+ _ATOMIC_LOAD_ARM64(_As_bytes, 64, _Ptr, _Order)
423
+ #else // ^^^ _STD_ATOMIC_USE_ARM64_LDAR_STLR == 1 / _STD_ATOMIC_USE_ARM64_LDAR_STLR != 1 vvv
424
+
425
+ #ifdef _M_ARM
426
+ _As_bytes = __ldrexd(_Ptr);
427
+ #else
428
+ _As_bytes = __iso_volatile_load64(_Ptr);
429
+ #endif
430
+ _ATOMIC_POST_LOAD_BARRIER_AS_NEEDED(_Order);
431
+ #endif // _STD_ATOMIC_USE_ARM64_LDAR_STLR == 1
432
+ return _As_bytes;
433
+ }
434
+ inline float _Atomic_loadf(const volatile float* _Ptr, int _Order) {
435
+ int _As_bytes = _Atomic_load32((const volatile int*)_Ptr, _Order);
436
+ return *(float*)&_As_bytes;
437
+ }
438
+ inline double _Atomic_loadd(const volatile double* _Ptr, int _Order) {
439
+ long long _As_bytes = _Atomic_load64((const volatile long long*)_Ptr, _Order);
440
+ return *(double*)&_As_bytes;
441
+ }
442
+
443
+ inline _Bool _Atomic_compare_exchange_strong8(volatile char* _Ptr, char* _Expected, char _Desired, int _Order) {
444
+ char _Prev_bytes;
445
+ char _Expected_bytes = *_Expected;
446
+ _ATOMIC_CHOOSE_INTRINSIC(_Order, _Prev_bytes, _InterlockedCompareExchange8, _Ptr, _Desired, _Expected_bytes);
447
+ if (_Prev_bytes == _Expected_bytes) {
448
+ return 1;
449
+ }
450
+ *_Expected = _Prev_bytes;
451
+ return 0;
452
+ }
453
+ inline _Bool _Atomic_compare_exchange_strong16(volatile short* _Ptr, short* _Expected, short _Desired, int _Order) {
454
+ short _Prev_bytes;
455
+ short _Expected_bytes = *_Expected;
456
+ _ATOMIC_CHOOSE_INTRINSIC(_Order, _Prev_bytes, _InterlockedCompareExchange16, _Ptr, _Desired, _Expected_bytes);
457
+ if (_Prev_bytes == _Expected_bytes) {
458
+ return 1;
459
+ }
460
+ *_Expected = _Prev_bytes;
461
+ return 0;
462
+ }
463
+ inline _Bool _Atomic_compare_exchange_strong32(volatile int* _Ptr, int* _Expected, int _Desired, int _Order) {
464
+ int _Prev_bytes;
465
+ int _Expected_bytes = *_Expected;
466
+ _ATOMIC_CHOOSE_INTRINSIC(
467
+ _Order, _Prev_bytes, _InterlockedCompareExchange, (volatile long*) _Ptr, _Desired, _Expected_bytes);
468
+ if (_Prev_bytes == _Expected_bytes) {
469
+ return 1;
470
+ }
471
+ *_Expected = _Prev_bytes;
472
+ return 0;
473
+ }
474
+ inline _Bool _Atomic_compare_exchange_strong64(
475
+ volatile long long* _Ptr, long long* _Expected, long long _Desired, int _Order) {
476
+ long long _Prev_bytes;
477
+ long long _Expected_bytes = *_Expected;
478
+ _ATOMIC_CHOOSE_INTRINSIC(_Order, _Prev_bytes, _InterlockedCompareExchange64, _Ptr, _Desired, _Expected_bytes);
479
+ if (_Prev_bytes == _Expected_bytes) {
480
+ return 1;
481
+ }
482
+ *_Expected = _Prev_bytes;
483
+ return 0;
484
+ }
485
+ inline _Bool _Atomic_compare_exchange_strongf(volatile float* _Ptr, float* _Expected, float _Desired, int _Order) {
486
+ return _Atomic_compare_exchange_strong32((volatile int*)_Ptr, (int*)_Expected, *(int*)&_Desired, _Order);
487
+ }
488
+ inline _Bool _Atomic_compare_exchange_strongd(volatile double* _Ptr, double* _Expected, double _Desired, int _Order) {
489
+ return _Atomic_compare_exchange_strong64((volatile long long*)_Ptr, (long long*)_Expected, *(long long*)&_Desired, _Order);
490
+ }
491
+
492
+ inline char _Atomic_exchange8(volatile char* _Ptr, int _Desired, int _Order) {
493
+ char _As_bytes;
494
+ _ATOMIC_CHOOSE_INTRINSIC(_Order, _As_bytes, _InterlockedExchange8, _Ptr, (char) _Desired);
495
+ return _As_bytes;
496
+ }
497
+ inline short _Atomic_exchange16(volatile short* _Ptr, int _Desired, int _Order) {
498
+ short _As_bytes;
499
+ _ATOMIC_CHOOSE_INTRINSIC(_Order, _As_bytes, _InterlockedExchange16, _Ptr, (short) _Desired);
500
+ return _As_bytes;
501
+ }
502
+ inline int _Atomic_exchange32(volatile int* _Ptr, int _Desired, int _Order) {
503
+ long _As_bytes;
504
+ _ATOMIC_CHOOSE_INTRINSIC(_Order, _As_bytes, _InterlockedExchange, (volatile long*) _Ptr, (long) _Desired);
505
+ return (int) _As_bytes;
506
+ }
507
+ inline long long _Atomic_exchange64(volatile long long* _Ptr, long long _Desired, int _Order) {
508
+ #if defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
509
+ long long _As_bytes = _Atomic_load64(_Ptr, _Atomic_memory_order_seq_cst);
510
+ while (!_Atomic_compare_exchange_strong64(_Ptr, &_As_bytes, _Desired, _Order)) {
511
+ }
512
+ return _As_bytes;
513
+ #else // ^^^ defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64) / !defined(_M_IX86) || defined(_M_HYBRID_X86_ARM64) vvv
514
+ long long _As_bytes;
515
+ _ATOMIC_CHOOSE_INTRINSIC(_Order, _As_bytes, _InterlockedExchange64, _Ptr, _Desired);
516
+ return _As_bytes;
517
+ #endif // defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
518
+ }
519
+ inline float _Atomic_exchangef(volatile float* _Ptr, float _Desired, int _Order) {
520
+ long _As_bytes = _Atomic_exchange32((volatile int*)_Ptr, *(int*)&_Desired, _Order);
521
+ return *(float*) &_As_bytes;
522
+ }
523
+ inline double _Atomic_exchanged(volatile double* _Ptr, double _Desired, int _Order) {
524
+ long long _As_bytes = _Atomic_exchange64((volatile long long*)_Ptr, *(long long*)&_Desired, _Order);
525
+ return *(double*)&_As_bytes;
526
+ }
527
+
528
+ inline char _Atomic_fetch_add8(volatile char* _Ptr, int _Val, int _Order) {
529
+ char _Result;
530
+ _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd8, _Ptr, (char) _Val);
531
+ return _Result;
532
+ }
533
+ inline short _Atomic_fetch_add16(volatile short* _Ptr, int _Val, int _Order) {
534
+ short _Result;
535
+ _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd16, _Ptr, (short) _Val);
536
+ return _Result;
537
+ }
538
+ inline int _Atomic_fetch_add32(volatile int* _Ptr, int _Val, int _Order) {
539
+ int _Result;
540
+ _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd, (volatile long*) _Ptr, _Val);
541
+ return _Result;
542
+ }
543
+ inline long long _Atomic_fetch_add64(volatile long long* _Ptr, long long _Val, int _Order) {
544
+ #if defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
545
+ long long _Result = _Atomic_load64(_Ptr, _Atomic_memory_order_seq_cst);
546
+ while (!_Atomic_compare_exchange_strong64(_Ptr, &_Result, _Result + _Val, _Order)) {
547
+ }
548
+ return _Result;
549
+ #else // ^^^ defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64) / !defined(_M_IX86) || defined(_M_HYBRID_X86_ARM64) vvvv
550
+ long long _Result;
551
+ _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd64, _Ptr, _Val);
552
+ return _Result;
553
+ #endif // defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
554
+ }
555
+ inline float _Atomic_fetch_addf(volatile float* _Ptr, float _Val, int _Order) {
556
+ float _Result = _Atomic_loadf(_Ptr, _Atomic_memory_order_relaxed);
557
+ while (!_Atomic_compare_exchange_strongf(_Ptr, &_Result, _Result + _Val, _Order)) {
558
+ }
559
+ return _Result;
560
+ }
561
+ inline double _Atomic_fetch_addd(volatile double* _Ptr, double _Val, int _Order) {
562
+ double _Result = _Atomic_loadd(_Ptr, _Atomic_memory_order_relaxed);
563
+ while (!_Atomic_compare_exchange_strongd(_Ptr, &_Result, _Result + _Val, _Order)) {
564
+ }
565
+ return _Result;
566
+ }
567
+
568
+ inline char _Atomic_add_fetch8(volatile char* _Ptr, int _Val, int _Order) {
569
+ char _Result;
570
+ _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd8, _Ptr, (char) _Val);
571
+ return (char) (_Result + (char) _Val);
572
+ }
573
+ inline short _Atomic_add_fetch16(volatile short* _Ptr, int _Val, int _Order) {
574
+ short _Result;
575
+ _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd16, _Ptr, (short) _Val);
576
+ return (short) (_Result + (short) _Val);
577
+ }
578
+ inline int _Atomic_add_fetch32(volatile int* _Ptr, int _Val, int _Order) {
579
+ int _Result;
580
+ _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd, (volatile long*) _Ptr, _Val);
581
+ return _Result + _Val;
582
+ }
583
+ inline long long _Atomic_add_fetch64(volatile long long* _Ptr, long long _Val, int _Order) {
584
+ #if defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
585
+ long long _Result = _Atomic_load64(_Ptr, _Atomic_memory_order_seq_cst);
586
+ while (!_Atomic_compare_exchange_strong64(_Ptr, &_Result, _Result + _Val, _Order)) {
587
+ }
588
+ return _Result + _Val;
589
+ #else // ^^^ defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64) / !defined(_M_IX86) || defined(_M_HYBRID_X86_ARM64) vvvv
590
+ long long _Result;
591
+ _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd64, _Ptr, _Val);
592
+ return _Result + _Val;
593
+ #endif // defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
594
+ }
595
+ inline float _Atomic_add_fetchf(volatile float* _Ptr, float _Val, int _Order) {
596
+ float _Result = _Atomic_loadf(_Ptr, _Atomic_memory_order_seq_cst);
597
+ while (!_Atomic_compare_exchange_strongf(_Ptr, &_Result, _Result + _Val, _Order)) {
598
+ }
599
+ return _Result + _Val;
600
+ }
601
+ inline double _Atomic_add_fetchd(volatile double* _Ptr, double _Val, int _Order) {
602
+ double _Result = _Atomic_loadd(_Ptr, _Atomic_memory_order_seq_cst);
603
+ while (!_Atomic_compare_exchange_strongd(_Ptr, &_Result, _Result + _Val, _Order)) {
604
+ }
605
+ return _Result + _Val;
606
+ }
607
+
608
+ inline char _Atomic_fetch_sub8(volatile char* _Ptr, int _Val, int _Order) {
609
+ char _Result;
610
+ _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd8, _Ptr, -(char) _Val);
611
+ return _Result;
612
+ }
613
+ inline short _Atomic_fetch_sub16(volatile short* _Ptr, int _Val, int _Order) {
614
+ short _Result;
615
+ _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd16, _Ptr, -(short) _Val);
616
+ return _Result;
617
+ }
618
+ inline int _Atomic_fetch_sub32(volatile int* _Ptr, int _Val, int _Order) {
619
+ int _Result;
620
+ _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd, (volatile long*) _Ptr, -_Val);
621
+ return _Result;
622
+ }
623
+ inline long long _Atomic_fetch_sub64(volatile long long* _Ptr, long long _Val, int _Order) {
624
+ #if defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
625
+ long long _Result = _Atomic_load64(_Ptr, _Atomic_memory_order_seq_cst);
626
+ while (!_Atomic_compare_exchange_strong64(_Ptr, &_Result, _Result - _Val, _Order)) {
627
+ }
628
+ return _Result;
629
+ #else // ^^^ defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64) / !defined(_M_IX86) || defined(_M_HYBRID_X86_ARM64) vvvv
630
+ long long _Result;
631
+ _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd64, _Ptr, -_Val);
632
+ return _Result;
633
+ #endif // defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
634
+ }
635
+ inline float _Atomic_fetch_subf(volatile float* _Ptr, float _Val, int _Order) {
636
+ float _Result = _Atomic_loadf(_Ptr, _Atomic_memory_order_relaxed);
637
+ while (!_Atomic_compare_exchange_strongf(_Ptr, &_Result, _Result - _Val, _Order)) {
638
+ }
639
+ return _Result;
640
+ }
641
+ inline double _Atomic_fetch_subd(volatile double* _Ptr, double _Val, int _Order) {
642
+ double _Result = _Atomic_loadd(_Ptr, _Atomic_memory_order_relaxed);
643
+ while (!_Atomic_compare_exchange_strongd(_Ptr, &_Result, _Result - _Val, _Order)) {
644
+ }
645
+ return _Result;
646
+ }
647
+
648
// _Atomic_sub_fetch*: atomically subtract _Val from *_Ptr and return the NEW
// (post-subtraction) value. Note these are GCC-style "sub_fetch", not C11
// "fetch_sub": the caller receives old - _Val, not the old value.
inline char _Atomic_sub_fetch8(volatile char* _Ptr, int _Val, int _Order) {
    char _Result;
    // _ATOMIC_CHOOSE_INTRINSIC selects the intrinsic variant matching _Order;
    // subtraction is expressed as an interlocked add of the negated value.
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd8, _Ptr, -(char) _Val);
    return (char) (_Result - (char) _Val); // intrinsic yields the OLD value; compute the new one
}
inline short _Atomic_sub_fetch16(volatile short* _Ptr, int _Val, int _Order) {
    short _Result;
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd16, _Ptr, -(short) _Val);
    return (short) (_Result - (short) _Val);
}
inline int _Atomic_sub_fetch32(volatile int* _Ptr, int _Val, int _Order) {
    int _Result;
    // _InterlockedExchangeAdd operates on long; int and long have the same size on MSVC targets.
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd, (volatile long*) _Ptr, -_Val);
    return _Result - _Val;
}
inline long long _Atomic_sub_fetch64(volatile long long* _Ptr, long long _Val, int _Order) {
#if defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
    // 32-bit x86 has no 64-bit interlocked add; emulate with a CAS loop. The
    // seq_cst load only supplies the initial guess; on failure the CAS
    // refreshes _Result with the current value, and _Order governs the CAS.
    long long _Result = _Atomic_load64(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong64(_Ptr, &_Result, _Result - _Val, _Order)) {
    }
    return _Result - _Val;
#else // ^^^ defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64) / !defined(_M_IX86) || defined(_M_HYBRID_X86_ARM64) vvvv
    long long _Result;
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd64, _Ptr, -_Val);
    return _Result - _Val;
#endif // defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
}
// Floating-point subtraction has no interlocked intrinsic on any target, so
// float/double always use a CAS loop on the object representation.
inline float _Atomic_sub_fetchf(volatile float* _Ptr, float _Val, int _Order) {
    float _Result = _Atomic_loadf(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strongf(_Ptr, &_Result, _Result - _Val, _Order)) {
    }
    return _Result - _Val;
}
inline double _Atomic_sub_fetchd(volatile double* _Ptr, double _Val, int _Order) {
    double _Result = _Atomic_loadd(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strongd(_Ptr, &_Result, _Result - _Val, _Order)) {
    }
    return _Result - _Val;
}
687
+
688
// _Atomic_fetch_and*: atomically AND _Val into *_Ptr and return the OLD value
// (C11 fetch_and semantics).
inline char _Atomic_fetch_and8(volatile char* _Ptr, int _Val, int _Order) {
    char _Result;
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedAnd8, _Ptr, (char) _Val);
    return _Result;
}
inline short _Atomic_fetch_and16(volatile short* _Ptr, int _Val, int _Order) {
    short _Result;
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedAnd16, _Ptr, (short) _Val);
    return _Result;
}
inline int _Atomic_fetch_and32(volatile int* _Ptr, int _Val, int _Order) {
    int _Result;
    // _InterlockedAnd operates on long; int and long have the same size on MSVC targets.
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedAnd, (volatile long*) _Ptr, _Val);
    return _Result;
}
inline long long _Atomic_fetch_and64(volatile long long* _Ptr, long long _Val, int _Order) {
#if defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
    // 32-bit x86 has no 64-bit interlocked AND; emulate with a CAS loop.
    long long _Result = _Atomic_load64(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong64(_Ptr, &_Result, _Result & _Val, _Order)) {
    }
    return _Result;
#else // ^^^ defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64) / !defined(_M_IX86) || defined(_M_HYBRID_X86_ARM64) vvvv
    long long _Result;
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedAnd64, _Ptr, _Val);
    return _Result;
#endif // defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
}

// _Atomic_and_fetch*: same operation as above, but returns the NEW
// (post-AND) value; the AND is re-applied locally to the intrinsic's result.
inline char _Atomic_and_fetch8(volatile char* _Ptr, int _Val, int _Order) {
    char _Result;
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedAnd8, _Ptr, (char) _Val);
    return (char) (_Result & (char) _Val);
}
inline short _Atomic_and_fetch16(volatile short* _Ptr, int _Val, int _Order) {
    short _Result;
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedAnd16, _Ptr, (short) _Val);
    return (short) (_Result & (short) _Val);
}
inline int _Atomic_and_fetch32(volatile int* _Ptr, int _Val, int _Order) {
    int _Result;
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedAnd, (volatile long*) _Ptr, _Val);
    return _Result & _Val;
}
inline long long _Atomic_and_fetch64(volatile long long* _Ptr, long long _Val, int _Order) {
#if defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
    // 32-bit x86: CAS-loop emulation, as above.
    long long _Result = _Atomic_load64(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong64(_Ptr, &_Result, _Result & _Val, _Order)) {
    }
    return _Result & _Val;
#else // ^^^ defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64) / !defined(_M_IX86) || defined(_M_HYBRID_X86_ARM64) vvvv
    long long _Result;
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedAnd64, _Ptr, _Val);
    return _Result & _Val;
#endif // defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
}
743
+
744
// _Atomic_fetch_or*: atomically OR _Val into *_Ptr and return the OLD value
// (C11 fetch_or semantics).
inline char _Atomic_fetch_or8(volatile char* _Ptr, int _Val, int _Order) {
    char _Result;
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedOr8, _Ptr, (char) _Val);
    return _Result;
}
inline short _Atomic_fetch_or16(volatile short* _Ptr, int _Val, int _Order) {
    short _Result;
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedOr16, _Ptr, (short) _Val);
    return _Result;
}
inline int _Atomic_fetch_or32(volatile int* _Ptr, int _Val, int _Order) {
    int _Result;
    // _InterlockedOr operates on long; int and long have the same size on MSVC targets.
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedOr, (volatile long*) _Ptr, _Val);
    return _Result;
}
inline long long _Atomic_fetch_or64(volatile long long* _Ptr, long long _Val, int _Order) {
#if defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
    // 32-bit x86 has no 64-bit interlocked OR; emulate with a CAS loop.
    long long _Result = _Atomic_load64(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong64(_Ptr, &_Result, _Result | _Val, _Order)) {
    }
    return _Result;
#else // ^^^ defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64) / !defined(_M_IX86) || defined(_M_HYBRID_X86_ARM64) vvvv
    long long _Result;
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedOr64, _Ptr, _Val);
    return _Result;
#endif // defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
}

// _Atomic_or_fetch*: same operation, but returns the NEW (post-OR) value.
inline char _Atomic_or_fetch8(volatile char* _Ptr, int _Val, int _Order) {
    char _Result;
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedOr8, _Ptr, (char) _Val);
    return (char) (_Result | (char) _Val);
}
inline short _Atomic_or_fetch16(volatile short* _Ptr, int _Val, int _Order) {
    short _Result;
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedOr16, _Ptr, (short) _Val);
    return (short) (_Result | (short) _Val);
}
inline int _Atomic_or_fetch32(volatile int* _Ptr, int _Val, int _Order) {
    int _Result;
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedOr, (volatile long*) _Ptr, _Val);
    return _Result | _Val;
}
inline long long _Atomic_or_fetch64(volatile long long* _Ptr, long long _Val, int _Order) {
#if defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
    // 32-bit x86: CAS-loop emulation, as above.
    long long _Result = _Atomic_load64(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong64(_Ptr, &_Result, _Result | _Val, _Order)) {
    }
    return _Result | _Val;
#else // ^^^ defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64) / !defined(_M_IX86) || defined(_M_HYBRID_X86_ARM64) vvvv
    long long _Result;
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedOr64, _Ptr, _Val);
    return _Result | _Val;
#endif // defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
}
799
+
800
// _Atomic_fetch_xor*: atomically XOR _Val into *_Ptr and return the OLD value
// (C11 fetch_xor semantics).
inline char _Atomic_fetch_xor8(volatile char* _Ptr, int _Val, int _Order) {
    char _Result;
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedXor8, _Ptr, (char) _Val);
    return _Result;
}
inline short _Atomic_fetch_xor16(volatile short* _Ptr, int _Val, int _Order) {
    short _Result;
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedXor16, _Ptr, (short) _Val);
    return _Result;
}
inline int _Atomic_fetch_xor32(volatile int* _Ptr, int _Val, int _Order) {
    int _Result;
    // _InterlockedXor operates on long; int and long have the same size on MSVC targets.
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedXor, (volatile long*) _Ptr, _Val);
    return _Result;
}
inline long long _Atomic_fetch_xor64(volatile long long* _Ptr, long long _Val, int _Order) {
#if defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
    // 32-bit x86 has no 64-bit interlocked XOR; emulate with a CAS loop.
    long long _Result = _Atomic_load64(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong64(_Ptr, &_Result, _Result ^ _Val, _Order)) {
    }
    return _Result;
#else // ^^^ defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64) / !defined(_M_IX86) || defined(_M_HYBRID_X86_ARM64) vvvv
    long long _Result;
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedXor64, _Ptr, _Val);
    return _Result;
#endif // defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
}

// _Atomic_xor_fetch*: same operation, but returns the NEW (post-XOR) value.
inline char _Atomic_xor_fetch8(volatile char* _Ptr, int _Val, int _Order) {
    char _Result;
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedXor8, _Ptr, (char) _Val);
    return (char) (_Result ^ (char) _Val);
}
inline short _Atomic_xor_fetch16(volatile short* _Ptr, int _Val, int _Order) {
    short _Result;
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedXor16, _Ptr, (short) _Val);
    return (short) (_Result ^ (short) _Val);
}
inline int _Atomic_xor_fetch32(volatile int* _Ptr, int _Val, int _Order) {
    int _Result;
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedXor, (volatile long*) _Ptr, _Val);
    return _Result ^ _Val;
}
inline long long _Atomic_xor_fetch64(volatile long long* _Ptr, long long _Val, int _Order) {
#if defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
    // 32-bit x86: CAS-loop emulation, as above.
    long long _Result = _Atomic_load64(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong64(_Ptr, &_Result, _Result ^ _Val, _Order)) {
    }
    return _Result ^ _Val;
#else // ^^^ defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64) / !defined(_M_IX86) || defined(_M_HYBRID_X86_ARM64) vvvv
    long long _Result;
    _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedXor64, _Ptr, _Val);
    return _Result ^ _Val;
#endif // defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
}
855
+
856
// _Atomic_mult_fetch*: atomically multiply *_Ptr by _Val and return the NEW
// value. No interlocked multiply intrinsic exists on any target, so every
// width uses a CAS loop: load an initial guess, then retry until the
// compare-exchange (which applies _Order and refreshes _Result on failure)
// installs old * _Val.
inline char _Atomic_mult_fetch8(volatile char* _Ptr, int _Val, int _Order) {
    char _Result = _Atomic_load8(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong8(_Ptr, &_Result, (char) (_Result * (char) _Val), _Order)) {
    }
    return (char) (_Result * (char) _Val);
}
inline short _Atomic_mult_fetch16(volatile short* _Ptr, int _Val, int _Order) {
    short _Result = _Atomic_load16(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong16(_Ptr, &_Result, (short) (_Result * (short) _Val), _Order)) {
    }
    return (short) (_Result * (short) _Val);
}
inline int _Atomic_mult_fetch32(volatile int* _Ptr, int _Val, int _Order) {
    int _Result = _Atomic_load32(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong32(_Ptr, &_Result, _Result * _Val, _Order)) {
    }
    return _Result * _Val;
}
inline long long _Atomic_mult_fetch64(volatile long long* _Ptr, long long _Val, int _Order) {
    long long _Result = _Atomic_load64(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong64(_Ptr, &_Result, _Result * _Val, _Order)) {
    }
    return _Result * _Val;
}
inline float _Atomic_mult_fetchf(volatile float* _Ptr, float _Val, int _Order) {
    float _Result = _Atomic_loadf(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strongf(_Ptr, &_Result, _Result * _Val, _Order)) {
    }
    return _Result * _Val;
}
inline double _Atomic_mult_fetchd(volatile double* _Ptr, double _Val, int _Order) {
    double _Result = _Atomic_loadd(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strongd(_Ptr, &_Result, _Result * _Val, _Order)) {
    }
    return _Result * _Val;
}
892
+
893
// _Atomic_div_fetch*: atomically divide *_Ptr by _Val (UNSIGNED division) and
// return the NEW value. CAS-loop based; the signed load/CAS helpers are reused
// via pointer casts, with the arithmetic itself done in unsigned types so the
// division semantics are unsigned. Division by zero is the caller's UB, as in
// plain C.
inline unsigned char _Atomic_div_fetch8(volatile unsigned char* _Ptr, unsigned int _Val, int _Order) {
    unsigned char _Result = (unsigned char) _Atomic_load8((volatile char*) _Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong8(
        (volatile char*) _Ptr, (char*) &_Result, (char) (_Result / (unsigned char) _Val), _Order)) {
    }
    return (unsigned char) (_Result / (unsigned char) _Val);
}
inline unsigned short _Atomic_div_fetch16(volatile unsigned short* _Ptr, unsigned int _Val, int _Order) {
    unsigned short _Result = (unsigned short) _Atomic_load16((volatile short*) _Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong16(
        (volatile short*) _Ptr, (short*) &_Result, (short) (_Result / (unsigned short) _Val), _Order)) {
    }
    return (unsigned short) (_Result / (unsigned short) _Val);
}
inline unsigned int _Atomic_div_fetch32(volatile unsigned int* _Ptr, unsigned int _Val, int _Order) {
    unsigned int _Result = (unsigned int) _Atomic_load32((volatile int*) _Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong32((volatile int*) _Ptr, (int*) &_Result, (int) (_Result / _Val), _Order)) {
    }
    return _Result / _Val;
}
inline unsigned long long _Atomic_div_fetch64(volatile unsigned long long* _Ptr, unsigned long long _Val, int _Order) {
    unsigned long long _Result =
        (unsigned long long) _Atomic_load64((volatile long long*) _Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong64(
        (volatile long long*) _Ptr, (long long*) &_Result, (long long) (_Result / _Val), _Order)) {
    }
    return _Result / _Val;
}
inline float _Atomic_div_fetchf(volatile float* _Ptr, float _Val, int _Order) {
    float _Result = _Atomic_loadf(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strongf(_Ptr, &_Result, _Result / _Val, _Order)) {
    }
    return _Result / _Val;
}
inline double _Atomic_div_fetchd(volatile double* _Ptr, double _Val, int _Order) {
    double _Result = _Atomic_loadd(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strongd(_Ptr, &_Result, _Result / _Val, _Order)) {
    }
    return _Result / _Val;
}

// _Atomic_idiv_fetch*: SIGNED-division counterparts of the above, returning
// the NEW value. Same CAS-loop structure.
inline signed char _Atomic_idiv_fetch8(volatile char* _Ptr, int _Val, int _Order) {
    signed char _Result = (signed char) _Atomic_load8(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong8(_Ptr, (char*) &_Result, (char) (_Result / (signed char) _Val), _Order)) {
    }
    return (signed char) (_Result / (signed char) _Val);
}
inline short _Atomic_idiv_fetch16(volatile short* _Ptr, int _Val, int _Order) {
    short _Result = _Atomic_load16(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong16(_Ptr, &_Result, (short) (_Result / (short) _Val), _Order)) {
    }
    return (short) (_Result / (short) _Val);
}
inline int _Atomic_idiv_fetch32(volatile int* _Ptr, int _Val, int _Order) {
    int _Result = _Atomic_load32(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong32(_Ptr, &_Result, _Result / _Val, _Order)) {
    }
    return _Result / _Val;
}
inline long long _Atomic_idiv_fetch64(volatile long long* _Ptr, long long _Val, int _Order) {
    long long _Result = _Atomic_load64(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong64(_Ptr, &_Result, _Result / _Val, _Order)) {
    }
    return _Result / _Val;
}
958
+
959
// _Atomic_shl_fetch*: atomically left-shift *_Ptr by _Val and return the NEW
// value. No interlocked shift intrinsic exists, so all widths use a CAS loop.
// Out-of-range shift counts are the caller's UB, as in plain C.
inline char _Atomic_shl_fetch8(volatile char* _Ptr, int _Val, int _Order) {
    char _Result = _Atomic_load8(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong8(_Ptr, &_Result, (char) (_Result << (char) _Val), _Order)) {
    }
    return (char) (_Result << (char) _Val);
}
inline short _Atomic_shl_fetch16(volatile short* _Ptr, int _Val, int _Order) {
    short _Result = _Atomic_load16(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong16(_Ptr, &_Result, (short) (_Result << (short) _Val), _Order)) {
    }
    return (short) (_Result << (short) _Val);
}
inline int _Atomic_shl_fetch32(volatile int* _Ptr, int _Val, int _Order) {
    int _Result = _Atomic_load32(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong32(_Ptr, &_Result, _Result << _Val, _Order)) {
    }
    return _Result << _Val;
}
inline long long _Atomic_shl_fetch64(volatile long long* _Ptr, long long _Val, int _Order) {
    long long _Result = _Atomic_load64(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong64(_Ptr, &_Result, _Result << _Val, _Order)) {
    }
    return _Result << _Val;
}

// _Atomic_shr_fetch*: right-shift counterparts, returning the NEW value.
// The operands are plain (signed) types, so >> follows the compiler's signed
// right-shift behavior.
inline char _Atomic_shr_fetch8(volatile char* _Ptr, int _Val, int _Order) {
    char _Result = _Atomic_load8(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong8(_Ptr, &_Result, (char) (_Result >> (char) _Val), _Order)) {
    }
    return (char) (_Result >> (char) _Val);
}
inline short _Atomic_shr_fetch16(volatile short* _Ptr, int _Val, int _Order) {
    short _Result = _Atomic_load16(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong16(_Ptr, &_Result, (short) (_Result >> (short) _Val), _Order)) {
    }
    return (short) (_Result >> (short) _Val);
}
inline int _Atomic_shr_fetch32(volatile int* _Ptr, int _Val, int _Order) {
    int _Result = _Atomic_load32(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong32(_Ptr, &_Result, _Result >> _Val, _Order)) {
    }
    return _Result >> _Val;
}
inline long long _Atomic_shr_fetch64(volatile long long* _Ptr, long long _Val, int _Order) {
    long long _Result = _Atomic_load64(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong64(_Ptr, &_Result, _Result >> _Val, _Order)) {
    }
    return _Result >> _Val;
}
1008
+
1009
// _Atomic_imod_fetch*: atomically compute *_Ptr % _Val (SIGNED remainder) and
// return the NEW value. CAS-loop based, like the division helpers above.
// _Val == 0 is the caller's UB, as in plain C.
inline signed char _Atomic_imod_fetch8(volatile char* _Ptr, int _Val, int _Order) {
    signed char _Result = (signed char) _Atomic_load8(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong8(_Ptr, (char*) &_Result, (char) (_Result % (signed char) _Val), _Order)) {
    }
    return (signed char) (_Result % (signed char) _Val);
}
inline short _Atomic_imod_fetch16(volatile short* _Ptr, int _Val, int _Order) {
    short _Result = _Atomic_load16(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong16(_Ptr, &_Result, (short) (_Result % (short) _Val), _Order)) {
    }
    return (short) (_Result % (short) _Val);
}
inline int _Atomic_imod_fetch32(volatile int* _Ptr, int _Val, int _Order) {
    int _Result = _Atomic_load32(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong32(_Ptr, &_Result, _Result % _Val, _Order)) {
    }
    return _Result % _Val;
}
inline long long _Atomic_imod_fetch64(volatile long long* _Ptr, long long _Val, int _Order) {
    long long _Result = _Atomic_load64(_Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong64(_Ptr, &_Result, _Result % _Val, _Order)) {
    }
    return _Result % _Val;
}

// _Atomic_mod_fetch*: UNSIGNED-remainder counterparts, returning the NEW
// value. The signed load/CAS helpers are reused through pointer casts while
// the arithmetic itself stays in unsigned types.
inline unsigned char _Atomic_mod_fetch8(volatile unsigned char* _Ptr, unsigned int _Val, int _Order) {
    unsigned char _Result = (unsigned char) _Atomic_load8((volatile char*) _Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong8(
        (volatile char*) _Ptr, (char*) &_Result, (char) (_Result % (unsigned char) _Val), _Order)) {
    }
    return (unsigned char) (_Result % (unsigned char) _Val);
}
inline unsigned short _Atomic_mod_fetch16(volatile unsigned short* _Ptr, unsigned int _Val, int _Order) {
    unsigned short _Result = (unsigned short) _Atomic_load16((volatile short*) _Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong16(
        (volatile short*) _Ptr, (short*) &_Result, (short) (_Result % (unsigned short) _Val), _Order)) {
    }
    return (unsigned short) (_Result % (unsigned short) _Val);
}
inline unsigned int _Atomic_mod_fetch32(volatile unsigned int* _Ptr, unsigned int _Val, int _Order) {
    unsigned int _Result = (unsigned int) _Atomic_load32((volatile int*) _Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong32((volatile int*) _Ptr, (int*) &_Result, (int) (_Result % _Val), _Order)) {
    }
    return _Result % _Val;
}
inline unsigned long long _Atomic_mod_fetch64(volatile unsigned long long* _Ptr, unsigned long long _Val, int _Order) {
    unsigned long long _Result =
        (unsigned long long) _Atomic_load64((volatile long long*) _Ptr, _Atomic_memory_order_seq_cst);
    while (!_Atomic_compare_exchange_strong64(
        (volatile long long*) _Ptr, (long long*) &_Result, (long long) (_Result % _Val), _Order)) {
    }
    return _Result % _Val;
}
1062
+
1063
// Locked fallbacks for atomic objects that cannot be handled with lock-free
// intrinsics. A lock associated with the object's address (acquired via
// _Atomic_lock_acquire / released via _Atomic_lock_release) guards plain
// memmove/memcmp on the value bytes. _Offset is added to the object pointer
// before touching the value bytes — presumably it skips per-object bookkeeping
// (e.g. the lock itself) preceding the value; confirm against callers.

// Store: copy _Size bytes from _Desired into the object under the lock.
inline void _Atomic_lock_and_store(volatile void* _Obj, const void* _Desired, int _Offset, size_t _Size) {
    _Atomic_lock_acquire(_Obj);
    memmove((char*) _Obj + _Offset, _Desired, _Size);
    _Atomic_lock_release(_Obj);
}

// Load: copy _Size bytes from the object into _Dest under the lock.
inline void _Atomic_lock_and_load(volatile void* _Obj, void* _Dest, int _Offset, size_t _Size) {
    _Atomic_lock_acquire(_Obj);
    memmove(_Dest, (char*) _Obj + _Offset, _Size);
    _Atomic_lock_release(_Obj);
}

// Exchange: read the old value into _Dest, then write _Desired, as one
// locked critical section.
inline void _Atomic_lock_and_exchange(
    volatile void* _Obj, const void* _Desired, void* _Dest, int _Offset, size_t _Size) {
    _Atomic_lock_acquire(_Obj);
    memmove(_Dest, (char*) _Obj + _Offset, _Size);
    memmove((char*) _Obj + _Offset, _Desired, _Size);
    _Atomic_lock_release(_Obj);
}

// Strong compare-exchange: bytewise (memcmp) comparison of the object
// representation against *_Expected. On match, _Desired is stored; on
// mismatch, *_Expected is updated with the current value — matching the C11
// atomic_compare_exchange_strong contract. Returns whether the swap happened.
inline _Bool _Atomic_lock_and_compare_exchange_strong(
    volatile void* _Obj, void* _Expected, const void* _Desired, int _Offset, size_t _Size) {
    _Bool _Result;
    _Atomic_lock_acquire(_Obj);
    _Result = memcmp((char*) _Obj + _Offset, _Expected, _Size) == 0;
    if (_Result) {
        memmove((char*) _Obj + _Offset, _Desired, _Size);
    } else {
        memmove(_Expected, (char*) _Obj + _Offset, _Size);
    }
    _Atomic_lock_release(_Obj);
    return _Result;
}

// Implementation-detail macros end here; undefine them so they do not leak
// into translation units that include this header.
#undef _ATOMIC_CHOOSE_INTRINSIC
#undef _ATOMIC_POST_LOAD_BARRIER_AS_NEEDED
#undef _ATOMIC_STORE_PREFIX
#undef _ATOMIC_STORE_SEQ_CST_ARM
#undef _ATOMIC_STORE_SEQ_CST_X86_X64
#undef _ATOMIC_STORE_32_SEQ_CST_X86_X64
#undef _ATOMIC_STORE_SEQ_CST
#undef _ATOMIC_STORE_32_SEQ_CST
#undef _ATOMIC_STORE_64_SEQ_CST
#undef _ATOMIC_STORE_64_SEQ_CST_IX86
#undef _ATOMIC_INVALID_PARAMETER
#undef _ATOMIC_STORE_SEQ_CST_ARM64
#undef __LOAD_ACQUIRE_ARM64
#undef _ATOMIC_LOAD_ARM64
#undef __STORE_RELEASE
#undef _STD_ATOMIC_USE_ARM64_LDAR_STLR

#undef _STD_COMPARE_EXCHANGE_128
#undef _INVALID_MEMORY_ORDER
#undef _Compiler_or_memory_barrier
#undef _Memory_barrier
#undef _Memory_load_acquire_barrier
#undef _Compiler_barrier

#undef _CONCATX
#undef _CONCAT
#undef _INTRIN_RELAXED
#undef _INTRIN_ACQUIRE
#undef _INTRIN_RELEASE
#undef _INTRIN_ACQ_REL
#undef _YIELD_PROCESSOR
miniMSVC/VC/Tools/MSVC/14.42.34433/include/vcruntime_c11_stdatomic.h ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// C11 version of stdatomic.h
#pragma once

#ifdef __cplusplus
// This header should never be included in C++ mode, C++ has its own stdatomic.h
#error "vcruntime_c11_stdatomic.h is a C-only header"
#endif

#include <stddef.h>
#include <stdint.h>
#include <uchar.h>
#include <vcruntime_c11_atomic_support.h>

// The public memory_order enumerators mirror the internal
// _Atomic_memory_order_* constants so they can be passed straight through to
// the support routines without translation.
typedef enum memory_order {
    memory_order_relaxed = _Atomic_memory_order_relaxed,
    memory_order_consume = _Atomic_memory_order_consume,
    memory_order_acquire = _Atomic_memory_order_acquire,
    memory_order_release = _Atomic_memory_order_release,
    memory_order_acq_rel = _Atomic_memory_order_acq_rel,
    memory_order_seq_cst = _Atomic_memory_order_seq_cst
} memory_order;

// Lock-free property macros (C11 7.17.1: 0 = never, 1 = sometimes,
// 2 = always lock-free).
#define ATOMIC_BOOL_LOCK_FREE 1
#define ATOMIC_CHAR_LOCK_FREE 1
#define ATOMIC_CHAR16_T_LOCK_FREE 1
#define ATOMIC_CHAR32_T_LOCK_FREE 1
#define ATOMIC_WCHAR_T_LOCK_FREE 1
#define ATOMIC_SHORT_LOCK_FREE 1
#define ATOMIC_INT_LOCK_FREE 1
#define ATOMIC_LONG_LOCK_FREE 1
#define ATOMIC_LLONG_LOCK_FREE 1
#define ATOMIC_POINTER_LOCK_FREE 1

// Atomic typedefs required by C11 7.17.6.
typedef _Atomic(_Bool) atomic_bool;
typedef _Atomic(char) atomic_char;
typedef _Atomic(signed char) atomic_schar;
typedef _Atomic(unsigned char) atomic_uchar;
typedef _Atomic(short) atomic_short;
typedef _Atomic(unsigned short) atomic_ushort;
typedef _Atomic(int) atomic_int;
typedef _Atomic(unsigned int) atomic_uint;
typedef _Atomic(long) atomic_long;
typedef _Atomic(unsigned long) atomic_ulong;
typedef _Atomic(long long) atomic_llong;
typedef _Atomic(unsigned long long) atomic_ullong;
typedef _Atomic(char16_t) atomic_char16_t;
typedef _Atomic(char32_t) atomic_char32_t;
typedef _Atomic(wchar_t) atomic_wchar_t;
typedef _Atomic(int_least8_t) atomic_int_least8_t;
typedef _Atomic(uint_least8_t) atomic_uint_least8_t;
typedef _Atomic(int_least16_t) atomic_int_least16_t;
typedef _Atomic(uint_least16_t) atomic_uint_least16_t;
typedef _Atomic(int_least32_t) atomic_int_least32_t;
typedef _Atomic(uint_least32_t) atomic_uint_least32_t;
typedef _Atomic(int_least64_t) atomic_int_least64_t;
typedef _Atomic(uint_least64_t) atomic_uint_least64_t;
typedef _Atomic(int_fast8_t) atomic_int_fast8_t;
typedef _Atomic(uint_fast8_t) atomic_uint_fast8_t;
typedef _Atomic(int_fast16_t) atomic_int_fast16_t;
typedef _Atomic(uint_fast16_t) atomic_uint_fast16_t;
typedef _Atomic(int_fast32_t) atomic_int_fast32_t;
typedef _Atomic(uint_fast32_t) atomic_uint_fast32_t;
typedef _Atomic(int_fast64_t) atomic_int_fast64_t;
typedef _Atomic(uint_fast64_t) atomic_uint_fast64_t;
typedef _Atomic(intptr_t) atomic_intptr_t;
typedef _Atomic(uintptr_t) atomic_uintptr_t;
typedef _Atomic(size_t) atomic_size_t;
typedef _Atomic(ptrdiff_t) atomic_ptrdiff_t;
typedef _Atomic(intmax_t) atomic_intmax_t;
typedef _Atomic(uintmax_t) atomic_uintmax_t;

#define atomic_init __c11_atomic_init

// Consume-dependency chains are not tracked; kill_dependency is the identity.
#define kill_dependency(_Obj) (_Obj)

#define atomic_thread_fence(_Order) _Atomic_thread_fence(_Order)
#define atomic_signal_fence(_Order) _Atomic_signal_fence(_Order)

// Lock-freedom is decided purely by the object's (unqualified) size.
#define atomic_is_lock_free(_Obj) _Atomic_is_lock_free(sizeof(__typeof_unqual__(*(_Obj))))

// The generic operations map onto the Clang-style __c11_atomic_* builtins;
// the non-_explicit forms supply seq_cst ordering, the _explicit forms
// forward the caller's order argument unchanged.
#define atomic_store(_Obj, _Desired) __c11_atomic_store(_Obj, _Desired, _Atomic_memory_order_seq_cst)
#define atomic_store_explicit __c11_atomic_store

#define atomic_load(_Obj) __c11_atomic_load(_Obj, _Atomic_memory_order_seq_cst)
#define atomic_load_explicit __c11_atomic_load

#define atomic_exchange(_Obj, _Desired) __c11_atomic_exchange(_Obj, _Desired, _Atomic_memory_order_seq_cst)
#define atomic_exchange_explicit __c11_atomic_exchange

#define atomic_compare_exchange_strong(_Obj, _Expected, _Desired) \
    __c11_atomic_compare_exchange_strong(                         \
        _Obj, _Expected, _Desired, _Atomic_memory_order_seq_cst, _Atomic_memory_order_seq_cst)
#define atomic_compare_exchange_strong_explicit __c11_atomic_compare_exchange_strong

#define atomic_compare_exchange_weak(_Obj, _Expected, _Desired) \
    __c11_atomic_compare_exchange_weak(                         \
        _Obj, _Expected, _Desired, _Atomic_memory_order_seq_cst, _Atomic_memory_order_seq_cst)
#define atomic_compare_exchange_weak_explicit __c11_atomic_compare_exchange_weak

#define atomic_fetch_add(_Obj, _Val) __c11_atomic_fetch_add(_Obj, _Val, _Atomic_memory_order_seq_cst)
#define atomic_fetch_add_explicit __c11_atomic_fetch_add

#define atomic_fetch_sub(_Obj, _Val) __c11_atomic_fetch_sub(_Obj, _Val, _Atomic_memory_order_seq_cst)
#define atomic_fetch_sub_explicit __c11_atomic_fetch_sub

#define atomic_fetch_or(_Obj, _Val) __c11_atomic_fetch_or(_Obj, _Val, _Atomic_memory_order_seq_cst)
#define atomic_fetch_or_explicit __c11_atomic_fetch_or

#define atomic_fetch_xor(_Obj, _Val) __c11_atomic_fetch_xor(_Obj, _Val, _Atomic_memory_order_seq_cst)
#define atomic_fetch_xor_explicit __c11_atomic_fetch_xor

#define atomic_fetch_and(_Obj, _Val) __c11_atomic_fetch_and(_Obj, _Val, _Atomic_memory_order_seq_cst)
#define atomic_fetch_and_explicit __c11_atomic_fetch_and

#define ATOMIC_FLAG_INIT \
    { 0 }

// atomic_flag is a thin wrapper over an atomic boolean.
typedef struct atomic_flag {
    _Atomic(_Bool) _Val;
} atomic_flag;

// test_and_set sets the flag and returns its PREVIOUS value (via exchange).
#define atomic_flag_test_and_set(_Obj) __c11_atomic_exchange(&(_Obj)->_Val, 1, _Atomic_memory_order_seq_cst)
#define atomic_flag_test_and_set_explicit(_Obj, _Order) __c11_atomic_exchange(&(_Obj)->_Val, 1, _Order)

#define atomic_flag_clear(_Obj) __c11_atomic_store(&(_Obj)->_Val, 0, _Atomic_memory_order_seq_cst)
#define atomic_flag_clear_explicit(_Obj, _Order) __c11_atomic_store(&(_Obj)->_Val, 0, _Order)
miniMSVC/VC/Tools/MSVC/14.42.34433/include/vcruntime_exception.h ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
//
// vcruntime_exception.h
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// <exception> functionality that is implemented in the VCRuntime.
//
#pragma once

#include <eh.h>

#ifdef _M_CEE_PURE
#include <vcruntime_new.h>
#endif

#if _VCRT_COMPILER_PREPROCESSOR && _HAS_EXCEPTIONS

#pragma warning(push)
#pragma warning(disable: _VCRUNTIME_DISABLED_WARNINGS)
#pragma warning(disable: 4577) // 'noexcept' used with no exception handling mode specified
#pragma warning(disable: 4643) // Forward declaring 'meow' in namespace std is not permitted by the C++ Standard.

#pragma pack(push, _CRT_PACKING)

_CRT_BEGIN_C_HEADER

// Storage for an exception's what() string. _DoFree records whether the
// string is a heap copy owned by this object (released by
// __std_exception_destroy) or an unowned pointer that is merely referenced.
struct __std_exception_data
{
    char const* _What;
    bool _DoFree;
};

// Copies _From into _To, duplicating the string when _From->_DoFree is set.
_VCRTIMP void __cdecl __std_exception_copy(
    _In_ __std_exception_data const* _From,
    _Inout_ __std_exception_data* _To
    );

// Releases any owned string and resets the data.
_VCRTIMP void __cdecl __std_exception_destroy(
    _Inout_ __std_exception_data* _Data
    );

_CRT_END_C_HEADER


extern "C++" {

namespace std {

_VCRT_EXPORT_STD class exception
{
public:

    // Default: no message; what() falls back to "Unknown exception".
    exception() noexcept
        : _Data()
    {
    }

    // Copying constructor: duplicates _Message (heap copy, _DoFree = true).
    explicit exception(char const* const _Message) noexcept
        : _Data()
    {
        __std_exception_data _InitData = { _Message, true };
        __std_exception_copy(&_InitData, &_Data);
    }

    // Non-copying constructor (the int parameter is only a tag): stores the
    // pointer directly with _DoFree left false, so _Message is never freed.
    // Intended for strings with static storage duration (see bad_alloc below).
    exception(char const* const _Message, int) noexcept
        : _Data()
    {
        _Data._What = _Message;
    }

    exception(exception const& _Other) noexcept
        : _Data()
    {
        __std_exception_copy(&_Other._Data, &_Data);
    }

    exception& operator=(exception const& _Other) noexcept
    {
        // Self-assignment guard: destroy-then-copy below would otherwise
        // free the very string we are about to copy.
        if (this == &_Other)
        {
            return *this;
        }

        __std_exception_destroy(&_Data);
        __std_exception_copy(&_Other._Data, &_Data);
        return *this;
    }

    virtual ~exception() noexcept
    {
        __std_exception_destroy(&_Data);
    }

    _NODISCARD virtual char const* what() const
    {
        return _Data._What ? _Data._What : "Unknown exception";
    }

private:

    __std_exception_data _Data;
};

// All derived classes below use the non-copying (tagged) base constructor
// with string literals, so constructing them never allocates.
_VCRT_EXPORT_STD class bad_exception
    : public exception
{
public:

    bad_exception() noexcept
        : exception("bad exception", 1)
    {
    }
};

_VCRT_EXPORT_STD class bad_array_new_length;

_VCRT_EXPORT_STD class bad_alloc
    : public exception
{
public:

    bad_alloc() noexcept
        : exception("bad allocation", 1)
    {
    }

private:

    // bad_array_new_length reuses the private message constructor below.
    friend bad_array_new_length;

    bad_alloc(char const* const _Message) noexcept
        : exception(_Message, 1)
    {
    }
};

_VCRT_EXPORT_STD class bad_array_new_length
    : public bad_alloc
{
public:

    bad_array_new_length() noexcept
        : bad_alloc("bad array new length")
    {
    }
};

} // namespace std

} // extern "C++"

#pragma pack(pop)

#pragma warning(pop) // _VCRUNTIME_DISABLED_WARNINGS
#endif // _VCRT_COMPILER_PREPROCESSOR && _HAS_EXCEPTIONS
miniMSVC/VC/Tools/MSVC/14.42.34433/include/vcruntime_new.h ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //
2
+ // vcruntime_new.h
3
+ //
4
+ // Copyright (c) Microsoft Corporation. All rights reserved.
5
+ //
6
+ // Declarations and definitions of memory management functions in the VCRuntime.
7
+ //
8
+ #pragma once
9
+
10
+ #include <vcruntime.h>
11
+
12
+ #pragma warning(push)
13
+ #pragma warning(disable: _VCRUNTIME_DISABLED_WARNINGS)
14
+ #pragma warning(disable: 4985) // attributes not present on previous declaration
15
+
16
+ #ifdef __cplusplus
17
+ extern "C++" {
18
+
19
+ #pragma pack(push, _CRT_PACKING)
20
+
21
+ #pragma push_macro("new")
22
+ #undef new
23
+
24
+ #ifdef __cpp_aligned_new
25
+ namespace std
26
+ {
27
+ _VCRT_EXPORT_STD enum class align_val_t : size_t {};
28
+ }
29
+ #endif // __cpp_aligned_new
30
+
31
+ #ifndef __NOTHROW_T_DEFINED
32
+ #define __NOTHROW_T_DEFINED
33
+ namespace std
34
+ {
35
+ _VCRT_EXPORT_STD struct nothrow_t {
36
+ explicit nothrow_t() = default;
37
+ };
38
+
39
+ _VCRT_EXPORT_STD extern nothrow_t const nothrow;
40
+ }
41
+ #endif
42
+
43
+ _VCRT_EXPORT_STD _NODISCARD _Ret_notnull_ _Post_writable_byte_size_(_Size) _VCRT_ALLOCATOR
44
+ void* __CRTDECL operator new(
45
+ size_t _Size
46
+ );
47
+
48
+ _VCRT_EXPORT_STD _NODISCARD _Ret_maybenull_ _Success_(return != NULL) _Post_writable_byte_size_(_Size) _VCRT_ALLOCATOR
49
+ void* __CRTDECL operator new(
50
+ size_t _Size,
51
+ ::std::nothrow_t const&
52
+ ) noexcept;
53
+
54
+ _VCRT_EXPORT_STD _NODISCARD _Ret_notnull_ _Post_writable_byte_size_(_Size) _VCRT_ALLOCATOR
55
+ void* __CRTDECL operator new[](
56
+ size_t _Size
57
+ );
58
+
59
+ _VCRT_EXPORT_STD _NODISCARD _Ret_maybenull_ _Success_(return != NULL) _Post_writable_byte_size_(_Size) _VCRT_ALLOCATOR
60
+ void* __CRTDECL operator new[](
61
+ size_t _Size,
62
+ ::std::nothrow_t const&
63
+ ) noexcept;
64
+
65
+ _VCRT_EXPORT_STD void __CRTDECL operator delete(
66
+ void* _Block
67
+ ) noexcept;
68
+
69
+ _VCRT_EXPORT_STD void __CRTDECL operator delete(
70
+ void* _Block,
71
+ ::std::nothrow_t const&
72
+ ) noexcept;
73
+
74
+ _VCRT_EXPORT_STD void __CRTDECL operator delete[](
75
+ void* _Block
76
+ ) noexcept;
77
+
78
+ _VCRT_EXPORT_STD void __CRTDECL operator delete[](
79
+ void* _Block,
80
+ ::std::nothrow_t const&
81
+ ) noexcept;
82
+
83
+ _VCRT_EXPORT_STD void __CRTDECL operator delete(
84
+ void* _Block,
85
+ size_t _Size
86
+ ) noexcept;
87
+
88
+ _VCRT_EXPORT_STD void __CRTDECL operator delete[](
89
+ void* _Block,
90
+ size_t _Size
91
+ ) noexcept;
92
+
93
+ #ifdef __cpp_aligned_new
94
+ _VCRT_EXPORT_STD _NODISCARD _Ret_notnull_ _Post_writable_byte_size_(_Size) _VCRT_ALLOCATOR
95
+ void* __CRTDECL operator new(
96
+ size_t _Size,
97
+ ::std::align_val_t _Al
98
+ );
99
+
100
+ _VCRT_EXPORT_STD _NODISCARD _Ret_maybenull_ _Success_(return != NULL) _Post_writable_byte_size_(_Size) _VCRT_ALLOCATOR
101
+ void* __CRTDECL operator new(
102
+ size_t _Size,
103
+ ::std::align_val_t _Al,
104
+ ::std::nothrow_t const&
105
+ ) noexcept;
106
+
107
+
108
+ _VCRT_EXPORT_STD _NODISCARD _Ret_notnull_ _Post_writable_byte_size_(_Size) _VCRT_ALLOCATOR
109
+ void* __CRTDECL operator new[](
110
+ size_t _Size,
111
+ ::std::align_val_t _Al
112
+ );
113
+
114
+ _VCRT_EXPORT_STD _NODISCARD _Ret_maybenull_ _Success_(return != NULL) _Post_writable_byte_size_(_Size) _VCRT_ALLOCATOR
115
+ void* __CRTDECL operator new[](
116
+ size_t _Size,
117
+ ::std::align_val_t _Al,
118
+ ::std::nothrow_t const&
119
+ ) noexcept;
120
+
121
+ _VCRT_EXPORT_STD void __CRTDECL operator delete(
122
+ void* _Block,
123
+ ::std::align_val_t _Al
124
+ ) noexcept;
125
+
126
+ _VCRT_EXPORT_STD void __CRTDECL operator delete(
127
+ void* _Block,
128
+ ::std::align_val_t _Al,
129
+ ::std::nothrow_t const&
130
+ ) noexcept;
131
+
132
+ _VCRT_EXPORT_STD void __CRTDECL operator delete[](
133
+ void* _Block,
134
+ ::std::align_val_t _Al
135
+ ) noexcept;
136
+
137
+ _VCRT_EXPORT_STD void __CRTDECL operator delete[](
138
+ void* _Block,
139
+ ::std::align_val_t _Al,
140
+ ::std::nothrow_t const&
141
+ ) noexcept;
142
+
143
+ _VCRT_EXPORT_STD void __CRTDECL operator delete(
144
+ void* _Block,
145
+ size_t _Size,
146
+ ::std::align_val_t _Al
147
+ ) noexcept;
148
+
149
+ _VCRT_EXPORT_STD void __CRTDECL operator delete[](
150
+ void* _Block,
151
+ size_t _Size,
152
+ ::std::align_val_t _Al
153
+ ) noexcept;
154
+ #endif // __cpp_aligned_new
155
+
156
+ #pragma warning(push)
157
+ #pragma warning(disable: 4577) // 'noexcept' used with no exception handling mode specified
158
+ #pragma warning(disable: 4514) // 'operator new': unreferenced inline function has been removed
159
+ #ifndef __PLACEMENT_NEW_INLINE
160
+ #define __PLACEMENT_NEW_INLINE
161
+ _VCRT_EXPORT_STD _NODISCARD _MSVC_CONSTEXPR _Ret_notnull_ _Post_writable_byte_size_(_Size) _Post_satisfies_(return == _Where)
162
+ inline void* __CRTDECL operator new(size_t _Size,
163
+ _Writable_bytes_(_Size) void* _Where) noexcept
164
+ {
165
+ (void)_Size;
166
+ return _Where;
167
+ }
168
+
169
+ _VCRT_EXPORT_STD inline void __CRTDECL operator delete(void*, void*) noexcept
170
+ {
171
+ return;
172
+ }
173
+ #endif
174
+
175
+ #ifndef __PLACEMENT_VEC_NEW_INLINE
176
+ #define __PLACEMENT_VEC_NEW_INLINE
177
+ _VCRT_EXPORT_STD _NODISCARD _Ret_notnull_ _Post_writable_byte_size_(_Size) _Post_satisfies_(return == _Where)
178
+ inline void* __CRTDECL operator new[](size_t _Size,
179
+ _Writable_bytes_(_Size) void* _Where) noexcept
180
+ {
181
+ (void)_Size;
182
+ return _Where;
183
+ }
184
+
185
+ _VCRT_EXPORT_STD inline void __CRTDECL operator delete[](void*, void*) noexcept
186
+ {
187
+ }
188
+ #endif
189
+ #pragma warning(pop)
190
+
191
+ #pragma pop_macro("new")
192
+
193
+ #pragma pack(pop)
194
+
195
+ } // extern "C++"
196
+ #endif // __cplusplus
197
+
198
+ #pragma warning(pop) // _VCRUNTIME_DISABLED_WARNINGS
miniMSVC/VC/Tools/MSVC/14.42.34433/include/vcruntime_new_debug.h ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //
2
+ // vcruntime_new_debug.h
3
+ //
4
+ // Copyright (c) Microsoft Corporation. All rights reserved.
5
+ //
6
+ // Declarations and definitions of the debug operators new and delete.
7
+ //
8
+ #pragma once
9
+
10
+ #include <vcruntime_new.h>
11
+
12
+ #pragma warning(push)
13
+ #pragma warning(disable: _VCRUNTIME_DISABLED_WARNINGS)
14
+
15
+ #ifdef __cplusplus
16
+ extern "C++" {
17
+
18
+ #pragma pack(push, _CRT_PACKING)
19
+
20
+ #pragma push_macro("new")
21
+ #undef new
22
+
23
+ #ifndef _MFC_OVERRIDES_NEW
24
+
25
+ _NODISCARD _Check_return_ _Ret_notnull_ _Post_writable_byte_size_(_Size)
26
+ _VCRT_ALLOCATOR void* __CRTDECL operator new(
27
+ _In_ size_t _Size,
28
+ _In_ int _BlockUse,
29
+ _In_z_ char const* _FileName,
30
+ _In_ int _LineNumber
31
+ );
32
+
33
+ _NODISCARD _Check_return_ _Ret_notnull_ _Post_writable_byte_size_(_Size)
34
+ _VCRT_ALLOCATOR void* __CRTDECL operator new[](
35
+ _In_ size_t _Size,
36
+ _In_ int _BlockUse,
37
+ _In_z_ char const* _FileName,
38
+ _In_ int _LineNumber
39
+ );
40
+
41
+ void __CRTDECL operator delete(
42
+ void* _Block,
43
+ int _BlockUse,
44
+ char const* _FileName,
45
+ int _LineNumber
46
+ ) noexcept;
47
+
48
+ void __CRTDECL operator delete[](
49
+ void* _Block,
50
+ int _BlockUse,
51
+ char const* _FileName,
52
+ int _LineNumber
53
+ ) noexcept;
54
+
55
+ #endif
56
+
57
+ #pragma pop_macro("new")
58
+
59
+ #pragma pack(pop)
60
+
61
+ } // extern "C++"
62
+ #endif // __cplusplus
63
+
64
+ #pragma warning(pop) // _VCRUNTIME_DISABLED_WARNINGS
miniMSVC/VC/Tools/MSVC/14.42.34433/include/vcruntime_startup.h ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //
2
+ // vcruntime_startup.h
3
+ //
4
+ // Copyright (c) Microsoft Corporation. All rights reserved.
5
+ //
6
+ // Declarations of the VCRuntime startup functionality
7
+ //
8
+ #pragma once
9
+
10
+ #include <vcruntime.h>
11
+
12
+ #pragma warning(push)
13
+ #pragma warning(disable: _VCRUNTIME_DISABLED_WARNINGS)
14
+
15
+ _CRT_BEGIN_C_HEADER
16
+
17
+
18
+
19
+ typedef enum _crt_argv_mode
20
+ {
21
+ _crt_argv_no_arguments,
22
+ _crt_argv_unexpanded_arguments,
23
+ _crt_argv_expanded_arguments,
24
+ } _crt_argv_mode;
25
+
26
+ typedef enum _crt_exit_return_mode
27
+ {
28
+ _crt_exit_terminate_process,
29
+ _crt_exit_return_to_caller
30
+ } _crt_exit_return_mode;
31
+
32
+ typedef enum _crt_exit_cleanup_mode
33
+ {
34
+ _crt_exit_full_cleanup,
35
+ _crt_exit_quick_cleanup,
36
+ _crt_exit_no_cleanup
37
+ } _crt_exit_cleanup_mode;
38
+
39
+ extern _crt_exit_return_mode __current_exit_return_mode;
40
+
41
+
42
+
43
+ __vcrt_bool __cdecl __vcrt_initialize(void);
44
+ __vcrt_bool __cdecl __vcrt_uninitialize(_In_ __vcrt_bool _Terminating);
45
+ __vcrt_bool __cdecl __vcrt_uninitialize_critical(void);
46
+ __vcrt_bool __cdecl __vcrt_thread_attach(void);
47
+ __vcrt_bool __cdecl __vcrt_thread_detach(void);
48
+
49
+ int __cdecl __isa_available_init(void);
50
+ _crt_argv_mode __CRTDECL _get_startup_argv_mode(void);
51
+
52
+
53
+
54
+ _CRT_END_C_HEADER
55
+
56
+ #pragma warning(pop) // _VCRUNTIME_DISABLED_WARNINGS
miniMSVC/VC/Tools/MSVC/14.42.34433/include/vcruntime_string.h ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //
2
+ // vcruntime_string.h
3
+ //
4
+ // Copyright (c) Microsoft Corporation. All rights reserved.
5
+ //
6
+ // <string.h> functionality that is implemented in the VCRuntime.
7
+ //
8
+ #pragma once
9
+
10
+ #include <vcruntime.h>
11
+
12
+ #pragma warning(push)
13
+ #pragma warning(disable: _VCRUNTIME_DISABLED_WARNINGS)
14
+
15
+ #ifndef __midl
16
+
17
+ _CRT_BEGIN_C_HEADER
18
+
19
+
20
+
21
+ _NODISCARD _Check_return_
22
+ _VCRTIMP void _CONST_RETURN* __cdecl memchr(
23
+ _In_reads_bytes_opt_(_MaxCount) void const* _Buf,
24
+ _In_ int _Val,
25
+ _In_ size_t _MaxCount
26
+ );
27
+
28
+ _NODISCARD _Check_return_
29
+ int __cdecl memcmp(
30
+ _In_reads_bytes_(_Size) void const* _Buf1,
31
+ _In_reads_bytes_(_Size) void const* _Buf2,
32
+ _In_ size_t _Size
33
+ );
34
+
35
+ _CRT_INSECURE_DEPRECATE_MEMORY(memcpy_s)
36
+ _Post_equal_to_(_Dst)
37
+ _At_buffer_(
38
+ (unsigned char*)_Dst,
39
+ _Iter_,
40
+ _Size,
41
+ _Post_satisfies_(((unsigned char*)_Dst)[_Iter_] == ((unsigned char*)_Src)[_Iter_])
42
+ )
43
+ void* __cdecl memcpy(
44
+ _Out_writes_bytes_all_(_Size) void* _Dst,
45
+ _In_reads_bytes_(_Size) void const* _Src,
46
+ _In_ size_t _Size
47
+ );
48
+
49
+ _CRT_INSECURE_DEPRECATE_MEMORY(memmove_s)
50
+ _VCRTIMP void* __cdecl memmove(
51
+ _Out_writes_bytes_all_opt_(_Size) void* _Dst,
52
+ _In_reads_bytes_opt_(_Size) void const* _Src,
53
+ _In_ size_t _Size
54
+ );
55
+
56
+ _Post_equal_to_(_Dst)
57
+ _At_buffer_(
58
+ (unsigned char*)_Dst,
59
+ _Iter_,
60
+ _Size,
61
+ _Post_satisfies_(((unsigned char*)_Dst)[_Iter_] == _Val)
62
+ )
63
+ void* __cdecl memset(
64
+ _Out_writes_bytes_all_(_Size) void* _Dst,
65
+ _In_ int _Val,
66
+ _In_ size_t _Size
67
+ );
68
+
69
+ _NODISCARD _Check_return_
70
+ _VCRTIMP char _CONST_RETURN* __cdecl strchr(
71
+ _In_z_ char const* _Str,
72
+ _In_ int _Val
73
+ );
74
+
75
+ _NODISCARD _Check_return_
76
+ _VCRTIMP char _CONST_RETURN* __cdecl strrchr(
77
+ _In_z_ char const* _Str,
78
+ _In_ int _Ch
79
+ );
80
+
81
+ _NODISCARD _Check_return_ _Ret_maybenull_
82
+ _VCRTIMP char _CONST_RETURN* __cdecl strstr(
83
+ _In_z_ char const* _Str,
84
+ _In_z_ char const* _SubStr
85
+ );
86
+
87
+ _NODISCARD _Check_return_
88
+ _When_(return != NULL, _Ret_range_(_Str, _Str + _String_length_(_Str) - 1))
89
+ _VCRTIMP wchar_t _CONST_RETURN* __cdecl wcschr(
90
+ _In_z_ wchar_t const* _Str,
91
+ _In_ wchar_t _Ch
92
+ );
93
+
94
+ _NODISCARD _Check_return_
95
+ _VCRTIMP wchar_t _CONST_RETURN* __cdecl wcsrchr(
96
+ _In_z_ wchar_t const* _Str,
97
+ _In_ wchar_t _Ch
98
+ );
99
+
100
+ _NODISCARD _Check_return_ _Ret_maybenull_
101
+ _When_(return != NULL, _Ret_range_(_Str, _Str + _String_length_(_Str) - 1))
102
+ _VCRTIMP wchar_t _CONST_RETURN* __cdecl wcsstr(
103
+ _In_z_ wchar_t const* _Str,
104
+ _In_z_ wchar_t const* _SubStr
105
+ );
106
+
107
+
108
+
109
+ _CRT_END_C_HEADER
110
+
111
+ #endif // __midl
112
+
113
+ #pragma warning(pop) // _VCRUNTIME_DISABLED_WARNINGS
miniMSVC/VC/Tools/MSVC/14.42.34433/include/vcruntime_typeinfo.h ADDED
@@ -0,0 +1,221 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //
2
+ // vcruntime_typeinfo.h
3
+ //
4
+ // Copyright (c) Microsoft Corporation. All rights reserved.
5
+ //
6
+ // <typeinfo> functionality that is implemented in the VCRuntime.
7
+ //
8
+ #pragma once
9
+
10
+ #include <vcruntime.h>
11
+
12
+ #if _VCRT_COMPILER_PREPROCESSOR
13
+ #include <vcruntime_exception.h>
14
+
15
+ #pragma warning(push)
16
+ #pragma warning(disable: _VCRUNTIME_DISABLED_WARNINGS)
17
+
18
+ #pragma pack(push, _CRT_PACKING)
19
+
20
+ extern "C++" { // attach declarations to the global module, see N4910 [module.unit]/7
21
+
22
+ #if defined _M_CEE_MIXED && !defined _VCRT_BUILD
23
+ // Provide a fake definition of __type_info_node to suppress linker warning
24
+ // LNK4248: unresolved typeref token for '__type_info_node'; image may not run.
25
+ struct __type_info_node { };
26
+ #else
27
+ struct __type_info_node;
28
+ #endif
29
+
30
+ #ifdef _M_CEE_PURE
31
+ extern System::IntPtr __type_info_root_node;
32
+ #else
33
+ extern __type_info_node __type_info_root_node;
34
+ #endif
35
+
36
+ } // extern "C++"
37
+
38
+
39
+ _CRT_BEGIN_C_HEADER
40
+
41
+ struct __std_type_info_data
42
+ {
43
+ const char * _UndecoratedName;
44
+ const char _DecoratedName[1];
45
+ __std_type_info_data() = delete;
46
+ __std_type_info_data(const __std_type_info_data&) = delete;
47
+ __std_type_info_data(__std_type_info_data&&) = delete;
48
+
49
+ __std_type_info_data& operator=(const __std_type_info_data&) = delete;
50
+ __std_type_info_data& operator=(__std_type_info_data&&) = delete;
51
+ };
52
+
53
+ _VCRTIMP int __cdecl __std_type_info_compare(
54
+ _In_ const __std_type_info_data* _Lhs,
55
+ _In_ const __std_type_info_data* _Rhs
56
+ );
57
+
58
+ _VCRTIMP size_t __cdecl __std_type_info_hash(
59
+ _In_ const __std_type_info_data* _Data
60
+ );
61
+
62
+ _VCRTIMP const char* __cdecl __std_type_info_name(
63
+ _Inout_ __std_type_info_data* _Data,
64
+ _Inout_ __type_info_node* _RootNode
65
+ );
66
+
67
+ _CRT_END_C_HEADER
68
+
69
+
70
+ extern "C++" { // attach declarations to the global module, see N4910 [module.unit]/7
71
+
72
+ #pragma warning(push)
73
+ #pragma warning(disable: 4577) // 'noexcept' used with no exception handling mode specified
74
+ _VCRT_EXPORT_STD class type_info // Exported because for typeid, MSVC looks for type_info in the global namespace
75
+ {
76
+ public:
77
+
78
+ type_info(const type_info&) = delete;
79
+ type_info& operator=(const type_info&) = delete;
80
+
81
+ _NODISCARD size_t hash_code() const noexcept
82
+ {
83
+ return __std_type_info_hash(&_Data);
84
+ }
85
+
86
+ _NODISCARD
87
+ #if _HAS_CXX23
88
+ constexpr
89
+ #endif // _HAS_CXX23
90
+ bool operator==(const type_info& _Other) const noexcept
91
+ {
92
+ #if _HAS_CXX23
93
+ if (__builtin_is_constant_evaluated())
94
+ {
95
+ return &_Data == &_Other._Data;
96
+ }
97
+ #endif // _HAS_CXX23
98
+
99
+ return __std_type_info_compare(&_Data, &_Other._Data) == 0;
100
+ }
101
+
102
+ #if !_HAS_CXX20
103
+ _NODISCARD bool operator!=(const type_info& _Other) const noexcept
104
+ {
105
+ return __std_type_info_compare(&_Data, &_Other._Data) != 0;
106
+ }
107
+ #endif // !_HAS_CXX20
108
+
109
+ _NODISCARD bool before(const type_info& _Other) const noexcept
110
+ {
111
+ return __std_type_info_compare(&_Data, &_Other._Data) < 0;
112
+ }
113
+
114
+ _NODISCARD const char* name() const noexcept
115
+ {
116
+ #ifdef _M_CEE_PURE
117
+ return __std_type_info_name(&_Data, static_cast<__type_info_node*>(__type_info_root_node.ToPointer()));
118
+ #else
119
+ return __std_type_info_name(&_Data, &__type_info_root_node);
120
+ #endif
121
+ }
122
+
123
+ _NODISCARD const char* raw_name() const noexcept
124
+ {
125
+ return _Data._DecoratedName;
126
+ }
127
+
128
+ virtual ~type_info() noexcept;
129
+
130
+ private:
131
+
132
+ mutable __std_type_info_data _Data;
133
+ };
134
+ #pragma warning(pop)
135
+
136
+ namespace std {
137
+ _VCRT_EXPORT_STD using ::type_info;
138
+ }
139
+
140
+ #if _HAS_EXCEPTIONS
141
+
142
+ namespace std {
143
+
144
+ #pragma warning(push)
145
+ #pragma warning(disable: 4577) // 'noexcept' used with no exception handling mode specified
146
+ _VCRT_EXPORT_STD class bad_cast
147
+ : public exception
148
+ {
149
+ public:
150
+
151
+ bad_cast() noexcept
152
+ : exception("bad cast", 1)
153
+ {
154
+ }
155
+
156
+ static bad_cast __construct_from_string_literal(const char* const _Message) noexcept
157
+ {
158
+ return bad_cast(_Message, 1);
159
+ }
160
+
161
+ private:
162
+
163
+ bad_cast(const char* const _Message, int) noexcept
164
+ : exception(_Message, 1)
165
+ {
166
+ }
167
+ };
168
+
169
+ _VCRT_EXPORT_STD class bad_typeid
170
+ : public exception
171
+ {
172
+ public:
173
+
174
+ bad_typeid() noexcept
175
+ : exception("bad typeid", 1)
176
+ {
177
+ }
178
+
179
+ static bad_typeid __construct_from_string_literal(const char* const _Message) noexcept
180
+ {
181
+ return bad_typeid(_Message, 1);
182
+ }
183
+
184
+ private:
185
+
186
+ friend class __non_rtti_object;
187
+
188
+ bad_typeid(const char* const _Message, int) noexcept
189
+ : exception(_Message, 1)
190
+ {
191
+ }
192
+ };
193
+
194
+ class __non_rtti_object
195
+ : public bad_typeid
196
+ {
197
+ public:
198
+
199
+ static __non_rtti_object __construct_from_string_literal(const char* const _Message) noexcept
200
+ {
201
+ return __non_rtti_object(_Message, 1);
202
+ }
203
+
204
+ private:
205
+
206
+ __non_rtti_object(const char* const _Message, int) noexcept
207
+ : bad_typeid(_Message, 1)
208
+ {
209
+ }
210
+ };
211
+
212
+ #pragma warning(pop)
213
+ } // namespace std
214
+
215
+ #endif // _HAS_EXCEPTIONS
216
+
217
+ } // extern "C++"
218
+
219
+ #pragma pack(pop)
220
+ #pragma warning(pop) // _VCRUNTIME_DISABLED_WARNINGS
221
+ #endif // _VCRT_COMPILER_PREPROCESSOR
miniMSVC/VC/Tools/MSVC/14.42.34433/include/vector ADDED
The diff for this file is too large to render. See raw diff
 
miniMSVC/VC/Tools/MSVC/14.42.34433/include/version ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ // version standard header (core)
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ #ifndef _STD_VERSION_HEADER_
7
+ #define _STD_VERSION_HEADER_
8
+ #include <yvals_core.h>
9
+ #endif // _STD_VERSION_HEADER_
miniMSVC/VC/Tools/MSVC/14.42.34433/include/wmmintrin.h ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright (C) 1985-2015 Intel Corporation.
3
+ *
4
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+ */
6
+
7
+ /*
8
+ * wmmintrin.h
9
+ *
10
+ * Principal header file for Intel(R) AES and PCLMULQDQ intrinsics.
11
+ */
12
+
13
+ #pragma once
14
+
15
+ #if !defined(_M_IX86) && !defined(_M_X64) && !(defined(_M_ARM64) && defined(USE_SOFT_INTRINSICS))
16
+ #error This header is specific to X86, X64, ARM64, and ARM64EC targets
17
+ #endif
18
+
19
+ #if (defined(_M_ARM64) || defined(_M_ARM64EC)) && !defined(__INTRIN_H_)
20
+ #error this header should only be included through <intrin.h>
21
+ #endif
22
+
23
+ #ifndef _INCLUDED_WMM
24
+ #define _INCLUDED_WMM
25
+ #ifndef __midl
26
+
27
+ #if defined (_M_CEE_PURE)
28
+ #error ERROR: EMM intrinsics not supported in the pure mode!
29
+ #else /* defined (_M_CEE_PURE) */
30
+
31
+ #include <nmmintrin.h>
32
+
33
+
34
+ #if defined (__cplusplus)
35
+ extern "C" {
36
+ #endif /* defined (__cplusplus) */
37
+
38
+ /*
39
+ * Performs 1 round of AES decryption of the first m128i using
40
+ * the second m128i as a round key.
41
+ */
42
+ extern __m128i _mm_aesdec_si128(__m128i /* v */, __m128i /* rkey */);
43
+
44
+ /*
45
+ * Performs the last round of AES decryption of the first m128i
46
+ * using the second m128i as a round key.
47
+ */
48
+ extern __m128i _mm_aesdeclast_si128(__m128i /* v */, __m128i /* rkey */);
49
+
50
+ /*
51
+ * Performs 1 round of AES encryption of the first m128i using
52
+ * the second m128i as a round key.
53
+ */
54
+ extern __m128i _mm_aesenc_si128(__m128i /* v */, __m128i /* rkey */);
55
+
56
+ /*
57
+ * Performs the last round of AES encryption of the first m128i
58
+ * using the second m128i as a round key.
59
+ */
60
+ extern __m128i _mm_aesenclast_si128(__m128i /* v */, __m128i /* rkey */);
61
+
62
+ /*
63
+ * Performs the InverseMixColumn operation on the source m128i
64
+ * and stores the result into m128i destination.
65
+ */
66
+ extern __m128i _mm_aesimc_si128(__m128i /* v */);
67
+
68
+ /*
69
+ * Generates a m128i round key for the input m128i
70
+ * AES cipher key and byte round constant.
71
+ * The second parameter must be a compile time constant.
72
+ */
73
+ extern __m128i _mm_aeskeygenassist_si128(__m128i /* ckey */, const int /* rcon */);
74
+
75
+ /*
76
+ * Performs carry-less integer multiplication of 64-bit halves
77
+ * of 128-bit input operands.
78
+ * The third parameter indicates which 64-bit halves of the input parameters
79
+ * v1 and v2 should be used. It must be a compile time constant.
80
+ */
81
+ extern __m128i _mm_clmulepi64_si128(__m128i /* v1 */, __m128i /* v2 */,
82
+ const int /* imm8 */);
83
+
84
+
85
+ #if defined __cplusplus
86
+ }; /* End "C" */
87
+ #endif /* defined __cplusplus */
88
+
89
+ #endif /* defined (_M_CEE_PURE) */
90
+ #endif /* __midl */
91
+ #endif /* _INCLUDED_WMM */
miniMSVC/VC/Tools/MSVC/14.42.34433/include/xatomic.h ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // xatomic.h internal header (core)
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ #ifndef _XATOMIC_H
7
+ #define _XATOMIC_H
8
+ #include <yvals_core.h>
9
+ #if _STL_COMPILER_PREPROCESSOR
10
+
11
+ #include <type_traits>
12
+
13
+ #include _STL_INTRIN_HEADER
14
+
15
+ #pragma pack(push, _CRT_PACKING)
16
+ #pragma warning(push, _STL_WARNING_LEVEL)
17
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
18
+ _STL_DISABLE_CLANG_WARNINGS
19
+ #pragma push_macro("new")
20
+ #undef new
21
+
22
+ #define _CONCATX(x, y) x##y
23
+ #define _CONCAT(x, y) _CONCATX(x, y)
24
+
25
+ // Interlocked intrinsic mapping for _nf/_acq/_rel
26
+ #if defined(_M_CEE_PURE) || (defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)) \
27
+ || (defined(_M_X64) && !defined(_M_ARM64EC))
28
+ #define _INTRIN_RELAXED(x) x
29
+ #define _INTRIN_ACQUIRE(x) x
30
+ #define _INTRIN_RELEASE(x) x
31
+ #define _INTRIN_ACQ_REL(x) x
32
+ #ifdef _M_CEE_PURE
33
+ #define _YIELD_PROCESSOR()
34
+ #else // ^^^ defined(_M_CEE_PURE) / !defined(_M_CEE_PURE) vvv
35
+ #define _YIELD_PROCESSOR() _mm_pause()
36
+ #endif // ^^^ !defined(_M_CEE_PURE) ^^^
37
+
38
+ #elif defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
39
+ #define _INTRIN_RELAXED(x) _CONCAT(x, _nf)
40
+ #define _INTRIN_ACQUIRE(x) _CONCAT(x, _acq)
41
+ #define _INTRIN_RELEASE(x) _CONCAT(x, _rel)
42
+ // We don't have interlocked intrinsics for acquire-release ordering, even on
43
+ // ARM32/ARM64, so fall back to sequentially consistent.
44
+ #define _INTRIN_ACQ_REL(x) x
45
+ #define _YIELD_PROCESSOR() __yield()
46
+
47
+ #else // ^^^ ARM32/ARM64/ARM64EC/HYBRID_X86_ARM64 / unsupported hardware vvv
48
+ #error Unsupported hardware
49
+ #endif // hardware
50
+
51
+ #define _MT_INCR(x) _INTRIN_RELAXED(_InterlockedIncrement)(reinterpret_cast<volatile long*>(&x))
52
+ #define _MT_DECR(x) _INTRIN_ACQ_REL(_InterlockedDecrement)(reinterpret_cast<volatile long*>(&x))
53
+
54
+ // The following macros are SHARED with vcruntime and any updates should be mirrored.
55
+ // Also: if any macros are added they should be #undefed in vcruntime as well.
56
+ #define _Compiler_barrier() _STL_DISABLE_DEPRECATED_WARNING _ReadWriteBarrier() _STL_RESTORE_DEPRECATED_WARNING
57
+
58
+ #if defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
59
+ #define _Memory_barrier() __dmb(0xB) // inner shared data memory barrier
60
+ #define _Compiler_or_memory_barrier() _Memory_barrier()
61
+ #if defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
62
+ #define _Memory_load_acquire_barrier() __dmb(0x9) // inner shared data memory load barrier
63
+ #else // ^^^ ARM64/ARM64EC/HYBRID_X86_ARM64 / ARM32 vvv
64
+ #define _Memory_load_acquire_barrier() _Memory_barrier()
65
+ #endif // ^^^ ARM32 ^^^
66
+ #elif defined(_M_IX86) || defined(_M_X64)
67
+ // x86/x64 hardware only emits memory barriers inside _Interlocked intrinsics
68
+ #define _Compiler_or_memory_barrier() _Compiler_barrier()
69
+ #else // ^^^ x86/x64 / unsupported hardware vvv
70
+ #error Unsupported hardware
71
+ #endif // hardware
72
+
73
+ _STD_BEGIN
74
+
75
+ #if _HAS_CXX20
76
+ _EXPORT_STD enum class memory_order : int {
77
+ relaxed,
78
+ consume,
79
+ acquire,
80
+ release,
81
+ acq_rel,
82
+ seq_cst,
83
+
84
+ // LWG-3268
85
+ memory_order_relaxed = relaxed,
86
+ memory_order_consume = consume,
87
+ memory_order_acquire = acquire,
88
+ memory_order_release = release,
89
+ memory_order_acq_rel = acq_rel,
90
+ memory_order_seq_cst = seq_cst
91
+ };
92
+ _EXPORT_STD inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
93
+ _EXPORT_STD inline constexpr memory_order memory_order_consume = memory_order::consume;
94
+ _EXPORT_STD inline constexpr memory_order memory_order_acquire = memory_order::acquire;
95
+ _EXPORT_STD inline constexpr memory_order memory_order_release = memory_order::release;
96
+ _EXPORT_STD inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
97
+ _EXPORT_STD inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
98
+ #else // ^^^ _HAS_CXX20 / !_HAS_CXX20 vvv
99
+ enum memory_order {
100
+ memory_order_relaxed,
101
+ memory_order_consume,
102
+ memory_order_acquire,
103
+ memory_order_release,
104
+ memory_order_acq_rel,
105
+ memory_order_seq_cst
106
+ };
107
+ #endif // ^^^ !_HAS_CXX20 ^^^
108
+
109
+ using _Atomic_counter_t = unsigned long;
110
+
111
+ template <class _Integral, class _Ty>
112
+ _NODISCARD volatile _Integral* _Atomic_address_as(_Ty& _Source) noexcept {
113
+ // gets a pointer to the argument as an integral type (to pass to intrinsics)
114
+ static_assert(is_integral_v<_Integral>, "Tried to reinterpret memory as non-integral");
115
+ return &reinterpret_cast<volatile _Integral&>(_Source);
116
+ }
117
+
118
+ template <class _Integral, class _Ty>
119
+ _NODISCARD const volatile _Integral* _Atomic_address_as(const _Ty& _Source) noexcept {
120
+ // gets a pointer to the argument as an integral type (to pass to intrinsics)
121
+ static_assert(is_integral_v<_Integral>, "Tried to reinterpret memory as non-integral");
122
+ return &reinterpret_cast<const volatile _Integral&>(_Source);
123
+ }
124
+
125
+ _STD_END
126
+
127
+ #pragma pop_macro("new")
128
+ _STL_RESTORE_CLANG_WARNINGS
129
+ #pragma warning(pop)
130
+ #pragma pack(pop)
131
+ #endif // _STL_COMPILER_PREPROCESSOR
132
+ #endif // _XATOMIC_H
miniMSVC/VC/Tools/MSVC/14.42.34433/include/xatomic_wait.h ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // xatomic_wait.h internal header
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ #ifndef _XATOMIC_WAIT_H
7
+ #define _XATOMIC_WAIT_H
8
+ #include <yvals.h>
9
+ #if _STL_COMPILER_PREPROCESSOR
10
+
11
+ #include <cstdlib>
12
+ #include <xatomic.h>
13
+
14
+ #pragma pack(push, _CRT_PACKING)
15
+ #pragma warning(push, _STL_WARNING_LEVEL)
16
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
17
+ _STL_DISABLE_CLANG_WARNINGS
18
+ #pragma push_macro("new")
19
+ #undef new
20
+
21
+ extern "C" {
22
+ inline constexpr unsigned long __std_atomic_wait_no_timeout = 0xFFFF'FFFF; // Pass as partial timeout
23
+
24
+ enum class __std_atomic_api_level : unsigned long {
25
+ __not_set,
26
+ __detecting,
27
+ __has_srwlock,
28
+ __has_wait_on_address,
29
+ };
30
+
31
+ // This function allows testing the atomic wait support while always using the APIs for a platform with fewer
32
+ // capabilities; it attempts to lock the APIs used to the level `_Requested_api_level`, and returns the actual API level
33
+ // in use. Once the API level has been set by calling this function (or detected by a call to one of the atomic wait
34
+ // functions), it can no longer be changed.
35
+ __std_atomic_api_level __stdcall __std_atomic_set_api_level(__std_atomic_api_level _Requested_api_level) noexcept;
36
+
37
+ // Support for atomic waits.
38
+ // The "direct" functions are used when the underlying infrastructure can use WaitOnAddress directly; that is, _Size is
39
+ // 1, 2, 4, or 8. The contract is the same as the WaitOnAddress function from the Windows SDK. If WaitOnAddress is not
40
+ // available on the current platform, falls back to a similar solution based on SRWLOCK and CONDITION_VARIABLE.
41
+ int __stdcall __std_atomic_wait_direct(
42
+ const void* _Storage, void* _Comparand, size_t _Size, unsigned long _Remaining_timeout) noexcept;
43
+ void __stdcall __std_atomic_notify_one_direct(const void* _Storage) noexcept;
44
+ void __stdcall __std_atomic_notify_all_direct(const void* _Storage) noexcept;
45
+
46
+ // The "indirect" functions are used when the size is not 1, 2, 4, or 8; these notionally wait on another value which is
47
+ // of one of those sizes whose value changes upon notify, hence "indirect". (As of 2020-07-24, this always uses the
48
+ // fallback SRWLOCK and CONDITION_VARIABLE implementation but that is not contractual.)
49
+ using _Atomic_wait_indirect_equal_callback_t = bool(__stdcall*)(
50
+ const void* _Storage, void* _Comparand, size_t _Size, void* _Param) _NOEXCEPT_FNPTR;
51
+
52
+ int __stdcall __std_atomic_wait_indirect(const void* _Storage, void* _Comparand, size_t _Size, void* _Param,
53
+ _Atomic_wait_indirect_equal_callback_t _Are_equal, unsigned long _Remaining_timeout) noexcept;
54
+ void __stdcall __std_atomic_notify_one_indirect(const void* _Storage) noexcept;
55
+ void __stdcall __std_atomic_notify_all_indirect(const void* _Storage) noexcept;
56
+
57
+ // These functions convert a duration into a time point in order to tolerate spurious wakes in atomic wait, and then
58
+ // convert back from the time point to individual wait attempts (which are limited by DWORD milliseconds to a length of
59
+ // ~49 days)
60
+ unsigned long long __stdcall __std_atomic_wait_get_deadline(unsigned long long _Timeout) noexcept;
61
+ unsigned long __stdcall __std_atomic_wait_get_remaining_timeout(unsigned long long _Deadline) noexcept;
62
+
63
+ } // extern "C"
64
+
65
+ #pragma pop_macro("new")
66
+ _STL_RESTORE_CLANG_WARNINGS
67
+ #pragma warning(pop)
68
+ #pragma pack(pop)
69
+ #endif // _STL_COMPILER_PREPROCESSOR
70
+ #endif // _XATOMIC_WAIT_H
miniMSVC/VC/Tools/MSVC/14.42.34433/include/xbit_ops.h ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // xbit_ops.h internal header (core)
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ #ifndef _XBIT_OPS_H
7
+ #define _XBIT_OPS_H
8
+ #include <yvals_core.h>
9
+ #if _STL_COMPILER_PREPROCESSOR
10
+
11
+ #include <cstdint>
12
+
13
+ #include _STL_INTRIN_HEADER
14
+
15
+ #pragma pack(push, _CRT_PACKING)
16
+ #pragma warning(push, _STL_WARNING_LEVEL)
17
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
18
+ _STL_DISABLE_CLANG_WARNINGS
19
+ #pragma push_macro("new")
20
+ #undef new
21
+
22
+ _STD_BEGIN
23
+
24
+ _NODISCARD inline unsigned long _Floor_of_log_2(size_t _Value) noexcept { // returns floor(log_2(_Value))
25
+ _Value |= size_t{1}; // avoid undefined answer from _BitScanReverse for 0
26
+ unsigned long _Result;
27
+
28
+ #ifdef _M_CEE_PURE
29
+ #ifdef _WIN64
30
+ _Result = 63;
31
+ #else // ^^^ 64-bit / 32-bit vvv
32
+ _Result = 31;
33
+ #endif // ^^^ 32-bit ^^^
34
+
35
+ while ((size_t{1} << _Result) > _Value) {
36
+ --_Result;
37
+ }
38
+ #else // ^^^ defined(_M_CEE_PURE) / !defined(_M_CEE_PURE) vvv
39
+ #ifdef _WIN64
40
+ // CodeQL [SM02313] _Result is always initialized: the code above guarantees that _Value is non-zero.
41
+ _BitScanReverse64(&_Result, _Value);
42
+ #else // ^^^ 64-bit / 32-bit vvv
43
+ // CodeQL [SM02313] _Result is always initialized: the code above guarantees that _Value is non-zero.
44
+ _BitScanReverse(&_Result, _Value);
45
+ #endif // ^^^ 32-bit ^^^
46
+ #endif // ^^^ !defined(_M_CEE_PURE) ^^^
47
+
48
+ return _Result;
49
+ }
50
+
51
+ _NODISCARD inline unsigned long _Ceiling_of_log_2(const size_t _Value) noexcept { // returns ceil(log_2(_Value))
52
+ // pre: _Value > 1
53
+ return 1 + _Floor_of_log_2(_Value - 1);
54
+ }
55
+
56
+ _NODISCARD inline uint32_t _Bit_scan_reverse(const uint32_t _Value) noexcept {
57
+ unsigned long _Index; // Intentionally uninitialized for better codegen
58
+
59
+ if (_BitScanReverse(&_Index, _Value)) {
60
+ return _Index + 1;
61
+ }
62
+
63
+ return 0;
64
+ }
65
+
66
+ _NODISCARD inline uint32_t _Bit_scan_reverse(const uint64_t _Value) noexcept {
67
+ unsigned long _Index; // Intentionally uninitialized for better codegen
68
+
69
+ #ifdef _WIN64
70
+ if (_BitScanReverse64(&_Index, _Value)) {
71
+ return _Index + 1;
72
+ }
73
+ #else // ^^^ 64-bit / 32-bit vvv
74
+ uint32_t _Ui32 = static_cast<uint32_t>(_Value >> 32);
75
+
76
+ if (_BitScanReverse(&_Index, _Ui32)) {
77
+ return _Index + 1 + 32;
78
+ }
79
+
80
+ _Ui32 = static_cast<uint32_t>(_Value);
81
+
82
+ if (_BitScanReverse(&_Index, _Ui32)) {
83
+ return _Index + 1;
84
+ }
85
+ #endif // ^^^ 32-bit ^^^
86
+
87
+ return 0;
88
+ }
89
+
90
+ _STD_END
91
+
92
+ #pragma pop_macro("new")
93
+ _STL_RESTORE_CLANG_WARNINGS
94
+ #pragma warning(pop)
95
+ #pragma pack(pop)
96
+ #endif // _STL_COMPILER_PREPROCESSOR
97
+ #endif // _XBIT_OPS_H
miniMSVC/VC/Tools/MSVC/14.42.34433/include/xcall_once.h ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // xcall_once.h internal header
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ #ifndef _XCALL_ONCE_H
7
+ #define _XCALL_ONCE_H
8
+ #include <yvals.h>
9
+ #if _STL_COMPILER_PREPROCESSOR
10
+
11
+ #include <cstdlib>
12
+ #include <type_traits>
13
+
14
+ #pragma pack(push, _CRT_PACKING)
15
+ #pragma warning(push, _STL_WARNING_LEVEL)
16
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
17
+ _STL_DISABLE_CLANG_WARNINGS
18
+ #pragma push_macro("new")
19
+ #undef new
20
+
21
+ _STD_BEGIN
22
+ _EXPORT_STD struct once_flag { // opaque data structure for call_once()
23
+ constexpr once_flag() noexcept : _Opaque(nullptr) {}
24
+
25
+ once_flag(const once_flag&) = delete;
26
+ once_flag& operator=(const once_flag&) = delete;
27
+
28
+ void* _Opaque;
29
+ };
30
+
31
+ template <class _Ty>
32
+ union _Immortalizer_impl { // constructs _Ty, never destroys
33
+ constexpr _Immortalizer_impl() noexcept : _Storage{} {}
34
+ _Immortalizer_impl(const _Immortalizer_impl&) = delete;
35
+ _Immortalizer_impl& operator=(const _Immortalizer_impl&) = delete;
36
+ ~_Immortalizer_impl() {
37
+ // do nothing
38
+ }
39
+
40
+ _Ty _Storage;
41
+ };
42
+
43
+ #if defined(_M_CEE) || defined(_M_ARM64EC) || defined(_M_HYBRID) \
44
+ || defined(__clang__) // TRANSITION, Clang doesn't recognize /ALTERNATENAME, not yet reported
45
+ #define _WINDOWS_API __stdcall
46
+ #define _RENAME_WINDOWS_API(_Api) _Api##_clr
47
+ #else // ^^^ use forwarders / use /ALTERNATENAME vvv
48
+ #define _WINDOWS_API __declspec(dllimport) __stdcall
49
+ #define _RENAME_WINDOWS_API(_Api) _Api
50
+ #endif // ^^^ use /ALTERNATENAME ^^^
51
+
52
+ // WINBASEAPI
53
+ // BOOL
54
+ // WINAPI
55
+ // InitOnceBeginInitialize(
56
+ // _Inout_ LPINIT_ONCE lpInitOnce,
57
+ // _In_ DWORD dwFlags,
58
+ // _Out_ PBOOL fPending,
59
+ // _Outptr_opt_result_maybenull_ LPVOID* lpContext
60
+ // );
61
+ extern "C" _NODISCARD int _WINDOWS_API _RENAME_WINDOWS_API(__std_init_once_begin_initialize)(
62
+ void** _LpInitOnce, unsigned long _DwFlags, int* _FPending, void** _LpContext) noexcept;
63
+
64
+ // WINBASEAPI
65
+ // BOOL
66
+ // WINAPI
67
+ // InitOnceComplete(
68
+ // _Inout_ LPINIT_ONCE lpInitOnce,
69
+ // _In_ DWORD dwFlags,
70
+ // _In_opt_ LPVOID lpContext
71
+ // );
72
+ extern "C" _NODISCARD int _WINDOWS_API _RENAME_WINDOWS_API(__std_init_once_complete)(
73
+ void** _LpInitOnce, unsigned long _DwFlags, void* _LpContext) noexcept;
74
+
75
+ extern "C" [[noreturn]] void __stdcall __std_init_once_link_alternate_names_and_abort() noexcept;
76
+
77
+ // #define RTL_RUN_ONCE_INIT_FAILED 0x00000004UL
78
+ // #define INIT_ONCE_INIT_FAILED RTL_RUN_ONCE_INIT_FAILED
79
+ _INLINE_VAR constexpr unsigned long _Init_once_init_failed = 0x4UL;
80
+
81
+ struct _Init_once_completer {
82
+ once_flag& _Once;
83
+ unsigned long _DwFlags;
84
+ ~_Init_once_completer() {
85
+ if (!_RENAME_WINDOWS_API(__std_init_once_complete)(&_Once._Opaque, _DwFlags, nullptr)) {
86
+ __std_init_once_link_alternate_names_and_abort();
87
+ }
88
+ }
89
+ };
90
+
91
+ _EXPORT_STD template <class _Fn, class... _Args>
92
+ void(call_once)(once_flag& _Once, _Fn&& _Fx, _Args&&... _Ax) noexcept(
93
+ noexcept(_STD invoke(_STD forward<_Fn>(_Fx), _STD forward<_Args>(_Ax)...))) /* strengthened */ {
94
+ // call _Fx(_Ax...) once
95
+ // parentheses against common "#define call_once(flag,func) pthread_once(flag,func)"
96
+ int _Pending;
97
+ if (!_RENAME_WINDOWS_API(__std_init_once_begin_initialize)(&_Once._Opaque, 0, &_Pending, nullptr)) {
98
+ _CSTD abort();
99
+ }
100
+
101
+ if (_Pending != 0) {
102
+ _Init_once_completer _Op{_Once, _Init_once_init_failed};
103
+ _STD invoke(_STD forward<_Fn>(_Fx), _STD forward<_Args>(_Ax)...);
104
+ _Op._DwFlags = 0;
105
+ }
106
+ }
107
+
108
+ #undef _WINDOWS_API
109
+ #undef _RENAME_WINDOWS_API
110
+ _STD_END
111
+
112
+ #pragma pop_macro("new")
113
+ _STL_RESTORE_CLANG_WARNINGS
114
+ #pragma warning(pop)
115
+ #pragma pack(pop)
116
+ #endif // _STL_COMPILER_PREPROCESSOR
117
+ #endif // _XCALL_ONCE_H
miniMSVC/VC/Tools/MSVC/14.42.34433/include/xcharconv.h ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // xcharconv.h internal header
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ #ifndef _XCHARCONV_H
7
+ #define _XCHARCONV_H
8
+ #include <yvals_core.h>
9
+ #if _STL_COMPILER_PREPROCESSOR
10
+
11
+ #if !_HAS_CXX17
12
+ #error The contents of <charconv> are only available with C++17. (Also, you should not include this internal header.)
13
+ #endif // !_HAS_CXX17
14
+
15
+ #include <cstdint>
16
+ #include <type_traits>
17
+ #include <xerrc.h>
18
+
19
+ #pragma pack(push, _CRT_PACKING)
20
+ #pragma warning(push, _STL_WARNING_LEVEL)
21
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
22
+ _STL_DISABLE_CLANG_WARNINGS
23
+ #pragma push_macro("new")
24
+ #undef new
25
+
26
+ _STD_BEGIN
27
+ _EXPORT_STD enum class chars_format {
28
+ scientific = 0b001,
29
+ fixed = 0b010,
30
+ hex = 0b100,
31
+ general = fixed | scientific,
32
+ };
33
+
34
+ _BITMASK_OPS(_EXPORT_STD, chars_format)
35
+
36
+ _EXPORT_STD struct to_chars_result {
37
+ char* ptr;
38
+ errc ec;
39
+ #if _HAS_CXX20
40
+ _NODISCARD friend bool operator==(const to_chars_result&, const to_chars_result&) = default;
41
+ #endif // _HAS_CXX20
42
+ };
43
+
44
+ _STD_END
45
+
46
+ #pragma pop_macro("new")
47
+ _STL_RESTORE_CLANG_WARNINGS
48
+ #pragma warning(pop)
49
+ #pragma pack(pop)
50
+
51
+ #endif // _STL_COMPILER_PREPROCESSOR
52
+ #endif // _XCHARCONV_H
miniMSVC/VC/Tools/MSVC/14.42.34433/include/xcharconv_ryu.h ADDED
The diff for this file is too large to render. See raw diff
 
miniMSVC/VC/Tools/MSVC/14.42.34433/include/xcharconv_ryu_tables.h ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // xcharconv_ryu_tables.h internal header
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+
7
+ // Copyright 2018 Ulf Adams
8
+ // Copyright (c) Microsoft Corporation. All rights reserved.
9
+
10
+ // Boost Software License - Version 1.0 - August 17th, 2003
11
+
12
+ // Permission is hereby granted, free of charge, to any person or organization
13
+ // obtaining a copy of the software and accompanying documentation covered by
14
+ // this license (the "Software") to use, reproduce, display, distribute,
15
+ // execute, and transmit the Software, and to prepare derivative works of the
16
+ // Software, and to permit third-parties to whom the Software is furnished to
17
+ // do so, all subject to the following:
18
+
19
+ // The copyright notices in the Software and this entire statement, including
20
+ // the above license grant, this restriction and the following disclaimer,
21
+ // must be included in all copies of the Software, in whole or in part, and
22
+ // all derivative works of the Software, unless such copies or derivative
23
+ // works are solely in the form of machine-executable object code generated by
24
+ // a source language processor.
25
+
26
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28
+ // FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
29
+ // SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
30
+ // FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
31
+ // ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
32
+ // DEALINGS IN THE SOFTWARE.
33
+
34
+
35
+ #ifndef _XCHARCONV_RYU_TABLES_H
36
+ #define _XCHARCONV_RYU_TABLES_H
37
+ #include <yvals_core.h>
38
+ #if _STL_COMPILER_PREPROCESSOR
39
+
40
+ #if !_HAS_CXX17
41
+ #error The contents of <charconv> are only available with C++17. (Also, you should not include this internal header.)
42
+ #endif // !_HAS_CXX17
43
+
44
+ #include <cstdint>
45
+
46
+ #pragma pack(push, _CRT_PACKING)
47
+ #pragma warning(push, _STL_WARNING_LEVEL)
48
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
49
+ _STL_DISABLE_CLANG_WARNINGS
50
+ #pragma push_macro("new")
51
+ #undef new
52
+
53
+ _STD_BEGIN
54
+
55
+ // https://github.com/ulfjack/ryu
56
+ // See xcharconv_ryu.h for the exact commit.
57
+ // (Keep the cgmanifest.json commitHash in sync.)
58
+
59
+ // clang-format off
60
+
61
+ // vvvvvvvvvv DERIVED FROM digit_table.h vvvvvvvvvv
62
+
63
+ // A table of all two-digit numbers. This is used to speed up decimal digit
64
+ // generation by copying pairs of digits into the final output.
65
+ template <class _CharT> constexpr _CharT __DIGIT_TABLE[] = {_CharT{}};
66
+
67
+ template <> inline constexpr char __DIGIT_TABLE<char>[200] = {
68
+ '0','0','0','1','0','2','0','3','0','4','0','5','0','6','0','7','0','8','0','9',
69
+ '1','0','1','1','1','2','1','3','1','4','1','5','1','6','1','7','1','8','1','9',
70
+ '2','0','2','1','2','2','2','3','2','4','2','5','2','6','2','7','2','8','2','9',
71
+ '3','0','3','1','3','2','3','3','3','4','3','5','3','6','3','7','3','8','3','9',
72
+ '4','0','4','1','4','2','4','3','4','4','4','5','4','6','4','7','4','8','4','9',
73
+ '5','0','5','1','5','2','5','3','5','4','5','5','5','6','5','7','5','8','5','9',
74
+ '6','0','6','1','6','2','6','3','6','4','6','5','6','6','6','7','6','8','6','9',
75
+ '7','0','7','1','7','2','7','3','7','4','7','5','7','6','7','7','7','8','7','9',
76
+ '8','0','8','1','8','2','8','3','8','4','8','5','8','6','8','7','8','8','8','9',
77
+ '9','0','9','1','9','2','9','3','9','4','9','5','9','6','9','7','9','8','9','9'
78
+ };
79
+
80
+ template <> inline constexpr wchar_t __DIGIT_TABLE<wchar_t>[200] = {
81
+ L'0',L'0',L'0',L'1',L'0',L'2',L'0',L'3',L'0',L'4',L'0',L'5',L'0',L'6',L'0',L'7',L'0',L'8',L'0',L'9',
82
+ L'1',L'0',L'1',L'1',L'1',L'2',L'1',L'3',L'1',L'4',L'1',L'5',L'1',L'6',L'1',L'7',L'1',L'8',L'1',L'9',
83
+ L'2',L'0',L'2',L'1',L'2',L'2',L'2',L'3',L'2',L'4',L'2',L'5',L'2',L'6',L'2',L'7',L'2',L'8',L'2',L'9',
84
+ L'3',L'0',L'3',L'1',L'3',L'2',L'3',L'3',L'3',L'4',L'3',L'5',L'3',L'6',L'3',L'7',L'3',L'8',L'3',L'9',
85
+ L'4',L'0',L'4',L'1',L'4',L'2',L'4',L'3',L'4',L'4',L'4',L'5',L'4',L'6',L'4',L'7',L'4',L'8',L'4',L'9',
86
+ L'5',L'0',L'5',L'1',L'5',L'2',L'5',L'3',L'5',L'4',L'5',L'5',L'5',L'6',L'5',L'7',L'5',L'8',L'5',L'9',
87
+ L'6',L'0',L'6',L'1',L'6',L'2',L'6',L'3',L'6',L'4',L'6',L'5',L'6',L'6',L'6',L'7',L'6',L'8',L'6',L'9',
88
+ L'7',L'0',L'7',L'1',L'7',L'2',L'7',L'3',L'7',L'4',L'7',L'5',L'7',L'6',L'7',L'7',L'7',L'8',L'7',L'9',
89
+ L'8',L'0',L'8',L'1',L'8',L'2',L'8',L'3',L'8',L'4',L'8',L'5',L'8',L'6',L'8',L'7',L'8',L'8',L'8',L'9',
90
+ L'9',L'0',L'9',L'1',L'9',L'2',L'9',L'3',L'9',L'4',L'9',L'5',L'9',L'6',L'9',L'7',L'9',L'8',L'9',L'9'
91
+ };
92
+
93
+ // ^^^^^^^^^^ DERIVED FROM digit_table.h ^^^^^^^^^^
94
+
95
+ // vvvvvvvvvv DERIVED FROM d2s_full_table.h vvvvvvvvvv
96
+
97
+ // These tables are generated by PrintDoubleLookupTable.
98
+ extern const uint64_t __DOUBLE_POW5_INV_SPLIT[292][2];
99
+
100
+ extern const uint64_t __DOUBLE_POW5_SPLIT[326][2];
101
+
102
+ // ^^^^^^^^^^ DERIVED FROM d2s_full_table.h ^^^^^^^^^^
103
+
104
+ // vvvvvvvvvv DERIVED FROM d2fixed_full_table.h vvvvvvvvvv
105
+
106
+ inline constexpr int __TABLE_SIZE = 64;
107
+
108
+ inline constexpr uint16_t __POW10_OFFSET[__TABLE_SIZE] = {
109
+ 0, 2, 5, 8, 12, 16, 21, 26, 32, 39,
110
+ 46, 54, 62, 71, 80, 90, 100, 111, 122, 134,
111
+ 146, 159, 173, 187, 202, 217, 233, 249, 266, 283,
112
+ 301, 319, 338, 357, 377, 397, 418, 440, 462, 485,
113
+ 508, 532, 556, 581, 606, 632, 658, 685, 712, 740,
114
+ 769, 798, 828, 858, 889, 920, 952, 984, 1017, 1050,
115
+ 1084, 1118, 1153, 1188
116
+ };
117
+
118
+ extern const uint64_t __POW10_SPLIT[1224][3];
119
+
120
+ inline constexpr int __TABLE_SIZE_2 = 69;
121
+ inline constexpr int __ADDITIONAL_BITS_2 = 120;
122
+
123
+ inline constexpr uint16_t __POW10_OFFSET_2[__TABLE_SIZE_2] = {
124
+ 0, 2, 6, 12, 20, 29, 40, 52, 66, 80,
125
+ 95, 112, 130, 150, 170, 192, 215, 240, 265, 292,
126
+ 320, 350, 381, 413, 446, 480, 516, 552, 590, 629,
127
+ 670, 712, 755, 799, 845, 892, 940, 989, 1040, 1092,
128
+ 1145, 1199, 1254, 1311, 1369, 1428, 1488, 1550, 1613, 1678,
129
+ 1743, 1810, 1878, 1947, 2017, 2088, 2161, 2235, 2311, 2387,
130
+ 2465, 2544, 2625, 2706, 2789, 2873, 2959, 3046, 3133
131
+ };
132
+
133
+ inline constexpr uint8_t __MIN_BLOCK_2[__TABLE_SIZE_2] = {
134
+ 0, 0, 0, 0, 0, 0, 1, 1, 2, 3,
135
+ 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
136
+ 8, 9, 9, 10, 11, 11, 12, 12, 13, 13,
137
+ 14, 14, 15, 15, 16, 16, 17, 17, 18, 19,
138
+ 19, 20, 20, 21, 21, 22, 22, 23, 23, 24,
139
+ 24, 25, 26, 26, 27, 27, 28, 28, 29, 29,
140
+ 30, 30, 31, 31, 32, 32, 33, 34, 0
141
+ };
142
+
143
+ extern const uint64_t __POW10_SPLIT_2[3133][3];
144
+
145
+ // ^^^^^^^^^^ DERIVED FROM d2fixed_full_table.h ^^^^^^^^^^
146
+
147
+ // clang-format on
148
+
149
+ _STD_END
150
+
151
+ #pragma pop_macro("new")
152
+ _STL_RESTORE_CLANG_WARNINGS
153
+ #pragma warning(pop)
154
+ #pragma pack(pop)
155
+
156
+ #endif // _STL_COMPILER_PREPROCESSOR
157
+ #endif // _XCHARCONV_RYU_TABLES_H
miniMSVC/VC/Tools/MSVC/14.42.34433/include/xcharconv_tables.h ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // xcharconv_tables.h internal header (core)
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ #ifndef _XCHARCONV_TABLES_H
7
+ #define _XCHARCONV_TABLES_H
8
+ #include <yvals_core.h>
9
+ #if _STL_COMPILER_PREPROCESSOR
10
+
11
+ #if !_HAS_CXX17
12
+ #error The contents of <charconv> are only available with C++17. (Also, you should not include this internal header.)
13
+ #endif // !_HAS_CXX17
14
+
15
+ #include <cstdint>
16
+
17
+ #pragma pack(push, _CRT_PACKING)
18
+ #pragma warning(push, _STL_WARNING_LEVEL)
19
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
20
+ _STL_DISABLE_CLANG_WARNINGS
21
+ #pragma push_macro("new")
22
+ #undef new
23
+
24
+ // For general precision, we can use lookup tables to avoid performing trial formatting.
25
+
26
+ // For a simple example, imagine counting the number of digits D in an integer, and needing to know
27
+ // whether D is less than 3, equal to 3/4/5/6, or greater than 6. We could use a lookup table:
28
+ // D | Largest integer with D digits
29
+ // 2 | 99
30
+ // 3 | 999
31
+ // 4 | 9'999
32
+ // 5 | 99'999
33
+ // 6 | 999'999
34
+ // 7 | table end
35
+ // Looking up an integer in this table with lower_bound() will work:
36
+ // * Too-small integers, like 7, 70, and 99, will cause lower_bound() to return the D == 2 row. (If all we care
37
+ // about is whether D is less than 3, then it's okay to smash the D == 1 and D == 2 cases together.)
38
+ // * Integers in [100, 999] will cause lower_bound() to return the D == 3 row, and so forth.
39
+ // * Too-large integers, like 1'000'000 and above, will cause lower_bound() to return the end of the table. If we
40
+ // compute D from that index, this will be considered D == 7, which will activate any "greater than 6" logic.
41
+
42
+ // Floating-point is slightly more complicated.
43
+
44
+ // The ordinary lookup tables are for X within [-5, 38] for float, and [-5, 308] for double.
45
+ // (-5 absorbs too-negative exponents, outside the P > X >= -4 criterion. 38 and 308 are the maximum exponents.)
46
+ // Due to the P > X condition, we can use a subset of the table for X within [-5, P - 1], suitably clamped.
47
+
48
+ // When P is small, rounding can affect X. For example:
49
+ // For P == 1, the largest double with X == 0 is: 9.4999999999999982236431605997495353221893310546875
50
+ // For P == 2, the largest double with X == 0 is: 9.949999999999999289457264239899814128875732421875
51
+ // For P == 3, the largest double with X == 0 is: 9.9949999999999992184029906638897955417633056640625
52
+
53
+ // Exponent adjustment is a concern for P within [1, 7] for float, and [1, 15] for double (determined via
54
+ // brute force). While larger values of P still perform rounding, they can't trigger exponent adjustment.
55
+ // This is because only values with repeated '9' digits can undergo exponent adjustment during rounding,
56
+ // and floating-point granularity limits the number of consecutive '9' digits that can appear.
57
+
58
+ // So, we need special lookup tables for small values of P.
59
+ // These tables have varying lengths due to the P > X >= -4 criterion. For example:
60
+ // For P == 1, need table entries for X: -5, -4, -3, -2, -1, 0
61
+ // For P == 2, need table entries for X: -5, -4, -3, -2, -1, 0, 1
62
+ // For P == 3, need table entries for X: -5, -4, -3, -2, -1, 0, 1, 2
63
+ // For P == 4, need table entries for X: -5, -4, -3, -2, -1, 0, 1, 2, 3
64
+
65
+ // We can concatenate these tables for compact storage, using triangular numbers to access them.
66
+ // The table for P begins at index (P - 1) * (P + 10) / 2 with length P + 5.
67
+
68
+ // For both the ordinary and special lookup tables, after an index I is returned by lower_bound(), X is I - 5.
69
+
70
+ // We need to special-case the floating-point value 0.0, which is considered to have X == 0.
71
+ // Otherwise, the lookup tables would consider it to have a highly negative X.
72
+
73
+ // Finally, because we're working with positive floating-point values,
74
+ // representation comparisons behave identically to floating-point comparisons.
75
+
76
+ // The generator is in /tools/scripts/charconv_tables_generate.cpp
77
+
78
+ _STD_BEGIN
79
+
80
+ template <class _Floating>
81
+ struct _General_precision_tables_2;
82
+
83
+ template <>
84
+ struct _General_precision_tables_2<float> {
85
+ static constexpr int _Max_special_P = 7;
86
+
87
+ static const uint32_t _Special_X_table[63];
88
+
89
+ static constexpr int _Max_P = 39;
90
+
91
+ static const uint32_t _Ordinary_X_table[44];
92
+ };
93
+
94
+ template <>
95
+ struct _General_precision_tables_2<double> {
96
+ static constexpr int _Max_special_P = 15;
97
+
98
+ static const uint64_t _Special_X_table[195];
99
+
100
+ static constexpr int _Max_P = 309;
101
+
102
+ static const uint64_t _Ordinary_X_table[314];
103
+ };
104
+
105
+ _STD_END
106
+
107
+ #pragma pop_macro("new")
108
+ _STL_RESTORE_CLANG_WARNINGS
109
+ #pragma warning(pop)
110
+ #pragma pack(pop)
111
+
112
+ #endif // _STL_COMPILER_PREPROCESSOR
113
+ #endif // _XCHARCONV_TABLES_H
miniMSVC/VC/Tools/MSVC/14.42.34433/include/xerrc.h ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // xerrc.h internal header (core)
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ #ifndef _XERRC_H
7
+ #define _XERRC_H
8
+ #include <yvals_core.h>
9
+ #if _STL_COMPILER_PREPROCESSOR
10
+
11
+ #pragma pack(push, _CRT_PACKING)
12
+ #pragma warning(push, _STL_WARNING_LEVEL)
13
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
14
+ _STL_DISABLE_CLANG_WARNINGS
15
+ #pragma push_macro("new")
16
+ #undef new
17
+
18
+ _STD_BEGIN
19
+ _EXPORT_STD enum class errc { // names for generic error codes
20
+ address_family_not_supported = 102, // EAFNOSUPPORT
21
+ address_in_use = 100, // EADDRINUSE
22
+ address_not_available = 101, // EADDRNOTAVAIL
23
+ already_connected = 113, // EISCONN
24
+ argument_list_too_long = 7, // E2BIG
25
+ argument_out_of_domain = 33, // EDOM
26
+ bad_address = 14, // EFAULT
27
+ bad_file_descriptor = 9, // EBADF
28
+ bad_message = 104, // EBADMSG
29
+ broken_pipe = 32, // EPIPE
30
+ connection_aborted = 106, // ECONNABORTED
31
+ connection_already_in_progress = 103, // EALREADY
32
+ connection_refused = 107, // ECONNREFUSED
33
+ connection_reset = 108, // ECONNRESET
34
+ cross_device_link = 18, // EXDEV
35
+ destination_address_required = 109, // EDESTADDRREQ
36
+ device_or_resource_busy = 16, // EBUSY
37
+ directory_not_empty = 41, // ENOTEMPTY
38
+ executable_format_error = 8, // ENOEXEC
39
+ file_exists = 17, // EEXIST
40
+ file_too_large = 27, // EFBIG
41
+ filename_too_long = 38, // ENAMETOOLONG
42
+ function_not_supported = 40, // ENOSYS
43
+ host_unreachable = 110, // EHOSTUNREACH
44
+ identifier_removed = 111, // EIDRM
45
+ illegal_byte_sequence = 42, // EILSEQ
46
+ inappropriate_io_control_operation = 25, // ENOTTY
47
+ interrupted = 4, // EINTR
48
+ invalid_argument = 22, // EINVAL
49
+ invalid_seek = 29, // ESPIPE
50
+ io_error = 5, // EIO
51
+ is_a_directory = 21, // EISDIR
52
+ message_size = 115, // EMSGSIZE
53
+ network_down = 116, // ENETDOWN
54
+ network_reset = 117, // ENETRESET
55
+ network_unreachable = 118, // ENETUNREACH
56
+ no_buffer_space = 119, // ENOBUFS
57
+ no_child_process = 10, // ECHILD
58
+ no_link = 121, // ENOLINK
59
+ no_lock_available = 39, // ENOLCK
60
+ no_message_available _CXX23_DEPRECATE_UNIX_STREAMS = 120, // ENODATA
61
+ no_message = 122, // ENOMSG
62
+ no_protocol_option = 123, // ENOPROTOOPT
63
+ no_space_on_device = 28, // ENOSPC
64
+ no_stream_resources _CXX23_DEPRECATE_UNIX_STREAMS = 124, // ENOSR
65
+ no_such_device_or_address = 6, // ENXIO
66
+ no_such_device = 19, // ENODEV
67
+ no_such_file_or_directory = 2, // ENOENT
68
+ no_such_process = 3, // ESRCH
69
+ not_a_directory = 20, // ENOTDIR
70
+ not_a_socket = 128, // ENOTSOCK
71
+ not_a_stream _CXX23_DEPRECATE_UNIX_STREAMS = 125, // ENOSTR
72
+ not_connected = 126, // ENOTCONN
73
+ not_enough_memory = 12, // ENOMEM
74
+ not_supported = 129, // ENOTSUP
75
+ operation_canceled = 105, // ECANCELED
76
+ operation_in_progress = 112, // EINPROGRESS
77
+ operation_not_permitted = 1, // EPERM
78
+ operation_not_supported = 130, // EOPNOTSUPP
79
+ operation_would_block = 140, // EWOULDBLOCK
80
+ owner_dead = 133, // EOWNERDEAD
81
+ permission_denied = 13, // EACCES
82
+ protocol_error = 134, // EPROTO
83
+ protocol_not_supported = 135, // EPROTONOSUPPORT
84
+ read_only_file_system = 30, // EROFS
85
+ resource_deadlock_would_occur = 36, // EDEADLK
86
+ resource_unavailable_try_again = 11, // EAGAIN
87
+ result_out_of_range = 34, // ERANGE
88
+ state_not_recoverable = 127, // ENOTRECOVERABLE
89
+ stream_timeout _CXX23_DEPRECATE_UNIX_STREAMS = 137, // ETIME
90
+ text_file_busy = 139, // ETXTBSY
91
+ timed_out = 138, // ETIMEDOUT
92
+ too_many_files_open_in_system = 23, // ENFILE
93
+ too_many_files_open = 24, // EMFILE
94
+ too_many_links = 31, // EMLINK
95
+ too_many_symbolic_link_levels = 114, // ELOOP
96
+ value_too_large = 132, // EOVERFLOW
97
+ wrong_protocol_type = 136 // EPROTOTYPE
98
+ };
99
+
100
+ _STD_END
101
+
102
+ #pragma pop_macro("new")
103
+ _STL_RESTORE_CLANG_WARNINGS
104
+ #pragma warning(pop)
105
+ #pragma pack(pop)
106
+
107
+ #endif // _STL_COMPILER_PREPROCESSOR
108
+ #endif // _XERRC_H
miniMSVC/VC/Tools/MSVC/14.42.34433/include/xfacet ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // xfacet internal header
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ // This header is used to compile the import library (via locale0_implib.cpp => locale0.cpp => xfacet).
7
+ // MAJOR LIMITATIONS apply to what can be included here!
8
+ // Before editing this file, read: /docs/import_library.md
9
+
10
+ #ifndef _XFACET_
11
+ #define _XFACET_
12
+ #include <yvals.h>
13
+ #if _STL_COMPILER_PREPROCESSOR
14
+
15
+ #pragma pack(push, _CRT_PACKING)
16
+ #pragma warning(push, _STL_WARNING_LEVEL)
17
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
18
+ _STL_DISABLE_CLANG_WARNINGS
19
+ #pragma push_macro("new")
20
+ #undef new
21
+
22
+ _STD_BEGIN
23
+ extern "C++" class _CRTIMP2_PURE_IMPORT _Facet_base { // code for reference counting a facet
24
+ public:
25
+ virtual __CLR_OR_THIS_CALL ~_Facet_base() noexcept {} // ensure that derived classes can be destroyed properly
26
+
27
+ // increment use count
28
+ virtual void __CLR_OR_THIS_CALL _Incref() noexcept = 0;
29
+
30
+ // decrement use count
31
+ virtual _Facet_base* __CLR_OR_THIS_CALL _Decref() noexcept = 0;
32
+ };
33
+
34
+ #if defined(_M_CEE)
35
+ void __CLRCALL_OR_CDECL _Facet_Register_m(_Facet_base*);
36
+ #else // ^^^ defined(_M_CEE) / !defined(_M_CEE) vvv
37
+ extern "C++" void __CLRCALL_OR_CDECL _Facet_Register(_Facet_base*);
38
+ #endif // ^^^ !defined(_M_CEE) ^^^
39
+ _STD_END
40
+ #pragma pop_macro("new")
41
+ _STL_RESTORE_CLANG_WARNINGS
42
+ #pragma warning(pop)
43
+ #pragma pack(pop)
44
+ #endif // _STL_COMPILER_PREPROCESSOR
45
+ #endif // _XFACET_
miniMSVC/VC/Tools/MSVC/14.42.34433/include/xfilesystem_abi.h ADDED
@@ -0,0 +1,399 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // xfilesystem_abi.h internal header (core)
2
+
3
+ // Copyright (c) Microsoft Corporation.
4
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5
+
6
+ #ifndef _XFILESYSTEM_ABI_H
7
+ #define _XFILESYSTEM_ABI_H
8
+ #include <yvals_core.h>
9
+ #if _STL_COMPILER_PREPROCESSOR
10
+
11
+ #include <cstdint>
12
+ #include <type_traits>
13
+
14
+ #pragma pack(push, _CRT_PACKING)
15
+ #pragma warning(push, _STL_WARNING_LEVEL)
16
+ #pragma warning(disable : _STL_DISABLED_WARNINGS)
17
+ _STL_DISABLE_CLANG_WARNINGS
18
+ #pragma push_macro("new")
19
+ #undef new
20
+
21
+ extern "C" {
22
+ inline constexpr size_t __std_fs_max_path = 260; // #define MAX_PATH 260
23
+ inline constexpr size_t __std_fs_temp_path_max = __std_fs_max_path + 1;
24
+
25
+ enum class __std_win_error : unsigned long {
26
+ _Success = 0, // #define ERROR_SUCCESS 0L
27
+ _Invalid_function = 1, // #define ERROR_INVALID_FUNCTION 1L
28
+ _File_not_found = 2, // #define ERROR_FILE_NOT_FOUND 2L
29
+ _Path_not_found = 3, // #define ERROR_PATH_NOT_FOUND 3L
30
+ _Access_denied = 5, // #define ERROR_ACCESS_DENIED 5L
31
+ _Not_enough_memory = 8, // #define ERROR_NOT_ENOUGH_MEMORY 8L
32
+ _No_more_files = 18, // #define ERROR_NO_MORE_FILES 18L
33
+ _Sharing_violation = 32, // #define ERROR_SHARING_VIOLATION 32L
34
+ _Not_supported = 50, // #define ERROR_NOT_SUPPORTED 50L
35
+ _Error_bad_netpath = 53, // #define ERROR_BAD_NETPATH 53L
36
+ _File_exists = 80, // #define ERROR_FILE_EXISTS 80L
37
+ _Invalid_parameter = 87, // #define ERROR_INVALID_PARAMETER 87L
38
+ _Insufficient_buffer = 122, // #define ERROR_INSUFFICIENT_BUFFER 122L
39
+ _Invalid_name = 123, // #define ERROR_INVALID_NAME 123L
40
+ _Directory_not_empty = 145, // #define ERROR_DIR_NOT_EMPTY 145L
41
+ _Already_exists = 183, // #define ERROR_ALREADY_EXISTS 183L
42
+ _Filename_exceeds_range = 206, // #define ERROR_FILENAME_EXCED_RANGE 206L
43
+ _Directory_name_is_invalid = 267, // #define ERROR_DIRECTORY 267L
44
+ _Reparse_tag_invalid = 4393L, // #define ERROR_REPARSE_TAG_INVALID 4393L
45
+ _Max = ~0UL // sentinel not used by Win32
46
+ };
47
+
48
+ #pragma warning(push)
49
+ #pragma warning(disable : 4061) // enumerator not explicitly handled by switch label
50
+ _NODISCARD inline bool __std_is_file_not_found(const __std_win_error _Error) noexcept {
51
+ switch (_Error) {
52
+ case __std_win_error::_File_not_found:
53
+ case __std_win_error::_Path_not_found:
54
+ case __std_win_error::_Error_bad_netpath:
55
+ case __std_win_error::_Invalid_name:
56
+ case __std_win_error::_Directory_name_is_invalid: // Windows 11 24H2
57
+ return true;
58
+ default:
59
+ return false;
60
+ }
61
+ }
62
+ #pragma warning(pop)
63
+
64
+ enum class __std_fs_dir_handle : intptr_t { _Invalid = -1 };
65
+
66
+ enum class __std_fs_file_attr : unsigned long {
67
+ _Readonly = 0x00000001, // #define FILE_ATTRIBUTE_READONLY 0x00000001
68
+ _Hidden = 0x00000002, // #define FILE_ATTRIBUTE_HIDDEN 0x00000002
69
+ _System = 0x00000004, // #define FILE_ATTRIBUTE_SYSTEM 0x00000004
70
+ _Directory = 0x00000010, // #define FILE_ATTRIBUTE_DIRECTORY 0x00000010
71
+ _Archive = 0x00000020, // #define FILE_ATTRIBUTE_ARCHIVE 0x00000020
72
+ _Device = 0x00000040, // #define FILE_ATTRIBUTE_DEVICE 0x00000040
73
+ _Normal = 0x00000080, // #define FILE_ATTRIBUTE_NORMAL 0x00000080
74
+ _Temporary = 0x00000100, // #define FILE_ATTRIBUTE_TEMPORARY 0x00000100
75
+ _Sparse_file = 0x00000200, // #define FILE_ATTRIBUTE_SPARSE_FILE 0x00000200
76
+ _Reparse_point = 0x00000400, // #define FILE_ATTRIBUTE_REPARSE_POINT 0x00000400
77
+
78
+ _Invalid = 0xFFFFFFFF, // #define INVALID_FILE_ATTRIBUTES ((DWORD)-1)
79
+ };
80
+ } // extern "C"
81
+
82
+ _EXTERN_CXX_WORKAROUND
83
+ _BITMASK_OPS(_EMPTY_ARGUMENT, __std_fs_file_attr)
84
+ _END_EXTERN_CXX_WORKAROUND
85
+
86
+ extern "C" {
87
+ enum class __std_fs_reparse_tag : unsigned long {
88
+ _None = 0,
89
+ _Mount_point = (0xA0000003L), // #define IO_REPARSE_TAG_MOUNT_POINT (0xA0000003L)
90
+ _Symlink = (0xA000000CL), // #define IO_REPARSE_TAG_SYMLINK (0xA000000CL)
91
+ };
92
+
93
+ struct __std_fs_filetime { // typedef struct _FILETIME {
94
+ unsigned long _Low; // DWORD dwLowDateTime;
95
+ unsigned long _High; // DWORD dwHighDateTime;
96
+ }; // } FILETIME, *PFILETIME, *LPFILETIME;
97
+
98
+ struct __std_fs_find_data { // typedef struct _WIN32_FIND_DATAW {
99
+ __std_fs_file_attr _Attributes; // DWORD dwFileAttributes;
100
+ __std_fs_filetime _Creation_time; // FILETIME ftCreationTime;
101
+ __std_fs_filetime _Last_access_time; // FILETIME ftLastAccessTime;
102
+ __std_fs_filetime _Last_write_time; // FILETIME ftLastWriteTime;
103
+ unsigned long _File_size_high; // DWORD nFileSizeHigh;
104
+ unsigned long _File_size_low; // DWORD nFileSizeLow;
105
+
106
+ // MSDN: dwReserved0 specifies the reparse point tag if
107
+ // MSDN: (dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) != 0
108
+
109
+ __std_fs_reparse_tag _Reparse_point_tag; // DWORD dwReserved0;
110
+ unsigned long _Reserved1; // DWORD dwReserved1;
111
+ wchar_t _File_name[__std_fs_max_path]; // _Field_z_ WCHAR cFileName[ MAX_PATH ];
112
+ wchar_t _Short_file_name[14]; // _Field_z_ WCHAR cAlternateFileName[ 14 ];
113
+ }; // } WIN32_FIND_DATAW, ... ;
114
+
115
+ enum class __std_fs_stats_flags : unsigned long {
116
+ _None = 0,
117
+
118
+ _Follow_symlinks = 0x01, // resolve symlink
119
+ _Attributes = 0x02, // read/has attributes
120
+ _Reparse_tag = 0x04, // read/has reparse_tag; may not be combined with _Follow_symlinks
121
+ _File_size = 0x08, // read/has file size
122
+ _Link_count = 0x10, // read/has link count
123
+ _Last_write_time = 0x20, // read/has last write time
124
+
125
+ _All_data = _Attributes | _Reparse_tag | _File_size | _Link_count | _Last_write_time
126
+ };
127
+ } // extern "C"
128
+
129
+ _EXTERN_CXX_WORKAROUND
130
+ _BITMASK_OPS(_EMPTY_ARGUMENT, __std_fs_stats_flags)
131
+ _END_EXTERN_CXX_WORKAROUND
132
+
133
+ extern "C" {
134
+ struct __std_fs_stats {
135
+ long long _Last_write_time;
136
+ unsigned long long _File_size;
137
+ __std_fs_file_attr _Attributes;
138
+ __std_fs_reparse_tag _Reparse_point_tag;
139
+ unsigned long _Link_count;
140
+ __std_fs_stats_flags _Available; // which fields are available
141
+
142
+ _NODISCARD __std_fs_file_attr _Symlink_hint_attributes() const noexcept {
143
+ if (_STD _Bitmask_includes_any(_Available, __std_fs_stats_flags::_Attributes)) {
144
+ return _Attributes;
145
+ }
146
+
147
+ return __std_fs_file_attr::_Invalid;
148
+ }
149
+ };
150
+
151
+ struct __std_fs_reparse_data_buffer { // typedef struct _REPARSE_DATA_BUFFER
152
+ unsigned long _Reparse_tag;
153
+ unsigned short _Reparse_data_length;
154
+ unsigned short _Reserved;
155
+ union {
156
+ struct {
157
+ unsigned short _Substitute_name_offset;
158
+ unsigned short _Substitute_name_length;
159
+ unsigned short _Print_name_offset;
160
+ unsigned short _Print_name_length;
161
+ unsigned long _Flags;
162
+ wchar_t _Path_buffer[1];
163
+ } _Symbolic_link_reparse_buffer;
164
+ struct {
165
+ unsigned short _Substitute_name_offset;
166
+ unsigned short _Substitute_name_length;
167
+ unsigned short _Print_name_offset;
168
+ unsigned short _Print_name_length;
169
+ wchar_t _Path_buffer[1];
170
+ } _Mount_point_reparse_buffer;
171
+ struct {
172
+ unsigned char _Data_buffer[1];
173
+ } _Generic_reparse_buffer;
174
+ };
175
+ };
176
+
177
+ struct __std_ulong_and_error {
178
+ unsigned long _Size;
179
+ __std_win_error _Error;
180
+ };
181
+
182
+ enum class __std_fs_volume_name_kind : unsigned long {
183
+ _Dos = 0, // #define VOLUME_NAME_DOS 0x0
184
+ _Guid = 1, // #define VOLUME_NAME_GUID 0x1
185
+ _Nt = 2, // #define VOLUME_NAME_NT 0x2
186
+ _None = 4 // #define VOLUME_NAME_NONE 0x4
187
+ };
188
+
189
+ enum class __std_access_rights : unsigned long {
190
+ _Delete = 0x00010000, // #define DELETE (0x00010000L)
191
+ _File_read_attributes = 0x0080, // #define FILE_READ_ATTRIBUTES ( 0x0080 )
192
+ _File_write_attributes = 0x0100, // #define FILE_WRITE_ATTRIBUTES ( 0x0100 )
193
+
194
+ // #define READ_CONTROL (0x00020000L)
195
+ // #define STANDARD_RIGHTS_WRITE (READ_CONTROL)
196
+ // #define FILE_WRITE_DATA (0x0002)
197
+ // #define FILE_WRITE_ATTRIBUTES (0x0100)
198
+ // #define FILE_WRITE_EA (0x0010)
199
+ // #define FILE_APPEND_DATA (0x0004)
200
+ // #define SYNCHRONIZE (0x00100000L)
201
+ // #define FILE_GENERIC_WRITE (STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES
202
+ // | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE)
203
+ _File_generic_write = 0x00120116,
204
+ };
205
+ } // extern "C"
206
+
207
+ _EXTERN_CXX_WORKAROUND
208
+ _BITMASK_OPS(_EMPTY_ARGUMENT, __std_access_rights)
209
+ _END_EXTERN_CXX_WORKAROUND
210
+
211
+ extern "C" {
212
+ enum class __std_fs_file_flags : unsigned long {
213
+ _None = 0,
214
+ _Backup_semantics = 0x02000000, // #define FILE_FLAG_BACKUP_SEMANTICS 0x02000000
215
+ _Open_reparse_point = 0x00200000, // #define FILE_FLAG_OPEN_REPARSE_POINT 0x00200000
216
+ };
217
+ } // extern "C"
218
+
219
+ _EXTERN_CXX_WORKAROUND
220
+ _BITMASK_OPS(_EMPTY_ARGUMENT, __std_fs_file_flags)
221
+ _END_EXTERN_CXX_WORKAROUND
222
+
223
+ extern "C" {
224
+ enum class __std_fs_file_handle : intptr_t { _Invalid = -1 };
225
+
226
+ enum class __std_code_page : unsigned int { _Acp = 0, _Utf8 = 65001 };
227
+
228
+ struct __std_fs_convert_result {
229
+ int _Len;
230
+ __std_win_error _Err;
231
+ };
232
+
233
+ struct __std_fs_file_id { // typedef struct _FILE_ID_INFO {
234
+ unsigned long long _Volume_serial_number; // ULONGLONG VolumeSerialNumber;
235
+ unsigned char _Id[16]; // FILE_ID_128 FileId;
236
+ }; // } FILE_ID_INFO, ...;
237
+
238
+ enum class __std_fs_copy_options {
239
+ _None = 0x0,
240
+
241
+ _Existing_mask = 0xF,
242
+ _Skip_existing = 0x1,
243
+ _Overwrite_existing = 0x2,
244
+ _Update_existing = 0x4,
245
+ };
246
+ } // extern "C"
247
+
248
+ _EXTERN_CXX_WORKAROUND
249
+ _BITMASK_OPS(_EMPTY_ARGUMENT, __std_fs_copy_options)
250
+ _END_EXTERN_CXX_WORKAROUND
251
+
252
+ extern "C" {
253
+ _NODISCARD __std_ulong_and_error __stdcall __std_fs_get_full_path_name(_In_z_ const wchar_t* _Source,
254
+ _In_ unsigned long _Target_size, _Out_writes_z_(_Target_size) wchar_t* _Target) noexcept;
255
+
256
+ _NODISCARD __std_win_error __stdcall __std_fs_open_handle(_Out_ __std_fs_file_handle* _Handle,
257
+ _In_z_ const wchar_t* _File_name, _In_ __std_access_rights _Desired_access,
258
+ _In_ __std_fs_file_flags _Flags) noexcept;
259
+
260
+ void __stdcall __std_fs_close_handle(__std_fs_file_handle _Handle) noexcept;
261
+
262
+ _NODISCARD _Success_(return == __std_win_error::_Success) __std_win_error
263
+ __stdcall __std_fs_get_file_attributes_by_handle(
264
+ _In_ __std_fs_file_handle _Handle, _Out_ unsigned long* _File_attributes) noexcept;
265
+
266
+ _NODISCARD __std_ulong_and_error __stdcall __std_fs_get_final_path_name_by_handle(_In_ __std_fs_file_handle _Handle,
267
+ _Out_writes_z_(_Target_size) wchar_t* _Target, _In_ unsigned long _Target_size,
268
+ _In_ __std_fs_volume_name_kind _Flags) noexcept;
269
+
270
+ struct __std_fs_copy_file_result {
271
+ bool _Copied;
272
+ __std_win_error _Error;
273
+ };
274
+
275
+ _NODISCARD __std_fs_copy_file_result __stdcall __std_fs_copy_file(
276
+ _In_z_ const wchar_t* _Source, _In_z_ const wchar_t* _Target, _In_ __std_fs_copy_options _Options) noexcept;
277
+
278
+ _NODISCARD __std_win_error __stdcall __std_fs_directory_iterator_open(_In_z_ const wchar_t* _Path_spec,
279
+ _Inout_ __std_fs_dir_handle* _Handle, _Out_ __std_fs_find_data* _Results) noexcept;
280
+
281
+ void __stdcall __std_fs_directory_iterator_close(_In_ __std_fs_dir_handle _Handle) noexcept;
282
+
283
+ _NODISCARD _Success_(return == __std_win_error::_Success) __std_win_error
284
+ __stdcall __std_fs_get_stats(_In_z_ const wchar_t* _Path, __std_fs_stats* _Stats, _In_ __std_fs_stats_flags _Flags,
285
+ _In_ __std_fs_file_attr _Symlink_attribute_hint = __std_fs_file_attr::_Invalid) noexcept;
286
+
287
+ _NODISCARD __std_win_error __stdcall __std_fs_directory_iterator_advance(
288
+ _In_ __std_fs_dir_handle _Handle, _Out_ __std_fs_find_data* _Results) noexcept;
289
+
290
+ _NODISCARD __std_code_page __stdcall __std_fs_code_page() noexcept;
291
+
292
+ _NODISCARD __std_fs_convert_result __stdcall __std_fs_convert_narrow_to_wide(_In_ __std_code_page _Code_page,
293
+ _In_reads_(_Input_len) const char* _Input_str, _In_ int _Input_len,
294
+ _Out_writes_opt_(_Output_len) wchar_t* _Output_str, _In_ int _Output_len) noexcept;
295
+
296
+ _NODISCARD __std_fs_convert_result __stdcall __std_fs_convert_wide_to_narrow(_In_ __std_code_page _Code_page,
297
+ _In_reads_(_Input_len) const wchar_t* _Input_str, _In_ int _Input_len,
298
+ _Out_writes_opt_(_Output_len) char* _Output_str, _In_ int _Output_len) noexcept;
299
+
300
+ _NODISCARD __std_fs_convert_result __stdcall __std_fs_convert_wide_to_narrow_replace_chars(
301
+ _In_ __std_code_page _Code_page, _In_reads_(_Input_len) const wchar_t* _Input_str, _In_ int _Input_len,
302
+ _Out_writes_opt_(_Output_len) char* _Output_str, _In_ int _Output_len) noexcept;
303
+
304
+ _NODISCARD _Success_(return == __std_win_error::_Success) __std_win_error
305
+ __stdcall __std_fs_get_file_id(_Out_ __std_fs_file_id* _Id, _In_z_ const wchar_t* _Path) noexcept;
306
+
307
+ _NODISCARD __std_win_error __stdcall __std_fs_set_last_write_time(
308
+ _In_ long long _Last_write_filetime, _In_z_ const wchar_t* _Path) noexcept;
309
+
310
+ _NODISCARD __std_win_error __stdcall __std_fs_change_permissions(
311
+ _In_z_ const wchar_t* _Path, _In_ bool _Follow_symlinks, _In_ bool _Readonly) noexcept;
312
+
313
+ _NODISCARD _Success_(return._Error == __std_win_error::_Success) __std_ulong_and_error
314
+ __stdcall __std_fs_get_temp_path(_Out_writes_z_(__std_fs_temp_path_max) wchar_t* _Target) noexcept;
315
+
316
+ _NODISCARD _Success_(return._Error == __std_win_error::_Success) __std_ulong_and_error
317
+ __stdcall __std_fs_get_current_path(
318
+ _In_ unsigned long _Target_size, _Out_writes_z_(_Target_size) wchar_t* _Target) noexcept;
319
+
320
+ _NODISCARD __std_win_error __stdcall __std_fs_set_current_path(_In_z_ const wchar_t* _Target) noexcept;
321
+
322
+ _NODISCARD __std_win_error __stdcall __std_fs_create_directory_symbolic_link(
323
+ _In_z_ const wchar_t* _Symlink_file_name, _In_z_ const wchar_t* _Target_file_name) noexcept;
324
+
325
+ _NODISCARD __std_win_error __stdcall __std_fs_create_hard_link(
326
+ _In_z_ const wchar_t* _File_name, _In_z_ const wchar_t* _Existing_file_name) noexcept;
327
+
328
+ _NODISCARD __std_win_error __stdcall __std_fs_create_symbolic_link(
329
+ _In_z_ const wchar_t* _Symlink_file_name, _In_z_ const wchar_t* _Target_file_name) noexcept;
330
+
331
+ _NODISCARD __std_win_error __stdcall __std_fs_read_reparse_data_buffer(_In_ __std_fs_file_handle _Handle,
332
+ _Out_writes_bytes_(_Buffer_size) void* _Buffer, _In_ unsigned long _Buffer_size) noexcept;
333
+
334
+ _NODISCARD __std_win_error __stdcall __std_fs_write_reparse_data_buffer(
335
+ _In_ __std_fs_file_handle _Handle, _In_ const __std_fs_reparse_data_buffer* _Buffer) noexcept;
336
+
337
+ _NODISCARD bool __stdcall __std_fs_is_junction_from_reparse_data_buffer(
338
+ _In_ const __std_fs_reparse_data_buffer* _Buffer) noexcept;
339
+
340
+ _NODISCARD _Success_(return == __std_win_error::_Success) __std_win_error
341
+ __stdcall __std_fs_read_name_from_reparse_data_buffer(
342
+ _In_ __std_fs_reparse_data_buffer* _Handle, _Out_ wchar_t** _Offset, _Out_ unsigned short* _Length) noexcept;
343
+
344
+ struct __std_fs_create_directory_result {
345
+ bool _Created;
346
+ __std_win_error _Error;
347
+ };
348
+
349
+ _NODISCARD __std_fs_create_directory_result __stdcall __std_fs_create_directory(
350
+ _In_z_ const wchar_t* _New_directory) noexcept;
351
+
352
+ struct __std_fs_remove_result {
353
+ bool _Removed;
354
+ __std_win_error _Error;
355
+ };
356
+
357
+ _NODISCARD __std_fs_remove_result __stdcall __std_fs_remove(_In_z_ const wchar_t* _Target) noexcept;
358
+
359
+ _NODISCARD __std_win_error __stdcall __std_fs_rename(
360
+ _In_z_ const wchar_t* _Source, _In_z_ const wchar_t* _Target) noexcept;
361
+
362
+ _NODISCARD __std_win_error __stdcall __std_fs_resize_file(_In_z_ const wchar_t* _Target, uintmax_t _New_size) noexcept;
363
+
364
+ _NODISCARD __std_win_error __stdcall __std_fs_space(_In_z_ const wchar_t* _Target, _Out_ uintmax_t* _Available,
365
+ _Out_ uintmax_t* _Total_bytes, _Out_ uintmax_t* _Free_bytes) noexcept;
366
+ } // extern "C"
367
+
368
+ _STD_BEGIN
369
+ struct _Fs_file {
370
+ __std_fs_file_handle _Raw;
371
+
372
+ explicit _Fs_file(void* const _Handle) : _Raw{reinterpret_cast<intptr_t>(_Handle)} {}
373
+
374
+ _Fs_file(const wchar_t* const _File_name, const __std_access_rights _Desired_access,
375
+ const __std_fs_file_flags _Flags, __std_win_error* const _Err) {
376
+ *_Err = __std_fs_open_handle(&_Raw, _File_name, _Desired_access, _Flags);
377
+ }
378
+
379
+ _Fs_file(const _Fs_file&) = delete;
380
+ _Fs_file& operator=(const _Fs_file&) = delete;
381
+
382
+ ~_Fs_file() {
383
+ __std_fs_close_handle(_Raw);
384
+ }
385
+
386
+ _NODISCARD void* _Get() const {
387
+ return reinterpret_cast<void*>(_Raw);
388
+ }
389
+ };
390
+
391
+ _STD_END
392
+
393
+ #pragma pop_macro("new")
394
+ _STL_RESTORE_CLANG_WARNINGS
395
+ #pragma warning(pop)
396
+ #pragma pack(pop)
397
+
398
+ #endif // _STL_COMPILER_PREPROCESSOR
399
+ #endif // _XFILESYSTEM_ABI_H