ZTWHHH commited on
Commit
f6be1ed
·
verified ·
1 Parent(s): 7eb4d21

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/concurrency/async_value_ref.h +468 -0
  3. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/cuda/cuda.inc +635 -0
  4. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/cuda/cudart.inc +413 -0
  5. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/cuda/cudnn.inc +272 -0
  6. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/lib/gtl/compactptrset.h +209 -0
  7. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/lib/math/math_util.h +161 -0
  8. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/byte_order.h +37 -0
  9. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/auth_provider.h +55 -0
  10. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/compute_engine_metadata_client.h +67 -0
  11. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/compute_engine_zone_provider.h +41 -0
  12. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/curl_http_request.h +275 -0
  13. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/expiring_lru_cache.h +188 -0
  14. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/file_block_cache.h +139 -0
  15. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/gcs_dns_cache.h +77 -0
  16. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/gcs_file_system.h +456 -0
  17. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/gcs_throttle.h +168 -0
  18. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/google_auth_provider.h +70 -0
  19. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/http_request.h +193 -0
  20. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/oauth_client.h +64 -0
  21. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/ram_file_block_cache.h +250 -0
  22. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/time_util.h +29 -0
  23. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/zone_provider.h +49 -0
  24. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/crash_analysis.h +28 -0
  25. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/criticality.h +50 -0
  26. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/ctstring_internal.h +455 -0
  27. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/casts.h +92 -0
  28. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/context.h +37 -0
  29. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/crash_analysis.h +48 -0
  30. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/criticality.h +32 -0
  31. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/dso_loader.h +95 -0
  32. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/integral_types.h +38 -0
  33. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/logging.h +651 -0
  34. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/mutex.h +39 -0
  35. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/mutex_data.h +35 -0
  36. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/posix_file_system.h +84 -0
  37. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/stacktrace.h +102 -0
  38. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/status.h +23 -0
  39. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/statusor.h +33 -0
  40. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/subprocess.h +132 -0
  41. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/tracing_impl.h +41 -0
  42. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/unbounded_work_queue.h +68 -0
  43. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/env_time.h +65 -0
  44. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/intrusive_ptr.h +81 -0
  45. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/platform.h +87 -0
  46. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/platform_strings_computed.h +735 -0
  47. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/png.h +30 -0
  48. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/profile_utils/android_armv7a_cpu_utils_helper.h +69 -0
  49. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/profile_utils/clock_cycle_profiler.h +107 -0
  50. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/profile_utils/cpu_utils.h +193 -0
.gitattributes CHANGED
@@ -901,3 +901,4 @@ videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/ops/__pycache
901
  videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2tensorrt/_pywrap_py_utils.so filter=lfs diff=lfs merge=lfs -text
902
  videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_function_lib.so filter=lfs diff=lfs merge=lfs -text
903
  videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_quantize_model.so filter=lfs diff=lfs merge=lfs -text
 
 
901
  videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2tensorrt/_pywrap_py_utils.so filter=lfs diff=lfs merge=lfs -text
902
  videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_function_lib.so filter=lfs diff=lfs merge=lfs -text
903
  videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_quantize_model.so filter=lfs diff=lfs merge=lfs -text
904
+ videochat2/lib/python3.10/site-packages/torchvision/_C.so filter=lfs diff=lfs merge=lfs -text
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/concurrency/async_value_ref.h ADDED
@@ -0,0 +1,468 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2022 Google LLC. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_CONCURRENCY_ASYNC_VALUE_REF_H_
17
+ #define TENSORFLOW_TSL_CONCURRENCY_ASYNC_VALUE_REF_H_
18
+
19
+ #include <cstddef>
20
+ #include <cstdlib>
21
+ #include <string_view>
22
+ #include <type_traits>
23
+ #include <utility>
24
+
25
+ #include "absl/status/status.h"
26
+ #include "absl/status/statusor.h"
27
+ #include "tsl/concurrency/async_value.h"
28
+ #include "tsl/concurrency/ref_count.h"
29
+
30
+ namespace tsl {
31
+
32
+ // Forward declare non-owning typed async value pointer.
33
+ template <typename T>
34
+ class AsyncValuePtr;
35
+
36
+ // RCReference<AsyncValue> wrapper.
37
+ //
38
+ // AsyncValueRef<T> is an alias for RCReference<AsyncValue> that carries payload
39
+ // type information. The user does not need to pass the payload data type to
40
+ // get() or emplace().
41
+ //
42
+ // Like RCReference<AsyncValue>, it represents one reference on the underlying
43
+ // AsyncValue. When a callee returns an AsyncValueRef to a caller, the callee
44
+ // also transfers their ownership of a reference on the underlying AsyncValue.
45
+ template <typename T>
46
+ class AsyncValueRef {
47
+ public:
48
+ AsyncValueRef() = default;
49
+ AsyncValueRef(std::nullptr_t) {} // NOLINT
50
+
51
+ explicit AsyncValueRef(RCReference<AsyncValue> value)
52
+ : value_(std::move(value)) {}
53
+
54
+ // Support implicit conversion from AsyncValueRef<Derived> to
55
+ // AsyncValueRef<Base>.
56
+ template <typename DerivedT,
57
+ std::enable_if_t<std::is_base_of<T, DerivedT>::value>* = nullptr>
58
+ AsyncValueRef(AsyncValueRef<DerivedT>&& u) // NOLINT
59
+ : value_(u.ReleaseRCRef()) {}
60
+
61
+ // Support implicit conversion from RCReference<AsyncValue>.
62
+ AsyncValueRef(RCReference<ErrorAsyncValue> value) // NOLINT
63
+ : value_(std::move(value)) {}
64
+
65
+ AsyncValueRef& operator=(RCReference<ErrorAsyncValue> new_value) {
66
+ value_ = std::move(new_value);
67
+ return *this;
68
+ }
69
+
70
+ // Allow implicit conversion to type-erased RCReference<AsyncValue>
71
+ operator RCReference<AsyncValue>() && { return std::move(value_); } // NOLINT
72
+
73
+ // Return true if the AsyncValue is resolved to a concrete value or error.
74
+ bool IsAvailable() const { return value_->IsAvailable(); }
75
+ bool IsUnavailable() const { return value_->IsUnavailable(); }
76
+
77
+ // Return true if the AsyncValue contains a concrete value.
78
+ bool IsConcrete() const { return value_->IsConcrete(); }
79
+
80
+ // Return true if state is kUnconstructed.
81
+ bool IsUnconstructed() const { return value_->IsUnconstructed(); }
82
+
83
+ // Return the stored value. The AsyncValueRef must be available.
84
+ T& get() const { return value_->get<T>(); }
85
+
86
+ // Return the stored value as a subclass type. The AsyncValueRef must be
87
+ // available.
88
+ template <typename SubclassT,
89
+ std::enable_if_t<std::is_base_of<T, SubclassT>::value>* = nullptr>
90
+ SubclassT& get() const {
91
+ return value_->get<SubclassT>();
92
+ }
93
+
94
+ T* operator->() const { return &get(); }
95
+
96
+ T& operator*() const { return get(); }
97
+
98
+ template <typename WaiterT>
99
+ void AndThen(WaiterT&& waiter) const {
100
+ AsPtr().AndThen(std::forward<WaiterT>(waiter));
101
+ }
102
+
103
+ // Make the AsyncValueRef available.
104
+ void SetStateConcrete() const { value_->SetStateConcrete(); }
105
+
106
+ // Set the stored value. The AsyncValueRef must be unavailable. After this
107
+ // returns, the AsyncValueRef will be available.
108
+ template <typename... Args>
109
+ void emplace(Args&&... args) const {
110
+ value_->emplace<T>(std::forward<Args>(args)...);
111
+ }
112
+
113
+ void emplace(absl::StatusOr<T> v) const {
114
+ if (v.ok()) {
115
+ emplace(std::move(*v));
116
+ } else {
117
+ SetError(std::move(v.status()));
118
+ }
119
+ }
120
+
121
+ // Return true if this AsyncValueRef represents an error.
122
+ bool IsError() const { return value_->IsError(); }
123
+
124
+ // Returns the underlying error. IsError() must be true.
125
+ const absl::Status& GetError() const { return value_->GetError(); }
126
+
127
+ // Returns the underlying error, or nullptr if there is none.
128
+ const absl::Status* GetErrorIfPresent() const {
129
+ return value_->GetErrorIfPresent();
130
+ }
131
+
132
+ void SetError(absl::Status status) const {
133
+ assert(!status.ok() && "expected non-ok status");
134
+ return value_->SetError(std::move(status));
135
+ }
136
+
137
+ void SetError(std::string_view message) const {
138
+ SetError(absl::InternalError(message));
139
+ }
140
+
141
+ explicit operator bool() const { return value_.get() != nullptr; }
142
+ bool operator==(const AsyncValueRef& r) const { return value_ == r.value_; }
143
+ bool operator!=(const AsyncValueRef& r) const { return value_ != r.value_; }
144
+
145
+ // Return a raw pointer to the AsyncValue.
146
+ AsyncValue* GetAsyncValue() const { return value_.get(); }
147
+
148
+ // Returns a non-owning pointer to the underlying async value.
149
+ AsyncValuePtr<T> AsPtr() const { return AsyncValuePtr<T>(GetAsyncValue()); }
150
+
151
+ // Return true if this is the only ref to the AsyncValue.
152
+ // This function requires the internal AsyncValue to be set (value_ !=
153
+ // nullptr).
154
+ bool IsUnique() const { return value_->IsUnique(); }
155
+
156
+ // Make an explicit copy of this AsyncValueRef, increasing value_'s refcount
157
+ // by one.
158
+ AsyncValueRef<T> CopyRef() const { return AsyncValueRef(CopyRCRef()); }
159
+
160
+ // Make a copy of value_, increasing value_'s refcount by one.
161
+ RCReference<AsyncValue> CopyRCRef() const { return value_; }
162
+
163
+ // Release ownership of one reference on the AsyncValue and return a raw
164
+ // pointer to it.
165
+ AsyncValue* release() { return value_.release(); }
166
+
167
+ void reset() { value_.reset(); }
168
+
169
+ // Transfer ownership of one reference on the AsyncValue to the returned
170
+ // RCReference<AsyncValue>.
171
+ RCReference<AsyncValue> ReleaseRCRef() { return std::move(value_); }
172
+
173
+ private:
174
+ RCReference<AsyncValue> value_;
175
+ };
176
+
177
+ // Non owning typed pointer for the AsyncValue. Can be cheaply passed around
178
+ // when the lifetime of the underlying async value is clear from the context.
179
+ // It is the user responsibility to construct an owning AsyncValueRef to extend
180
+ // the lifetime of the underlying value if needed.
181
+ template <typename T>
182
+ class AsyncValuePtr {
183
+ public:
184
+ AsyncValuePtr() : value_(nullptr) {}
185
+
186
+ explicit AsyncValuePtr(AsyncValue* value) : value_(value) {}
187
+ explicit AsyncValuePtr(const AsyncValueRef<T>& ref)
188
+ : value_(ref.GetAsyncValue()) {}
189
+
190
+ AsyncValue* value() const { return value_; }
191
+
192
+ AsyncValueRef<T> CopyRef() const { return AsyncValueRef<T>(FormRef(value_)); }
193
+
194
+ T& get() const { return value_->template get<T>(); }
195
+ T* operator->() const { return &get(); }
196
+ T& operator*() const { return get(); }
197
+
198
+ explicit operator bool() const { return value_ != nullptr; }
199
+ bool operator!=(std::nullptr_t) const { return value_ != nullptr; }
200
+ AsyncValuePtr& operator=(std::nullptr_t) {
201
+ value_ = nullptr;
202
+ return *this;
203
+ }
204
+
205
+ bool IsAvailable() const { return value_->IsAvailable(); }
206
+ bool IsUnavailable() const { return value_->IsUnavailable(); }
207
+
208
+ bool IsConcrete() const { return value_->IsConcrete(); }
209
+ void SetStateConcrete() const { value_->SetStateConcrete(); }
210
+
211
+ template <typename... Args>
212
+ void emplace(Args&&... args) const {
213
+ value_->emplace<T>(std::forward<Args>(args)...);
214
+ }
215
+
216
+ bool IsError() const { return value_->IsError(); }
217
+
218
+ const absl::Status& GetError() const { return value_->GetError(); }
219
+
220
+ void SetError(absl::Status status) const {
221
+ assert(!status.ok() && "expected non-ok status");
222
+ return value_->SetError(std::move(status));
223
+ }
224
+
225
+ // If the AsyncValueRef is available, run the waiter immediately. Otherwise,
226
+ // run the waiter when the AsyncValueRef becomes available.
227
+ //
228
+ // Sample usage:
229
+ //
230
+ // async_value_ref.AndThen([] {
231
+ // // async_value_ref is now ready.
232
+ // });
233
+ template <typename WaiterT,
234
+ std::enable_if_t<std::is_invocable_v<WaiterT>>* = nullptr>
235
+ void AndThen(WaiterT&& waiter) const {
236
+ value_->AndThen(std::forward<WaiterT>(waiter));
237
+ }
238
+
239
+ // This AndThen() function takes a functor that takes absl::StatusOr<T*> as
240
+ // argument. This makes it easy for the callback function to use the value of
241
+ // the AsyncValue when it becomes available.
242
+ //
243
+ // Sample usage:
244
+ //
245
+ // async_value_ref.AndThen([] (absl::StatusOr<T*> status_or) {
246
+ // // async_value_ref is now ready and its value/error is in the provided
247
+ // // `status_or` argument.
248
+ // if (!status_or.ok()) {
249
+ // // Handle the error in `status_or.status()`.
250
+ // } else {
251
+ // // Handle the value in `*status_or`.
252
+ // }
253
+ // });
254
+ template <typename WaiterT, std::enable_if_t<std::is_invocable_v<
255
+ WaiterT, absl::StatusOr<T*>>>* = nullptr>
256
+ void AndThen(WaiterT&& waiter) const {
257
+ AndThen([waiter = std::forward<WaiterT>(waiter), av_ptr = *this]() mutable {
258
+ if (av_ptr.IsError()) {
259
+ return std::forward<WaiterT>(waiter)(av_ptr.GetError());
260
+ } else {
261
+ return std::forward<WaiterT>(waiter)(&av_ptr.get());
262
+ }
263
+ });
264
+ }
265
+
266
+ // This AndThen() function takes a functor that takes an absl::Status as
267
+ // argument. This makes it easy for the callback function to use the error of
268
+ // the AsyncValue when it becomes available. This is useful when the callback
269
+ // function only cares about the error value of the AsyncValue, e.g. for
270
+ // AsyncValueRef<Chain>.
271
+ //
272
+ // Sample usage:
273
+ //
274
+ // async_value_ref.AndThen([] (absl::Status status) {
275
+ // // async_value_ref is now ready and its status is in the provided
276
+ // // `status` argument.
277
+ // if (!status.ok()) {
278
+ // // Handle the error.
279
+ // } else {
280
+ // // No error occurred.
281
+ // }
282
+ // });
283
+ template <typename WaiterT,
284
+ std::enable_if_t<
285
+ (std::is_invocable_v<WaiterT, absl::Status> &&
286
+ !std::is_invocable_v<WaiterT, absl::StatusOr<T*>>)>* = nullptr>
287
+ void AndThen(WaiterT&& waiter) const {
288
+ AndThen([waiter = std::forward<WaiterT>(waiter), av_ptr = *this]() mutable {
289
+ if (av_ptr.IsError()) {
290
+ return std::forward<WaiterT>(waiter)(av_ptr.GetError());
291
+ } else {
292
+ return std::forward<WaiterT>(waiter)(absl::OkStatus());
293
+ }
294
+ });
295
+ }
296
+
297
+ private:
298
+ AsyncValue* value_; // doesn't own the async value
299
+ };
300
+
301
+ // Create a ConcreteAsyncValue in error state with the given status.
302
+ RCReference<ErrorAsyncValue> MakeErrorAsyncValueRef(absl::Status status);
303
+
304
+ ABSL_DEPRECATED("Use the error async value constructor that takes absl::Status")
305
+ RCReference<ErrorAsyncValue> MakeErrorAsyncValueRef(std::string_view message);
306
+
307
+ // Construct an empty IndirectAsyncValue, not forwarding to anything.
308
+ RCReference<IndirectAsyncValue> MakeIndirectAsyncValue();
309
+
310
+ //===----------------------------------------------------------------------===//
311
+
312
+ namespace internal {
313
+
314
+ template <typename T, typename... Args>
315
+ T* PlacementConstruct(void* buf, Args&&... args) {
316
+ return new (buf) T(std::forward<Args>(args)...);
317
+ }
318
+
319
+ template <typename T, typename... Args>
320
+ T* AllocateAndConstruct(Args&&... args) {
321
+ // TODO(ezhulenev): `port::AlignedMalloc` has a different order of arguments!
322
+ void* buf = internal::AlignedAlloc(alignof(T), sizeof(T));
323
+ return PlacementConstruct<T, Args...>(buf, std::forward<Args>(args)...);
324
+ }
325
+
326
+ } // namespace internal
327
+
328
+ //===----------------------------------------------------------------------===//
329
+ // Constructing reference-counted async values on the heap.
330
+ //===----------------------------------------------------------------------===//
331
+
332
+ // Allocate an unconstructed AsyncValueRef. The AsyncValueRef should be made
333
+ // available later by invoking AsyncValueRef::emplace or
334
+ // AsyncValueRef::SetError.
335
+ template <typename T>
336
+ AsyncValueRef<T> MakeUnconstructedAsyncValueRef() {
337
+ return AsyncValueRef<T>(tsl::TakeRef(
338
+ internal::AllocateAndConstruct<internal::ConcreteAsyncValue<T>>(
339
+ typename internal::ConcreteAsyncValue<T>::UnconstructedPayload{})));
340
+ }
341
+
342
+ // Allocate and construct an AsyncValueRef without making it available for
343
+ // consumption. The AsyncValueRef should be made available later by invoking
344
+ // AsyncValueRef::SetStateConcrete or AsyncValueRef::SetError.
345
+ template <typename T, typename... Args>
346
+ AsyncValueRef<T> MakeConstructedAsyncValueRef(Args&&... args) {
347
+ return AsyncValueRef<T>(tsl::TakeRef(
348
+ internal::AllocateAndConstruct<internal::ConcreteAsyncValue<T>>(
349
+ typename internal::ConcreteAsyncValue<T>::ConstructedPayload{},
350
+ std::forward<Args>(args)...)));
351
+ }
352
+
353
+ // Allocate and construct an available AsyncValueRef.
354
+ template <typename T, typename... Args>
355
+ AsyncValueRef<T> MakeAvailableAsyncValueRef(Args&&... args) {
356
+ return AsyncValueRef<T>(tsl::TakeRef(
357
+ internal::AllocateAndConstruct<internal::ConcreteAsyncValue<T>>(
358
+ typename internal::ConcreteAsyncValue<T>::ConcretePayload{},
359
+ std::forward<Args>(args)...)));
360
+ }
361
+
362
+ //===----------------------------------------------------------------------===//
363
+ // Constructing non-reference-counted values in user provided storage.
364
+ //===----------------------------------------------------------------------===//
365
+
366
+ namespace internal {
367
+
368
+ // Properly sized and aligned storage for allocating async values of given type.
369
+ template <typename T>
370
+ struct AsyncValueStorage {
371
+ using Payload = ConcreteAsyncValue<T>;
372
+
373
+ AsyncValueStorage() = default;
374
+
375
+ AsyncValueStorage(const AsyncValueStorage&) = delete;
376
+ AsyncValueStorage& operator=(const AsyncValueStorage&) = delete;
377
+
378
+ void* buf() { return &storage[0]; }
379
+
380
+ alignas(Payload) std::byte storage[sizeof(Payload)];
381
+ };
382
+
383
+ } // namespace internal
384
+
385
+ // Exclusive owner of the non reference-counted async value (e.g. allocated in
386
+ // the user provided storage) that is responsible for destructing it. If you'd
387
+ // look at `AsyncValueRef` as `std::shared_ptr`, then this is `std::unique_ptr`.
388
+ template <typename T>
389
+ class AsyncValueOwningRef {
390
+ public:
391
+ AsyncValueOwningRef() = default;
392
+ ~AsyncValueOwningRef() { Destroy(); }
393
+
394
+ AsyncValueOwningRef(const AsyncValueOwningRef&) = delete;
395
+ AsyncValueOwningRef& operator=(const AsyncValueOwningRef&) = delete;
396
+
397
+ AsyncValueOwningRef& operator=(AsyncValueOwningRef&& other) {
398
+ Destroy();
399
+ std::swap(value_, other.value_);
400
+ return *this;
401
+ }
402
+
403
+ AsyncValueOwningRef(AsyncValueOwningRef&& other) {
404
+ Destroy();
405
+ std::swap(value_, other.value_);
406
+ }
407
+
408
+ AsyncValueRef<T> AsRef() const { return AsyncValueRef<T>(FormRef(value_)); }
409
+ AsyncValuePtr<T> AsPtr() const { return AsyncValuePtr<T>(value_); }
410
+
411
+ T* operator->() const { return &value_->get(); }
412
+ T& operator*() const { return value_->get(); }
413
+
414
+ private:
415
+ template <typename U, typename... Args>
416
+ friend AsyncValueOwningRef<U> MakeConstructedAsyncValueRef(
417
+ internal::AsyncValueStorage<U>&, Args&&...);
418
+
419
+ template <typename U, typename... Args>
420
+ friend AsyncValueOwningRef<U> MakeAvailableAsyncValueRef(
421
+ internal::AsyncValueStorage<U>&, Args&&...);
422
+
423
+ explicit AsyncValueOwningRef(internal::ConcreteAsyncValue<T>* value)
424
+ : value_(value) {}
425
+
426
+ void Destroy() {
427
+ if (value_) {
428
+ CallDestructor(value_);
429
+ value_ = nullptr;
430
+ }
431
+ }
432
+
433
+ // Work around NVCC compilation error.
434
+ template <typename U>
435
+ void CallDestructor(U* ptr) {
436
+ ptr->~U();
437
+ }
438
+
439
+ internal::ConcreteAsyncValue<T>* value_ = nullptr;
440
+ };
441
+
442
+ // Constructs an AsyncValueRef in the provided storage without making it
443
+ // available for consumption. The AsyncValueRef should be made available later
444
+ // by invoking AsyncValueRef::SetStateConcrete or AsyncValueRef::SetError.
445
+ template <typename T, typename... Args>
446
+ AsyncValueOwningRef<T> MakeConstructedAsyncValueRef(
447
+ internal::AsyncValueStorage<T>& storage, Args&&... args) {
448
+ return AsyncValueOwningRef<T>(
449
+ internal::PlacementConstruct<internal::ConcreteAsyncValue<T>>(
450
+ storage.buf(),
451
+ typename internal::ConcreteAsyncValue<T>::ConstructedPayload{false},
452
+ std::forward<Args>(args)...));
453
+ }
454
+
455
+ // Construct an available AsyncValueRef in the provided storage.
456
+ template <typename T, typename... Args>
457
+ AsyncValueOwningRef<T> MakeAvailableAsyncValueRef(
458
+ internal::AsyncValueStorage<T>& storage, Args&&... args) {
459
+ return AsyncValueOwningRef<T>(
460
+ internal::PlacementConstruct<internal::ConcreteAsyncValue<T>>(
461
+ storage.buf(),
462
+ typename internal::ConcreteAsyncValue<T>::ConcretePayload{false},
463
+ std::forward<Args>(args)...));
464
+ }
465
+
466
+ } // namespace tsl
467
+
468
+ #endif // TENSORFLOW_TSL_CONCURRENCY_ASYNC_VALUE_REF_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/cuda/cuda.inc ADDED
@@ -0,0 +1,635 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "cuArray3DCreate",
2
+ "cuArray3DCreate_v2",
3
+ "cuArray3DGetDescriptor",
4
+ "cuArray3DGetDescriptor_v2",
5
+ "cuArrayCreate",
6
+ "cuArrayCreate_v2",
7
+ "cuArrayDestroy",
8
+ "cuArrayGetDescriptor",
9
+ "cuArrayGetDescriptor_v2",
10
+ "cuArrayGetMemoryRequirements",
11
+ "cuArrayGetPlane",
12
+ "cuArrayGetSparseProperties",
13
+ "cuCoredumpGetAttribute",
14
+ "cuCoredumpGetAttributeGlobal",
15
+ "cuCoredumpSetAttribute",
16
+ "cuCoredumpSetAttributeGlobal",
17
+ "cuCtxAttach",
18
+ "cuCtxCreate",
19
+ "cuCtxCreate_v2",
20
+ "cuCtxCreate_v3",
21
+ "cuCtxDestroy",
22
+ "cuCtxDestroy_v2",
23
+ "cuCtxDetach",
24
+ "cuCtxDisablePeerAccess",
25
+ "cuCtxEnablePeerAccess",
26
+ "cuCtxGetApiVersion",
27
+ "cuCtxGetCacheConfig",
28
+ "cuCtxGetCurrent",
29
+ "cuCtxGetDevice",
30
+ "cuCtxGetExecAffinity",
31
+ "cuCtxGetFlags",
32
+ "cuCtxGetId",
33
+ "cuCtxGetLimit",
34
+ "cuCtxGetSharedMemConfig",
35
+ "cuCtxGetStreamPriorityRange",
36
+ "cuCtxPopCurrent",
37
+ "cuCtxPopCurrent_v2",
38
+ "cuCtxPushCurrent",
39
+ "cuCtxPushCurrent_v2",
40
+ "cuCtxResetPersistingL2Cache",
41
+ "cuCtxSetCacheConfig",
42
+ "cuCtxSetCurrent",
43
+ "cuCtxSetFlags",
44
+ "cuCtxSetLimit",
45
+ "cuCtxSetSharedMemConfig",
46
+ "cuCtxSynchronize",
47
+ "cuDestroyExternalMemory",
48
+ "cuDestroyExternalSemaphore",
49
+ "cuDeviceCanAccessPeer",
50
+ "cuDeviceComputeCapability",
51
+ "cuDeviceGet",
52
+ "cuDeviceGetAttribute",
53
+ "cuDeviceGetByPCIBusId",
54
+ "cuDeviceGetCount",
55
+ "cuDeviceGetDefaultMemPool",
56
+ "cuDeviceGetExecAffinitySupport",
57
+ "cuDeviceGetGraphMemAttribute",
58
+ "cuDeviceGetLuid",
59
+ "cuDeviceGetMemPool",
60
+ "cuDeviceGetName",
61
+ "cuDeviceGetNvSciSyncAttributes",
62
+ "cuDeviceGetP2PAttribute",
63
+ "cuDeviceGetPCIBusId",
64
+ "cuDeviceGetProperties",
65
+ "cuDeviceGetTexture1DLinearMaxWidth",
66
+ "cuDeviceGetUuid",
67
+ "cuDeviceGetUuid_v2",
68
+ "cuDeviceGraphMemTrim",
69
+ "cuDevicePrimaryCtxGetState",
70
+ "cuDevicePrimaryCtxRelease",
71
+ "cuDevicePrimaryCtxRelease_v2",
72
+ "cuDevicePrimaryCtxReset",
73
+ "cuDevicePrimaryCtxReset_v2",
74
+ "cuDevicePrimaryCtxRetain",
75
+ "cuDevicePrimaryCtxSetFlags",
76
+ "cuDevicePrimaryCtxSetFlags_v2",
77
+ "cuDeviceSetGraphMemAttribute",
78
+ "cuDeviceSetMemPool",
79
+ "cuDeviceTotalMem",
80
+ "cuDeviceTotalMem_v2",
81
+ "cuDriverGetVersion",
82
+ "cuEGLApiInit",
83
+ "cuEGLStreamConsumerAcquireFrame",
84
+ "cuEGLStreamConsumerConnect",
85
+ "cuEGLStreamConsumerConnectWithFlags",
86
+ "cuEGLStreamConsumerDisconnect",
87
+ "cuEGLStreamConsumerReleaseFrame",
88
+ "cuEGLStreamProducerConnect",
89
+ "cuEGLStreamProducerDisconnect",
90
+ "cuEGLStreamProducerPresentFrame",
91
+ "cuEGLStreamProducerReturnFrame",
92
+ "cuEventCreate",
93
+ "cuEventDestroy",
94
+ "cuEventDestroy_v2",
95
+ "cuEventElapsedTime",
96
+ "cuEventQuery",
97
+ "cuEventRecord",
98
+ "cuEventRecordWithFlags",
99
+ "cuEventRecordWithFlags_ptsz",
100
+ "cuEventRecord_ptsz",
101
+ "cuEventSynchronize",
102
+ "cuExternalMemoryGetMappedBuffer",
103
+ "cuExternalMemoryGetMappedMipmappedArray",
104
+ "cuFlushGPUDirectRDMAWrites",
105
+ "cuFuncGetAttribute",
106
+ "cuFuncGetModule",
107
+ "cuFuncGetName",
108
+ "cuFuncSetAttribute",
109
+ "cuFuncSetBlockShape",
110
+ "cuFuncSetCacheConfig",
111
+ "cuFuncSetSharedMemConfig",
112
+ "cuFuncSetSharedSize",
113
+ "cuGLCtxCreate",
114
+ "cuGLCtxCreate_v2",
115
+ "cuGLGetDevices",
116
+ "cuGLGetDevices_v2",
117
+ "cuGLInit",
118
+ "cuGLMapBufferObject",
119
+ "cuGLMapBufferObjectAsync",
120
+ "cuGLMapBufferObjectAsync_v2",
121
+ "cuGLMapBufferObjectAsync_v2_ptsz",
122
+ "cuGLMapBufferObject_v2",
123
+ "cuGLMapBufferObject_v2_ptds",
124
+ "cuGLRegisterBufferObject",
125
+ "cuGLSetBufferObjectMapFlags",
126
+ "cuGLUnmapBufferObject",
127
+ "cuGLUnmapBufferObjectAsync",
128
+ "cuGLUnregisterBufferObject",
129
+ "cuGetErrorName",
130
+ "cuGetErrorString",
131
+ "cuGetExportTable",
132
+ "cuGetProcAddress",
133
+ "cuGetProcAddress_v2",
134
+ "cuGraphAddBatchMemOpNode",
135
+ "cuGraphAddChildGraphNode",
136
+ "cuGraphAddDependencies",
137
+ "cuGraphAddDependencies_v2",
138
+ "cuGraphAddEmptyNode",
139
+ "cuGraphAddEventRecordNode",
140
+ "cuGraphAddEventWaitNode",
141
+ "cuGraphAddExternalSemaphoresSignalNode",
142
+ "cuGraphAddExternalSemaphoresWaitNode",
143
+ "cuGraphAddHostNode",
144
+ "cuGraphAddKernelNode",
145
+ "cuGraphAddKernelNode_v2",
146
+ "cuGraphAddMemAllocNode",
147
+ "cuGraphAddMemFreeNode",
148
+ "cuGraphAddMemcpyNode",
149
+ "cuGraphAddMemsetNode",
150
+ "cuGraphAddNode",
151
+ "cuGraphAddNode_v2",
152
+ "cuGraphBatchMemOpNodeGetParams",
153
+ "cuGraphBatchMemOpNodeSetParams",
154
+ "cuGraphChildGraphNodeGetGraph",
155
+ "cuGraphClone",
156
+ "cuGraphConditionalHandleCreate",
157
+ "cuGraphCreate",
158
+ "cuGraphDebugDotPrint",
159
+ "cuGraphDestroy",
160
+ "cuGraphDestroyNode",
161
+ "cuGraphEventRecordNodeGetEvent",
162
+ "cuGraphEventRecordNodeSetEvent",
163
+ "cuGraphEventWaitNodeGetEvent",
164
+ "cuGraphEventWaitNodeSetEvent",
165
+ "cuGraphExecBatchMemOpNodeSetParams",
166
+ "cuGraphExecChildGraphNodeSetParams",
167
+ "cuGraphExecDestroy",
168
+ "cuGraphExecEventRecordNodeSetEvent",
169
+ "cuGraphExecEventWaitNodeSetEvent",
170
+ "cuGraphExecExternalSemaphoresSignalNodeSetParams",
171
+ "cuGraphExecExternalSemaphoresWaitNodeSetParams",
172
+ "cuGraphExecGetFlags",
173
+ "cuGraphExecHostNodeSetParams",
174
+ "cuGraphExecKernelNodeSetParams",
175
+ "cuGraphExecKernelNodeSetParams_v2",
176
+ "cuGraphExecMemcpyNodeSetParams",
177
+ "cuGraphExecMemsetNodeSetParams",
178
+ "cuGraphExecNodeSetParams",
179
+ "cuGraphExecUpdate",
180
+ "cuGraphExecUpdate_v2",
181
+ "cuGraphExternalSemaphoresSignalNodeGetParams",
182
+ "cuGraphExternalSemaphoresSignalNodeSetParams",
183
+ "cuGraphExternalSemaphoresWaitNodeGetParams",
184
+ "cuGraphExternalSemaphoresWaitNodeSetParams",
185
+ "cuGraphGetEdges",
186
+ "cuGraphGetEdges_v2",
187
+ "cuGraphGetNodes",
188
+ "cuGraphGetRootNodes",
189
+ "cuGraphHostNodeGetParams",
190
+ "cuGraphHostNodeSetParams",
191
+ "cuGraphInstantiate",
192
+ "cuGraphInstantiateWithFlags",
193
+ "cuGraphInstantiateWithParams",
194
+ "cuGraphInstantiateWithParams_ptsz",
195
+ "cuGraphInstantiate_v2",
196
+ "cuGraphKernelNodeCopyAttributes",
197
+ "cuGraphKernelNodeGetAttribute",
198
+ "cuGraphKernelNodeGetParams",
199
+ "cuGraphKernelNodeGetParams_v2",
200
+ "cuGraphKernelNodeSetAttribute",
201
+ "cuGraphKernelNodeSetParams",
202
+ "cuGraphKernelNodeSetParams_v2",
203
+ "cuGraphLaunch",
204
+ "cuGraphLaunch_ptsz",
205
+ "cuGraphMemAllocNodeGetParams",
206
+ "cuGraphMemFreeNodeGetParams",
207
+ "cuGraphMemcpyNodeGetParams",
208
+ "cuGraphMemcpyNodeSetParams",
209
+ "cuGraphMemsetNodeGetParams",
210
+ "cuGraphMemsetNodeSetParams",
211
+ "cuGraphNodeFindInClone",
212
+ "cuGraphNodeGetDependencies",
213
+ "cuGraphNodeGetDependencies_v2",
214
+ "cuGraphNodeGetDependentNodes",
215
+ "cuGraphNodeGetDependentNodes_v2",
216
+ "cuGraphNodeGetEnabled",
217
+ "cuGraphNodeGetType",
218
+ "cuGraphNodeSetEnabled",
219
+ "cuGraphNodeSetParams",
220
+ "cuGraphReleaseUserObject",
221
+ "cuGraphRemoveDependencies",
222
+ "cuGraphRemoveDependencies_v2",
223
+ "cuGraphRetainUserObject",
224
+ "cuGraphUpload",
225
+ "cuGraphUpload_ptsz",
226
+ "cuGraphicsEGLRegisterImage",
227
+ "cuGraphicsGLRegisterBuffer",
228
+ "cuGraphicsGLRegisterImage",
229
+ "cuGraphicsMapResources",
230
+ "cuGraphicsMapResources_ptsz",
231
+ "cuGraphicsResourceGetMappedEglFrame",
232
+ "cuGraphicsResourceGetMappedMipmappedArray",
233
+ "cuGraphicsResourceGetMappedPointer",
234
+ "cuGraphicsResourceGetMappedPointer_v2",
235
+ "cuGraphicsResourceSetMapFlags",
236
+ "cuGraphicsResourceSetMapFlags_v2",
237
+ "cuGraphicsSubResourceGetMappedArray",
238
+ "cuGraphicsUnmapResources",
239
+ "cuGraphicsUnmapResources_ptsz",
240
+ "cuGraphicsUnregisterResource",
241
+ "cuGraphicsVDPAURegisterOutputSurface",
242
+ "cuGraphicsVDPAURegisterVideoSurface",
243
+ "cuImportExternalMemory",
244
+ "cuImportExternalSemaphore",
245
+ "cuInit",
246
+ "cuIpcCloseMemHandle",
247
+ "cuIpcGetEventHandle",
248
+ "cuIpcGetMemHandle",
249
+ "cuIpcOpenEventHandle",
250
+ "cuIpcOpenMemHandle",
251
+ "cuIpcOpenMemHandle_v2",
252
+ "cuKernelGetAttribute",
253
+ "cuKernelGetFunction",
254
+ "cuKernelGetName",
255
+ "cuKernelSetAttribute",
256
+ "cuKernelSetCacheConfig",
257
+ "cuLaunch",
258
+ "cuLaunchCooperativeKernel",
259
+ "cuLaunchCooperativeKernelMultiDevice",
260
+ "cuLaunchCooperativeKernel_ptsz",
261
+ "cuLaunchGrid",
262
+ "cuLaunchGridAsync",
263
+ "cuLaunchHostFunc",
264
+ "cuLaunchHostFunc_ptsz",
265
+ "cuLaunchKernel",
266
+ "cuLaunchKernelEx",
267
+ "cuLaunchKernelEx_ptsz",
268
+ "cuLaunchKernel_ptsz",
269
+ "cuLibraryGetGlobal",
270
+ "cuLibraryGetKernel",
271
+ "cuLibraryGetManaged",
272
+ "cuLibraryGetModule",
273
+ "cuLibraryGetUnifiedFunction",
274
+ "cuLibraryLoadData",
275
+ "cuLibraryLoadFromFile",
276
+ "cuLibraryUnload",
277
+ "cuLinkAddData",
278
+ "cuLinkAddData_v2",
279
+ "cuLinkAddFile",
280
+ "cuLinkAddFile_v2",
281
+ "cuLinkComplete",
282
+ "cuLinkCreate",
283
+ "cuLinkCreate_v2",
284
+ "cuLinkDestroy",
285
+ "cuMemAddressFree",
286
+ "cuMemAddressReserve",
287
+ "cuMemAdvise",
288
+ "cuMemAdvise_v2",
289
+ "cuMemAlloc",
290
+ "cuMemAllocAsync",
291
+ "cuMemAllocAsync_ptsz",
292
+ "cuMemAllocFromPoolAsync",
293
+ "cuMemAllocFromPoolAsync_ptsz",
294
+ "cuMemAllocHost",
295
+ "cuMemAllocHost_v2",
296
+ "cuMemAllocManaged",
297
+ "cuMemAllocPitch",
298
+ "cuMemAllocPitch_v2",
299
+ "cuMemAlloc_v2",
300
+ "cuMemCreate",
301
+ "cuMemExportToShareableHandle",
302
+ "cuMemFree",
303
+ "cuMemFreeAsync",
304
+ "cuMemFreeAsync_ptsz",
305
+ "cuMemFreeHost",
306
+ "cuMemFree_v2",
307
+ "cuMemGetAccess",
308
+ "cuMemGetAddressRange",
309
+ "cuMemGetAddressRange_v2",
310
+ "cuMemGetAllocationGranularity",
311
+ "cuMemGetAllocationPropertiesFromHandle",
312
+ "cuMemGetAttribute",
313
+ "cuMemGetAttribute_v2",
314
+ "cuMemGetHandleForAddressRange",
315
+ "cuMemGetInfo",
316
+ "cuMemGetInfo_v2",
317
+ "cuMemHostAlloc",
318
+ "cuMemHostGetDevicePointer",
319
+ "cuMemHostGetDevicePointer_v2",
320
+ "cuMemHostGetFlags",
321
+ "cuMemHostRegister",
322
+ "cuMemHostRegister_v2",
323
+ "cuMemHostUnregister",
324
+ "cuMemImportFromShareableHandle",
325
+ "cuMemMap",
326
+ "cuMemMapArrayAsync",
327
+ "cuMemMapArrayAsync_ptsz",
328
+ "cuMemPoolCreate",
329
+ "cuMemPoolDestroy",
330
+ "cuMemPoolExportPointer",
331
+ "cuMemPoolExportToShareableHandle",
332
+ "cuMemPoolGetAccess",
333
+ "cuMemPoolGetAttribute",
334
+ "cuMemPoolImportFromShareableHandle",
335
+ "cuMemPoolImportPointer",
336
+ "cuMemPoolSetAccess",
337
+ "cuMemPoolSetAttribute",
338
+ "cuMemPoolTrimTo",
339
+ "cuMemPrefetchAsync",
340
+ "cuMemPrefetchAsync_ptsz",
341
+ "cuMemPrefetchAsync_v2",
342
+ "cuMemPrefetchAsync_v2_ptsz",
343
+ "cuMemRangeGetAttribute",
344
+ "cuMemRangeGetAttributes",
345
+ "cuMemRelease",
346
+ "cuMemRetainAllocationHandle",
347
+ "cuMemSetAccess",
348
+ "cuMemUnmap",
349
+ "cuMemcpy",
350
+ "cuMemcpy2D",
351
+ "cuMemcpy2DAsync",
352
+ "cuMemcpy2DAsync_v2",
353
+ "cuMemcpy2DAsync_v2_ptsz",
354
+ "cuMemcpy2DUnaligned",
355
+ "cuMemcpy2DUnaligned_v2",
356
+ "cuMemcpy2DUnaligned_v2_ptds",
357
+ "cuMemcpy2D_v2",
358
+ "cuMemcpy2D_v2_ptds",
359
+ "cuMemcpy3D",
360
+ "cuMemcpy3DAsync",
361
+ "cuMemcpy3DAsync_v2",
362
+ "cuMemcpy3DAsync_v2_ptsz",
363
+ "cuMemcpy3DPeer",
364
+ "cuMemcpy3DPeerAsync",
365
+ "cuMemcpy3DPeerAsync_ptsz",
366
+ "cuMemcpy3DPeer_ptds",
367
+ "cuMemcpy3D_v2",
368
+ "cuMemcpy3D_v2_ptds",
369
+ "cuMemcpyAsync",
370
+ "cuMemcpyAsync_ptsz",
371
+ "cuMemcpyAtoA",
372
+ "cuMemcpyAtoA_v2",
373
+ "cuMemcpyAtoA_v2_ptds",
374
+ "cuMemcpyAtoD",
375
+ "cuMemcpyAtoD_v2",
376
+ "cuMemcpyAtoD_v2_ptds",
377
+ "cuMemcpyAtoH",
378
+ "cuMemcpyAtoHAsync",
379
+ "cuMemcpyAtoHAsync_v2",
380
+ "cuMemcpyAtoHAsync_v2_ptsz",
381
+ "cuMemcpyAtoH_v2",
382
+ "cuMemcpyAtoH_v2_ptds",
383
+ "cuMemcpyDtoA",
384
+ "cuMemcpyDtoA_v2",
385
+ "cuMemcpyDtoA_v2_ptds",
386
+ "cuMemcpyDtoD",
387
+ "cuMemcpyDtoDAsync",
388
+ "cuMemcpyDtoDAsync_v2",
389
+ "cuMemcpyDtoDAsync_v2_ptsz",
390
+ "cuMemcpyDtoD_v2",
391
+ "cuMemcpyDtoD_v2_ptds",
392
+ "cuMemcpyDtoH",
393
+ "cuMemcpyDtoHAsync",
394
+ "cuMemcpyDtoHAsync_v2",
395
+ "cuMemcpyDtoHAsync_v2_ptsz",
396
+ "cuMemcpyDtoH_v2",
397
+ "cuMemcpyDtoH_v2_ptds",
398
+ "cuMemcpyHtoA",
399
+ "cuMemcpyHtoAAsync",
400
+ "cuMemcpyHtoAAsync_v2",
401
+ "cuMemcpyHtoAAsync_v2_ptsz",
402
+ "cuMemcpyHtoA_v2",
403
+ "cuMemcpyHtoA_v2_ptds",
404
+ "cuMemcpyHtoD",
405
+ "cuMemcpyHtoDAsync",
406
+ "cuMemcpyHtoDAsync_v2",
407
+ "cuMemcpyHtoDAsync_v2_ptsz",
408
+ "cuMemcpyHtoD_v2",
409
+ "cuMemcpyHtoD_v2_ptds",
410
+ "cuMemcpyPeer",
411
+ "cuMemcpyPeerAsync",
412
+ "cuMemcpyPeerAsync_ptsz",
413
+ "cuMemcpyPeer_ptds",
414
+ "cuMemcpy_ptds",
415
+ "cuMemsetD16",
416
+ "cuMemsetD16Async",
417
+ "cuMemsetD16Async_ptsz",
418
+ "cuMemsetD16_v2",
419
+ "cuMemsetD16_v2_ptds",
420
+ "cuMemsetD2D16",
421
+ "cuMemsetD2D16Async",
422
+ "cuMemsetD2D16Async_ptsz",
423
+ "cuMemsetD2D16_v2",
424
+ "cuMemsetD2D16_v2_ptds",
425
+ "cuMemsetD2D32",
426
+ "cuMemsetD2D32Async",
427
+ "cuMemsetD2D32Async_ptsz",
428
+ "cuMemsetD2D32_v2",
429
+ "cuMemsetD2D32_v2_ptds",
430
+ "cuMemsetD2D8",
431
+ "cuMemsetD2D8Async",
432
+ "cuMemsetD2D8Async_ptsz",
433
+ "cuMemsetD2D8_v2",
434
+ "cuMemsetD2D8_v2_ptds",
435
+ "cuMemsetD32",
436
+ "cuMemsetD32Async",
437
+ "cuMemsetD32Async_ptsz",
438
+ "cuMemsetD32_v2",
439
+ "cuMemsetD32_v2_ptds",
440
+ "cuMemsetD8",
441
+ "cuMemsetD8Async",
442
+ "cuMemsetD8Async_ptsz",
443
+ "cuMemsetD8_v2",
444
+ "cuMemsetD8_v2_ptds",
445
+ "cuMipmappedArrayCreate",
446
+ "cuMipmappedArrayDestroy",
447
+ "cuMipmappedArrayGetLevel",
448
+ "cuMipmappedArrayGetMemoryRequirements",
449
+ "cuMipmappedArrayGetSparseProperties",
450
+ "cuModuleGetFunction",
451
+ "cuModuleGetGlobal",
452
+ "cuModuleGetGlobal_v2",
453
+ "cuModuleGetLoadingMode",
454
+ "cuModuleGetSurfRef",
455
+ "cuModuleGetTexRef",
456
+ "cuModuleLoad",
457
+ "cuModuleLoadData",
458
+ "cuModuleLoadDataEx",
459
+ "cuModuleLoadFatBinary",
460
+ "cuModuleUnload",
461
+ "cuMulticastAddDevice",
462
+ "cuMulticastBindAddr",
463
+ "cuMulticastBindMem",
464
+ "cuMulticastCreate",
465
+ "cuMulticastGetGranularity",
466
+ "cuMulticastUnbind",
467
+ "cuOccupancyAvailableDynamicSMemPerBlock",
468
+ "cuOccupancyMaxActiveBlocksPerMultiprocessor",
469
+ "cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags",
470
+ "cuOccupancyMaxActiveClusters",
471
+ "cuOccupancyMaxPotentialBlockSize",
472
+ "cuOccupancyMaxPotentialBlockSizeWithFlags",
473
+ "cuOccupancyMaxPotentialClusterSize",
474
+ "cuParamSetSize",
475
+ "cuParamSetTexRef",
476
+ "cuParamSetf",
477
+ "cuParamSeti",
478
+ "cuParamSetv",
479
+ "cuPointerGetAttribute",
480
+ "cuPointerGetAttributes",
481
+ "cuPointerSetAttribute",
482
+ "cuProfilerInitialize",
483
+ "cuProfilerStart",
484
+ "cuProfilerStop",
485
+ "cuSignalExternalSemaphoresAsync",
486
+ "cuSignalExternalSemaphoresAsync_ptsz",
487
+ "cuStreamAddCallback",
488
+ "cuStreamAddCallback_ptsz",
489
+ "cuStreamAttachMemAsync",
490
+ "cuStreamAttachMemAsync_ptsz",
491
+ "cuStreamBatchMemOp",
492
+ "cuStreamBatchMemOp_ptsz",
493
+ "cuStreamBatchMemOp_v2",
494
+ "cuStreamBatchMemOp_v2_ptsz",
495
+ "cuStreamBeginCapture",
496
+ "cuStreamBeginCaptureToGraph",
497
+ "cuStreamBeginCaptureToGraph_ptsz",
498
+ "cuStreamBeginCapture_ptsz",
499
+ "cuStreamBeginCapture_v2",
500
+ "cuStreamBeginCapture_v2_ptsz",
501
+ "cuStreamCopyAttributes",
502
+ "cuStreamCopyAttributes_ptsz",
503
+ "cuStreamCreate",
504
+ "cuStreamCreateWithPriority",
505
+ "cuStreamDestroy",
506
+ "cuStreamDestroy_v2",
507
+ "cuStreamEndCapture",
508
+ "cuStreamEndCapture_ptsz",
509
+ "cuStreamGetAttribute",
510
+ "cuStreamGetAttribute_ptsz",
511
+ "cuStreamGetCaptureInfo",
512
+ "cuStreamGetCaptureInfo_ptsz",
513
+ "cuStreamGetCaptureInfo_v2",
514
+ "cuStreamGetCaptureInfo_v2_ptsz",
515
+ "cuStreamGetCaptureInfo_v3",
516
+ "cuStreamGetCaptureInfo_v3_ptsz",
517
+ "cuStreamGetCtx",
518
+ "cuStreamGetCtx_ptsz",
519
+ "cuStreamGetFlags",
520
+ "cuStreamGetFlags_ptsz",
521
+ "cuStreamGetId",
522
+ "cuStreamGetId_ptsz",
523
+ "cuStreamGetPriority",
524
+ "cuStreamGetPriority_ptsz",
525
+ "cuStreamIsCapturing",
526
+ "cuStreamIsCapturing_ptsz",
527
+ "cuStreamQuery",
528
+ "cuStreamQuery_ptsz",
529
+ "cuStreamSetAttribute",
530
+ "cuStreamSetAttribute_ptsz",
531
+ "cuStreamSynchronize",
532
+ "cuStreamSynchronize_ptsz",
533
+ "cuStreamUpdateCaptureDependencies",
534
+ "cuStreamUpdateCaptureDependencies_ptsz",
535
+ "cuStreamUpdateCaptureDependencies_v2",
536
+ "cuStreamUpdateCaptureDependencies_v2_ptsz",
537
+ "cuStreamWaitEvent",
538
+ "cuStreamWaitEvent_ptsz",
539
+ "cuStreamWaitValue32",
540
+ "cuStreamWaitValue32_ptsz",
541
+ "cuStreamWaitValue32_v2",
542
+ "cuStreamWaitValue32_v2_ptsz",
543
+ "cuStreamWaitValue64",
544
+ "cuStreamWaitValue64_ptsz",
545
+ "cuStreamWaitValue64_v2",
546
+ "cuStreamWaitValue64_v2_ptsz",
547
+ "cuStreamWriteValue32",
548
+ "cuStreamWriteValue32_ptsz",
549
+ "cuStreamWriteValue32_v2",
550
+ "cuStreamWriteValue32_v2_ptsz",
551
+ "cuStreamWriteValue64",
552
+ "cuStreamWriteValue64_ptsz",
553
+ "cuStreamWriteValue64_v2",
554
+ "cuStreamWriteValue64_v2_ptsz",
555
+ "cuSurfObjectCreate",
556
+ "cuSurfObjectDestroy",
557
+ "cuSurfObjectGetResourceDesc",
558
+ "cuSurfRefGetArray",
559
+ "cuSurfRefSetArray",
560
+ "cuTensorMapEncodeIm2col",
561
+ "cuTensorMapEncodeTiled",
562
+ "cuTensorMapReplaceAddress",
563
+ "cuTexObjectCreate",
564
+ "cuTexObjectDestroy",
565
+ "cuTexObjectGetResourceDesc",
566
+ "cuTexObjectGetResourceViewDesc",
567
+ "cuTexObjectGetTextureDesc",
568
+ "cuTexRefCreate",
569
+ "cuTexRefDestroy",
570
+ "cuTexRefGetAddress",
571
+ "cuTexRefGetAddressMode",
572
+ "cuTexRefGetAddress_v2",
573
+ "cuTexRefGetArray",
574
+ "cuTexRefGetBorderColor",
575
+ "cuTexRefGetFilterMode",
576
+ "cuTexRefGetFlags",
577
+ "cuTexRefGetFormat",
578
+ "cuTexRefGetMaxAnisotropy",
579
+ "cuTexRefGetMipmapFilterMode",
580
+ "cuTexRefGetMipmapLevelBias",
581
+ "cuTexRefGetMipmapLevelClamp",
582
+ "cuTexRefGetMipmappedArray",
583
+ "cuTexRefSetAddress",
584
+ "cuTexRefSetAddress2D",
585
+ "cuTexRefSetAddress2D_v2",
586
+ "cuTexRefSetAddress2D_v3",
587
+ "cuTexRefSetAddressMode",
588
+ "cuTexRefSetAddress_v2",
589
+ "cuTexRefSetArray",
590
+ "cuTexRefSetBorderColor",
591
+ "cuTexRefSetFilterMode",
592
+ "cuTexRefSetFlags",
593
+ "cuTexRefSetFormat",
594
+ "cuTexRefSetMaxAnisotropy",
595
+ "cuTexRefSetMipmapFilterMode",
596
+ "cuTexRefSetMipmapLevelBias",
597
+ "cuTexRefSetMipmapLevelClamp",
598
+ "cuTexRefSetMipmappedArray",
599
+ "cuThreadExchangeStreamCaptureMode",
600
+ "cuUserObjectCreate",
601
+ "cuUserObjectRelease",
602
+ "cuUserObjectRetain",
603
+ "cuVDPAUCtxCreate",
604
+ "cuVDPAUCtxCreate_v2",
605
+ "cuVDPAUGetDevice",
606
+ "cuWaitExternalSemaphoresAsync",
607
+ "cuWaitExternalSemaphoresAsync_ptsz",
608
+ "cudbgApiAttach",
609
+ "cudbgApiClientPid",
610
+ "cudbgApiClientRevision",
611
+ "cudbgApiDetach",
612
+ "cudbgApiInit",
613
+ "cudbgAttachHandlerAvailable",
614
+ "cudbgDebuggerCapabilities",
615
+ "cudbgDebuggerInitialized",
616
+ "cudbgDetachSuspendedDevicesMask",
617
+ "cudbgEnableIntegratedMemcheck",
618
+ "cudbgEnableLaunchBlocking",
619
+ "cudbgEnablePreemptionDebugging",
620
+ "cudbgGetAPI",
621
+ "cudbgGetAPIVersion",
622
+ "cudbgInjectionPath",
623
+ "cudbgIpcFlag",
624
+ "cudbgMain",
625
+ "cudbgReportDriverApiError",
626
+ "cudbgReportDriverApiErrorFlags",
627
+ "cudbgReportDriverInternalError",
628
+ "cudbgReportedDriverApiErrorCode",
629
+ "cudbgReportedDriverApiErrorFuncNameAddr",
630
+ "cudbgReportedDriverApiErrorFuncNameSize",
631
+ "cudbgReportedDriverInternalErrorCode",
632
+ "cudbgResumeForAttachDetach",
633
+ "cudbgRpcEnabled",
634
+ "cudbgSessionId",
635
+ "cudbgUseExternalDebugger",
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/cuda/cudart.inc ADDED
@@ -0,0 +1,413 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "__cudaGetKernel",
2
+ "__cudaInitModule",
3
+ "__cudaLaunchKernel",
4
+ "__cudaLaunchKernel_ptsz",
5
+ "__cudaPopCallConfiguration",
6
+ "__cudaPushCallConfiguration",
7
+ "__cudaRegisterFatBinary",
8
+ "__cudaRegisterFatBinaryEnd",
9
+ "__cudaRegisterFunction",
10
+ "__cudaRegisterHostVar",
11
+ "__cudaRegisterManagedVar",
12
+ "__cudaRegisterUnifiedTable",
13
+ "__cudaRegisterVar",
14
+ "__cudaUnregisterFatBinary",
15
+ "cudaArrayGetInfo",
16
+ "cudaArrayGetMemoryRequirements",
17
+ "cudaArrayGetPlane",
18
+ "cudaArrayGetSparseProperties",
19
+ "cudaChooseDevice",
20
+ "cudaCreateChannelDesc",
21
+ "cudaCreateSurfaceObject",
22
+ "cudaCreateTextureObject",
23
+ "cudaCtxResetPersistingL2Cache",
24
+ "cudaDestroyExternalMemory",
25
+ "cudaDestroyExternalSemaphore",
26
+ "cudaDestroySurfaceObject",
27
+ "cudaDestroyTextureObject",
28
+ "cudaDeviceCanAccessPeer",
29
+ "cudaDeviceDisablePeerAccess",
30
+ "cudaDeviceEnablePeerAccess",
31
+ "cudaDeviceFlushGPUDirectRDMAWrites",
32
+ "cudaDeviceGetAttribute",
33
+ "cudaDeviceGetByPCIBusId",
34
+ "cudaDeviceGetCacheConfig",
35
+ "cudaDeviceGetDefaultMemPool",
36
+ "cudaDeviceGetGraphMemAttribute",
37
+ "cudaDeviceGetLimit",
38
+ "cudaDeviceGetMemPool",
39
+ "cudaDeviceGetNvSciSyncAttributes",
40
+ "cudaDeviceGetP2PAttribute",
41
+ "cudaDeviceGetPCIBusId",
42
+ "cudaDeviceGetSharedMemConfig",
43
+ "cudaDeviceGetStreamPriorityRange",
44
+ "cudaDeviceGetTexture1DLinearMaxWidth",
45
+ "cudaDeviceGraphMemTrim",
46
+ "cudaDeviceReset",
47
+ "cudaDeviceSetCacheConfig",
48
+ "cudaDeviceSetGraphMemAttribute",
49
+ "cudaDeviceSetLimit",
50
+ "cudaDeviceSetMemPool",
51
+ "cudaDeviceSetSharedMemConfig",
52
+ "cudaDeviceSynchronize",
53
+ "cudaDriverGetVersion",
54
+ "cudaEGLStreamConsumerAcquireFrame",
55
+ "cudaEGLStreamConsumerConnect",
56
+ "cudaEGLStreamConsumerConnectWithFlags",
57
+ "cudaEGLStreamConsumerDisconnect",
58
+ "cudaEGLStreamConsumerReleaseFrame",
59
+ "cudaEGLStreamProducerConnect",
60
+ "cudaEGLStreamProducerDisconnect",
61
+ "cudaEGLStreamProducerPresentFrame",
62
+ "cudaEGLStreamProducerReturnFrame",
63
+ "cudaEventCreate",
64
+ "cudaEventCreateFromEGLSync",
65
+ "cudaEventCreateWithFlags",
66
+ "cudaEventDestroy",
67
+ "cudaEventElapsedTime",
68
+ "cudaEventQuery",
69
+ "cudaEventRecord",
70
+ "cudaEventRecordWithFlags",
71
+ "cudaEventRecordWithFlags_ptsz",
72
+ "cudaEventRecord_ptsz",
73
+ "cudaEventSynchronize",
74
+ "cudaExternalMemoryGetMappedBuffer",
75
+ "cudaExternalMemoryGetMappedMipmappedArray",
76
+ "cudaFree",
77
+ "cudaFreeArray",
78
+ "cudaFreeAsync",
79
+ "cudaFreeAsync_ptsz",
80
+ "cudaFreeHost",
81
+ "cudaFreeMipmappedArray",
82
+ "cudaFuncGetAttributes",
83
+ "cudaFuncGetName",
84
+ "cudaFuncSetAttribute",
85
+ "cudaFuncSetCacheConfig",
86
+ "cudaFuncSetSharedMemConfig",
87
+ "cudaGLGetDevices",
88
+ "cudaGLMapBufferObject",
89
+ "cudaGLMapBufferObjectAsync",
90
+ "cudaGLRegisterBufferObject",
91
+ "cudaGLSetBufferObjectMapFlags",
92
+ "cudaGLSetGLDevice",
93
+ "cudaGLUnmapBufferObject",
94
+ "cudaGLUnmapBufferObjectAsync",
95
+ "cudaGLUnregisterBufferObject",
96
+ "cudaGetChannelDesc",
97
+ "cudaGetDevice",
98
+ "cudaGetDeviceCount",
99
+ "cudaGetDeviceFlags",
100
+ "cudaGetDeviceProperties",
101
+ "cudaGetDeviceProperties_v2",
102
+ "cudaGetDriverEntryPoint",
103
+ "cudaGetDriverEntryPoint_ptsz",
104
+ "cudaGetErrorName",
105
+ "cudaGetErrorString",
106
+ "cudaGetExportTable",
107
+ "cudaGetFuncBySymbol",
108
+ "cudaGetKernel",
109
+ "cudaGetLastError",
110
+ "cudaGetMipmappedArrayLevel",
111
+ "cudaGetSurfaceObjectResourceDesc",
112
+ "cudaGetSymbolAddress",
113
+ "cudaGetSymbolSize",
114
+ "cudaGetTextureObjectResourceDesc",
115
+ "cudaGetTextureObjectResourceViewDesc",
116
+ "cudaGetTextureObjectTextureDesc",
117
+ "cudaGraphAddChildGraphNode",
118
+ "cudaGraphAddDependencies",
119
+ "cudaGraphAddDependencies_v2",
120
+ "cudaGraphAddEmptyNode",
121
+ "cudaGraphAddEventRecordNode",
122
+ "cudaGraphAddEventWaitNode",
123
+ "cudaGraphAddExternalSemaphoresSignalNode",
124
+ "cudaGraphAddExternalSemaphoresWaitNode",
125
+ "cudaGraphAddHostNode",
126
+ "cudaGraphAddKernelNode",
127
+ "cudaGraphAddMemAllocNode",
128
+ "cudaGraphAddMemFreeNode",
129
+ "cudaGraphAddMemcpyNode",
130
+ "cudaGraphAddMemcpyNode1D",
131
+ "cudaGraphAddMemcpyNodeFromSymbol",
132
+ "cudaGraphAddMemcpyNodeToSymbol",
133
+ "cudaGraphAddMemsetNode",
134
+ "cudaGraphAddNode",
135
+ "cudaGraphAddNode_v2",
136
+ "cudaGraphChildGraphNodeGetGraph",
137
+ "cudaGraphClone",
138
+ "cudaGraphConditionalHandleCreate",
139
+ "cudaGraphCreate",
140
+ "cudaGraphDebugDotPrint",
141
+ "cudaGraphDestroy",
142
+ "cudaGraphDestroyNode",
143
+ "cudaGraphEventRecordNodeGetEvent",
144
+ "cudaGraphEventRecordNodeSetEvent",
145
+ "cudaGraphEventWaitNodeGetEvent",
146
+ "cudaGraphEventWaitNodeSetEvent",
147
+ "cudaGraphExecChildGraphNodeSetParams",
148
+ "cudaGraphExecDestroy",
149
+ "cudaGraphExecEventRecordNodeSetEvent",
150
+ "cudaGraphExecEventWaitNodeSetEvent",
151
+ "cudaGraphExecExternalSemaphoresSignalNodeSetParams",
152
+ "cudaGraphExecExternalSemaphoresWaitNodeSetParams",
153
+ "cudaGraphExecGetFlags",
154
+ "cudaGraphExecHostNodeSetParams",
155
+ "cudaGraphExecKernelNodeSetParams",
156
+ "cudaGraphExecMemcpyNodeSetParams",
157
+ "cudaGraphExecMemcpyNodeSetParams1D",
158
+ "cudaGraphExecMemcpyNodeSetParamsFromSymbol",
159
+ "cudaGraphExecMemcpyNodeSetParamsToSymbol",
160
+ "cudaGraphExecMemsetNodeSetParams",
161
+ "cudaGraphExecNodeSetParams",
162
+ "cudaGraphExecUpdate",
163
+ "cudaGraphExternalSemaphoresSignalNodeGetParams",
164
+ "cudaGraphExternalSemaphoresSignalNodeSetParams",
165
+ "cudaGraphExternalSemaphoresWaitNodeGetParams",
166
+ "cudaGraphExternalSemaphoresWaitNodeSetParams",
167
+ "cudaGraphGetEdges",
168
+ "cudaGraphGetEdges_v2",
169
+ "cudaGraphGetNodes",
170
+ "cudaGraphGetRootNodes",
171
+ "cudaGraphHostNodeGetParams",
172
+ "cudaGraphHostNodeSetParams",
173
+ "cudaGraphInstantiate",
174
+ "cudaGraphInstantiateWithFlags",
175
+ "cudaGraphInstantiateWithParams",
176
+ "cudaGraphInstantiateWithParams_ptsz",
177
+ "cudaGraphKernelNodeCopyAttributes",
178
+ "cudaGraphKernelNodeGetAttribute",
179
+ "cudaGraphKernelNodeGetParams",
180
+ "cudaGraphKernelNodeSetAttribute",
181
+ "cudaGraphKernelNodeSetParams",
182
+ "cudaGraphLaunch",
183
+ "cudaGraphLaunch_ptsz",
184
+ "cudaGraphMemAllocNodeGetParams",
185
+ "cudaGraphMemFreeNodeGetParams",
186
+ "cudaGraphMemcpyNodeGetParams",
187
+ "cudaGraphMemcpyNodeSetParams",
188
+ "cudaGraphMemcpyNodeSetParams1D",
189
+ "cudaGraphMemcpyNodeSetParamsFromSymbol",
190
+ "cudaGraphMemcpyNodeSetParamsToSymbol",
191
+ "cudaGraphMemsetNodeGetParams",
192
+ "cudaGraphMemsetNodeSetParams",
193
+ "cudaGraphNodeFindInClone",
194
+ "cudaGraphNodeGetDependencies",
195
+ "cudaGraphNodeGetDependencies_v2",
196
+ "cudaGraphNodeGetDependentNodes",
197
+ "cudaGraphNodeGetDependentNodes_v2",
198
+ "cudaGraphNodeGetEnabled",
199
+ "cudaGraphNodeGetType",
200
+ "cudaGraphNodeSetEnabled",
201
+ "cudaGraphNodeSetParams",
202
+ "cudaGraphReleaseUserObject",
203
+ "cudaGraphRemoveDependencies",
204
+ "cudaGraphRemoveDependencies_v2",
205
+ "cudaGraphRetainUserObject",
206
+ "cudaGraphUpload",
207
+ "cudaGraphUpload_ptsz",
208
+ "cudaGraphicsEGLRegisterImage",
209
+ "cudaGraphicsGLRegisterBuffer",
210
+ "cudaGraphicsGLRegisterImage",
211
+ "cudaGraphicsMapResources",
212
+ "cudaGraphicsResourceGetMappedEglFrame",
213
+ "cudaGraphicsResourceGetMappedMipmappedArray",
214
+ "cudaGraphicsResourceGetMappedPointer",
215
+ "cudaGraphicsResourceSetMapFlags",
216
+ "cudaGraphicsSubResourceGetMappedArray",
217
+ "cudaGraphicsUnmapResources",
218
+ "cudaGraphicsUnregisterResource",
219
+ "cudaGraphicsVDPAURegisterOutputSurface",
220
+ "cudaGraphicsVDPAURegisterVideoSurface",
221
+ "cudaHostAlloc",
222
+ "cudaHostGetDevicePointer",
223
+ "cudaHostGetFlags",
224
+ "cudaHostRegister",
225
+ "cudaHostUnregister",
226
+ "cudaImportExternalMemory",
227
+ "cudaImportExternalSemaphore",
228
+ "cudaInitDevice",
229
+ "cudaIpcCloseMemHandle",
230
+ "cudaIpcGetEventHandle",
231
+ "cudaIpcGetMemHandle",
232
+ "cudaIpcOpenEventHandle",
233
+ "cudaIpcOpenMemHandle",
234
+ "cudaLaunchCooperativeKernel",
235
+ "cudaLaunchCooperativeKernelMultiDevice",
236
+ "cudaLaunchCooperativeKernel_ptsz",
237
+ "cudaLaunchHostFunc",
238
+ "cudaLaunchHostFunc_ptsz",
239
+ "cudaLaunchKernel",
240
+ "cudaLaunchKernelExC",
241
+ "cudaLaunchKernelExC_ptsz",
242
+ "cudaLaunchKernel_ptsz",
243
+ "cudaMalloc",
244
+ "cudaMalloc3D",
245
+ "cudaMalloc3DArray",
246
+ "cudaMallocArray",
247
+ "cudaMallocAsync",
248
+ "cudaMallocAsync_ptsz",
249
+ "cudaMallocFromPoolAsync",
250
+ "cudaMallocFromPoolAsync_ptsz",
251
+ "cudaMallocHost",
252
+ "cudaMallocManaged",
253
+ "cudaMallocMipmappedArray",
254
+ "cudaMallocPitch",
255
+ "cudaMemAdvise",
256
+ "cudaMemAdvise_v2",
257
+ "cudaMemGetInfo",
258
+ "cudaMemPoolCreate",
259
+ "cudaMemPoolDestroy",
260
+ "cudaMemPoolExportPointer",
261
+ "cudaMemPoolExportToShareableHandle",
262
+ "cudaMemPoolGetAccess",
263
+ "cudaMemPoolGetAttribute",
264
+ "cudaMemPoolImportFromShareableHandle",
265
+ "cudaMemPoolImportPointer",
266
+ "cudaMemPoolSetAccess",
267
+ "cudaMemPoolSetAttribute",
268
+ "cudaMemPoolTrimTo",
269
+ "cudaMemPrefetchAsync",
270
+ "cudaMemPrefetchAsync_ptsz",
271
+ "cudaMemPrefetchAsync_v2",
272
+ "cudaMemPrefetchAsync_v2_ptsz",
273
+ "cudaMemRangeGetAttribute",
274
+ "cudaMemRangeGetAttributes",
275
+ "cudaMemcpy",
276
+ "cudaMemcpy2D",
277
+ "cudaMemcpy2DArrayToArray",
278
+ "cudaMemcpy2DArrayToArray_ptds",
279
+ "cudaMemcpy2DAsync",
280
+ "cudaMemcpy2DAsync_ptsz",
281
+ "cudaMemcpy2DFromArray",
282
+ "cudaMemcpy2DFromArrayAsync",
283
+ "cudaMemcpy2DFromArrayAsync_ptsz",
284
+ "cudaMemcpy2DFromArray_ptds",
285
+ "cudaMemcpy2DToArray",
286
+ "cudaMemcpy2DToArrayAsync",
287
+ "cudaMemcpy2DToArrayAsync_ptsz",
288
+ "cudaMemcpy2DToArray_ptds",
289
+ "cudaMemcpy2D_ptds",
290
+ "cudaMemcpy3D",
291
+ "cudaMemcpy3DAsync",
292
+ "cudaMemcpy3DAsync_ptsz",
293
+ "cudaMemcpy3DPeer",
294
+ "cudaMemcpy3DPeerAsync",
295
+ "cudaMemcpy3DPeerAsync_ptsz",
296
+ "cudaMemcpy3DPeer_ptds",
297
+ "cudaMemcpy3D_ptds",
298
+ "cudaMemcpyArrayToArray",
299
+ "cudaMemcpyArrayToArray_ptds",
300
+ "cudaMemcpyAsync",
301
+ "cudaMemcpyAsync_ptsz",
302
+ "cudaMemcpyFromArray",
303
+ "cudaMemcpyFromArrayAsync",
304
+ "cudaMemcpyFromArrayAsync_ptsz",
305
+ "cudaMemcpyFromArray_ptds",
306
+ "cudaMemcpyFromSymbol",
307
+ "cudaMemcpyFromSymbolAsync",
308
+ "cudaMemcpyFromSymbolAsync_ptsz",
309
+ "cudaMemcpyFromSymbol_ptds",
310
+ "cudaMemcpyPeer",
311
+ "cudaMemcpyPeerAsync",
312
+ "cudaMemcpyToArray",
313
+ "cudaMemcpyToArrayAsync",
314
+ "cudaMemcpyToArrayAsync_ptsz",
315
+ "cudaMemcpyToArray_ptds",
316
+ "cudaMemcpyToSymbol",
317
+ "cudaMemcpyToSymbolAsync",
318
+ "cudaMemcpyToSymbolAsync_ptsz",
319
+ "cudaMemcpyToSymbol_ptds",
320
+ "cudaMemcpy_ptds",
321
+ "cudaMemset",
322
+ "cudaMemset2D",
323
+ "cudaMemset2DAsync",
324
+ "cudaMemset2DAsync_ptsz",
325
+ "cudaMemset2D_ptds",
326
+ "cudaMemset3D",
327
+ "cudaMemset3DAsync",
328
+ "cudaMemset3DAsync_ptsz",
329
+ "cudaMemset3D_ptds",
330
+ "cudaMemsetAsync",
331
+ "cudaMemsetAsync_ptsz",
332
+ "cudaMemset_ptds",
333
+ "cudaMipmappedArrayGetMemoryRequirements",
334
+ "cudaMipmappedArrayGetSparseProperties",
335
+ "cudaOccupancyAvailableDynamicSMemPerBlock",
336
+ "cudaOccupancyMaxActiveBlocksPerMultiprocessor",
337
+ "cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags",
338
+ "cudaOccupancyMaxActiveClusters",
339
+ "cudaOccupancyMaxPotentialClusterSize",
340
+ "cudaPeekAtLastError",
341
+ "cudaPointerGetAttributes",
342
+ "cudaProfilerStart",
343
+ "cudaProfilerStop",
344
+ "cudaRuntimeGetVersion",
345
+ "cudaSetDevice",
346
+ "cudaSetDeviceFlags",
347
+ "cudaSetDoubleForDevice",
348
+ "cudaSetDoubleForHost",
349
+ "cudaSetValidDevices",
350
+ "cudaSignalExternalSemaphoresAsync",
351
+ "cudaSignalExternalSemaphoresAsync_ptsz",
352
+ "cudaSignalExternalSemaphoresAsync_v2",
353
+ "cudaSignalExternalSemaphoresAsync_v2_ptsz",
354
+ "cudaStreamAddCallback",
355
+ "cudaStreamAddCallback_ptsz",
356
+ "cudaStreamAttachMemAsync",
357
+ "cudaStreamAttachMemAsync_ptsz",
358
+ "cudaStreamBeginCapture",
359
+ "cudaStreamBeginCaptureToGraph",
360
+ "cudaStreamBeginCaptureToGraph_ptsz",
361
+ "cudaStreamBeginCapture_ptsz",
362
+ "cudaStreamCopyAttributes",
363
+ "cudaStreamCopyAttributes_ptsz",
364
+ "cudaStreamCreate",
365
+ "cudaStreamCreateWithFlags",
366
+ "cudaStreamCreateWithPriority",
367
+ "cudaStreamDestroy",
368
+ "cudaStreamEndCapture",
369
+ "cudaStreamEndCapture_ptsz",
370
+ "cudaStreamGetAttribute",
371
+ "cudaStreamGetAttribute_ptsz",
372
+ "cudaStreamGetCaptureInfo",
373
+ "cudaStreamGetCaptureInfo_ptsz",
374
+ "cudaStreamGetCaptureInfo_v2",
375
+ "cudaStreamGetCaptureInfo_v2_ptsz",
376
+ "cudaStreamGetCaptureInfo_v3",
377
+ "cudaStreamGetCaptureInfo_v3_ptsz",
378
+ "cudaStreamGetFlags",
379
+ "cudaStreamGetFlags_ptsz",
380
+ "cudaStreamGetId",
381
+ "cudaStreamGetId_ptsz",
382
+ "cudaStreamGetPriority",
383
+ "cudaStreamGetPriority_ptsz",
384
+ "cudaStreamIsCapturing",
385
+ "cudaStreamIsCapturing_ptsz",
386
+ "cudaStreamQuery",
387
+ "cudaStreamQuery_ptsz",
388
+ "cudaStreamSetAttribute",
389
+ "cudaStreamSetAttribute_ptsz",
390
+ "cudaStreamSynchronize",
391
+ "cudaStreamSynchronize_ptsz",
392
+ "cudaStreamUpdateCaptureDependencies",
393
+ "cudaStreamUpdateCaptureDependencies_ptsz",
394
+ "cudaStreamUpdateCaptureDependencies_v2",
395
+ "cudaStreamUpdateCaptureDependencies_v2_ptsz",
396
+ "cudaStreamWaitEvent",
397
+ "cudaStreamWaitEvent_ptsz",
398
+ "cudaThreadExchangeStreamCaptureMode",
399
+ "cudaThreadExit",
400
+ "cudaThreadGetCacheConfig",
401
+ "cudaThreadGetLimit",
402
+ "cudaThreadSetCacheConfig",
403
+ "cudaThreadSetLimit",
404
+ "cudaThreadSynchronize",
405
+ "cudaUserObjectCreate",
406
+ "cudaUserObjectRelease",
407
+ "cudaUserObjectRetain",
408
+ "cudaVDPAUGetDevice",
409
+ "cudaVDPAUSetVDPAUDevice",
410
+ "cudaWaitExternalSemaphoresAsync",
411
+ "cudaWaitExternalSemaphoresAsync_ptsz",
412
+ "cudaWaitExternalSemaphoresAsync_v2",
413
+ "cudaWaitExternalSemaphoresAsync_v2_ptsz",
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/cuda/cudnn.inc ADDED
@@ -0,0 +1,272 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "cudnnActivationBackward",
2
+ "cudnnActivationForward",
3
+ "cudnnAddTensor",
4
+ "cudnnAdvInferVersionCheck",
5
+ "cudnnAdvTrainVersionCheck",
6
+ "cudnnAdvVersionCheck",
7
+ "cudnnBackendCreateDescriptor",
8
+ "cudnnBackendDestroyDescriptor",
9
+ "cudnnBackendExecute",
10
+ "cudnnBackendFinalize",
11
+ "cudnnBackendGetAttribute",
12
+ "cudnnBackendInitialize",
13
+ "cudnnBackendSetAttribute",
14
+ "cudnnBatchNormalizationBackward",
15
+ "cudnnBatchNormalizationBackwardEx",
16
+ "cudnnBatchNormalizationForwardInference",
17
+ "cudnnBatchNormalizationForwardTraining",
18
+ "cudnnBatchNormalizationForwardTrainingEx",
19
+ "cudnnBuildRNNDynamic",
20
+ "cudnnCTCLoss",
21
+ "cudnnCTCLoss_v8",
22
+ "cudnnCnnInferVersionCheck",
23
+ "cudnnCnnTrainVersionCheck",
24
+ "cudnnCnnVersionCheck",
25
+ "cudnnConvolutionBackwardBias",
26
+ "cudnnConvolutionBackwardData",
27
+ "cudnnConvolutionBackwardFilter",
28
+ "cudnnConvolutionBiasActivationForward",
29
+ "cudnnConvolutionForward",
30
+ "cudnnCopyAlgorithmDescriptor",
31
+ "cudnnCreate",
32
+ "cudnnCreateActivationDescriptor",
33
+ "cudnnCreateAlgorithmDescriptor",
34
+ "cudnnCreateAlgorithmPerformance",
35
+ "cudnnCreateAttnDescriptor",
36
+ "cudnnCreateCTCLossDescriptor",
37
+ "cudnnCreateConvolutionDescriptor",
38
+ "cudnnCreateDropoutDescriptor",
39
+ "cudnnCreateFilterDescriptor",
40
+ "cudnnCreateFusedOpsConstParamPack",
41
+ "cudnnCreateFusedOpsPlan",
42
+ "cudnnCreateFusedOpsVariantParamPack",
43
+ "cudnnCreateLRNDescriptor",
44
+ "cudnnCreateOpTensorDescriptor",
45
+ "cudnnCreatePersistentRNNPlan",
46
+ "cudnnCreatePoolingDescriptor",
47
+ "cudnnCreateRNNDataDescriptor",
48
+ "cudnnCreateRNNDescriptor",
49
+ "cudnnCreateReduceTensorDescriptor",
50
+ "cudnnCreateSeqDataDescriptor",
51
+ "cudnnCreateSpatialTransformerDescriptor",
52
+ "cudnnCreateTensorDescriptor",
53
+ "cudnnCreateTensorTransformDescriptor",
54
+ "cudnnDeriveBNTensorDescriptor",
55
+ "cudnnDeriveNormTensorDescriptor",
56
+ "cudnnDestroy",
57
+ "cudnnDestroyActivationDescriptor",
58
+ "cudnnDestroyAlgorithmDescriptor",
59
+ "cudnnDestroyAlgorithmPerformance",
60
+ "cudnnDestroyAttnDescriptor",
61
+ "cudnnDestroyCTCLossDescriptor",
62
+ "cudnnDestroyConvolutionDescriptor",
63
+ "cudnnDestroyDropoutDescriptor",
64
+ "cudnnDestroyFilterDescriptor",
65
+ "cudnnDestroyFusedOpsConstParamPack",
66
+ "cudnnDestroyFusedOpsPlan",
67
+ "cudnnDestroyFusedOpsVariantParamPack",
68
+ "cudnnDestroyLRNDescriptor",
69
+ "cudnnDestroyOpTensorDescriptor",
70
+ "cudnnDestroyPersistentRNNPlan",
71
+ "cudnnDestroyPoolingDescriptor",
72
+ "cudnnDestroyRNNDataDescriptor",
73
+ "cudnnDestroyRNNDescriptor",
74
+ "cudnnDestroyReduceTensorDescriptor",
75
+ "cudnnDestroySeqDataDescriptor",
76
+ "cudnnDestroySpatialTransformerDescriptor",
77
+ "cudnnDestroyTensorDescriptor",
78
+ "cudnnDestroyTensorTransformDescriptor",
79
+ "cudnnDivisiveNormalizationBackward",
80
+ "cudnnDivisiveNormalizationForward",
81
+ "cudnnDropoutBackward",
82
+ "cudnnDropoutForward",
83
+ "cudnnDropoutGetReserveSpaceSize",
84
+ "cudnnDropoutGetStatesSize",
85
+ "cudnnFindConvolutionBackwardDataAlgorithm",
86
+ "cudnnFindConvolutionBackwardDataAlgorithmEx",
87
+ "cudnnFindConvolutionBackwardFilterAlgorithm",
88
+ "cudnnFindConvolutionBackwardFilterAlgorithmEx",
89
+ "cudnnFindConvolutionForwardAlgorithm",
90
+ "cudnnFindConvolutionForwardAlgorithmEx",
91
+ "cudnnFindRNNBackwardDataAlgorithmEx",
92
+ "cudnnFindRNNBackwardWeightsAlgorithmEx",
93
+ "cudnnFindRNNForwardInferenceAlgorithmEx",
94
+ "cudnnFindRNNForwardTrainingAlgorithmEx",
95
+ "cudnnFusedOpsExecute",
96
+ "cudnnGetActivationDescriptor",
97
+ "cudnnGetActivationDescriptorSwishBeta",
98
+ "cudnnGetAlgorithmDescriptor",
99
+ "cudnnGetAlgorithmPerformance",
100
+ "cudnnGetAlgorithmSpaceSize",
101
+ "cudnnGetAttnDescriptor",
102
+ "cudnnGetBackdoor",
103
+ "cudnnGetBatchNormalizationBackwardExWorkspaceSize",
104
+ "cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize",
105
+ "cudnnGetBatchNormalizationTrainingExReserveSpaceSize",
106
+ "cudnnGetCTCLossDescriptor",
107
+ "cudnnGetCTCLossDescriptorEx",
108
+ "cudnnGetCTCLossDescriptor_v8",
109
+ "cudnnGetCTCLossWorkspaceSize",
110
+ "cudnnGetCTCLossWorkspaceSize_v8",
111
+ "cudnnGetCallback",
112
+ "cudnnGetConvolution2dDescriptor",
113
+ "cudnnGetConvolution2dForwardOutputDim",
114
+ "cudnnGetConvolutionBackwardDataAlgorithmMaxCount",
115
+ "cudnnGetConvolutionBackwardDataAlgorithm_v7",
116
+ "cudnnGetConvolutionBackwardDataWorkspaceSize",
117
+ "cudnnGetConvolutionBackwardFilterAlgorithmMaxCount",
118
+ "cudnnGetConvolutionBackwardFilterAlgorithm_v7",
119
+ "cudnnGetConvolutionBackwardFilterWorkspaceSize",
120
+ "cudnnGetConvolutionForwardAlgorithmMaxCount",
121
+ "cudnnGetConvolutionForwardAlgorithm_v7",
122
+ "cudnnGetConvolutionForwardWorkspaceSize",
123
+ "cudnnGetConvolutionGroupCount",
124
+ "cudnnGetConvolutionMathType",
125
+ "cudnnGetConvolutionNdDescriptor",
126
+ "cudnnGetConvolutionNdForwardOutputDim",
127
+ "cudnnGetConvolutionReorderType",
128
+ "cudnnGetCudartVersion",
129
+ "cudnnGetDropoutDescriptor",
130
+ "cudnnGetErrorString",
131
+ "cudnnGetFilter4dDescriptor",
132
+ "cudnnGetFilterNdDescriptor",
133
+ "cudnnGetFilterSizeInBytes",
134
+ "cudnnGetFoldedConvBackwardDataDescriptors",
135
+ "cudnnGetFusedOpsConstParamPackAttribute",
136
+ "cudnnGetFusedOpsVariantParamPackAttribute",
137
+ "cudnnGetLRNDescriptor",
138
+ "cudnnGetMaxDeviceVersion",
139
+ "cudnnGetMultiHeadAttnBuffers",
140
+ "cudnnGetMultiHeadAttnWeights",
141
+ "cudnnGetNormalizationBackwardWorkspaceSize",
142
+ "cudnnGetNormalizationForwardTrainingWorkspaceSize",
143
+ "cudnnGetNormalizationTrainingReserveSpaceSize",
144
+ "cudnnGetOpTensorDescriptor",
145
+ "cudnnGetPooling2dDescriptor",
146
+ "cudnnGetPooling2dForwardOutputDim",
147
+ "cudnnGetPoolingNdDescriptor",
148
+ "cudnnGetPoolingNdForwardOutputDim",
149
+ "cudnnGetProperty",
150
+ "cudnnGetRNNBackwardDataAlgorithmMaxCount",
151
+ "cudnnGetRNNBackwardWeightsAlgorithmMaxCount",
152
+ "cudnnGetRNNBiasMode",
153
+ "cudnnGetRNNDataDescriptor",
154
+ "cudnnGetRNNDescriptor_v6",
155
+ "cudnnGetRNNDescriptor_v8",
156
+ "cudnnGetRNNDropoutLocationsInternal",
157
+ "cudnnGetRNNForwardInferenceAlgorithmMaxCount",
158
+ "cudnnGetRNNForwardTrainingAlgorithmMaxCount",
159
+ "cudnnGetRNNLinLayerBiasParams",
160
+ "cudnnGetRNNLinLayerMatrixParams",
161
+ "cudnnGetRNNMatrixMathType",
162
+ "cudnnGetRNNPaddingMode",
163
+ "cudnnGetRNNParamsSize",
164
+ "cudnnGetRNNProjectionLayers",
165
+ "cudnnGetRNNTempSpaceSizes",
166
+ "cudnnGetRNNTrainingReserveSize",
167
+ "cudnnGetRNNWeightParams",
168
+ "cudnnGetRNNWeightSpaceSize",
169
+ "cudnnGetRNNWorkspaceSize",
170
+ "cudnnGetReduceTensorDescriptor",
171
+ "cudnnGetReductionIndicesSize",
172
+ "cudnnGetReductionWorkspaceSize",
173
+ "cudnnGetSeqDataDescriptor",
174
+ "cudnnGetStream",
175
+ "cudnnGetTensor4dDescriptor",
176
+ "cudnnGetTensorNdDescriptor",
177
+ "cudnnGetTensorSizeInBytes",
178
+ "cudnnGetTensorTransformDescriptor",
179
+ "cudnnGetVersion",
180
+ "cudnnGraphVersionCheck",
181
+ "cudnnIm2Col",
182
+ "cudnnInitTransformDest",
183
+ "cudnnLRNCrossChannelBackward",
184
+ "cudnnLRNCrossChannelForward",
185
+ "cudnnMakeFusedOpsPlan",
186
+ "cudnnMultiHeadAttnBackwardData",
187
+ "cudnnMultiHeadAttnBackwardWeights",
188
+ "cudnnMultiHeadAttnForward",
189
+ "cudnnNormalizationBackward",
190
+ "cudnnNormalizationForwardInference",
191
+ "cudnnNormalizationForwardTraining",
192
+ "cudnnOpTensor",
193
+ "cudnnOpsInferVersionCheck",
194
+ "cudnnOpsTrainVersionCheck",
195
+ "cudnnOpsVersionCheck",
196
+ "cudnnPoolingBackward",
197
+ "cudnnPoolingForward",
198
+ "cudnnQueryRuntimeError",
199
+ "cudnnRNNBackwardData",
200
+ "cudnnRNNBackwardDataEx",
201
+ "cudnnRNNBackwardData_v8",
202
+ "cudnnRNNBackwardWeights",
203
+ "cudnnRNNBackwardWeightsEx",
204
+ "cudnnRNNBackwardWeights_v8",
205
+ "cudnnRNNForward",
206
+ "cudnnRNNForwardInference",
207
+ "cudnnRNNForwardInferenceEx",
208
+ "cudnnRNNForwardTraining",
209
+ "cudnnRNNForwardTrainingEx",
210
+ "cudnnRNNGetClip",
211
+ "cudnnRNNGetClip_v8",
212
+ "cudnnRNNSetClip",
213
+ "cudnnRNNSetClip_v8",
214
+ "cudnnReduceTensor",
215
+ "cudnnReorderFilterAndBias",
216
+ "cudnnRestoreAlgorithm",
217
+ "cudnnRestoreDropoutDescriptor",
218
+ "cudnnSaveAlgorithm",
219
+ "cudnnScaleTensor",
220
+ "cudnnSetActivationDescriptor",
221
+ "cudnnSetActivationDescriptorSwishBeta",
222
+ "cudnnSetAlgorithmDescriptor",
223
+ "cudnnSetAlgorithmPerformance",
224
+ "cudnnSetAttnDescriptor",
225
+ "cudnnSetBackdoor",
226
+ "cudnnSetBackdoorEx",
227
+ "cudnnSetCTCLossDescriptor",
228
+ "cudnnSetCTCLossDescriptorEx",
229
+ "cudnnSetCTCLossDescriptor_v8",
230
+ "cudnnSetCallback",
231
+ "cudnnSetConvolution2dDescriptor",
232
+ "cudnnSetConvolutionGroupCount",
233
+ "cudnnSetConvolutionMathType",
234
+ "cudnnSetConvolutionNdDescriptor",
235
+ "cudnnSetConvolutionReorderType",
236
+ "cudnnSetDropoutDescriptor",
237
+ "cudnnSetFilter4dDescriptor",
238
+ "cudnnSetFilterNdDescriptor",
239
+ "cudnnSetFusedOpsConstParamPackAttribute",
240
+ "cudnnSetFusedOpsVariantParamPackAttribute",
241
+ "cudnnSetLRNDescriptor",
242
+ "cudnnSetOpTensorDescriptor",
243
+ "cudnnSetPersistentRNNPlan",
244
+ "cudnnSetPooling2dDescriptor",
245
+ "cudnnSetPoolingNdDescriptor",
246
+ "cudnnSetRNNAlgorithmDescriptor",
247
+ "cudnnSetRNNBiasMode",
248
+ "cudnnSetRNNDataDescriptor",
249
+ "cudnnSetRNNDescriptor_v6",
250
+ "cudnnSetRNNDescriptor_v8",
251
+ "cudnnSetRNNMatrixMathType",
252
+ "cudnnSetRNNPaddingMode",
253
+ "cudnnSetRNNProjectionLayers",
254
+ "cudnnSetReduceTensorDescriptor",
255
+ "cudnnSetSeqDataDescriptor",
256
+ "cudnnSetSpatialTransformerNdDescriptor",
257
+ "cudnnSetStream",
258
+ "cudnnSetTensor",
259
+ "cudnnSetTensor4dDescriptor",
260
+ "cudnnSetTensor4dDescriptorEx",
261
+ "cudnnSetTensorNdDescriptor",
262
+ "cudnnSetTensorNdDescriptorEx",
263
+ "cudnnSetTensorTransformDescriptor",
264
+ "cudnnSoftmaxBackward",
265
+ "cudnnSoftmaxForward",
266
+ "cudnnSpatialTfGridGeneratorBackward",
267
+ "cudnnSpatialTfGridGeneratorForward",
268
+ "cudnnSpatialTfSamplerBackward",
269
+ "cudnnSpatialTfSamplerForward",
270
+ "cudnnTransformFilter",
271
+ "cudnnTransformTensor",
272
+ "cudnnTransformTensorEx",
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/lib/gtl/compactptrset.h ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_LIB_GTL_COMPACTPTRSET_H_
17
+ #define TENSORFLOW_TSL_LIB_GTL_COMPACTPTRSET_H_
18
+
19
+ #include <type_traits>
20
+
21
+ #include "tsl/lib/gtl/flatset.h"
22
+
23
+ namespace tsl {
24
+ namespace gtl {
25
+
26
+ // CompactPointerSet<T> is like a std::unordered_set<T> but is optimized
27
+ // for small sets (<= 1 element). T must be a pointer type.
28
+ template <typename T>
29
+ class CompactPointerSet {
30
+ private:
31
+ using BigRep = FlatSet<T>;
32
+
33
+ public:
34
+ using value_type = T;
35
+
36
+ CompactPointerSet() : rep_(0) {}
37
+
38
+ ~CompactPointerSet() {
39
+ static_assert(
40
+ std::is_pointer<T>::value,
41
+ "CompactPointerSet<T> can only be used with T's that are pointers");
42
+ if (isbig()) delete big();
43
+ }
44
+
45
+ CompactPointerSet(const CompactPointerSet& other) : rep_(0) { *this = other; }
46
+
47
+ CompactPointerSet& operator=(const CompactPointerSet& other) {
48
+ if (this == &other) return *this;
49
+ if (other.isbig()) {
50
+ // big => any
51
+ if (!isbig()) MakeBig();
52
+ *big() = *other.big();
53
+ } else if (isbig()) {
54
+ // !big => big
55
+ big()->clear();
56
+ if (other.rep_ != 0) {
57
+ big()->insert(reinterpret_cast<T>(other.rep_));
58
+ }
59
+ } else {
60
+ // !big => !big
61
+ rep_ = other.rep_;
62
+ }
63
+ return *this;
64
+ }
65
+
66
+ class iterator {
67
+ public:
68
+ typedef ssize_t difference_type;
69
+ typedef T value_type;
70
+ typedef const T* pointer;
71
+ typedef const T& reference;
72
+ typedef ::std::forward_iterator_tag iterator_category;
73
+
74
+ explicit iterator(uintptr_t rep)
75
+ : bigrep_(false), single_(reinterpret_cast<T>(rep)) {}
76
+ explicit iterator(typename BigRep::iterator iter)
77
+ : bigrep_(true), single_(nullptr), iter_(iter) {}
78
+
79
+ iterator& operator++() {
80
+ if (bigrep_) {
81
+ ++iter_;
82
+ } else {
83
+ DCHECK(single_ != nullptr);
84
+ single_ = nullptr;
85
+ }
86
+ return *this;
87
+ }
88
+ // maybe post-increment?
89
+
90
+ bool operator==(const iterator& other) const {
91
+ if (bigrep_) {
92
+ return iter_ == other.iter_;
93
+ } else {
94
+ return single_ == other.single_;
95
+ }
96
+ }
97
+ bool operator!=(const iterator& other) const { return !(*this == other); }
98
+
99
+ const T& operator*() const {
100
+ if (bigrep_) {
101
+ return *iter_;
102
+ } else {
103
+ DCHECK(single_ != nullptr);
104
+ return single_;
105
+ }
106
+ }
107
+
108
+ private:
109
+ friend class CompactPointerSet;
110
+ bool bigrep_;
111
+ T single_;
112
+ typename BigRep::iterator iter_;
113
+ };
114
+ using const_iterator = iterator;
115
+
116
+ bool empty() const { return isbig() ? big()->empty() : (rep_ == 0); }
117
+ size_t size() const { return isbig() ? big()->size() : (rep_ == 0 ? 0 : 1); }
118
+
119
+ void clear() {
120
+ if (isbig()) {
121
+ delete big();
122
+ }
123
+ rep_ = 0;
124
+ }
125
+
126
+ std::pair<iterator, bool> insert(T elem) {
127
+ if (!isbig()) {
128
+ if (rep_ == 0) {
129
+ uintptr_t v = reinterpret_cast<uintptr_t>(elem);
130
+ if (v == 0 || ((v & 0x3) != 0)) {
131
+ // Cannot use small representation for nullptr. Fall through.
132
+ } else {
133
+ rep_ = v;
134
+ return {iterator(v), true};
135
+ }
136
+ }
137
+ MakeBig();
138
+ }
139
+ auto p = big()->insert(elem);
140
+ return {iterator(p.first), p.second};
141
+ }
142
+
143
+ template <typename InputIter>
144
+ void insert(InputIter begin, InputIter end) {
145
+ for (; begin != end; ++begin) {
146
+ insert(*begin);
147
+ }
148
+ }
149
+
150
+ const_iterator begin() const {
151
+ return isbig() ? iterator(big()->begin()) : iterator(rep_);
152
+ }
153
+ const_iterator end() const {
154
+ return isbig() ? iterator(big()->end()) : iterator(0);
155
+ }
156
+
157
+ iterator find(T elem) const {
158
+ if (rep_ == reinterpret_cast<uintptr_t>(elem)) {
159
+ return iterator(rep_);
160
+ } else if (!isbig()) {
161
+ return iterator(0);
162
+ } else {
163
+ return iterator(big()->find(elem));
164
+ }
165
+ }
166
+
167
+ size_t count(T elem) const { return find(elem) != end() ? 1 : 0; }
168
+
169
+ size_t erase(T elem) {
170
+ if (!isbig()) {
171
+ if (rep_ == reinterpret_cast<uintptr_t>(elem)) {
172
+ rep_ = 0;
173
+ return 1;
174
+ } else {
175
+ return 0;
176
+ }
177
+ } else {
178
+ return big()->erase(elem);
179
+ }
180
+ }
181
+
182
+ private:
183
+ // Size rep_
184
+ // -------------------------------------------------------------------------
185
+ // 0 0
186
+ // 1 The pointer itself (bottom bits == 00)
187
+ // large Pointer to a BigRep (bottom bits == 01)
188
+ uintptr_t rep_;
189
+
190
+ bool isbig() const { return (rep_ & 0x3) == 1; }
191
+ BigRep* big() const {
192
+ DCHECK(isbig());
193
+ return reinterpret_cast<BigRep*>(rep_ - 1);
194
+ }
195
+
196
+ void MakeBig() {
197
+ DCHECK(!isbig());
198
+ BigRep* big = new BigRep;
199
+ if (rep_ != 0) {
200
+ big->insert(reinterpret_cast<T>(rep_));
201
+ }
202
+ rep_ = reinterpret_cast<uintptr_t>(big) + 0x1;
203
+ }
204
+ };
205
+
206
+ } // namespace gtl
207
+ } // namespace tsl
208
+
209
+ #endif // TENSORFLOW_TSL_LIB_GTL_COMPACTPTRSET_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/lib/math/math_util.h ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_LIB_MATH_MATH_UTIL_H_
17
+ #define TENSORFLOW_TSL_LIB_MATH_MATH_UTIL_H_
18
+
19
+ #include <type_traits>
20
+
21
+ #include "absl/base/macros.h"
22
+
23
+ namespace tsl {
24
+
25
+ class MathUtil {
26
+ public:
27
+ // ----------------------------------------------------------------------
28
+ // CeilOfRatio<IntegralType>
29
+ // FloorOfRatio<IntegralType>
30
+ // Returns the ceil (resp. floor) of the ratio of two integers.
31
+ //
32
+ // * IntegralType: any integral type, whether signed or not.
33
+ // * numerator: any integer: positive, negative, or zero.
34
+ // * denominator: a non-zero integer, positive or negative.
35
+ //
36
+ // This implementation is correct, meaning there is never any precision loss,
37
+ // and there is never an overflow. However, if the type is signed, having
38
+ // numerator == MathLimits<IntegralType>::kMin and denominator == -1 is not a
39
+ // valid input, because kMin has a greater absolute value than kMax.
40
+ //
41
+ // Input validity is DCHECKed. When not in debug mode, invalid inputs raise
42
+ // SIGFPE.
43
+ //
44
+ // This method has been designed and tested so that it should always be
45
+ // preferred to alternatives. Indeed, there exist popular recipes to compute
46
+ // the result, such as casting to double, but they are in general incorrect.
47
+ // In cases where an alternative technique is correct, performance measurement
48
+ // showed the provided implementation is faster.
49
+ template <typename IntegralType>
50
+ static constexpr IntegralType CeilOfRatio(IntegralType numerator,
51
+ IntegralType denominator) {
52
+ return CeilOrFloorOfRatio<IntegralType, true>(numerator, denominator);
53
+ }
54
+ template <typename IntegralType>
55
+ static constexpr IntegralType FloorOfRatio(IntegralType numerator,
56
+ IntegralType denominator) {
57
+ return CeilOrFloorOfRatio<IntegralType, false>(numerator, denominator);
58
+ }
59
+
60
+ template <typename IntegralType, bool ceil>
61
+ static constexpr IntegralType CeilOrFloorOfRatio(IntegralType numerator,
62
+ IntegralType denominator);
63
+
64
+ template <typename IntegralType>
65
+ static constexpr IntegralType GCD(IntegralType x, IntegralType y);
66
+
67
+ // ----------------------------------------------------------------------
68
+ // IPow<T>
69
+ // Computes the result of raising a number to a non-negative integral power.
70
+ //
71
+ // * T: An integral type, floating-point type, or user-defined type for which
72
+ // operator*= is defined.
73
+ // * base: the base "v" of the operation
74
+ // * exp: the exponent "i" of the operation; must be non-negative.
75
+ //
76
+ // Computes v^i, in a way that is faster than std::pow (which supports
77
+ // arbitrary real exponents).
78
+ //
79
+ // When T is a floating point type, this has the same semantics as std::pow,
80
+ // but it is much faster. When T is an integral type, computations are
81
+ // performed in the value domain of T, and overflow semantics are those of T.
82
+ //
83
+ // Input validity is DCHECKed.
84
+ template <typename T>
85
+ static constexpr T IPow(T base, int exp);
86
+ };
87
+
88
+ // ---- CeilOrFloorOfRatio ----
89
+ // This is a branching-free, cast-to-double-free implementation.
90
+ //
91
+ // Casting to double is in general incorrect because of loss of precision
92
+ // when casting an int64 into a double.
93
+ //
94
+ // There's a bunch of 'recipes' to compute a integer ceil (or floor) on the web,
95
+ // and most of them are incorrect.
96
+ template <typename IntegralType, bool ceil>
97
+ constexpr IntegralType MathUtil::CeilOrFloorOfRatio(IntegralType numerator,
98
+ IntegralType denominator) {
99
+ ABSL_ASSERT(denominator != 0);
100
+
101
+ const IntegralType rounded_toward_zero = numerator / denominator;
102
+ const IntegralType intermediate_product = rounded_toward_zero * denominator;
103
+
104
+ if (ceil) { // Compile-time condition: not an actual branching
105
+ // When rounded_toward_zero is negative, then an adjustment is never needed:
106
+ // the real ratio is negative, and so rounded toward zero is the ceil.
107
+ // When rounded_toward_zero is non-negative, an adjustment is needed if the
108
+ // sign of the difference numerator - intermediate_product is the same as
109
+ // the sign of the denominator.
110
+ //
111
+ //
112
+ // Using a bool and then a static_cast to IntegralType is not strictly
113
+ // necessary, but it makes the code clear, and anyway the compiler should
114
+ // get rid of it.
115
+ const bool needs_adjustment =
116
+ (rounded_toward_zero >= 0) &&
117
+ ((denominator > 0 && numerator > intermediate_product) ||
118
+ (denominator < 0 && numerator < intermediate_product));
119
+ const IntegralType adjustment = static_cast<IntegralType>(needs_adjustment);
120
+ const IntegralType ceil_of_ratio = rounded_toward_zero + adjustment;
121
+ return ceil_of_ratio;
122
+ } else {
123
+ // Floor case: symmetrical to the previous one
124
+ const bool needs_adjustment =
125
+ (rounded_toward_zero <= 0) &&
126
+ ((denominator > 0 && numerator < intermediate_product) ||
127
+ (denominator < 0 && numerator > intermediate_product));
128
+ const IntegralType adjustment = static_cast<IntegralType>(needs_adjustment);
129
+ const IntegralType floor_of_ratio = rounded_toward_zero - adjustment;
130
+ return floor_of_ratio;
131
+ }
132
+ }
133
+
134
+ template <typename IntegralType>
135
+ constexpr IntegralType MathUtil::GCD(IntegralType x, IntegralType y) {
136
+ static_assert(std::is_unsigned_v<IntegralType>, "signed GCD not supported!");
137
+ while (y != 0) {
138
+ IntegralType r = x % y;
139
+ x = y;
140
+ y = r;
141
+ }
142
+ return x;
143
+ }
144
+
145
+ // ---- IPow ----
146
+ // Implemented with the squared exponentiation method (a.k.a. double-and-add).
147
+ //
148
+ // Note that "exp >>= 1" is faster than "exp /= 2" on at least one platform.
149
+ template <typename T>
150
+ constexpr T MathUtil::IPow(T base, int exp) {
151
+ ABSL_ASSERT(exp >= 0);
152
+ for (T result(1);; base *= base) {
153
+ if ((exp & 1) != 0) result *= base;
154
+ exp >>= 1;
155
+ if (exp == 0) return result;
156
+ }
157
+ }
158
+
159
+ } // namespace tsl
160
+
161
+ #endif // TENSORFLOW_TSL_LIB_MATH_MATH_UTIL_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/byte_order.h ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_BYTE_ORDER_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_BYTE_ORDER_H_
18
+
19
// Byte order defines provided by gcc. MSVC doesn't define those so
// we define them here.
// We assume that all windows platforms out there are little endian.
#if defined(_MSC_VER) && !defined(__clang__)
// Arbitrary distinct sentinel values (decimal 1234 and 4321); only the
// equality comparison below matters, not the particular numbers.
#define __ORDER_LITTLE_ENDIAN__ 0x4d2
#define __ORDER_BIG_ENDIAN__ 0x10e1
#define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__
#endif

namespace tsl {
namespace port {

// Compile-time flag: true iff the compilation target is little-endian,
// as reported by the compiler's __BYTE_ORDER__ (or the MSVC stand-in above).
// TODO(jeff,sanjay): Make portable
constexpr bool kLittleEndian = __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__;

}  // namespace port
}  // namespace tsl
36
+
37
+ #endif // TENSORFLOW_TSL_PLATFORM_BYTE_ORDER_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/auth_provider.h ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_CLOUD_AUTH_PROVIDER_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_CLOUD_AUTH_PROVIDER_H_
18
+
19
+ #include <string>
20
+
21
+ #include "tsl/platform/errors.h"
22
+ #include "tsl/platform/status.h"
23
+
24
+ namespace tsl {
25
+
26
+ /// Interface for a provider of authentication bearer tokens.
27
+ class AuthProvider {
28
+ public:
29
+ virtual ~AuthProvider() {}
30
+
31
+ /// \brief Returns the short-term authentication bearer token.
32
+ ///
33
+ /// Safe for concurrent use by multiple threads.
34
+ virtual Status GetToken(string* t) = 0;
35
+
36
+ static Status GetToken(AuthProvider* provider, string* token) {
37
+ if (!provider) {
38
+ return errors::Internal("Auth provider is required.");
39
+ }
40
+ return provider->GetToken(token);
41
+ }
42
+ };
43
+
44
+ /// No-op auth provider, which will only work for public objects.
45
+ class EmptyAuthProvider : public AuthProvider {
46
+ public:
47
+ Status GetToken(string* token) override {
48
+ *token = "";
49
+ return OkStatus();
50
+ }
51
+ };
52
+
53
+ } // namespace tsl
54
+
55
+ #endif // TENSORFLOW_TSL_PLATFORM_CLOUD_AUTH_PROVIDER_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/compute_engine_metadata_client.h ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_CLOUD_COMPUTE_ENGINE_METADATA_CLIENT_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_CLOUD_COMPUTE_ENGINE_METADATA_CLIENT_H_
18
+
19
+ #include "tsl/platform/cloud/http_request.h"
20
+ #include "tsl/platform/retrying_utils.h"
21
+ #include "tsl/platform/status.h"
22
+
23
+ namespace tsl {
24
+
25
/// \brief A client that accesses the metadata server running on GCE hosts.
///
/// Uses the provided HttpRequest::Factory to make requests to the local
/// metadata service
/// (https://cloud.google.com/compute/docs/storing-retrieving-metadata).
/// Retries on recoverable failures using exponential backoff with the initial
/// retry wait configurable via initial_retry_delay_usec.
class ComputeEngineMetadataClient {
 public:
  explicit ComputeEngineMetadataClient(
      std::shared_ptr<HttpRequest::Factory> http_request_factory,
      const RetryConfig& config = RetryConfig(
          10000, /* init_delay_time_us = 10 ms */
          1000000 /* max_delay_time_us = 1 s */
          ));
  virtual ~ComputeEngineMetadataClient() {}

  /// \brief Get the metadata value for a given attribute of the metadata
  /// service.
  ///
  /// Given a metadata path relative
  /// to http://metadata.google.internal/computeMetadata/v1/,
  /// fills response_buffer with the metadata. Returns OK if the server returns
  /// the response for the given metadata path successfully.
  ///
  /// Example usage:
  /// To get the zone of an instance:
  ///   compute_engine_metadata_client.GetMetadata(
  ///       "instance/zone", response_buffer);
  virtual Status GetMetadata(const string& path,
                             std::vector<char>* response_buffer);

 private:
  // Factory used to create one HTTP request per GetMetadata attempt.
  std::shared_ptr<HttpRequest::Factory> http_request_factory_;
  // Backoff parameters applied when retrying failed metadata requests.
  const RetryConfig retry_config_;

  // Not copyable or assignable.
  ComputeEngineMetadataClient(const ComputeEngineMetadataClient&) = delete;
  void operator=(const ComputeEngineMetadataClient&) = delete;
};
64
+
65
+ } // namespace tsl
66
+
67
+ #endif // TENSORFLOW_TSL_PLATFORM_CLOUD_COMPUTE_ENGINE_METADATA_CLIENT_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/compute_engine_zone_provider.h ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_CLOUD_COMPUTE_ENGINE_ZONE_PROVIDER_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_CLOUD_COMPUTE_ENGINE_ZONE_PROVIDER_H_
18
+
19
+ #include "tsl/platform/cloud/compute_engine_metadata_client.h"
20
+ #include "tsl/platform/cloud/zone_provider.h"
21
+
22
+ namespace tsl {
23
+
24
+ class ComputeEngineZoneProvider : public ZoneProvider {
25
+ public:
26
+ explicit ComputeEngineZoneProvider(
27
+ std::shared_ptr<ComputeEngineMetadataClient> google_metadata_client);
28
+ virtual ~ComputeEngineZoneProvider();
29
+
30
+ Status GetZone(string* zone) override;
31
+
32
+ private:
33
+ std::shared_ptr<ComputeEngineMetadataClient> google_metadata_client_;
34
+ string cached_zone;
35
+ ComputeEngineZoneProvider(const ComputeEngineZoneProvider&) = delete;
36
+ void operator=(const ComputeEngineZoneProvider&) = delete;
37
+ };
38
+
39
+ } // namespace tsl
40
+
41
+ #endif // TENSORFLOW_TSL_PLATFORM_CLOUD_COMPUTE_ENGINE_ZONE_PROVIDER_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/curl_http_request.h ADDED
@@ -0,0 +1,275 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_CLOUD_CURL_HTTP_REQUEST_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_CLOUD_CURL_HTTP_REQUEST_H_
18
+
19
+ #include <string>
20
+ #include <unordered_map>
21
+ #include <vector>
22
+
23
+ #include <curl/curl.h>
24
+ #include "tsl/platform/cloud/http_request.h"
25
+ #include "tsl/platform/env.h"
26
+ #include "tsl/platform/errors.h"
27
+ #include "tsl/platform/macros.h"
28
+ #include "tsl/platform/protobuf.h"
29
+ #include "tsl/platform/status.h"
30
+ #include "tsl/platform/stringpiece.h"
31
+ #include "tsl/platform/types.h"
32
+
33
+ namespace tsl {
34
+
35
+ class LibCurl; // libcurl interface as a class, for dependency injection.
36
+
37
+ /// \brief A basic HTTP client based on the libcurl library.
38
+ ///
39
+ /// The usage pattern for the class reflects the one of the libcurl library:
40
+ /// create a request object, set request parameters and call Send().
41
+ ///
42
+ /// For example:
43
+ /// std::unique_ptr<HttpRequest> request(http_request_factory->Create());
44
+ /// request->SetUri("http://www.google.com");
45
+ /// request->SetResultsBuffer(out_buffer);
46
+ /// request->Send();
47
+ class CurlHttpRequest : public HttpRequest {
48
+ public:
49
+ class Factory : public HttpRequest::Factory {
50
+ public:
51
+ virtual ~Factory() {}
52
+ virtual HttpRequest* Create() { return new CurlHttpRequest(); }
53
+ };
54
+
55
+ CurlHttpRequest();
56
+ explicit CurlHttpRequest(LibCurl* libcurl)
57
+ : CurlHttpRequest(libcurl, Env::Default()) {}
58
+ CurlHttpRequest(LibCurl* libcurl, Env* env);
59
+ ~CurlHttpRequest() override;
60
+
61
+ /// Sets the request URI.
62
+ void SetUri(const string& uri) override;
63
+
64
+ /// \brief Sets the Range header.
65
+ ///
66
+ /// Used for random seeks, for example "0-999" returns the first 1000 bytes
67
+ /// (note that the right border is included).
68
+ void SetRange(uint64 start, uint64 end) override;
69
+
70
+ /// Sets a request header.
71
+ void AddHeader(const string& name, const string& value) override;
72
+
73
+ void AddResolveOverride(const string& hostname, int64_t port,
74
+ const string& ip_addr) override;
75
+
76
+ /// Sets the 'Authorization' header to the value of 'Bearer ' + auth_token.
77
+ void AddAuthBearerHeader(const string& auth_token) override;
78
+
79
+ void SetRequestStats(RequestStats* stats) override;
80
+
81
+ /// Makes the request a DELETE request.
82
+ void SetDeleteRequest() override;
83
+
84
+ /// \brief Makes the request a PUT request.
85
+ ///
86
+ /// The request body will be taken from the specified file starting from
87
+ /// the given offset.
88
+ Status SetPutFromFile(const string& body_filepath, size_t offset) override;
89
+
90
+ /// Makes the request a PUT request with an empty body.
91
+ void SetPutEmptyBody() override;
92
+
93
+ /// \brief Makes the request a POST request.
94
+ ///
95
+ /// The request body will be taken from the specified buffer.
96
+ void SetPostFromBuffer(const char* buffer, size_t size) override;
97
+
98
+ /// Makes the request a POST request with an empty body.
99
+ void SetPostEmptyBody() override;
100
+
101
+ /// \brief Specifies the buffer for receiving the response body.
102
+ ///
103
+ /// Size of out_buffer after an access will be exactly the number of bytes
104
+ /// read. Existing content of the vector will be cleared.
105
+ void SetResultBuffer(std::vector<char>* out_buffer) override;
106
+
107
+ /// \brief Specifies the buffer for receiving the response body, when the
108
+ /// caller knows the maximum size of the response body.
109
+ ///
110
+ /// This method allows the caller to receive the response body without an
111
+ /// additional intermediate buffer allocation and copy. This method should
112
+ /// be called before calling Send(). After Send() has succeeded, the caller
113
+ /// should use the GetResultBufferDirectBytesTransferred() method in order
114
+ /// to learn how many bytes were transferred.
115
+ ///
116
+ /// Using this method is mutually exclusive with using SetResultBuffer().
117
+ void SetResultBufferDirect(char* buffer, size_t size) override;
118
+
119
+ /// \brief Distinguish response type (direct vs. implicit).
120
+ bool IsDirectResponse() const;
121
+
122
+ /// \brief Returns the number of bytes (of the response body) that were
123
+ /// transferred, when using the SetResultBufferDirect() method. The returned
124
+ /// value will always be less than or equal to the 'size' parameter that
125
+ /// was passed to SetResultBufferDirect(). If the actual HTTP response body
126
+ /// was greater than 'size' bytes, then this transfer method will only copy
127
+ /// the first 'size' bytes, and the rest will be ignored.
128
+ size_t GetResultBufferDirectBytesTransferred() override;
129
+
130
+ /// \brief Returns the response headers of a completed request.
131
+ ///
132
+ /// If the header is not found, returns an empty string.
133
+ string GetResponseHeader(const string& name) const override;
134
+
135
+ /// Returns the response code of a completed request.
136
+ uint64 GetResponseCode() const override;
137
+
138
+ /// \brief Sends the formed request.
139
+ ///
140
+ /// If the result buffer was defined, the response will be written there.
141
+ /// The object is not designed to be re-used after Send() is executed.
142
+ Status Send() override;
143
+
144
+ // Url encodes str and returns a new string.
145
+ string EscapeString(const string& str) override;
146
+
147
+ void SetTimeouts(uint32 connection, uint32 inactivity, uint32 total) override;
148
+
149
+ private:
150
+ /// A write callback in the form which can be accepted by libcurl.
151
+ static size_t WriteCallback(const void* ptr, size_t size, size_t nmemb,
152
+ void* userdata);
153
+
154
+ /// Processes response body content received when using SetResultBufferDirect.
155
+ static size_t WriteCallbackDirect(const void* ptr, size_t size, size_t nmemb,
156
+ void* userdata);
157
+ /// A read callback in the form which can be accepted by libcurl.
158
+ static size_t ReadCallback(void* ptr, size_t size, size_t nmemb,
159
+ FILE* userdata);
160
+ /// A header callback in the form which can be accepted by libcurl.
161
+ static size_t HeaderCallback(const void* ptr, size_t size, size_t nmemb,
162
+ void* this_object);
163
+ /// A progress meter callback in the form which can be accepted by libcurl.
164
+ static int ProgressCallback(void* this_object, curl_off_t dltotal,
165
+ curl_off_t dlnow, curl_off_t ultotal,
166
+ curl_off_t ulnow);
167
+ void CheckMethodNotSet() const;
168
+ void CheckNotSent() const;
169
+ StringPiece GetResponse() const;
170
+
171
+ /// Helper to convert the given CURLcode and error buffer, representing the
172
+ /// result of performing a transfer, into a Status with an error message.
173
+ Status CURLcodeToStatus(CURLcode code, const char* error_buffer);
174
+
175
+ LibCurl* libcurl_;
176
+ Env* env_;
177
+
178
+ FILE* put_body_ = nullptr;
179
+
180
+ StringPiece post_body_buffer_;
181
+ size_t post_body_read_ = 0;
182
+
183
+ std::vector<char>* response_buffer_ = nullptr;
184
+
185
+ struct DirectResponseState {
186
+ char* buffer_;
187
+ size_t buffer_size_;
188
+ size_t bytes_transferred_;
189
+ size_t bytes_received_;
190
+ };
191
+ DirectResponseState direct_response_ = {};
192
+
193
+ CURL* curl_ = nullptr;
194
+ curl_slist* curl_headers_ = nullptr;
195
+ curl_slist* resolve_list_ = nullptr;
196
+
197
+ RequestStats* stats_ = nullptr;
198
+
199
+ std::vector<char> default_response_buffer_;
200
+
201
+ std::unordered_map<string, string> response_headers_;
202
+ uint64 response_code_ = 0;
203
+
204
+ // The timestamp of the last activity related to the request execution, in
205
+ // seconds since epoch.
206
+ uint64 last_progress_timestamp_ = 0;
207
+ // The last progress in terms of bytes transmitted.
208
+ curl_off_t last_progress_bytes_ = 0;
209
+
210
+ // The maximum period of request inactivity.
211
+ uint32 inactivity_timeout_secs_ = 60; // 1 minute
212
+
213
+ // Timeout for the connection phase.
214
+ uint32 connect_timeout_secs_ = 120; // 2 minutes
215
+
216
+ // Timeout for the whole request. Set only to prevent hanging indefinitely.
217
+ uint32 request_timeout_secs_ = 3600; // 1 hour
218
+
219
+ // Members to enforce the usage flow.
220
+ bool is_uri_set_ = false;
221
+ bool is_method_set_ = false;
222
+ bool is_sent_ = false;
223
+
224
+ // Store the URI to help disambiguate requests when errors occur.
225
+ string uri_;
226
+ RequestMethod method_ = RequestMethod::kGet;
227
+
228
+ // Limit the size of an http response that is copied into an error message.
229
+ const size_t response_to_error_limit_ = 500;
230
+
231
+ CurlHttpRequest(const CurlHttpRequest&) = delete;
232
+ void operator=(const CurlHttpRequest&) = delete;
233
+ };
234
+
235
+ /// \brief A proxy to the libcurl C interface as a dependency injection measure.
236
+ ///
237
+ /// This class is meant as a very thin wrapper for the libcurl C library.
238
+ class LibCurl {
239
+ public:
240
+ virtual ~LibCurl() {}
241
+
242
+ virtual CURL* curl_easy_init() = 0;
243
+ virtual CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
244
+ uint64 param) TF_MUST_USE_RESULT = 0;
245
+ virtual CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
246
+ const char* param) TF_MUST_USE_RESULT = 0;
247
+ virtual CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
248
+ void* param) TF_MUST_USE_RESULT = 0;
249
+ virtual CURLcode curl_easy_setopt(
250
+ CURL* curl, CURLoption option,
251
+ size_t (*param)(void*, size_t, size_t, FILE*)) TF_MUST_USE_RESULT = 0;
252
+ virtual CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
253
+ size_t (*param)(const void*, size_t, size_t,
254
+ void*))
255
+ TF_MUST_USE_RESULT = 0;
256
+ virtual CURLcode curl_easy_setopt(
257
+ CURL* curl, CURLoption option,
258
+ int (*param)(void* clientp, curl_off_t dltotal, curl_off_t dlnow,
259
+ curl_off_t ultotal,
260
+ curl_off_t ulnow)) TF_MUST_USE_RESULT = 0;
261
+ virtual CURLcode curl_easy_perform(CURL* curl) TF_MUST_USE_RESULT = 0;
262
+ virtual CURLcode curl_easy_getinfo(CURL* curl, CURLINFO info,
263
+ uint64* value) TF_MUST_USE_RESULT = 0;
264
+ virtual CURLcode curl_easy_getinfo(CURL* curl, CURLINFO info,
265
+ double* value) TF_MUST_USE_RESULT = 0;
266
+ virtual void curl_easy_cleanup(CURL* curl) = 0;
267
+ virtual curl_slist* curl_slist_append(curl_slist* list, const char* str) = 0;
268
+ virtual void curl_slist_free_all(curl_slist* list) = 0;
269
+ virtual char* curl_easy_escape(CURL* curl, const char* str, int length) = 0;
270
+ virtual void curl_free(void* p) = 0;
271
+ };
272
+
273
+ } // namespace tsl
274
+
275
+ #endif // TENSORFLOW_TSL_PLATFORM_CLOUD_CURL_HTTP_REQUEST_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/expiring_lru_cache.h ADDED
@@ -0,0 +1,188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_CLOUD_EXPIRING_LRU_CACHE_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_CLOUD_EXPIRING_LRU_CACHE_H_
18
+
19
+ #include <list>
20
+ #include <map>
21
+ #include <memory>
22
+ #include <string>
23
+
24
+ #include "tsl/platform/env.h"
25
+ #include "tsl/platform/mutex.h"
26
+ #include "tsl/platform/thread_annotations.h"
27
+ #include "tsl/platform/types.h"
28
+
29
+ namespace tsl {
30
+
31
+ /// \brief An LRU cache of string keys and arbitrary values, with configurable
32
+ /// max item age (in seconds) and max entries.
33
+ ///
34
+ /// This class is thread safe.
35
+ template <typename T>
36
+ class ExpiringLRUCache {
37
+ public:
38
+ /// A `max_age` of 0 means that nothing is cached. A `max_entries` of 0 means
39
+ /// that there is no limit on the number of entries in the cache (however, if
40
+ /// `max_age` is also 0, the cache will not be populated).
41
+ ExpiringLRUCache(uint64 max_age, size_t max_entries,
42
+ Env* env = Env::Default())
43
+ : max_age_(max_age), max_entries_(max_entries), env_(env) {}
44
+
45
+ /// Insert `value` with key `key`. This will replace any previous entry with
46
+ /// the same key.
47
+ void Insert(const string& key, const T& value) {
48
+ if (max_age_ == 0) {
49
+ return;
50
+ }
51
+ mutex_lock lock(mu_);
52
+ InsertLocked(key, value);
53
+ }
54
+
55
+ // Delete the entry with key `key`. Return true if the entry was found for
56
+ // `key`, false if the entry was not found. In both cases, there is no entry
57
+ // with key `key` existed after the call.
58
+ bool Delete(const string& key) {
59
+ mutex_lock lock(mu_);
60
+ return DeleteLocked(key);
61
+ }
62
+
63
+ /// Look up the entry with key `key` and copy it to `value` if found. Returns
64
+ /// true if an entry was found for `key`, and its timestamp is not more than
65
+ /// max_age_ seconds in the past.
66
+ bool Lookup(const string& key, T* value) {
67
+ if (max_age_ == 0) {
68
+ return false;
69
+ }
70
+ mutex_lock lock(mu_);
71
+ return LookupLocked(key, value);
72
+ }
73
+
74
+ typedef std::function<Status(const string&, T*)> ComputeFunc;
75
+
76
+ /// Look up the entry with key `key` and copy it to `value` if found. If not
77
+ /// found, call `compute_func`. If `compute_func` returns successfully, store
78
+ /// a copy of the output parameter in the cache, and another copy in `value`.
79
+ Status LookupOrCompute(const string& key, T* value,
80
+ const ComputeFunc& compute_func) {
81
+ if (max_age_ == 0) {
82
+ return compute_func(key, value);
83
+ }
84
+
85
+ // Note: we hold onto mu_ for the rest of this function. In practice, this
86
+ // is okay, as stat requests are typically fast, and concurrent requests are
87
+ // often for the same file. Future work can split this up into one lock per
88
+ // key if this proves to be a significant performance bottleneck.
89
+ mutex_lock lock(mu_);
90
+ if (LookupLocked(key, value)) {
91
+ return OkStatus();
92
+ }
93
+ Status s = compute_func(key, value);
94
+ if (s.ok()) {
95
+ InsertLocked(key, *value);
96
+ }
97
+ return s;
98
+ }
99
+
100
+ /// Clear the cache.
101
+ void Clear() {
102
+ mutex_lock lock(mu_);
103
+ cache_.clear();
104
+ lru_list_.clear();
105
+ }
106
+
107
+ /// Accessors for cache parameters.
108
+ uint64 max_age() const { return max_age_; }
109
+ size_t max_entries() const { return max_entries_; }
110
+
111
+ private:
112
+ struct Entry {
113
+ /// The timestamp (seconds) at which the entry was added to the cache.
114
+ uint64 timestamp;
115
+
116
+ /// The entry's value.
117
+ T value;
118
+
119
+ /// A list iterator pointing to the entry's position in the LRU list.
120
+ std::list<string>::iterator lru_iterator;
121
+ };
122
+
123
+ bool LookupLocked(const string& key, T* value)
124
+ TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
125
+ auto it = cache_.find(key);
126
+ if (it == cache_.end()) {
127
+ return false;
128
+ }
129
+ lru_list_.erase(it->second.lru_iterator);
130
+ if (env_->NowSeconds() - it->second.timestamp > max_age_) {
131
+ cache_.erase(it);
132
+ return false;
133
+ }
134
+ *value = it->second.value;
135
+ lru_list_.push_front(it->first);
136
+ it->second.lru_iterator = lru_list_.begin();
137
+ return true;
138
+ }
139
+
140
+ void InsertLocked(const string& key, const T& value)
141
+ TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
142
+ lru_list_.push_front(key);
143
+ Entry entry{env_->NowSeconds(), value, lru_list_.begin()};
144
+ auto insert = cache_.insert(std::make_pair(key, entry));
145
+ if (!insert.second) {
146
+ lru_list_.erase(insert.first->second.lru_iterator);
147
+ insert.first->second = entry;
148
+ } else if (max_entries_ > 0 && cache_.size() > max_entries_) {
149
+ cache_.erase(lru_list_.back());
150
+ lru_list_.pop_back();
151
+ }
152
+ }
153
+
154
+ bool DeleteLocked(const string& key) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
155
+ auto it = cache_.find(key);
156
+ if (it == cache_.end()) {
157
+ return false;
158
+ }
159
+ lru_list_.erase(it->second.lru_iterator);
160
+ cache_.erase(it);
161
+ return true;
162
+ }
163
+
164
+ /// The maximum age of entries in the cache, in seconds. A value of 0 means
165
+ /// that no entry is ever placed in the cache.
166
+ const uint64 max_age_;
167
+
168
+ /// The maximum number of entries in the cache. A value of 0 means there is no
169
+ /// limit on entry count.
170
+ const size_t max_entries_;
171
+
172
+ /// The Env from which we read timestamps.
173
+ Env* const env_; // not owned
174
+
175
+ /// Guards access to the cache and the LRU list.
176
+ mutex mu_;
177
+
178
+ /// The cache (a map from string key to Entry).
179
+ std::map<string, Entry> cache_ TF_GUARDED_BY(mu_);
180
+
181
+ /// The LRU list of entries. The front of the list identifies the most
182
+ /// recently accessed entry.
183
+ std::list<string> lru_list_ TF_GUARDED_BY(mu_);
184
+ };
185
+
186
+ } // namespace tsl
187
+
188
+ #endif // TENSORFLOW_TSL_PLATFORM_CLOUD_EXPIRING_LRU_CACHE_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/file_block_cache.h ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_CLOUD_FILE_BLOCK_CACHE_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_CLOUD_FILE_BLOCK_CACHE_H_
18
+
19
+ #include <functional>
20
+ #include <list>
21
+ #include <map>
22
+ #include <memory>
23
+ #include <string>
24
+ #include <vector>
25
+
26
+ #include "tsl/platform/env.h"
27
+ #include "tsl/platform/mutex.h"
28
+ #include "tsl/platform/notification.h"
29
+ #include "tsl/platform/status.h"
30
+ #include "tsl/platform/stringpiece.h"
31
+ #include "tsl/platform/thread_annotations.h"
32
+ #include "tsl/platform/types.h"
33
+
34
+ namespace tsl {
35
+
36
+ class FileBlockCache;
37
+
38
+ /// FileBlockCacheStatsInterface allows for instrumentation of the block cache.
39
+ ///
40
+ /// FileBlockCacheStatsInterface and its subclasses must be safe to use from
41
+ /// multiple threads concurrently.
42
+ ///
43
+ /// WARNING! This is an experimental interface that may change or go away at any
44
+ /// time.
45
+ class FileBlockCacheStatsInterface {
46
+ public:
47
+ /// Configure is called to provide instrumentation hooks.
48
+ ///
49
+ /// Note: Configure can be called multiple times (e.g. if the block cache is
50
+ /// re-initialized).
51
+ virtual void Configure(const FileBlockCache* block_cache) = 0;
52
+
53
+ /// RecordBlockLoadRequest is called to record the size of a hit block.
54
+ virtual void RecordCacheHitBlockSize(size_t bytes_transferred) = 0;
55
+
56
+ /// RecordBlockLoadRequest is called to record the size of a missed block.
57
+ virtual void RecordCacheMissBlockSize(size_t bytes_transferred) = 0;
58
+
59
+ virtual ~FileBlockCacheStatsInterface() = default;
60
+ };
61
+
62
+ /// \brief A block cache of file contents, keyed by {filename, offset}.
63
+ ///
64
+ /// This class should be shared by read-only random access files on a remote
65
+ /// filesystem (e.g. GCS).
66
+ class FileBlockCache {
67
+ public:
68
+ /// The callback executed when a block is not found in the cache, and needs to
69
+ /// be fetched from the backing filesystem. This callback is provided when the
70
+ /// cache is constructed. The returned Status should be OK as long as the
71
+ /// read from the remote filesystem succeeded (similar to the semantics of the
72
+ /// read(2) system call).
73
+ typedef std::function<Status(const string& filename, size_t offset,
74
+ size_t buffer_size, char* buffer,
75
+ size_t* bytes_transferred)>
76
+ BlockFetcher;
77
+
78
+ virtual ~FileBlockCache() {}
79
+
80
+ /// Read `n` bytes from `filename` starting at `offset` into `out`. This
81
+ /// method will return:
82
+ ///
83
+ /// 1) The error from the remote filesystem, if the read from the remote
84
+ /// filesystem failed.
85
+ /// 2) PRECONDITION_FAILED if the read from the remote filesystem succeeded,
86
+ /// but the read returned a partial block, and the LRU cache contained a
87
+ /// block at a higher offset (indicating that the partial block should have
88
+ /// been a full block).
89
+ /// 3) OUT_OF_RANGE if the read from the remote filesystem succeeded, but
90
+ /// the file contents do not extend past `offset` and thus nothing was
91
+ /// placed in `out`.
92
+ /// 4) OK otherwise (i.e. the read succeeded, and at least one byte was placed
93
+ /// in `out`).
94
+ virtual Status Read(const string& filename, size_t offset, size_t n,
95
+ char* buffer, size_t* bytes_transferred) = 0;
96
+
97
+ // Validate the given file signature with the existing file signature in the
98
+ // cache. Returns true if the signature doesn't change or the file did not
99
+ // exist before. If the signature changes, update the existing signature with
100
+ // the new one and remove the file from cache.
101
+ virtual bool ValidateAndUpdateFileSignature(const string& filename,
102
+ int64_t file_signature) = 0;
103
+
104
+ /// Remove all cached blocks for `filename`.
105
+ virtual void RemoveFile(const string& filename) = 0;
106
+
107
+ /// Remove all cached data.
108
+ virtual void Flush() = 0;
109
+
110
+ /// Accessors for cache parameters.
111
+ virtual size_t block_size() const = 0;
112
+ virtual size_t max_bytes() const = 0;
113
+ virtual uint64 max_staleness() const = 0;
114
+
115
+ /// The current size (in bytes) of the cache.
116
+ virtual size_t CacheSize() const = 0;
117
+
118
+ // Returns true if the cache is enabled. If false, the BlockFetcher callback
119
+ // is always executed during Read.
120
+ virtual bool IsCacheEnabled() const = 0;
121
+
122
+ void SetStats(FileBlockCacheStatsInterface* stats) {
123
+ if (stats == nullptr) {
124
+ LOG(ERROR)
125
+ << "Attempted to monitor a NULL stats object. This may prevent the "
126
+ "corresponding monitoring data from being exported";
127
+ return;
128
+ }
129
+ cache_stats_ = stats;
130
+ cache_stats_->Configure(this);
131
+ }
132
+
133
+ protected:
134
+ FileBlockCacheStatsInterface* cache_stats_ = nullptr; // Not owned.
135
+ };
136
+
137
+ } // namespace tsl
138
+
139
+ #endif // TENSORFLOW_TSL_PLATFORM_CLOUD_FILE_BLOCK_CACHE_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/gcs_dns_cache.h ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_CLOUD_GCS_DNS_CACHE_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_CLOUD_GCS_DNS_CACHE_H_
18
+
19
+ #include <random>
20
+
21
+ #include "tsl/platform/cloud/http_request.h"
22
+ #include "tsl/platform/env.h"
23
+
24
+ namespace tsl {
25
+ const int64_t kDefaultRefreshRateSecs = 60;
26
+
27
+ // DnsCache is a userspace DNS cache specialized for the GCS filesystem.
28
+ //
29
+ // Some environments have unreliable DNS resolvers. DnsCache ameliorates the
30
+ // situation by radically reducing the number of DNS requests by performing
31
+ // 2 DNS queries per minute (by default) on a background thread. Updated cache
32
+ // entries are used to override curl's DNS resolution processes.
33
+ class GcsDnsCache {
34
+ public:
35
+ // Default no-argument constructor.
36
+ GcsDnsCache() : GcsDnsCache(kDefaultRefreshRateSecs) {}
37
+
38
+ // Constructs a GcsDnsCache with the specified refresh rate.
39
+ GcsDnsCache(int64_t refresh_rate_secs)
40
+ : GcsDnsCache(Env::Default(), refresh_rate_secs) {}
41
+
42
+ GcsDnsCache(Env* env, int64_t refresh_rate_secs);
43
+
44
+ ~GcsDnsCache() {
45
+ mutex_lock l(mu_);
46
+ cancelled_ = true;
47
+ cond_var_.notify_one();
48
+ }
49
+
50
+ // Annotate the given HttpRequest with resolve overrides from the cache.
51
+ void AnnotateRequest(HttpRequest* request);
52
+
53
+ private:
54
+ static std::vector<string> ResolveName(const string& name);
55
+ static std::vector<std::vector<string>> ResolveNames(
56
+ const std::vector<string>& names);
57
+ void WorkerThread();
58
+
59
+ // Define a friend class for testing.
60
+ friend class GcsDnsCacheTest;
61
+
62
+ mutex mu_;
63
+ Env* env_;
64
+ condition_variable cond_var_;
65
+ std::default_random_engine random_ TF_GUARDED_BY(mu_);
66
+ bool started_ TF_GUARDED_BY(mu_) = false;
67
+ bool cancelled_ TF_GUARDED_BY(mu_) = false;
68
+ std::unique_ptr<Thread> worker_ TF_GUARDED_BY(mu_); // After mutable vars.
69
+ const int64_t refresh_rate_secs_;
70
+
71
+ // Entries in this vector correspond to entries in kCachedDomainNames.
72
+ std::vector<std::vector<string>> addresses_ TF_GUARDED_BY(mu_);
73
+ };
74
+
75
+ } // namespace tsl
76
+
77
+ #endif // TENSORFLOW_TSL_PLATFORM_CLOUD_GCS_DNS_CACHE_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/gcs_file_system.h ADDED
@@ -0,0 +1,456 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_CLOUD_GCS_FILE_SYSTEM_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_CLOUD_GCS_FILE_SYSTEM_H_
18
+
19
+ #include <string>
20
+ #include <unordered_set>
21
+ #include <utility>
22
+ #include <vector>
23
+
24
+ #include "tsl/platform/cloud/auth_provider.h"
25
+ #include "tsl/platform/cloud/compute_engine_metadata_client.h"
26
+ #include "tsl/platform/cloud/compute_engine_zone_provider.h"
27
+ #include "tsl/platform/cloud/expiring_lru_cache.h"
28
+ #include "tsl/platform/cloud/file_block_cache.h"
29
+ #include "tsl/platform/cloud/gcs_dns_cache.h"
30
+ #include "tsl/platform/cloud/gcs_throttle.h"
31
+ #include "tsl/platform/cloud/http_request.h"
32
+ #include "tsl/platform/file_system.h"
33
+ #include "tsl/platform/retrying_file_system.h"
34
+ #include "tsl/platform/status.h"
35
+
36
+ namespace tsl {
37
+
38
class GcsFileSystem;  // Forward declaration; defined below.

// The environment variable that overrides the block size for aligned reads from
// GCS. Specified in MB (e.g. "16" = 16 x 1024 x 1024 = 16777216 bytes).
constexpr char kBlockSize[] = "GCS_READ_CACHE_BLOCK_SIZE_MB";
#if defined(LIBTPU_ON_GCE)
// Overwrite the default max block size for `libtpu` BUILDs which do not
// offer a mechanism to override the default through environment variable.
constexpr size_t kDefaultBlockSize = 512 * 1024 * 1024;
#else
// Default read block size: 64 MiB.
constexpr size_t kDefaultBlockSize = 64 * 1024 * 1024;
#endif
// The environment variable that overrides the max size of the LRU cache of
// blocks read from GCS. Specified in MB.
constexpr char kMaxCacheSize[] = "GCS_READ_CACHE_MAX_SIZE_MB";
#if defined(LIBTPU_ON_GCE)
// Overwrite the default max cache size for `libtpu` BUILDs which do not
// offer a mechanism to override the default through environment variable.
// 163840 MiB = 160 GiB.
constexpr size_t kDefaultMaxCacheSize = 163840LL * 1024LL * 1024LL;
#else
// NOTE(review): 0 appears to mean the read cache is disabled by default --
// confirm against gcs_file_system.cc.
constexpr size_t kDefaultMaxCacheSize = 0;
#endif
// The environment variable that overrides the maximum staleness of cached file
// contents. Once any block of a file reaches this staleness, all cached blocks
// will be evicted on the next read.
constexpr char kMaxStaleness[] = "GCS_READ_CACHE_MAX_STALENESS";
// NOTE(review): presumably seconds, with 0 disabling staleness-based
// eviction -- confirm against gcs_file_system.cc.
constexpr uint64 kDefaultMaxStaleness = 0;
65
+
66
+ // Helper function to extract an environment variable and convert it into a
67
+ // value of type T.
68
+ template <typename T>
69
+ bool GetEnvVar(const char* varname, bool (*convert)(StringPiece, T*),
70
+ T* value) {
71
+ const char* env_value = std::getenv(varname);
72
+ if (env_value == nullptr) {
73
+ return false;
74
+ }
75
+ return convert(env_value, value);
76
+ }
77
+
78
/// GcsStatsInterface allows for instrumentation of the GCS file system.
///
/// GcsStatsInterface and its subclasses must be safe to use from multiple
/// threads concurrently.
///
/// WARNING! This is an experimental interface that may change or go away at any
/// time.
class GcsStatsInterface {
 public:
  /// Configure is called by the GcsFileSystem to provide instrumentation hooks.
  ///
  /// Note: Configure can be called multiple times (e.g. if the block cache is
  /// re-initialized).
  virtual void Configure(GcsFileSystem* fs, GcsThrottle* throttle,
                         const FileBlockCache* block_cache) = 0;

  /// RecordBlockLoadRequest is called to record a block load request is about
  /// to be made.
  virtual void RecordBlockLoadRequest(const string& file, size_t offset) = 0;

  /// RecordBlockRetrieved is called once a block within the file has been
  /// retrieved.
  virtual void RecordBlockRetrieved(const string& file, size_t offset,
                                    size_t bytes_transferred) = 0;

  /// RecordStatObjectRequest is called once a statting object request over GCS
  /// is about to be made.
  virtual void RecordStatObjectRequest() = 0;

  /// HttpStats is called to optionally provide a RequestStats listener
  /// to be annotated on every HTTP request made to the GCS API.
  ///
  /// HttpStats() may return nullptr.
  virtual HttpRequest::RequestStats* HttpStats() = 0;

  virtual ~GcsStatsInterface() = default;
};
115
+
116
// Handle describing a GCS upload session (see
// GcsFileSystem::CreateNewUploadSession).
struct UploadSessionHandle {
  // URI identifying the upload session on the server side.
  std::string session_uri;
  // True if the session can be resumed after an interruption.
  bool resumable;
};
120
+
121
/// Google Cloud Storage implementation of a file system.
///
/// The clients should use RetryingGcsFileSystem defined below,
/// which adds retry logic to GCS operations.
class GcsFileSystem : public FileSystem {
 public:
  struct TimeoutConfig;

  // Main constructor used (via RetryingFileSystem) throughout Tensorflow
  explicit GcsFileSystem(bool make_default_cache = true);
  // Used mostly for unit testing or use cases which need to customize the
  // filesystem from defaults
  GcsFileSystem(std::unique_ptr<AuthProvider> auth_provider,
                std::unique_ptr<HttpRequest::Factory> http_request_factory,
                std::unique_ptr<ZoneProvider> zone_provider, size_t block_size,
                size_t max_bytes, uint64 max_staleness,
                uint64 stat_cache_max_age, size_t stat_cache_max_entries,
                uint64 matching_paths_cache_max_age,
                size_t matching_paths_cache_max_entries,
                RetryConfig retry_config, TimeoutConfig timeouts,
                const std::unordered_set<string>& allowed_locations,
                std::pair<const string, const string>* additional_header,
                bool compose_append);

  TF_USE_FILESYSTEM_METHODS_WITH_NO_TRANSACTION_SUPPORT;

  // FileSystem interface overrides. Paths are GCS paths of the form
  // "gs://bucket-name/path/to/object" (see ParseGcsPath below).

  Status NewRandomAccessFile(
      const string& fname, TransactionToken* token,
      std::unique_ptr<RandomAccessFile>* result) override;

  Status NewWritableFile(const string& fname, TransactionToken* token,
                         std::unique_ptr<WritableFile>* result) override;

  Status NewAppendableFile(const string& fname, TransactionToken* token,
                           std::unique_ptr<WritableFile>* result) override;

  Status NewReadOnlyMemoryRegionFromFile(
      const string& fname, TransactionToken* token,
      std::unique_ptr<ReadOnlyMemoryRegion>* result) override;

  Status FileExists(const string& fname, TransactionToken* token) override;

  Status Stat(const string& fname, TransactionToken* token,
              FileStatistics* stat) override;

  Status GetChildren(const string& dir, TransactionToken* token,
                     std::vector<string>* result) override;

  Status GetMatchingPaths(const string& pattern, TransactionToken* token,
                          std::vector<string>* results) override;

  Status DeleteFile(const string& fname, TransactionToken* token) override;

  Status CreateDir(const string& dirname, TransactionToken* token) override;

  Status DeleteDir(const string& dirname, TransactionToken* token) override;

  Status GetFileSize(const string& fname, TransactionToken* token,
                     uint64* file_size) override;

  Status RenameFile(const string& src, const string& target,
                    TransactionToken* token) override;

  Status IsDirectory(const string& fname, TransactionToken* token) override;

  Status DeleteRecursively(const string& dirname, TransactionToken* token,
                           int64_t* undeleted_files,
                           int64_t* undeleted_dirs) override;

  void FlushCaches(TransactionToken* token) override;

  /// Set an object to collect runtime statistics from the GcsFilesystem.
  void SetStats(GcsStatsInterface* stats);

  /// Set an object to collect file block cache stats.
  void SetCacheStats(FileBlockCacheStatsInterface* cache_stats);

  /// These accessors are mainly for testing purposes, to verify that the
  /// environment variables that control these parameters are handled correctly.
  size_t block_size() {
    tf_shared_lock l(block_cache_lock_);
    return file_block_cache_->block_size();
  }
  size_t max_bytes() {
    tf_shared_lock l(block_cache_lock_);
    return file_block_cache_->max_bytes();
  }
  uint64 max_staleness() {
    tf_shared_lock l(block_cache_lock_);
    return file_block_cache_->max_staleness();
  }
  TimeoutConfig timeouts() const { return timeouts_; }
  std::unordered_set<string> allowed_locations() const {
    return allowed_locations_;
  }

  bool compose_append() const { return compose_append_; }
  string additional_header_name() const {
    return additional_header_ ? additional_header_->first : "";
  }
  string additional_header_value() const {
    return additional_header_ ? additional_header_->second : "";
  }

  uint64 stat_cache_max_age() const { return stat_cache_->max_age(); }
  size_t stat_cache_max_entries() const { return stat_cache_->max_entries(); }

  uint64 matching_paths_cache_max_age() const {
    return matching_paths_cache_->max_age();
  }
  size_t matching_paths_cache_max_entries() const {
    return matching_paths_cache_->max_entries();
  }

  /// Structure containing the information for timeouts related to accessing the
  /// GCS APIs.
  ///
  /// All values are in seconds.
  struct TimeoutConfig {
    // The request connection timeout. If a connection cannot be established
    // within `connect` seconds, abort the request.
    uint32 connect = 120;  // 2 minutes

    // The request idle timeout. If a request has seen no activity in `idle`
    // seconds, abort the request.
    uint32 idle = 60;  // 1 minute

    // The maximum total time a metadata request can take. If a request has not
    // completed within `metadata` seconds, the request is aborted.
    uint32 metadata = 3600;  // 1 hour

    // The maximum total time a block read request can take. If a request has
    // not completed within `read` seconds, the request is aborted.
    uint32 read = 3600;  // 1 hour

    // The maximum total time an upload request can take. If a request has not
    // completed within `write` seconds, the request is aborted.
    uint32 write = 3600;  // 1 hour

    TimeoutConfig() {}
    TimeoutConfig(uint32 connect, uint32 idle, uint32 metadata, uint32 read,
                  uint32 write)
        : connect(connect),
          idle(idle),
          metadata(metadata),
          read(read),
          write(write) {}
  };

  // Creates an HttpRequest via http_request_factory_, ready to talk to GCS.
  Status CreateHttpRequest(std::unique_ptr<HttpRequest>* request);

  /// \brief Sets a new AuthProvider on the GCS FileSystem.
  ///
  /// The new auth provider will be used for all subsequent requests.
  void SetAuthProvider(std::unique_ptr<AuthProvider> auth_provider);

  /// \brief Resets the block cache and re-instantiates it with the new values.
  ///
  /// This method can be used to clear the existing block cache and/or to
  /// re-configure the block cache for different values.
  ///
  /// Note: the existing block cache is not cleaned up until all existing files
  /// have been closed.
  void ResetFileBlockCache(size_t block_size_bytes, size_t max_bytes,
                           uint64 max_staleness_secs);

 protected:
  // Creates the FileBlockCache used for reads; virtual so subclasses can
  // override the cache implementation.
  virtual std::unique_ptr<FileBlockCache> MakeFileBlockCache(
      size_t block_size, size_t max_bytes, uint64 max_staleness);

  /// Loads file contents from GCS for a given filename, offset, and length.
  virtual Status LoadBufferFromGCS(const string& fname, size_t offset, size_t n,
                                   char* buffer, size_t* bytes_transferred);

  // Creates an upload session for an upcoming GCS object upload.
  virtual Status CreateNewUploadSession(uint64 start_offset,
                                        const std::string& object_to_upload,
                                        const std::string& bucket,
                                        uint64 file_size,
                                        const std::string& gcs_path,
                                        UploadSessionHandle* session_handle);

  // Uploads object data to session.
  virtual Status UploadToSession(const std::string& session_uri,
                                 uint64 start_offset, uint64 already_uploaded,
                                 const std::string& tmp_content_filename,
                                 uint64 file_size,
                                 const std::string& file_path);

  /// \brief Requests status of a previously initiated upload session.
  ///
  /// If the upload has already succeeded, sets 'completed' to true.
  /// Otherwise sets 'completed' to false and 'uploaded' to the currently
  /// uploaded size in bytes.
  virtual Status RequestUploadSessionStatus(const string& session_uri,
                                            uint64 file_size,
                                            const std::string& gcs_path,
                                            bool* completed, uint64* uploaded);

  // Variant of ParseGcsPath (below) that takes the expected URI scheme as an
  // explicit argument.
  Status ParseGcsPathForScheme(StringPiece fname, string scheme,
                               bool empty_object_ok, string* bucket,
                               string* object);

  /// \brief Splits a GCS path to a bucket and an object.
  ///
  /// For example, "gs://bucket-name/path/to/file.txt" gets split into
  /// "bucket-name" and "path/to/file.txt".
  /// If fname only contains the bucket and empty_object_ok = true, the returned
  /// object is empty.
  virtual Status ParseGcsPath(StringPiece fname, bool empty_object_ok,
                              string* bucket, string* object);

  std::shared_ptr<ComputeEngineMetadataClient> compute_engine_metadata_client_;

  // Used by a subclass.
  TimeoutConfig timeouts_;

  /// The retry configuration used for retrying failed calls.
  RetryConfig retry_config_;

 private:
  // GCS file statistics.
  struct GcsFileStat {
    FileStatistics base;
    // GCS object generation number.
    int64_t generation_number = 0;
  };

  /// \brief Checks if the bucket exists. Returns OK if the check succeeded.
  ///
  /// 'result' is set if the function returns OK. 'result' cannot be nullptr.
  Status BucketExists(const string& bucket, bool* result);

  /// \brief Retrieves the GCS bucket location. Returns OK if the location was
  /// retrieved.
  ///
  /// Given a string bucket the GCS bucket metadata API will be called and the
  /// location string filled with the location of the bucket.
  ///
  /// This requires the bucket metadata permission.
  /// Repeated calls for the same bucket are cached so this function can be
  /// called frequently without causing an extra API call
  Status GetBucketLocation(const string& bucket, string* location);

  /// \brief Check if the GCS buckets location is allowed with the current
  /// constraint configuration
  Status CheckBucketLocationConstraint(const string& bucket);

  /// \brief Given the input bucket `bucket`, fills `result_buffer` with the
  /// results of the metadata. Returns OK if the API call succeeds without
  /// error.
  Status GetBucketMetadata(const string& bucket,
                           std::vector<char>* result_buffer);

  /// \brief Checks if the object exists. Returns OK if the check succeeded.
  ///
  /// 'result' is set if the function returns OK. 'result' cannot be nullptr.
  Status ObjectExists(const string& fname, const string& bucket,
                      const string& object, bool* result);

  /// \brief Checks if the folder exists. Returns OK if the check succeeded.
  ///
  /// 'result' is set if the function returns OK. 'result' cannot be nullptr.
  Status FolderExists(const string& dirname, bool* result);

  /// \brief Internal version of GetChildren with more knobs.
  ///
  /// If 'recursively' is true, returns all objects in all subfolders.
  /// Otherwise only returns the immediate children in the directory.
  ///
  /// If 'include_self_directory_marker' is true and there is a GCS directory
  /// marker at the path 'dir', GetChildrenBounded will return an empty string
  /// as one of the children that represents this marker.
  Status GetChildrenBounded(const string& dir, uint64 max_results,
                            std::vector<string>* result, bool recursively,
                            bool include_self_directory_marker);

  /// Retrieves file statistics assuming fname points to a GCS object. The data
  /// may be read from cache or from GCS directly.
  Status StatForObject(const string& fname, const string& bucket,
                       const string& object, GcsFileStat* stat);
  /// Retrieves file statistics of file fname directly from GCS.
  Status UncachedStatForObject(const string& fname, const string& bucket,
                               const string& object, GcsFileStat* stat);

  // Renames the object `src` to `target`.
  Status RenameObject(const string& src, const string& target);

  // Clear all the caches related to the file with name `fname`.
  void ClearFileCaches(const string& fname);

  mutex mu_;  // Protects auth_provider_.
  std::unique_ptr<AuthProvider> auth_provider_ TF_GUARDED_BY(mu_);
  std::shared_ptr<HttpRequest::Factory> http_request_factory_;
  std::unique_ptr<ZoneProvider> zone_provider_;

  // Reads smaller than block_size_ will trigger a read of block_size_.
  uint64 block_size_;

  // block_cache_lock_ protects the file_block_cache_ pointer (Note that
  // FileBlockCache instances are themselves threadsafe).
  mutex block_cache_lock_;
  std::unique_ptr<FileBlockCache> file_block_cache_
      TF_GUARDED_BY(block_cache_lock_);

  // NOTE(review): appears to record whether the read block cache is in use --
  // confirm the exact condition in gcs_file_system.cc.
  bool cache_enabled_;
  std::unique_ptr<GcsDnsCache> dns_cache_;
  GcsThrottle throttle_;

  using StatCache = ExpiringLRUCache<GcsFileStat>;
  std::unique_ptr<StatCache> stat_cache_;

  using MatchingPathsCache = ExpiringLRUCache<std::vector<string>>;
  std::unique_ptr<MatchingPathsCache> matching_paths_cache_;

  using BucketLocationCache = ExpiringLRUCache<string>;
  std::unique_ptr<BucketLocationCache> bucket_location_cache_;
  std::unordered_set<string> allowed_locations_;
  bool compose_append_;

  GcsStatsInterface* stats_ = nullptr;  // Not owned.

  // Additional header material to be transmitted with all GCS requests
  std::unique_ptr<std::pair<const string, const string>> additional_header_;

  GcsFileSystem(const GcsFileSystem&) = delete;
  void operator=(const GcsFileSystem&) = delete;
};
447
+
448
/// Google Cloud Storage implementation of a file system with retry on failures.
class RetryingGcsFileSystem : public RetryingFileSystem<GcsFileSystem> {
 public:
  // Constructs the retrying wrapper; see the .cc for how the underlying
  // GcsFileSystem and retry configuration are set up.
  RetryingGcsFileSystem();
};
453
+
454
+ } // namespace tsl
455
+
456
+ #endif // TENSORFLOW_TSL_PLATFORM_CLOUD_GCS_FILE_SYSTEM_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/gcs_throttle.h ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_CLOUD_GCS_THROTTLE_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_CLOUD_GCS_THROTTLE_H_
18
+
19
+ #include "tsl/platform/env.h"
20
+
21
+ namespace tsl {
22
+
23
/**
 * GcsThrottleConfig is used to configure the GcsThrottle.
 *
 * See GcsThrottle below for how tokens are spent: tokens_per_request per
 * call, plus roughly one token per KiB of response payload.
 */
struct GcsThrottleConfig {
  /**
   * enabled is true if GcsThrottle should throttle requests, false otherwise.
   */
  bool enabled = false;

  /**
   * token_rate is the number of tokens accrued every second that can be used
   * for making requests to the GCS service.
   */
  int64_t token_rate =
      100000;  // Approximately 800 MBits/second bandwidth-only.

  /**
   * bucket_size is the maximum number of available tokens the GcsThrottle can
   * accrue.
   */
  int64_t bucket_size = 10000000;  // 10 million tokens total

  /**
   * tokens_per_request determines the number of tokens consumed for every
   * request.
   *
   * Note: tokens are also consumed in proportion to the response size.
   */
  int64_t tokens_per_request = 100;

  /**
   * initial_tokens determines how many tokens should be available immediately
   * after the GcsThrottle is constructed.
   */
  int64_t initial_tokens = 0;
};
59
+
60
/**
 * GcsThrottle is used to ensure fair use of the available GCS capacity.
 *
 * GcsThrottle operates around a concept of tokens. Tokens are consumed when
 * making requests to the GCS service. Tokens are consumed both based on the
 * number of requests made, as well as the bandwidth consumed (response sizes).
 *
 * GcsThrottle is thread safe and can be used from multiple threads.
 */
class GcsThrottle {
 public:
  /**
   * Constructs a GcsThrottle.
   *
   * NOTE(review): when env_time is nullptr, a default time source is
   * presumably substituted by the constructor definition -- confirm in
   * gcs_throttle.cc.
   */
  explicit GcsThrottle(EnvTime* env_time = nullptr);

  /**
   * AdmitRequest updates the GcsThrottle to record a request will be made.
   *
   * AdmitRequest should be called before any request is made. AdmitRequest
   * returns false if the request should be denied. If AdmitRequest
   * returns false, no tokens are consumed. If true is returned, the configured
   * number of tokens are consumed.
   */
  bool AdmitRequest();

  /**
   * RecordResponse updates the GcsThrottle to record a request has been made.
   *
   * RecordResponse should be called after the response has been received.
   * RecordResponse will update the internal state based on the number of bytes
   * in the response.
   *
   * Note: we split up the request and the response in this fashion in order to
   * avoid penalizing consumers who are using large readahead buffers at higher
   * layers of the I/O stack.
   */
  void RecordResponse(size_t num_bytes);

  /**
   * SetConfig sets the configuration for GcsThrottle and re-initializes state.
   *
   * After calling this, the token pool will be config.initial_tokens.
   */
  void SetConfig(GcsThrottleConfig config);

  /**
   * available_tokens gives a snapshot of how many tokens are available.
   *
   * The returned value should not be used to make admission decisions. The
   * purpose of this function is to make available to monitoring or other
   * instrumentation the number of available tokens in the pool.
   */
  inline int64_t available_tokens() TF_LOCKS_EXCLUDED(mu_) {
    mutex_lock l(mu_);
    // Accrue tokens for the time elapsed since the last update before
    // reporting.
    UpdateState();
    return available_tokens_;
  }

  /**
   * is_enabled determines if the throttle is enabled.
   *
   * If !is_enabled(), AdmitRequest() will always return true. To enable the
   * throttle, call SetConfig passing in a configuration that has enabled set to
   * true.
   */
  bool is_enabled() TF_LOCKS_EXCLUDED(mu_) {
    mutex_lock l(mu_);
    return config_.enabled;
  }

 private:
  /**
   * UpdateState updates the available_tokens_ and last_updated_secs_ variables.
   *
   * UpdateState should be called in order to mark the passage of time, and
   * therefore add tokens to the available_tokens_ pool.
   */
  void UpdateState() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);

  /**
   * Converts a response size into its token cost: one token per KiB of
   * payload (num_bytes / 1024, rounded down).
   */
  inline uint64 request_bytes_to_tokens(size_t num_bytes) {
    return num_bytes >> 10;
  }

  mutex mu_;

  /**
   * last_updated_secs_ records the number of seconds since the Unix epoch that
   * the internal state of the GcsThrottle was updated. This is important when
   * determining the number of tokens to add to the available_tokens_ pool.
   */
  uint64 last_updated_secs_ TF_GUARDED_BY(mu_) = 0;

  /**
   * available_tokens_ records how many tokens are available to be consumed.
   *
   * Note: it is possible for available_tokens_ to become negative. If a
   * response comes back that consumes more than the available tokens, the count
   * will go negative, and block future requests until we have available tokens.
   */
  int64_t available_tokens_ TF_GUARDED_BY(mu_) = 0;

  // Time source; raw pointer, so presumably not owned by this class.
  EnvTime* const env_time_;
  GcsThrottleConfig config_ TF_GUARDED_BY(mu_);
};
165
+
166
+ } // namespace tsl
167
+
168
+ #endif // TENSORFLOW_TSL_PLATFORM_CLOUD_GCS_THROTTLE_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/google_auth_provider.h ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_CLOUD_GOOGLE_AUTH_PROVIDER_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_CLOUD_GOOGLE_AUTH_PROVIDER_H_
18
+
19
+ #include <memory>
20
+
21
+ #include "tsl/platform/cloud/auth_provider.h"
22
+ #include "tsl/platform/cloud/compute_engine_metadata_client.h"
23
+ #include "tsl/platform/cloud/oauth_client.h"
24
+ #include "tsl/platform/mutex.h"
25
+ #include "tsl/platform/thread_annotations.h"
26
+
27
+ namespace tsl {
28
+
29
/// Implementation based on Google Application Default Credentials.
class GoogleAuthProvider : public AuthProvider {
 public:
  // Constructs a provider that discovers credentials via the Application
  // Default Credentials flow, using `compute_engine_metadata_client` for the
  // GCE path.
  GoogleAuthProvider(std::shared_ptr<ComputeEngineMetadataClient>
                         compute_engine_metadata_client);
  // Constructor for tests/customization: injects the OAuth client and Env.
  explicit GoogleAuthProvider(std::unique_ptr<OAuthClient> oauth_client,
                              std::shared_ptr<ComputeEngineMetadataClient>
                                  compute_engine_metadata_client,
                              Env* env);
  virtual ~GoogleAuthProvider() {}

  /// \brief Returns the short-term authentication bearer token.
  ///
  /// Safe for concurrent use by multiple threads.
  Status GetToken(string* token) override;

 private:
  /// \brief Gets the bearer token from files.
  ///
  /// Tries the file from $GOOGLE_APPLICATION_CREDENTIALS and the
  /// standard gcloud tool's location.
  Status GetTokenFromFiles() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);

  /// Gets the bearer token from Google Compute Engine environment.
  Status GetTokenFromGce() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);

  /// Gets the bearer token from the system env variable, for testing purposes.
  Status GetTokenForTesting() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);

  std::unique_ptr<OAuthClient> oauth_client_;
  std::shared_ptr<ComputeEngineMetadataClient> compute_engine_metadata_client_;
  Env* env_;
  mutex mu_;
  // Cached bearer token returned by GetToken.
  string current_token_ TF_GUARDED_BY(mu_);
  // NOTE(review): expiration time of current_token_, presumably seconds since
  // the Unix epoch (inferred from the _sec suffix) -- confirm in the .cc.
  uint64 expiration_timestamp_sec_ TF_GUARDED_BY(mu_) = 0;
  GoogleAuthProvider(const GoogleAuthProvider&) = delete;
  void operator=(const GoogleAuthProvider&) = delete;
};
67
+
68
+ } // namespace tsl
69
+
70
+ #endif // TENSORFLOW_TSL_PLATFORM_CLOUD_GOOGLE_AUTH_PROVIDER_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/http_request.h ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_CLOUD_HTTP_REQUEST_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_CLOUD_HTTP_REQUEST_H_
18
+
19
+ #include <string>
20
+ #include <unordered_map>
21
+ #include <vector>
22
+
23
+ #include "tsl/platform/env.h"
24
+ #include "tsl/platform/errors.h"
25
+ #include "tsl/platform/macros.h"
26
+ #include "tsl/platform/protobuf.h"
27
+ #include "tsl/platform/status.h"
28
+ #include "tsl/platform/stringpiece.h"
29
+ #include "tsl/platform/types.h"
30
+
31
+ namespace tsl {
32
+
33
+ /// \brief An abstract basic HTTP client.
34
+ ///
35
+ /// The usage pattern for the class is based on the libcurl library:
36
+ /// create a request object, set request parameters and call Send().
37
+ ///
38
+ /// For example:
39
+ /// HttpRequest request;
40
+ /// request.SetUri("http://www.google.com");
41
+ /// request.SetResultsBuffer(out_buffer);
42
+ /// request.Send();
43
/// \brief An abstract basic HTTP client.
///
/// The usage pattern for the class is based on the libcurl library:
/// create a request object, set request parameters and call Send().
///
/// For example:
///   HttpRequest request;
///   request.SetUri("http://www.google.com");
///   request.SetResultsBuffer(out_buffer);
///   request.Send();
class HttpRequest {
 public:
  /// Abstract factory so callers can create requests without depending on a
  /// concrete implementation (and tests can inject fakes).
  class Factory {
   public:
    virtual ~Factory() {}
    /// Returns a new request; ownership passes to the caller.
    virtual HttpRequest* Create() = 0;
  };

  /// RequestMethod is used to capture what type of HTTP request is made and
  /// is used in conjunction with RequestStats for instrumentation and
  /// monitoring of HTTP requests and their responses.
  enum class RequestMethod : char {
    kGet,
    kPost,
    kPut,
    kDelete,
  };

  /// RequestMethodName converts a RequestMethod to the canonical method string.
  inline static const char* RequestMethodName(RequestMethod m) {
    switch (m) {
      case RequestMethod::kGet:
        return "GET";
      case RequestMethod::kPost:
        return "POST";
      case RequestMethod::kPut:
        return "PUT";
      case RequestMethod::kDelete:
        return "DELETE";
      default:
        // Defensive: unreachable for valid enum values.
        return "???";
    }
  }

  /// RequestStats is a class that can be used to instrument an Http Request.
  class RequestStats {
   public:
    virtual ~RequestStats() = default;

    /// RecordRequest is called right before a request is sent on the wire.
    virtual void RecordRequest(const HttpRequest* request, const string& uri,
                               RequestMethod method) = 0;

    /// RecordResponse is called after the response has been received.
    virtual void RecordResponse(const HttpRequest* request, const string& uri,
                                RequestMethod method, const Status& result) = 0;
  };

  HttpRequest() {}
  virtual ~HttpRequest() {}

  /// Sets the request URI.
  virtual void SetUri(const string& uri) = 0;

  /// \brief Sets the Range header.
  ///
  /// Used for random seeks, for example "0-999" returns the first 1000 bytes
  /// (note that the right border is included).
  virtual void SetRange(uint64 start, uint64 end) = 0;

  /// Sets a request header.
  virtual void AddHeader(const string& name, const string& value) = 0;

  /// Sets a DNS resolve mapping (to skip DNS resolution).
  ///
  /// Note: because GCS is available over HTTPS, we cannot replace the hostname
  /// in the URI with an IP address, as that will cause the certificate check
  /// to fail.
  virtual void AddResolveOverride(const string& hostname, int64_t port,
                                  const string& ip_addr) = 0;

  /// Sets the 'Authorization' header to the value of 'Bearer ' + auth_token.
  virtual void AddAuthBearerHeader(const string& auth_token) = 0;

  /// Sets the RequestStats object to use to record the request and response.
  virtual void SetRequestStats(RequestStats* stats) = 0;

  /// Makes the request a DELETE request.
  virtual void SetDeleteRequest() = 0;

  /// \brief Makes the request a PUT request.
  ///
  /// The request body will be taken from the specified file starting from
  /// the given offset.
  virtual Status SetPutFromFile(const string& body_filepath, size_t offset) = 0;

  /// Makes the request a PUT request with an empty body.
  virtual void SetPutEmptyBody() = 0;

  /// \brief Makes the request a POST request.
  ///
  /// The request body will be taken from the specified buffer.
  virtual void SetPostFromBuffer(const char* buffer, size_t size) = 0;

  /// Makes the request a POST request with an empty body.
  virtual void SetPostEmptyBody() = 0;

  /// \brief Specifies the buffer for receiving the response body.
  ///
  /// Size of out_buffer after an access will be exactly the number of bytes
  /// read. Existing content of the vector will be cleared.
  virtual void SetResultBuffer(std::vector<char>* out_buffer) = 0;

  /// \brief Specifies the buffer for receiving the response body.
  ///
  /// This method should be used when a caller knows the upper bound of the
  /// size of the response data. The caller provides a pre-allocated buffer
  /// and its size. After the Send() method is called, the
  /// GetResultBufferDirectBytesTransferred() method may be used to learn to the
  /// number of bytes that were transferred using this method.
  virtual void SetResultBufferDirect(char* buffer, size_t size) = 0;

  /// \brief Returns the number of bytes transferred, when using
  /// SetResultBufferDirect(). This method may only be used when using
  /// SetResultBufferDirect().
  virtual size_t GetResultBufferDirectBytesTransferred() = 0;

  /// \brief Returns the response headers of a completed request.
  ///
  /// If the header is not found, returns an empty string.
  virtual string GetResponseHeader(const string& name) const = 0;

  /// Returns the response code of a completed request.
  virtual uint64 GetResponseCode() const = 0;

  /// \brief Sends the formed request.
  ///
  /// If the result buffer was defined, the response will be written there.
  /// The object is not designed to be re-used after Send() is executed.
  virtual Status Send() = 0;

  // Url encodes str and returns a new string.
  virtual string EscapeString(const string& str) = 0;

  /// \brief Set timeouts for this request.
  ///
  /// The connection parameter controls how long we should wait for the
  /// connection to be established. The inactivity parameter controls how long
  /// we should wait between additional responses from the server. Finally the
  /// total parameter controls the maximum total connection time to prevent
  /// hanging indefinitely.
  virtual void SetTimeouts(uint32 connection, uint32 inactivity,
                           uint32 total) = 0;

  HttpRequest(const HttpRequest&) = delete;
  void operator=(const HttpRequest&) = delete;
};
190
+
191
+ } // namespace tsl
192
+
193
+ #endif // TENSORFLOW_TSL_PLATFORM_CLOUD_HTTP_REQUEST_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/oauth_client.h ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_CLOUD_OAUTH_CLIENT_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_CLOUD_OAUTH_CLIENT_H_
18
+
19
+ #include <memory>
20
+
21
+ #include "json/json.h"
22
+ #include "tsl/platform/cloud/http_request.h"
23
+ #include "tsl/platform/env.h"
24
+ #include "tsl/platform/status.h"
25
+
26
+ namespace tsl {
27
+
28
+ /// OAuth 2.0 client.
29
/// OAuth 2.0 client.
///
/// Exchanges locally-held credentials (a service-account JSON key or a
/// refresh token) for short-lived bearer tokens via an OAuth server.
class OAuthClient {
 public:
  OAuthClient();
  /// Injects the HTTP request factory and Env (e.g. for testing).
  explicit OAuthClient(
      std::unique_ptr<HttpRequest::Factory> http_request_factory, Env* env);
  virtual ~OAuthClient() {}

  /// \brief Retrieves a bearer token using a private key.
  ///
  /// Retrieves the authentication bearer token using a JSON file
  /// with the client's private key.
  ///
  /// On success `token` and `expiration_timestamp_sec` are populated.
  virtual Status GetTokenFromServiceAccountJson(
      Json::Value json, StringPiece oauth_server_uri, StringPiece scope,
      string* token, uint64* expiration_timestamp_sec);

  /// Retrieves a bearer token using a refresh token.
  virtual Status GetTokenFromRefreshTokenJson(Json::Value json,
                                              StringPiece oauth_server_uri,
                                              string* token,
                                              uint64* expiration_timestamp_sec);

  /// Parses the JSON response with the token from an OAuth 2.0 server.
  ///
  /// `request_timestamp_sec` is the time at which the request was issued;
  /// presumably used as the base for computing `expiration_timestamp_sec`
  /// from the response's lifetime field — confirm against the .cc.
  virtual Status ParseOAuthResponse(StringPiece response,
                                    uint64 request_timestamp_sec, string* token,
                                    uint64* expiration_timestamp_sec);

 private:
  std::unique_ptr<HttpRequest::Factory> http_request_factory_;
  Env* env_;  // Presumably not owned (injected) — confirm against the .cc.
  OAuthClient(const OAuthClient&) = delete;
  void operator=(const OAuthClient&) = delete;
};
61
+
62
+ } // namespace tsl
63
+
64
+ #endif // TENSORFLOW_TSL_PLATFORM_CLOUD_OAUTH_CLIENT_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/ram_file_block_cache.h ADDED
@@ -0,0 +1,250 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_CLOUD_RAM_FILE_BLOCK_CACHE_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_CLOUD_RAM_FILE_BLOCK_CACHE_H_
18
+
19
+ #include <functional>
20
+ #include <list>
21
+ #include <map>
22
+ #include <memory>
23
+ #include <string>
24
+ #include <vector>
25
+
26
+ #include "tsl/platform/cloud/file_block_cache.h"
27
+ #include "tsl/platform/env.h"
28
+ #include "tsl/platform/mutex.h"
29
+ #include "tsl/platform/notification.h"
30
+ #include "tsl/platform/status.h"
31
+ #include "tsl/platform/stringpiece.h"
32
+ #include "tsl/platform/thread_annotations.h"
33
+ #include "tsl/platform/types.h"
34
+
35
+ namespace tsl {
36
+
37
+ /// \brief An LRU block cache of file contents, keyed by {filename, offset}.
38
+ ///
39
+ /// This class should be shared by read-only random access files on a remote
40
+ /// filesystem (e.g. GCS).
41
/// \brief An LRU block cache of file contents, keyed by {filename, offset}.
///
/// This class should be shared by read-only random access files on a remote
/// filesystem (e.g. GCS).
class RamFileBlockCache : public FileBlockCache {
 public:
  /// The callback executed when a block is not found in the cache, and needs to
  /// be fetched from the backing filesystem. This callback is provided when the
  /// cache is constructed. The returned Status should be OK as long as the
  /// read from the remote filesystem succeeded (similar to the semantics of the
  /// read(2) system call).
  typedef std::function<Status(const string& filename, size_t offset,
                               size_t buffer_size, char* buffer,
                               size_t* bytes_transferred)>
      BlockFetcher;

  /// Starts a background pruning thread only when max_staleness > 0;
  /// otherwise blocks never expire by age.
  RamFileBlockCache(size_t block_size, size_t max_bytes, uint64 max_staleness,
                    BlockFetcher block_fetcher, Env* env = Env::Default())
      : block_size_(block_size),
        max_bytes_(max_bytes),
        max_staleness_(max_staleness),
        block_fetcher_(block_fetcher),
        env_(env) {
    if (max_staleness_ > 0) {
      pruning_thread_.reset(env_->StartThread(ThreadOptions(), "TF_prune_FBC",
                                              [this] { Prune(); }));
    }
    VLOG(1) << "GCS file block cache is "
            << (IsCacheEnabled() ? "enabled" : "disabled");
  }

  ~RamFileBlockCache() override {
    if (pruning_thread_) {
      stop_pruning_thread_.Notify();
      // Destroying pruning_thread_ will block until Prune() receives the above
      // notification and returns.
      pruning_thread_.reset();
    }
  }

  /// Read `n` bytes from `filename` starting at `offset` into `out`. This
  /// method will return:
  ///
  /// 1) The error from the remote filesystem, if the read from the remote
  ///    filesystem failed.
  /// 2) PRECONDITION_FAILED if the read from the remote filesystem succeeded,
  ///    but the read returned a partial block, and the LRU cache contained a
  ///    block at a higher offset (indicating that the partial block should have
  ///    been a full block).
  /// 3) OUT_OF_RANGE if the read from the remote filesystem succeeded, but
  ///    the file contents do not extend past `offset` and thus nothing was
  ///    placed in `out`.
  /// 4) OK otherwise (i.e. the read succeeded, and at least one byte was placed
  ///    in `out`).
  Status Read(const string& filename, size_t offset, size_t n, char* buffer,
              size_t* bytes_transferred) override;

  // Validate the given file signature with the existing file signature in the
  // cache. Returns true if the signature doesn't change or the file doesn't
  // exist before. If the signature changes, update the existing signature with
  // the new one and remove the file from cache.
  bool ValidateAndUpdateFileSignature(const string& filename,
                                      int64_t file_signature) override
      TF_LOCKS_EXCLUDED(mu_);

  /// Remove all cached blocks for `filename`.
  void RemoveFile(const string& filename) override TF_LOCKS_EXCLUDED(mu_);

  /// Remove all cached data.
  void Flush() override TF_LOCKS_EXCLUDED(mu_);

  /// Accessors for cache parameters (immutable after construction, so no
  /// locking is needed).
  size_t block_size() const override { return block_size_; }
  size_t max_bytes() const override { return max_bytes_; }
  uint64 max_staleness() const override { return max_staleness_; }

  /// The current size (in bytes) of the cache.
  size_t CacheSize() const override TF_LOCKS_EXCLUDED(mu_);

  // Returns true if the cache is enabled. If false, the BlockFetcher callback
  // is always executed during Read.
  bool IsCacheEnabled() const override {
    return block_size_ > 0 && max_bytes_ > 0;
  }

 private:
  /// The size of the blocks stored in the LRU cache, as well as the size of the
  /// reads from the underlying filesystem.
  const size_t block_size_;
  /// The maximum number of bytes (sum of block sizes) allowed in the LRU cache.
  const size_t max_bytes_;
  /// The maximum staleness of any block in the LRU cache, in seconds.
  const uint64 max_staleness_;
  /// The callback to read a block from the underlying filesystem.
  const BlockFetcher block_fetcher_;
  /// The Env from which we read timestamps.
  Env* const env_;  // not owned

  /// \brief The key type for the file block cache.
  ///
  /// The file block cache key is a {filename, offset} pair.
  typedef std::pair<string, size_t> Key;

  /// \brief The state of a block.
  ///
  /// A block begins in the CREATED stage. The first thread will attempt to read
  /// the block from the filesystem, transitioning the state of the block to
  /// FETCHING. After completing, if the read was successful the state should
  /// be FINISHED. Otherwise the state should be ERROR. A subsequent read can
  /// re-fetch the block if the state is ERROR.
  enum class FetchState {
    CREATED,
    FETCHING,
    FINISHED,
    ERROR,
  };

  /// \brief A block of a file.
  ///
  /// A file block consists of the block data, the block's current position in
  /// the LRU cache, the timestamp (seconds since epoch) at which the block
  /// was cached, a coordination lock, and state & condition variables.
  ///
  /// Thread safety:
  /// The iterator and timestamp fields should only be accessed while holding
  /// the block-cache-wide mu_ instance variable. The state variable should only
  /// be accessed while holding the Block's mu lock. The data vector should only
  /// be accessed after state == FINISHED, and it should never be modified.
  ///
  /// In order to prevent deadlocks, never grab the block-cache-wide mu_ lock
  /// AFTER grabbing any block's mu lock. It is safe to grab mu without locking
  /// mu_.
  struct Block {
    /// The block data.
    std::vector<char> data;
    /// A list iterator pointing to the block's position in the LRU list.
    std::list<Key>::iterator lru_iterator;
    /// A list iterator pointing to the block's position in the LRA list.
    std::list<Key>::iterator lra_iterator;
    /// The timestamp (seconds since epoch) at which the block was cached.
    uint64 timestamp;
    /// Mutex to guard state variable
    mutex mu;
    /// The state of the block.
    FetchState state TF_GUARDED_BY(mu) = FetchState::CREATED;
    /// Wait on cond_var if state is FETCHING.
    condition_variable cond_var;
  };

  /// \brief The block map type for the file block cache.
  ///
  /// The block map is an ordered map from Key to Block.
  typedef std::map<Key, std::shared_ptr<Block>> BlockMap;

  /// Prune the cache by removing files with expired blocks.
  void Prune() TF_LOCKS_EXCLUDED(mu_);

  bool BlockNotStale(const std::shared_ptr<Block>& block)
      TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);

  /// Look up a Key in the block cache.
  std::shared_ptr<Block> Lookup(const Key& key) TF_LOCKS_EXCLUDED(mu_);

  Status MaybeFetch(const Key& key, const std::shared_ptr<Block>& block)
      TF_LOCKS_EXCLUDED(mu_);

  /// Trim the block cache to make room for another entry.
  void Trim() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);

  /// Update the LRU iterator for the block at `key`.
  Status UpdateLRU(const Key& key, const std::shared_ptr<Block>& block)
      TF_LOCKS_EXCLUDED(mu_);

  /// Remove all blocks of a file, with mu_ already held.
  void RemoveFile_Locked(const string& filename)
      TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);

  /// Remove the block `entry` from the block map and LRU list, and update the
  /// cache size accordingly.
  void RemoveBlock(BlockMap::iterator entry) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);

  /// The cache pruning thread that removes files with expired blocks.
  std::unique_ptr<Thread> pruning_thread_;

  /// Notification for stopping the cache pruning thread.
  Notification stop_pruning_thread_;

  /// Guards access to the block map, LRU list, and cached byte count.
  mutable mutex mu_;

  /// The block map (map from Key to Block).
  BlockMap block_map_ TF_GUARDED_BY(mu_);

  /// The LRU list of block keys. The front of the list identifies the most
  /// recently accessed block.
  std::list<Key> lru_list_ TF_GUARDED_BY(mu_);

  /// The LRA (least recently added) list of block keys. The front of the list
  /// identifies the most recently added block.
  ///
  /// Note: blocks are added to lra_list_ only after they have successfully been
  /// fetched from the underlying block store.
  std::list<Key> lra_list_ TF_GUARDED_BY(mu_);

  /// The combined number of bytes in all of the cached blocks.
  size_t cache_size_ TF_GUARDED_BY(mu_) = 0;

  // A filename->file_signature map.
  std::map<string, int64_t> file_signature_map_ TF_GUARDED_BY(mu_);
};
247
+
248
+ } // namespace tsl
249
+
250
+ #endif // TENSORFLOW_TSL_PLATFORM_CLOUD_RAM_FILE_BLOCK_CACHE_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/time_util.h ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_CLOUD_TIME_UTIL_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_CLOUD_TIME_UTIL_H_
18
+
19
+ #include "tsl/platform/status.h"
20
+
21
+ namespace tsl {
22
+
23
+ /// Parses the timestamp in RFC 3339 format and returns it
24
+ /// as nanoseconds since epoch.
25
+ Status ParseRfc3339Time(const string& time, int64_t* mtime_nsec);
26
+
27
+ } // namespace tsl
28
+
29
+ #endif // TENSORFLOW_TSL_PLATFORM_CLOUD_TIME_UTIL_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cloud/zone_provider.h ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_CLOUD_ZONE_PROVIDER_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_CLOUD_ZONE_PROVIDER_H_
18
+
19
+ #include <string>
20
+
21
+ #include "tsl/platform/errors.h"
22
+ #include "tsl/platform/status.h"
23
+
24
+ namespace tsl {
25
+
26
+ /// Interface for a provider of cloud instance zone
27
/// Interface for a provider of cloud instance zone
class ZoneProvider {
 public:
  virtual ~ZoneProvider() {}

  /// \brief Gets the zone of the Cloud instance and set the result in `zone`.
  /// Returns OK if success.
  ///
  /// Returns an empty string in the case where the zone does not match the
  /// expected format
  /// Safe for concurrent use by multiple threads.
  virtual Status GetZone(string* zone) = 0;

  /// Null-safe convenience wrapper: returns an Internal error instead of
  /// dereferencing a null `provider`.
  static Status GetZone(ZoneProvider* provider, string* zone) {
    if (!provider) {
      return errors::Internal("Zone provider is required.");
    }
    return provider->GetZone(zone);
  }
};
46
+
47
+ } // namespace tsl
48
+
49
+ #endif // TENSORFLOW_TSL_PLATFORM_CLOUD_ZONE_PROVIDER_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/crash_analysis.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_CRASH_ANALYSIS_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_CRASH_ANALYSIS_H_
18
+
19
+ #include "tsl/platform/platform.h"
20
+
21
+ // Include appropriate platform-dependent implementations
22
+ #if defined(PLATFORM_GOOGLE)
23
+ #include "tsl/platform/google/crash_analysis.h" // IWYU pragma: export
24
+ #else
25
+ #include "tsl/platform/default/crash_analysis.h" // IWYU pragma: export
26
+ #endif
27
+
28
+ #endif // TENSORFLOW_TSL_PLATFORM_CRASH_ANALYSIS_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/criticality.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_CRITICALITY_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_CRITICALITY_H_
18
+
19
+ #include "tsl/platform/platform.h"
20
+
21
+ namespace tsl {
22
+
23
+ namespace criticality {
24
+
25
// Request criticality bands, ordered from least critical (kSheddable) to most
// critical (kCriticalPlus).
enum class Criticality {
  // Frequent full and partial unavailability is expected and not a cause for
  // concern.
  kSheddable = 0,
  // Partial unavailability is expected and not necessarily a cause for concern.
  kSheddablePlus = 1,
  // Any outage is a serious concern. This is the default priority for RPCs
  // sent from production jobs.
  kCritical = 2,
  // Any outage is a serious concern. Less than 50% of requests to a service
  // can be in this band. During an outage, this band will be prioritized above
  // all others.
  kCriticalPlus = 3,
};
39
+
40
+ } // namespace criticality
41
+
42
+ } // namespace tsl
43
+
44
+ #if defined(PLATFORM_GOOGLE)
45
+ #include "tsl/platform/google/criticality.h" // IWYU pragma: export
46
+ #else
47
+ #include "tsl/platform/default/criticality.h" // IWYU pragma: export
48
+ #endif
49
+
50
+ #endif // TENSORFLOW_TSL_PLATFORM_CRITICALITY_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/ctstring_internal.h ADDED
@@ -0,0 +1,455 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_CTSTRING_INTERNAL_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_CTSTRING_INTERNAL_H_
18
+
19
+ #include <limits.h>
20
+ #include <stdint.h>
21
+ #include <stdlib.h>
22
+ #include <string.h>
23
+
24
+ #if (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
25
+ __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \
26
+ defined(_WIN32)
27
+ #define TF_TSTRING_LITTLE_ENDIAN 1
28
+ #elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
29
+ __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
30
+ #define TF_TSTRING_LITTLE_ENDIAN 0
31
+ #else
32
+ #error "Unable to detect endianness."
33
+ #endif
34
+
35
// TF_swap32 reverses the byte order of a 32-bit integer. Compiler intrinsics
// are used where available; otherwise a portable shift/mask fallback applies.
#if defined(__clang__) || \
    (defined(__GNUC__) && \
     ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ >= 5))
static inline uint32_t TF_swap32(uint32_t host_int) {
  return __builtin_bswap32(host_int);
}

#elif defined(_MSC_VER)
static inline uint32_t TF_swap32(uint32_t host_int) {
  return _byteswap_ulong(host_int);
}

#elif defined(__APPLE__)
// NOTE(review): OSSwapInt32 is declared in <libkern/OSByteOrder.h>, which is
// not included in this view — presumably provided transitively; confirm.
static inline uint32_t TF_swap32(uint32_t host_int) {
  return OSSwapInt32(host_int);
}

#else
static inline uint32_t TF_swap32(uint32_t host_int) {
#if defined(__GLIBC__)
  // NOTE(review): bswap_32 requires <byteswap.h>, not visible here — confirm.
  return bswap_32(host_int);
#else   // defined(__GLIBC__)
  // Portable fallback: move each byte to its mirrored position.
  return (((host_int & uint32_t{0xFF}) << 24) |
          ((host_int & uint32_t{0xFF00}) << 8) |
          ((host_int & uint32_t{0xFF0000}) >> 8) |
          ((host_int & uint32_t{0xFF000000}) >> 24));
#endif  // defined(__GLIBC__)
}
#endif
64
+
65
+ #if TF_TSTRING_LITTLE_ENDIAN
66
+ #define TF_le32toh(x) x
67
+ #else // TF_TSTRING_LITTLE_ENDIAN
68
+ #define TF_le32toh(x) TF_swap32(x)
69
+ #endif // TF_TSTRING_LITTLE_ENDIAN
70
+
71
+ static inline size_t TF_align16(size_t i) { return (i + 0xF) & ~0xF; }
72
+
73
+ static inline size_t TF_max(size_t a, size_t b) { return a > b ? a : b; }
74
+ static inline size_t TF_min(size_t a, size_t b) { return a < b ? a : b; }
75
+
76
+ typedef enum TF_TString_Type { // NOLINT
77
+ TF_TSTR_SMALL = 0x00,
78
+ TF_TSTR_LARGE = 0x01,
79
+ TF_TSTR_OFFSET = 0x02,
80
+ TF_TSTR_VIEW = 0x03,
81
+ TF_TSTR_TYPE_MASK = 0x03
82
+ } TF_TString_Type;
83
+
84
+ typedef struct TF_TString_Large { // NOLINT
85
+ size_t size;
86
+ size_t cap;
87
+ char *ptr;
88
+ } TF_TString_Large;
89
+
90
+ typedef struct TF_TString_Offset { // NOLINT
91
+ uint32_t size;
92
+ uint32_t offset;
93
+ uint32_t count;
94
+ } TF_TString_Offset;
95
+
96
+ typedef struct TF_TString_View { // NOLINT
97
+ size_t size;
98
+ const char *ptr;
99
+ } TF_TString_View;
100
+
101
+ typedef struct TF_TString_Raw { // NOLINT
102
+ uint8_t raw[24];
103
+ } TF_TString_Raw;
104
+
105
+ typedef union TF_TString_Union { // NOLINT
106
+ TF_TString_Large large;
107
+ TF_TString_Offset offset;
108
+ TF_TString_View view;
109
+ TF_TString_Raw raw;
110
+ } TF_TString_Union;
111
+
112
+ enum {
113
+ TF_TString_SmallCapacity =
114
+ (sizeof(TF_TString_Union) - sizeof(/* null delim */ char) -
115
+ sizeof(/* uint8_t size */ uint8_t)),
116
+ };
117
+
118
+ typedef struct TF_TString_Small { // NOLINT
119
+ uint8_t size;
120
+ char str[TF_TString_SmallCapacity + sizeof(/* null delim */ char)];
121
+ } TF_TString_Small;
122
+
123
+ typedef struct TF_TString { // NOLINT
124
+ union {
125
+ // small conflicts with '#define small char' in RpcNdr.h for MSVC, so we use
126
+ // smll instead.
127
+ TF_TString_Small smll;
128
+ TF_TString_Large large;
129
+ TF_TString_Offset offset;
130
+ TF_TString_View view;
131
+ TF_TString_Raw raw;
132
+ } u;
133
+ } TF_TString;
134
+
135
+ // TODO(dero): Fix for OSS, and add C only build test.
136
+ // _Static_assert(CHAR_BIT == 8);
137
+ // _Static_assert(sizeof(TF_TString) == 24);
138
+
139
+ static inline TF_TString_Type TF_TString_GetType(const TF_TString *str) {
140
+ return (TF_TString_Type)(str->u.raw.raw[0] & TF_TSTR_TYPE_MASK); // NOLINT
141
+ }
142
+
143
+ // XXX(dero): For the big-endian case, this function could potentially be more
144
+ // performant and readable by always storing the string size as little-endian
145
+ // and always byte-swapping on big endian, resulting in a simple 'bswap'+'shr'
146
+ // (for architectures that have a bswap op).
147
+ static inline size_t TF_TString_ToActualSizeT(size_t size) {
148
+ #if TF_TSTRING_LITTLE_ENDIAN
149
+ return size >> 2;
150
+ #else // TF_TSTRING_LITTLE_ENDIAN
151
+ // 0xFF000000 or 0xFF00000000000000 depending on platform
152
+ static const size_t mask = ~((~(size_t)0) >> 8);
153
+
154
+ return (((mask << 2) & size) >> 2) | (~mask & size);
155
+ #endif // TF_TSTRING_LITTLE_ENDIAN
156
+ }
157
+
158
+ static inline size_t TF_TString_ToInternalSizeT(size_t size,
159
+ TF_TString_Type type) {
160
+ #if TF_TSTRING_LITTLE_ENDIAN
161
+ return (size << 2) | type;
162
+ #else // TF_TSTRING_LITTLE_ENDIAN
163
+ // 0xFF000000 or 0xFF00000000000000 depending on platform
164
+ static const size_t mask = ~((~(size_t)0) >> 8);
165
+
166
+ return (mask & (size << 2)) | (~mask & size) |
167
+ ((size_t)type << ((sizeof(size_t) - 1) * 8)); // NOLINT
168
+ #endif // TF_TSTRING_LITTLE_ENDIAN
169
+ }
170
+
171
+ static inline void TF_TString_Init(TF_TString *str) {
172
+ memset(str->u.raw.raw, 0, sizeof(TF_TString_Raw));
173
+ }
174
+
175
+ static inline void TF_TString_Dealloc(TF_TString *str) {
176
+ if (TF_TString_GetType(str) == TF_TSTR_LARGE &&
177
+ str->u.large.ptr != NULL) { // NOLINT
178
+ free(str->u.large.ptr);
179
+ TF_TString_Init(str);
180
+ }
181
+ }
182
+
183
+ static inline size_t TF_TString_GetSize(const TF_TString *str) {
184
+ switch (TF_TString_GetType(str)) {
185
+ case TF_TSTR_SMALL:
186
+ return str->u.smll.size >> 2;
187
+ case TF_TSTR_LARGE:
188
+ return TF_TString_ToActualSizeT(str->u.large.size);
189
+ case TF_TSTR_OFFSET:
190
+ return TF_le32toh(str->u.offset.size) >> 2;
191
+ case TF_TSTR_VIEW:
192
+ return TF_TString_ToActualSizeT(str->u.view.size);
193
+ default:
194
+ return 0; // Unreachable.
195
+ }
196
+ }
197
+
198
+ static inline size_t TF_TString_GetCapacity(const TF_TString *str) {
199
+ switch (TF_TString_GetType(str)) {
200
+ case TF_TSTR_SMALL:
201
+ return TF_TString_SmallCapacity;
202
+ case TF_TSTR_LARGE:
203
+ return str->u.large.cap;
204
+ case TF_TSTR_OFFSET:
205
+ case TF_TSTR_VIEW:
206
+ default:
207
+ return 0;
208
+ }
209
+ }
210
+
211
+ static inline const char *TF_TString_GetDataPointer(const TF_TString *str) {
212
+ switch (TF_TString_GetType(str)) {
213
+ case TF_TSTR_SMALL:
214
+ return str->u.smll.str;
215
+ case TF_TSTR_LARGE:
216
+ return str->u.large.ptr;
217
+ case TF_TSTR_OFFSET:
218
+ return (const char *)str + TF_le32toh(str->u.offset.offset); // NOLINT
219
+ case TF_TSTR_VIEW:
220
+ return str->u.view.ptr;
221
+ default:
222
+ // Unreachable.
223
+ return NULL; // NOLINT
224
+ }
225
+ }
226
+
227
+ static inline char *TF_TString_ResizeUninitialized(TF_TString *str,
228
+ size_t new_size) {
229
+ size_t curr_size = TF_TString_GetSize(str);
230
+ size_t copy_size = TF_min(new_size, curr_size);
231
+
232
+ TF_TString_Type curr_type = TF_TString_GetType(str);
233
+ const char *curr_ptr = TF_TString_GetDataPointer(str);
234
+
235
+ // Case: SMALL/LARGE/VIEW/OFFSET -> SMALL
236
+ if (new_size <= TF_TString_SmallCapacity) {
237
+ str->u.smll.size = (uint8_t)((new_size << 2) | TF_TSTR_SMALL); // NOLINT
238
+ str->u.smll.str[new_size] = '\0';
239
+
240
+ if (curr_type != TF_TSTR_SMALL && copy_size) {
241
+ memcpy(str->u.smll.str, curr_ptr, copy_size);
242
+ }
243
+
244
+ if (curr_type == TF_TSTR_LARGE) {
245
+ free((void *)curr_ptr); // NOLINT
246
+ }
247
+
248
+ // We do not clear out the newly excluded region.
249
+
250
+ return str->u.smll.str;
251
+ }
252
+
253
+ // Case: SMALL/LARGE/VIEW/OFFSET -> LARGE
254
+ size_t new_cap;
255
+ size_t curr_cap = TF_TString_GetCapacity(str);
256
+
257
+ if (new_size < curr_size && new_size < curr_cap / 2) {
258
+ // TODO(dero): Replace with shrink_to_fit flag.
259
+ new_cap = TF_align16(curr_cap / 2 + 1) - 1;
260
+ } else if (new_size > curr_cap) {
261
+ new_cap = TF_align16(new_size + 1) - 1;
262
+ } else {
263
+ new_cap = curr_cap;
264
+ }
265
+
266
+ char *new_ptr;
267
+ if (new_cap == curr_cap) {
268
+ new_ptr = str->u.large.ptr;
269
+ } else if (curr_type == TF_TSTR_LARGE) {
270
+ new_ptr = (char *)realloc(str->u.large.ptr, new_cap + 1); // NOLINT
271
+ } else {
272
+ new_ptr = (char *)malloc(new_cap + 1); // NOLINT
273
+ if (copy_size) {
274
+ memcpy(new_ptr, curr_ptr, copy_size);
275
+ }
276
+ }
277
+
278
+ str->u.large.size = TF_TString_ToInternalSizeT(new_size, TF_TSTR_LARGE);
279
+ str->u.large.ptr = new_ptr;
280
+ str->u.large.ptr[new_size] = '\0';
281
+ str->u.large.cap = new_cap;
282
+
283
+ return str->u.large.ptr;
284
+ }
285
+
286
+ static inline char *TF_TString_GetMutableDataPointer(TF_TString *str) {
287
+ switch (TF_TString_GetType(str)) {
288
+ case TF_TSTR_SMALL:
289
+ return str->u.smll.str;
290
+ case TF_TSTR_OFFSET:
291
+ case TF_TSTR_VIEW:
292
+ // Convert OFFSET/VIEW to SMALL/LARGE
293
+ TF_TString_ResizeUninitialized(str, TF_TString_GetSize(str));
294
+ return (TF_TString_GetType(str) == TF_TSTR_SMALL) ? str->u.smll.str
295
+ : str->u.large.ptr;
296
+ case TF_TSTR_LARGE:
297
+ return str->u.large.ptr;
298
+ default:
299
+ // Unreachable.
300
+ return NULL; // NOLINT
301
+ }
302
+ }
303
+
304
+ static inline void TF_TString_Reserve(TF_TString *str, size_t new_cap) {
305
+ TF_TString_Type curr_type = TF_TString_GetType(str);
306
+
307
+ if (new_cap <= TF_TString_SmallCapacity) {
308
+ // We do nothing, we let Resize/GetMutableDataPointer handle the
309
+ // conversion to SMALL from VIEW/OFFSET when the need arises.
310
+ // In the degenerate case, where new_cap <= TF_TString_SmallCapacity,
311
+ // curr_size > TF_TString_SmallCapacity, and the type is VIEW/OFFSET, we
312
+ // defer the malloc to Resize/GetMutableDataPointer.
313
+ return;
314
+ }
315
+
316
+ if (curr_type == TF_TSTR_LARGE && new_cap <= str->u.large.cap) {
317
+ // We handle reduced cap in resize.
318
+ return;
319
+ }
320
+
321
+ // Case: VIEW/OFFSET -> LARGE or grow an existing LARGE type
322
+ size_t curr_size = TF_TString_GetSize(str);
323
+ const char *curr_ptr = TF_TString_GetDataPointer(str);
324
+
325
+ // Since VIEW and OFFSET types are read-only, their capacity is effectively 0.
326
+ // So we make sure we have enough room in the VIEW and OFFSET cases.
327
+ new_cap = TF_align16(TF_max(new_cap, curr_size) + 1) - 1;
328
+
329
+ if (curr_type == TF_TSTR_LARGE) {
330
+ str->u.large.ptr =
331
+ (char *)realloc(str->u.large.ptr, new_cap + 1); // NOLINT
332
+ } else {
333
+ // Convert to Large
334
+ char *new_ptr = (char *)malloc(new_cap + 1); // NOLINT
335
+ memcpy(new_ptr, curr_ptr, curr_size);
336
+
337
+ str->u.large.size = TF_TString_ToInternalSizeT(curr_size, TF_TSTR_LARGE);
338
+ str->u.large.ptr = new_ptr;
339
+ str->u.large.ptr[curr_size] = '\0';
340
+ }
341
+
342
+ str->u.large.cap = new_cap;
343
+ }
344
+
345
+ static inline void TF_TString_ReserveAmortized(TF_TString *str,
346
+ size_t new_cap) {
347
+ const size_t curr_cap = TF_TString_GetCapacity(str);
348
+ if (new_cap > curr_cap) {
349
+ TF_TString_Reserve(str, new_cap > 2 * curr_cap ? new_cap : 2 * curr_cap);
350
+ }
351
+ }
352
+
353
+ static inline char *TF_TString_Resize(TF_TString *str, size_t new_size,
354
+ char c) {
355
+ size_t curr_size = TF_TString_GetSize(str);
356
+ char *cstr = TF_TString_ResizeUninitialized(str, new_size);
357
+
358
+ if (new_size > curr_size) {
359
+ memset(cstr + curr_size, c, new_size - curr_size);
360
+ }
361
+
362
+ return cstr;
363
+ }
364
+
365
+ static inline void TF_TString_AssignView(TF_TString *dst, const char *src,
366
+ size_t size) {
367
+ TF_TString_Dealloc(dst);
368
+
369
+ dst->u.view.size = TF_TString_ToInternalSizeT(size, TF_TSTR_VIEW);
370
+ dst->u.view.ptr = src;
371
+ }
372
+
373
+ static inline void TF_TString_AppendN(TF_TString *dst, const char *src,
374
+ size_t src_size) {
375
+ if (!src_size) return;
376
+
377
+ size_t dst_size = TF_TString_GetSize(dst);
378
+
379
+ // For append use cases, we want to ensure amortized growth.
380
+ TF_TString_ReserveAmortized(dst, dst_size + src_size);
381
+ char *dst_c = TF_TString_ResizeUninitialized(dst, dst_size + src_size);
382
+
383
+ memcpy(dst_c + dst_size, src, src_size);
384
+ }
385
+
386
+ static inline void TF_TString_Append(TF_TString *dst, const TF_TString *src) {
387
+ const char *src_c = TF_TString_GetDataPointer(src);
388
+ size_t size = TF_TString_GetSize(src);
389
+
390
+ TF_TString_AppendN(dst, src_c, size);
391
+ }
392
+
393
+ static inline void TF_TString_Copy(TF_TString *dst, const char *src,
394
+ size_t size) {
395
+ char *dst_c = TF_TString_ResizeUninitialized(dst, size);
396
+
397
+ if (size) memcpy(dst_c, src, size);
398
+ }
399
+
400
+ static inline void TF_TString_Assign(TF_TString *dst, const TF_TString *src) {
401
+ if (dst == src) return;
402
+
403
+ TF_TString_Dealloc(dst);
404
+
405
+ switch (TF_TString_GetType(src)) {
406
+ case TF_TSTR_SMALL:
407
+ case TF_TSTR_VIEW:
408
+ *dst = *src;
409
+ return;
410
+ case TF_TSTR_LARGE: {
411
+ const char *src_c = TF_TString_GetDataPointer(src);
412
+ size_t size = TF_TString_GetSize(src);
413
+
414
+ TF_TString_Copy(dst, src_c, size);
415
+ }
416
+ return;
417
+ case TF_TSTR_OFFSET: {
418
+ const char *src_c = TF_TString_GetDataPointer(src);
419
+ size_t size = TF_TString_GetSize(src);
420
+
421
+ TF_TString_AssignView(dst, src_c, size);
422
+ }
423
+ return;
424
+ default:
425
+ return; // Unreachable.
426
+ }
427
+ }
428
+
429
+ static inline void TF_TString_Move(TF_TString *dst, TF_TString *src) {
430
+ if (dst == src) return;
431
+
432
+ TF_TString_Dealloc(dst);
433
+
434
+ switch (TF_TString_GetType(src)) {
435
+ case TF_TSTR_SMALL:
436
+ case TF_TSTR_VIEW:
437
+ *dst = *src;
438
+ return;
439
+ case TF_TSTR_LARGE:
440
+ *dst = *src;
441
+ TF_TString_Init(src);
442
+ return;
443
+ case TF_TSTR_OFFSET: {
444
+ const char *src_c = TF_TString_GetDataPointer(src);
445
+ size_t size = TF_TString_GetSize(src);
446
+
447
+ TF_TString_AssignView(dst, src_c, size);
448
+ }
449
+ return;
450
+ default:
451
+ return; // Unreachable.
452
+ }
453
+ }
454
+
455
+ #endif // TENSORFLOW_TSL_PLATFORM_CTSTRING_INTERNAL_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/casts.h ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_CASTS_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_DEFAULT_CASTS_H_
18
+
19
+ #include <assert.h> // for use with down_cast<>
20
+
21
+ #include <type_traits>
22
+
23
+ namespace tensorflow {
24
+
25
+ // An "upcast", i.e. a conversion from a pointer to an object to a pointer to a
26
+ // base subobject, always succeeds if the base is unambiguous and accessible,
27
+ // and so it's fine to use implicit_cast.
28
+ //
29
+ // A "downcast", i.e. a conversion from a pointer to an object to a pointer
30
+ // to a more-derived object that may contain the original object as a base
31
+ // subobject, cannot safely be done using static_cast, because you do not
32
+ // generally know whether the source object is really the base subobject of
33
+ // a containing, more-derived object of the target type. Thus, when you
34
+ // downcast in a polymorphic type hierarchy, you should use the following
35
+ // function template.
36
+ //
37
+ // In debug mode, we use dynamic_cast to double-check whether the downcast is
38
+ // legal (we die if it's not). In normal mode, we do the efficient static_cast
39
+ // instead. Thus, it's important to test in debug mode to make sure the cast is
40
+ // legal!
41
+ //
42
+ // This is the only place in the codebase we should use dynamic_cast.
43
+ // In particular, you should NOT use dynamic_cast for RTTI, e.g. for
44
+ // code like this:
45
+ // if (auto* p = dynamic_cast<Subclass1*>(foo)) HandleASubclass1Object(p);
46
+ // if (auto* p = dynamic_cast<Subclass2*>(foo)) HandleASubclass2Object(p);
47
+ // You should design the code some other way not to need this.
48
+
49
+ template <typename To, typename From> // use like this: down_cast<T*>(foo);
50
+ inline To down_cast(From* f) { // so we only accept pointers
51
+ static_assert(
52
+ (std::is_base_of<From, typename std::remove_pointer<To>::type>::value),
53
+ "target type not derived from source type");
54
+
55
+ // We skip the assert and hence the dynamic_cast if RTTI is disabled.
56
+ #if !defined(__GNUC__) || defined(__GXX_RTTI)
57
+ // Uses RTTI in dbg and fastbuild. asserts are disabled in opt builds.
58
+ assert(f == nullptr || dynamic_cast<To>(f) != nullptr);
59
+ #endif // !defined(__GNUC__) || defined(__GXX_RTTI)
60
+
61
+ return static_cast<To>(f);
62
+ }
63
+
64
+ // Overload of down_cast for references. Use like this: down_cast<T&>(foo).
65
+ // The code is slightly convoluted because we're still using the pointer
66
+ // form of dynamic cast. (The reference form throws an exception if it
67
+ // fails.)
68
+ //
69
+ // There's no need for a special const overload either for the pointer
70
+ // or the reference form. If you call down_cast with a const T&, the
71
+ // compiler will just bind From to const T.
72
+ template <typename To, typename From>
73
+ inline To down_cast(From& f) {
74
+ static_assert(std::is_lvalue_reference<To>::value,
75
+ "target type not a reference");
76
+ static_assert(
77
+ (std::is_base_of<From, typename std::remove_reference<To>::type>::value),
78
+ "target type not derived from source type");
79
+
80
+ // We skip the assert and hence the dynamic_cast if RTTI is disabled.
81
+ #if !defined(__GNUC__) || defined(__GXX_RTTI)
82
+ // RTTI: debug mode only
83
+ assert(dynamic_cast<typename std::remove_reference<To>::type*>(&f) !=
84
+ nullptr);
85
+ #endif // !defined(__GNUC__) || defined(__GXX_RTTI)
86
+
87
+ return static_cast<To>(f);
88
+ }
89
+
90
+ } // namespace tensorflow
91
+
92
+ #endif // TENSORFLOW_TSL_PLATFORM_DEFAULT_CASTS_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/context.h ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_CONTEXT_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_DEFAULT_CONTEXT_H_
18
+
19
+ namespace tsl {
20
+
21
+ class Context {
22
+ public:
23
+ Context() {}
24
+ Context(const ContextKind kind) {}
25
+
26
+ bool operator==(const Context& other) const { return true; }
27
+ };
28
+
29
+ class WithContext {
30
+ public:
31
+ explicit WithContext(const Context& x) {}
32
+ ~WithContext() {}
33
+ };
34
+
35
+ } // namespace tsl
36
+
37
+ #endif // TENSORFLOW_TSL_PLATFORM_DEFAULT_CONTEXT_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/crash_analysis.h ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_CRASH_ANALYSIS_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_DEFAULT_CRASH_ANALYSIS_H_
18
+
19
+ #include <string>
20
+
21
+ #include "tsl/platform/protobuf.h"
22
+
23
+ namespace tensorflow {
24
+ namespace crash_analysis {
25
+
26
+ class BufferedDataSource {};
27
+
28
+ // Reports `message` proto which will be stored in the `file_name` in case
29
+ // of a process crash.
30
+ // Default implementation is currently NOOP.
31
+ BufferedDataSource* ReportProtoDataOnCrash(
32
+ const std::string& file_name, const tsl::protobuf::Message& message);
33
+
34
+ // Removes `data_source` from the list of data reported in case of a process
35
+ // crash.
36
+ // Default implementation is currently NOOP.
37
+ void RemoveReportData(const BufferedDataSource* data_source);
38
+
39
+ // Reports `event_data` with the associated `message` under `event_name` to the
40
+ // crash analysis system. This does not require process crash.
41
+ // Default implementation is currently NOOP.
42
+ void ReportEvent(const std::string& event_name, const std::string& message,
43
+ const std::string& event_data);
44
+
45
+ } // namespace crash_analysis
46
+ } // namespace tensorflow
47
+
48
+ #endif // TENSORFLOW_TSL_PLATFORM_DEFAULT_CRASH_ANALYSIS_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/criticality.h ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_CRITICALITY_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_DEFAULT_CRITICALITY_H_
18
+
19
+ namespace tsl {
20
+
21
+ namespace criticality {
22
+
23
+ inline Criticality GetCriticality() {
24
+ // For default platforms, return the default criticality.
25
+ return Criticality::kCritical;
26
+ }
27
+
28
+ } // namespace criticality
29
+
30
+ } // namespace tsl
31
+
32
+ #endif // TENSORFLOW_TSL_PLATFORM_DEFAULT_CRITICALITY_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/dso_loader.h ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ // Common DSO loading functionality: exposes callables that dlopen DSOs
17
+ // in either the runfiles directories
18
+
19
+ #ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_DSO_LOADER_H_
20
+ #define TENSORFLOW_TSL_PLATFORM_DEFAULT_DSO_LOADER_H_
21
+
22
+ #include "absl/status/status.h"
23
+ #include "absl/status/statusor.h"
24
+
25
+ namespace tsl {
26
+ namespace internal {
27
+
28
+ namespace DsoLoader {
29
+ // The following methods either load the DSO of interest and return a dlopen
30
+ // handle or error status.
31
+ absl::StatusOr<void*> GetCudaDriverDsoHandle();
32
+ absl::StatusOr<void*> GetCudaRuntimeDsoHandle();
33
+ absl::StatusOr<void*> GetCublasDsoHandle();
34
+ absl::StatusOr<void*> GetCublasLtDsoHandle();
35
+ absl::StatusOr<void*> GetCufftDsoHandle();
36
+ absl::StatusOr<void*> GetCusolverDsoHandle();
37
+ absl::StatusOr<void*> GetCusparseDsoHandle();
38
+ absl::StatusOr<void*> GetCuptiDsoHandle();
39
+ absl::StatusOr<void*> GetCudnnDsoHandle();
40
+ absl::StatusOr<void*> GetNcclDsoHandle();
41
+ absl::StatusOr<void*> GetNvInferDsoHandle();
42
+ absl::StatusOr<void*> GetNvInferPluginDsoHandle();
43
+
44
+ absl::StatusOr<void*> GetRocblasDsoHandle();
45
+ absl::StatusOr<void*> GetMiopenDsoHandle();
46
+ absl::StatusOr<void*> GetHipfftDsoHandle();
47
+ absl::StatusOr<void*> GetRocrandDsoHandle();
48
+ absl::StatusOr<void*> GetRoctracerDsoHandle();
49
+ absl::StatusOr<void*> GetRocsolverDsoHandle();
50
+ absl::StatusOr<void*> GetHipsolverDsoHandle();
51
+ absl::StatusOr<void*> GetHipsparseDsoHandle();
52
+ absl::StatusOr<void*> GetHipDsoHandle();
53
+
54
+ // The following method tries to dlopen all necessary GPU libraries for the GPU
55
+ // platform TF is built with (CUDA or ROCm) only when these libraries should be
56
+ // dynamically loaded. Error status is returned when any of the libraries cannot
57
+ // be dlopened.
58
+ absl::Status MaybeTryDlopenGPULibraries();
59
+
60
+ // The following method tries to dlopen all necessary TensorRT libraries when
61
+ // these libraries should be dynamically loaded. Error status is returned when
62
+ // any of the libraries cannot be dlopened.
63
+ absl::Status TryDlopenTensorRTLibraries();
64
+ } // namespace DsoLoader
65
+
66
+ // Wrapper around the DsoLoader that prevents us from dlopen'ing any of the DSOs
67
+ // more than once.
68
+ namespace CachedDsoLoader {
69
+ // Cached versions of the corresponding DsoLoader methods above.
70
+ absl::StatusOr<void*> GetCudaDriverDsoHandle();
71
+ absl::StatusOr<void*> GetCudaRuntimeDsoHandle();
72
+ absl::StatusOr<void*> GetCublasDsoHandle();
73
+ absl::StatusOr<void*> GetCublasLtDsoHandle();
74
+ absl::StatusOr<void*> GetCufftDsoHandle();
75
+ absl::StatusOr<void*> GetCusolverDsoHandle();
76
+ absl::StatusOr<void*> GetCusparseDsoHandle();
77
+ absl::StatusOr<void*> GetCuptiDsoHandle();
78
+ absl::StatusOr<void*> GetCudnnDsoHandle();
79
+
80
+ absl::StatusOr<void*> GetRocblasDsoHandle();
81
+ absl::StatusOr<void*> GetMiopenDsoHandle();
82
+ absl::StatusOr<void*> GetHipfftDsoHandle();
83
+ absl::StatusOr<void*> GetRocrandDsoHandle();
84
+ absl::StatusOr<void*> GetRocsolverDsoHandle();
85
+ absl::StatusOr<void*> GetHipsolverDsoHandle();
86
+ absl::StatusOr<void*> GetRoctracerDsoHandle();
87
+ absl::StatusOr<void*> GetHipsparseDsoHandle();
88
+ absl::StatusOr<void*> GetHipblasltDsoHandle();
89
+ absl::StatusOr<void*> GetHipDsoHandle();
90
+ } // namespace CachedDsoLoader
91
+
92
+ } // namespace internal
93
+ } // namespace tsl
94
+
95
+ #endif // TENSORFLOW_TSL_PLATFORM_DEFAULT_DSO_LOADER_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/integral_types.h ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_INTEGRAL_TYPES_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_DEFAULT_INTEGRAL_TYPES_H_
18
+
19
+ #include <cstdint>
20
+
21
+ // IWYU pragma: private, include "tsl/platform/types.h"
22
+ // IWYU pragma: friend third_party/tensorflow/tsl/platform/types.h
23
+
24
+ namespace tsl {
25
+
26
+ typedef signed char int8;
27
+ typedef short int16;
28
+ typedef int int32;
29
+ typedef ::std::int64_t int64;
30
+
31
+ typedef unsigned char uint8;
32
+ typedef unsigned short uint16;
33
+ typedef unsigned int uint32;
34
+ typedef std::uint64_t uint64;
35
+
36
+ } // namespace tsl
37
+
38
+ #endif // TENSORFLOW_TSL_PLATFORM_DEFAULT_INTEGRAL_TYPES_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/logging.h ADDED
@@ -0,0 +1,651 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #if defined(_WIN32)
17
+ // prevent compile error because MSVC doesn't realize in debug build that
18
+ // LOG(FATAL) finally invokes abort()
19
+ #pragma warning(disable : 4716)
20
+ #endif // _WIN32
21
+
22
+ #ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_LOGGING_H_
23
+ #define TENSORFLOW_TSL_PLATFORM_DEFAULT_LOGGING_H_
24
+
25
+ // IWYU pragma: private, include "tsl/platform/logging.h"
26
+ // IWYU pragma: friend third_party/tensorflow/tsl/platform/logging.h
27
+
28
+ #include <atomic>
29
+ #include <limits>
30
+ #include <memory>
31
+ #include <sstream>
32
+ #include <string>
33
+ #include <vector>
34
+
35
+ #include "absl/base/log_severity.h"
36
+ #include "absl/strings/string_view.h"
37
+ #include "tsl/platform/macros.h"
38
+ #include "tsl/platform/types.h"
39
+
40
+ // TODO(mrry): Prevent this Windows.h #define from leaking out of our headers.
41
+ #undef ERROR
42
+
43
+ // Undef everything in case we're being mixed with some other Google library
44
+ // which already defined them itself. Presumably all Google libraries will
45
+ // support the same syntax for these so it should not be a big deal if they
46
+ // end up using our definitions instead.
47
+ #undef LOG
48
+ #undef LOG_EVERY_N
49
+ #undef LOG_FIRST_N
50
+ #undef LOG_EVERY_POW_2
51
+ #undef LOG_EVERY_N_SEC
52
+ #undef VLOG
53
+
54
+ #undef CHECK
55
+ #undef CHECK_EQ
56
+ #undef CHECK_NE
57
+ #undef CHECK_LT
58
+ #undef CHECK_LE
59
+ #undef CHECK_GT
60
+ #undef CHECK_GE
61
+
62
+ #undef DCHECK
63
+ #undef DCHECK_EQ
64
+ #undef DCHECK_NE
65
+ #undef DCHECK_LT
66
+ #undef DCHECK_LE
67
+ #undef DCHECK_GT
68
+ #undef DCHECK_GE
69
+
70
+ #undef QCHECK
71
+ #undef QCHECK_EQ
72
+ #undef QCHECK_NE
73
+ #undef QCHECK_LT
74
+ #undef QCHECK_LE
75
+ #undef QCHECK_GT
76
+ #undef QCHECK_GE
77
+
78
+ #undef PCHECK
79
+
80
+ namespace tsl {
81
+ const int INFO = 0; // base_logging::INFO;
82
+ const int WARNING = 1; // base_logging::WARNING;
83
+ const int ERROR = 2; // base_logging::ERROR;
84
+ const int FATAL = 3; // base_logging::FATAL;
85
+ const int NUM_SEVERITIES = 4; // base_logging::NUM_SEVERITIES;
86
+
87
+ namespace internal {
88
+
89
+ // Emit "message" as a log message to the log for the specified
90
+ // "severity" as if it came from a LOG call at "fname:line"
91
+ void LogString(const char* fname, int line, int severity,
92
+ const std::string& message);
93
+
94
// A log message under construction. Derives from std::basic_ostringstream
// so callers can stream values into it; the accumulated text is presumably
// emitted when the object is destroyed (via GenerateLogMessage) — the
// out-of-line definitions live in the corresponding .cc file.
class LogMessage : public std::basic_ostringstream<char> {
 public:
  LogMessage(const char* fname, int line, int severity);
  ~LogMessage() override;

  // Change the location (file/line) recorded for this log message.
  LogMessage& AtLocation(const char* fname, int line);

  // Returns the maximum log level for VLOG statements.
  // E.g., if MaxVLogLevel() is 2, then VLOG(2) statements will produce output,
  // but VLOG(3) will not. Defaults to 0.
  static int64_t MaxVLogLevel();

  // Returns whether VLOG level lvl is activated for the file fname.
  //
  // E.g. if the environment variable TF_CPP_VMODULE contains foo=3 and fname is
  // foo.cc and lvl is <= 3, this will return true. It will also return true if
  // the level is lower or equal to TF_CPP_MAX_VLOG_LEVEL (default zero).
  //
  // It is expected that the result of this query will be cached in the VLOG-ing
  // call site to avoid repeated lookups. This routine performs a hash-map
  // access against the VLOG-ing specification provided by the env var.
  static bool VmoduleActivated(const char* fname, int level);

 protected:
  // Emits the accumulated stream contents as a single log record.
  void GenerateLogMessage();

 private:
  const char* fname_;  // Source file of the originating LOG call (not owned).
  int line_;           // Source line of the originating LOG call.
  int severity_;       // One of ::tsl::INFO/WARNING/ERROR/FATAL.
};
126
+
127
// Uses the lower operator& precedence to voidify a LogMessage reference, so
// that the ternary VLOG() implementation is balanced, type wise: both the
// `(void)0` branch and the `Voidifier() & message` branch have type void.
struct Voidifier {
  template <typename T>
  void operator&(const T&) const {}
};
133
+
134
// LogMessageFatal ensures the process will exit in failure after
// logging this message: the destructor is declared TF_ATTRIBUTE_NORETURN,
// so control never continues past a LOG(FATAL) statement.
class LogMessageFatal : public LogMessage {
 public:
  LogMessageFatal(const char* file, int line) TF_ATTRIBUTE_COLD;
  TF_ATTRIBUTE_NORETURN ~LogMessageFatal() override;
};
141
+
142
// Null sink used by the disabled branch of DVLOG: accepts any streamed
// values and simply discards them when the temporary is destroyed.
class LogMessageNull : public std::basic_ostringstream<char> {
 public:
  LogMessageNull() = default;
  ~LogMessageNull() override = default;
};
148
+
149
+ #define _TF_LOG_INFO \
150
+ ::tsl::internal::LogMessage(__FILE__, __LINE__, ::tsl::INFO)
151
+ #define _TF_LOG_WARNING \
152
+ ::tsl::internal::LogMessage(__FILE__, __LINE__, ::tsl::WARNING)
153
+ #define _TF_LOG_ERROR \
154
+ ::tsl::internal::LogMessage(__FILE__, __LINE__, ::tsl::ERROR)
155
+ #define _TF_LOG_FATAL ::tsl::internal::LogMessageFatal(__FILE__, __LINE__)
156
+
157
+ #define _TF_LOG_QFATAL _TF_LOG_FATAL
158
+
159
+ #ifdef NDEBUG
160
+ #define _TF_LOG_DFATAL _TF_LOG_ERROR
161
+ #else
162
+ #define _TF_LOG_DFATAL _TF_LOG_FATAL
163
+ #endif
164
+
165
+ #define LOG(severity) _TF_LOG_##severity
166
+
167
+ #ifdef IS_MOBILE_PLATFORM
168
+
169
+ // Turn VLOG off when under mobile devices for considerations of binary size.
170
+ #define VLOG_IS_ON(lvl) ((lvl) <= 0)
171
+
172
+ #else
173
+
174
+ // Otherwise, set TF_CPP_MAX_VLOG_LEVEL environment to update minimum log level
175
+ // of VLOG, or TF_CPP_VMODULE to set the minimum log level for individual
176
+ // translation units.
177
+ #define VLOG_IS_ON(lvl) \
178
+ (([](int level, const char* fname) { \
179
+ static const bool vmodule_activated = \
180
+ ::tsl::internal::LogMessage::VmoduleActivated(fname, level); \
181
+ return vmodule_activated; \
182
+ })(lvl, __FILE__))
183
+
184
+ #endif
185
+
186
+ #define VLOG(level) \
187
+ TF_PREDICT_TRUE(!VLOG_IS_ON(level)) \
188
+ ? (void)0 \
189
+ : ::tsl::internal::Voidifier() & \
190
+ ::tsl::internal::LogMessage(__FILE__, __LINE__, tsl::INFO)
191
+
192
+ // `DVLOG` behaves like `VLOG` in debug mode (i.e. `#ifndef NDEBUG`).
193
+ // Otherwise, it compiles away and does nothing.
194
+ #ifndef NDEBUG
195
+ #define DVLOG VLOG
196
+ #else
197
+ #define DVLOG(verbose_level) \
198
+ while (false && (verbose_level) > 0) ::tsl::internal::LogMessageNull()
199
+ #endif
200
+
201
// Per-call-site state for LOG_EVERY_N (see macro docs below): an atomic
// execution counter; ShouldLog (defined out of line) approves every n-th
// execution.
class LogEveryNState {
 public:
  bool ShouldLog(int n);
  // Current value of the execution counter; streamed via the COUNTER symbol.
  uint32_t counter() { return counter_.load(std::memory_order_relaxed); }

 private:
  std::atomic<uint32> counter_{0};
};

// Per-call-site state for LOG_FIRST_N: approves executions while the
// counter is still below n (per the LOG_FIRST_N macro contract below).
class LogFirstNState {
 public:
  bool ShouldLog(int n);
  // Current value of the execution counter; streamed via the COUNTER symbol.
  uint32 counter() { return counter_.load(std::memory_order_relaxed); }

 private:
  std::atomic<uint32> counter_{0};
};

// Per-call-site state for LOG_EVERY_POW_2: approves executions when the
// counter is a power of two (the argument is unused, hence `ignored`).
class LogEveryPow2State {
 public:
  bool ShouldLog(int ignored);
  // Current value of the execution counter; streamed via the COUNTER symbol.
  uint32 counter() { return counter_.load(std::memory_order_relaxed); }

 private:
  std::atomic<uint32> counter_{0};
};

// Per-call-site state for LOG_EVERY_N_SEC: rate-limits by wall time while
// still counting every execution (logged or not).
class LogEveryNSecState {
 public:
  bool ShouldLog(double seconds);
  // Current value of the execution counter; streamed via the COUNTER symbol.
  uint32 counter() { return counter_.load(std::memory_order_relaxed); }

 private:
  std::atomic<uint32> counter_{0};
  // Cycle count according to CycleClock that we should next log at.
  std::atomic<int64_t> next_log_time_cycles_{0};
};
238
+
239
+ // This macro has a lot going on!
240
+ //
241
+ // * A local static (`logging_internal_stateful_condition_state`) is
242
+ // declared in a scope such that each `LOG_EVERY_N` (etc.) line has its own
243
+ // state.
244
+ // * `COUNTER`, the third variable, is used to support `<< COUNTER`. It is not
245
+ // mangled, so shadowing can be a problem, albeit more of a
246
+ // shoot-yourself-in-the-foot one. Don't name your variables `COUNTER`.
247
+ // * A single for loop can declare state and also test
248
+ // `condition && state.ShouldLog()`, but there's no way to constrain it to run
249
+ // only once (or not at all) without declaring another variable. The outer
250
+ // for-loop declares this variable (`do_log`).
251
+ // * Using for loops instead of if statements means there's no risk of an
252
+ // ambiguous dangling else statement.
253
+ #define LOGGING_INTERNAL_STATEFUL_CONDITION(kind, condition, arg) \
254
+ for (bool logging_internal_stateful_condition_do_log(condition); \
255
+ logging_internal_stateful_condition_do_log; \
256
+ logging_internal_stateful_condition_do_log = false) \
257
+ for (static ::tsl::internal::Log##kind##State \
258
+ logging_internal_stateful_condition_state; \
259
+ logging_internal_stateful_condition_do_log && \
260
+ logging_internal_stateful_condition_state.ShouldLog(arg); \
261
+ logging_internal_stateful_condition_do_log = false) \
262
+ for (const uint32_t COUNTER ABSL_ATTRIBUTE_UNUSED = \
263
+ logging_internal_stateful_condition_state.counter(); \
264
+ logging_internal_stateful_condition_do_log; \
265
+ logging_internal_stateful_condition_do_log = false)
266
+
267
+ // An instance of `LOG_EVERY_N` increments a hidden zero-initialized counter
268
+ // every time execution passes through it and logs the specified message when
269
+ // the counter's value is a multiple of `n`, doing nothing otherwise. Each
270
+ // instance has its own counter. The counter's value can be logged by streaming
271
+ // the symbol `COUNTER`. `LOG_EVERY_N` is thread-safe.
272
+ // Example:
273
+ //
274
+ // for (const auto& user : all_users) {
275
+ // LOG_EVERY_N(INFO, 1000) << "Processing user #" << COUNTER;
276
+ // ProcessUser(user);
277
+ // }
278
+ #define LOG_EVERY_N(severity, n) \
279
+ LOGGING_INTERNAL_STATEFUL_CONDITION(EveryN, true, n) \
280
+ LOG(severity)
281
+ // `LOG_FIRST_N` behaves like `LOG_EVERY_N` except that the specified message is
282
+ // logged when the counter's value is less than `n`. `LOG_FIRST_N` is
283
+ // thread-safe.
284
+ #define LOG_FIRST_N(severity, n) \
285
+ LOGGING_INTERNAL_STATEFUL_CONDITION(FirstN, true, n) \
286
+ LOG(severity)
287
+ // `LOG_EVERY_POW_2` behaves like `LOG_EVERY_N` except that the specified
288
+ // message is logged when the counter's value is a power of 2.
289
+ // `LOG_EVERY_POW_2` is thread-safe.
290
+ #define LOG_EVERY_POW_2(severity) \
291
+ LOGGING_INTERNAL_STATEFUL_CONDITION(EveryPow2, true, 0) \
292
+ LOG(severity)
293
+ // An instance of `LOG_EVERY_N_SEC` uses a hidden state variable to log the
294
+ // specified message at most once every `n_seconds`. A hidden counter of
295
+ // executions (whether a message is logged or not) is also maintained and can be
296
+ // logged by streaming the symbol `COUNTER`. `LOG_EVERY_N_SEC` is thread-safe.
297
+ // Example:
298
+ //
299
+ // LOG_EVERY_N_SEC(INFO, 2.5) << "Got " << COUNTER << " cookies so far";
300
+ #define LOG_EVERY_N_SEC(severity, n_seconds) \
301
+ LOGGING_INTERNAL_STATEFUL_CONDITION(EveryNSec, true, n_seconds) \
302
+ LOG(severity)
303
+
304
+ // CHECK dies with a fatal error if condition is not true. It is *not*
305
+ // controlled by NDEBUG, so the check will be executed regardless of
306
+ // compilation mode. Therefore, it is safe to do things like:
307
+ // CHECK(fp->Write(x) == 4)
308
+ #define CHECK(condition) \
309
+ if (TF_PREDICT_FALSE(!(condition))) \
310
+ LOG(FATAL) << "Check failed: " #condition " "
311
+
312
// Function is overloaded for integral types to allow static const
// integrals declared in classes and not defined to be used as arguments to
// CHECK* macros. It's not encouraged though: the generic overload binds a
// const reference (which would otherwise require an out-of-class definition
// for such constants); the by-value overloads below sidestep that by
// copying the small integral instead.
template <typename T>
inline const T& GetReferenceableValue(const T& t) {
  return t;
}
inline char GetReferenceableValue(char t) { return t; }
inline unsigned char GetReferenceableValue(unsigned char t) { return t; }
inline signed char GetReferenceableValue(signed char t) { return t; }
inline int16 GetReferenceableValue(int16_t t) { return t; }
inline uint16 GetReferenceableValue(uint16 t) { return t; }
inline int GetReferenceableValue(int t) { return t; }
inline unsigned int GetReferenceableValue(unsigned int t) { return t; }
inline int64_t GetReferenceableValue(int64_t t) { return t; }
inline uint64 GetReferenceableValue(uint64 t) { return t; }
328
+
329
// Formats a value for a failing CHECK_XX statement. By default this simply
// forwards to operator<<; the specializations declared below provide
// readable output for unprintable char values (and std::nullptr_t).
template <typename T>
inline void MakeCheckOpValueString(std::ostream* os, const T& v) {
  *os << v;
}
335
+
336
+ // Overrides for char types provide readable values for unprintable
337
+ // characters.
338
+ template <>
339
+ void MakeCheckOpValueString(std::ostream* os, const char& v);
340
+ template <>
341
+ void MakeCheckOpValueString(std::ostream* os, const signed char& v);
342
+ template <>
343
+ void MakeCheckOpValueString(std::ostream* os, const unsigned char& v);
344
+
345
+ #if LANG_CXX11
346
+ // We need an explicit specialization for std::nullptr_t.
347
+ template <>
348
+ void MakeCheckOpValueString(std::ostream* os, const std::nullptr_t& v);
349
+ #endif
350
+
351
// A container for a string pointer which can be evaluated to a bool -
// true iff the pointer is non-NULL. Used by CHECK_OP_LOG below: a non-null
// str_ means the comparison failed and carries the formatted failure
// message produced by MakeCheckOpString.
struct CheckOpString {
  explicit CheckOpString(string* str) : str_(str) {}
  // No destructor: if str_ is non-NULL, we're about to LOG(FATAL),
  // so there's no point in cleaning up str_.
  explicit operator bool() const { return TF_PREDICT_FALSE(str_ != nullptr); }
  string* str_;  // Owned (leaked deliberately — see comment above).
};
360
+
361
+ // Build the error message string. Specify no inlining for code size.
362
+ template <typename T1, typename T2>
363
+ string* MakeCheckOpString(const T1& v1, const T2& v2,
364
+ const char* exprtext) TF_ATTRIBUTE_NOINLINE;
365
+
366
// A helper class for formatting "expr (V1 vs. V2)" in a CHECK_XX
// statement. See MakeCheckOpString for sample usage. Other
// approaches were considered: use of a template method (e.g.,
// base::BuildCheckOpString(exprtext, base::Print<T1>, &v1,
// base::Print<T2>, &v2), however this approach has complications
// related to volatile arguments and function-pointer arguments).
// The out-of-line members are defined in the corresponding .cc file.
class CheckOpMessageBuilder {
 public:
  // Inserts "exprtext" and " (" to the stream.
  explicit CheckOpMessageBuilder(const char* exprtext);
  // Deletes "stream_".
  ~CheckOpMessageBuilder();
  // For inserting the first variable.
  std::ostream* ForVar1() { return stream_; }
  // For inserting the second variable (adds an intermediate " vs. ").
  std::ostream* ForVar2();
  // Get the result (inserts the closing ")").
  string* NewString();

 private:
  std::ostringstream* stream_;  // Heap-allocated; released in the destructor.
};
388
+
389
// Builds the "exprtext (v1 vs. v2)" failure message for a CHECK_XX.
// Returns a heap-allocated string; the caller (CHECK_OP_LOG) streams it and
// never frees it, since a LOG(FATAL) follows immediately.
template <typename T1, typename T2>
string* MakeCheckOpString(const T1& v1, const T2& v2, const char* exprtext) {
  CheckOpMessageBuilder comb(exprtext);
  MakeCheckOpValueString(comb.ForVar1(), v1);
  MakeCheckOpValueString(comb.ForVar2(), v2);
  return comb.NewString();
}
396
+
397
+ // Helper functions for CHECK_OP macro.
398
+ // We use the full name Check_EQ, Check_NE, etc. in case the file including
399
+ // base/logging.h provides its own #defines for the simpler names EQ, NE, etc.
400
+ // This happens if, for example, those are used as token names in a
401
+ // yacc grammar.
402
+ // The (int, int) overload works around the issue that the compiler
403
+ // will not instantiate the template version of the function on values of
404
+ // unnamed enum type - see comment below.
405
+ #define TF_DEFINE_CHECK_OP_IMPL(name, op) \
406
+ template <typename T1, typename T2> \
407
+ inline string* name##Impl(const T1& v1, const T2& v2, \
408
+ const char* exprtext) { \
409
+ if (TF_PREDICT_TRUE(v1 op v2)) \
410
+ return NULL; \
411
+ else \
412
+ return ::tsl::internal::MakeCheckOpString(v1, v2, exprtext); \
413
+ } \
414
+ inline string* name##Impl(int v1, int v2, const char* exprtext) { \
415
+ return name##Impl<int, int>(v1, v2, exprtext); \
416
+ }
417
+
418
+ // The (size_t, int) and (int, size_t) specialization are to handle unsigned
419
+ // comparison errors while still being thorough with the comparison.
420
+
421
+ TF_DEFINE_CHECK_OP_IMPL(Check_EQ, ==)
422
+ // Compilation error with CHECK_EQ(NULL, x)?
423
+ // Use CHECK(x == NULL) instead.
424
+
425
+ inline string* Check_EQImpl(int v1, size_t v2, const char* exprtext) {
426
+ if (TF_PREDICT_FALSE(v1 < 0))
427
+ ::tsl::internal::MakeCheckOpString(v1, v2, exprtext);
428
+
429
+ return Check_EQImpl(size_t(v1), v2, exprtext);
430
+ }
431
+
432
// CHECK_EQ with unsigned LHS / signed RHS: equality is symmetric, so
// delegate to the (int, size_t) overload with the arguments swapped.
inline string* Check_EQImpl(size_t v1, int v2, const char* exprtext) {
  return Check_EQImpl(v2, v1, exprtext);
}

TF_DEFINE_CHECK_OP_IMPL(Check_NE, !=)

// CHECK_NE with signed LHS / unsigned RHS: a negative int can never equal
// a size_t, so the check trivially holds (NULL = success); otherwise
// compare both operands as size_t.
inline string* Check_NEImpl(int v1, size_t v2, const char* exprtext) {
  if (v1 < 0) return NULL;

  return Check_NEImpl(size_t(v1), v2, exprtext);
}

// Symmetric case: inequality is symmetric, delegate with swapped arguments.
inline string* Check_NEImpl(size_t v1, int v2, const char* exprtext) {
  return Check_NEImpl(v2, v1, exprtext);
}
447
+
448
TF_DEFINE_CHECK_OP_IMPL(Check_LE, <=)

// CHECK_LE with signed LHS / unsigned RHS: v1 <= 0 always satisfies
// v1 <= v2 because v2 is unsigned; otherwise compare both as size_t.
inline string* Check_LEImpl(int v1, size_t v2, const char* exprtext) {
  if (v1 <= 0) return NULL;

  return Check_LEImpl(size_t(v1), v2, exprtext);
}

// CHECK_LE with unsigned LHS / signed RHS: a negative v2 can never be >=
// an unsigned v1, so fail immediately; otherwise compare both as size_t.
inline string* Check_LEImpl(size_t v1, int v2, const char* exprtext) {
  if (TF_PREDICT_FALSE(v2 < 0))
    return ::tsl::internal::MakeCheckOpString(v1, v2, exprtext);
  return Check_LEImpl(v1, size_t(v2), exprtext);
}

TF_DEFINE_CHECK_OP_IMPL(Check_LT, <)

// CHECK_LT with signed LHS / unsigned RHS: any negative v1 is less than
// any size_t value, so the check passes; otherwise compare as size_t.
inline string* Check_LTImpl(int v1, size_t v2, const char* exprtext) {
  if (v1 < 0) return NULL;

  return Check_LTImpl(size_t(v1), v2, exprtext);
}

// CHECK_LT with unsigned LHS / signed RHS: a negative v2 cannot exceed an
// unsigned v1, so fail immediately; otherwise compare as size_t.
inline string* Check_LTImpl(size_t v1, int v2, const char* exprtext) {
  if (v2 < 0) return ::tsl::internal::MakeCheckOpString(v1, v2, exprtext);
  return Check_LTImpl(v1, size_t(v2), exprtext);
}
474
+
475
// Implement GE,GT in terms of LE,LT: v1 >= v2 <=> v2 <= v1 (and likewise
// for >), which also routes mixed signed/unsigned operands through the
// wrap-around-safe overloads above.
template <typename T1, typename T2>
inline string* Check_GEImpl(const T1& v1, const T2& v2, const char* exprtext) {
  return Check_LEImpl(v2, v1, exprtext);
}

template <typename T1, typename T2>
inline string* Check_GTImpl(const T1& v1, const T2& v2, const char* exprtext) {
  return Check_LTImpl(v2, v1, exprtext);
}
485
+
486
+ #undef TF_DEFINE_CHECK_OP_IMPL
487
+
488
+ // In optimized mode, use CheckOpString to hint to compiler that
489
+ // the while condition is unlikely.
490
+ #define CHECK_OP_LOG(name, op, val1, val2) \
491
+ while (::tsl::internal::CheckOpString _result{::tsl::internal::name##Impl( \
492
+ ::tsl::internal::GetReferenceableValue(val1), \
493
+ ::tsl::internal::GetReferenceableValue(val2), #val1 " " #op " " #val2)}) \
494
+ ::tsl::internal::LogMessageFatal(__FILE__, __LINE__) << *(_result.str_)
495
+
496
+ #define CHECK_OP(name, op, val1, val2) CHECK_OP_LOG(name, op, val1, val2)
497
+
498
+ // CHECK_EQ/NE/...
499
+ #define CHECK_EQ(val1, val2) CHECK_OP(Check_EQ, ==, val1, val2)
500
+ #define CHECK_NE(val1, val2) CHECK_OP(Check_NE, !=, val1, val2)
501
+ #define CHECK_LE(val1, val2) CHECK_OP(Check_LE, <=, val1, val2)
502
+ #define CHECK_LT(val1, val2) CHECK_OP(Check_LT, <, val1, val2)
503
+ #define CHECK_GE(val1, val2) CHECK_OP(Check_GE, >=, val1, val2)
504
+ #define CHECK_GT(val1, val2) CHECK_OP(Check_GT, >, val1, val2)
505
+ #define CHECK_NOTNULL(val) \
506
+ ::tsl::internal::CheckNotNull(__FILE__, __LINE__, \
507
+ "'" #val "' Must be non NULL", (val))
508
+
509
+ #ifndef NDEBUG
510
+ // DCHECK_EQ/NE/...
511
+ #define DCHECK(condition) CHECK(condition)
512
+ #define DCHECK_EQ(val1, val2) CHECK_EQ(val1, val2)
513
+ #define DCHECK_NE(val1, val2) CHECK_NE(val1, val2)
514
+ #define DCHECK_LE(val1, val2) CHECK_LE(val1, val2)
515
+ #define DCHECK_LT(val1, val2) CHECK_LT(val1, val2)
516
+ #define DCHECK_GE(val1, val2) CHECK_GE(val1, val2)
517
+ #define DCHECK_GT(val1, val2) CHECK_GT(val1, val2)
518
+
519
+ #else
520
+
521
+ #define DCHECK(condition) \
522
+ while (false && (condition)) LOG(FATAL)
523
+
524
+ // NDEBUG is defined, so DCHECK_EQ(x, y) and so on do nothing.
525
+ // However, we still want the compiler to parse x and y, because
526
+ // we don't want to lose potentially useful errors and warnings.
527
+ // _DCHECK_NOP is a helper, and should not be used outside of this file.
528
+ #define _TF_DCHECK_NOP(x, y) \
529
+ while (false && ((void)(x), (void)(y), 0)) LOG(FATAL)
530
+
531
+ #define DCHECK_EQ(x, y) _TF_DCHECK_NOP(x, y)
532
+ #define DCHECK_NE(x, y) _TF_DCHECK_NOP(x, y)
533
+ #define DCHECK_LE(x, y) _TF_DCHECK_NOP(x, y)
534
+ #define DCHECK_LT(x, y) _TF_DCHECK_NOP(x, y)
535
+ #define DCHECK_GE(x, y) _TF_DCHECK_NOP(x, y)
536
+ #define DCHECK_GT(x, y) _TF_DCHECK_NOP(x, y)
537
+
538
+ #endif
539
+
540
+ // These are for when you don't want a CHECK failure to print a verbose
541
+ // stack trace. The implementation of CHECK* in this file already doesn't.
542
+ #define QCHECK(condition) CHECK(condition)
543
+ #define QCHECK_EQ(x, y) CHECK_EQ(x, y)
544
+ #define QCHECK_NE(x, y) CHECK_NE(x, y)
545
+ #define QCHECK_LE(x, y) CHECK_LE(x, y)
546
+ #define QCHECK_LT(x, y) CHECK_LT(x, y)
547
+ #define QCHECK_GE(x, y) CHECK_GE(x, y)
548
+ #define QCHECK_GT(x, y) CHECK_GT(x, y)
549
+
550
// Backs CHECK_NOTNULL: dies via LogMessageFatal if t compares equal to
// nullptr, otherwise forwards t through unchanged so the macro can be used
// inline in expressions, e.g. `auto* p = CHECK_NOTNULL(Lookup(key));`.
template <typename T>
T&& CheckNotNull(const char* file, int line, const char* exprtext, T&& t) {
  if (t == nullptr) {
    LogMessageFatal(file, line) << string(exprtext);
  }
  return std::forward<T>(t);
}
557
+
558
+ int64_t MinLogLevelFromEnv();
559
+
560
+ int64_t MaxVLogLevelFromEnv();
561
+
562
+ } // namespace internal
563
+
564
+ // LogSink support adapted from //base/logging.h
565
+ //
566
+ // `LogSink` is an interface which can be extended to intercept and process
567
+ // all log messages. LogSink implementations must be thread-safe. A single
568
+ // instance will be called from whichever thread is performing a logging
569
+ // operation.
570
// A single log record handed to TFLogSink::Send: severity, optional source
// location, and the raw message text (stored without any severity/location
// prefix).
class TFLogEntry {
  // Maps a tsl severity (INFO=0..FATAL=3, declared above) onto
  // absl::LogSeverity, whose enumerators use the same numeric values.
  static absl::LogSeverity AsAbslLogSeverity(int severity) {
    return static_cast<absl::LogSeverity>(severity);
  }

 public:
  // Entry without a source location (FName() will be empty, Line() == -1).
  explicit TFLogEntry(int severity, absl::string_view message)
      : severity_(AsAbslLogSeverity(severity)), message_(message) {}

  // Entry carrying the originating file name and line.
  explicit TFLogEntry(int severity, absl::string_view fname, int line,
                      absl::string_view message)
      : severity_(AsAbslLogSeverity(severity)),
        fname_(fname),
        line_(line),
        message_(message) {}

  absl::LogSeverity log_severity() const { return severity_; }
  std::string FName() const { return fname_; }
  int Line() const { return line_; }
  std::string ToString() const { return message_; }
  absl::string_view text_message() const { return message_; }

  // Returning similar result as `text_message` as there is no prefix in this
  // implementation.
  absl::string_view text_message_with_prefix() const { return message_; }

 private:
  const absl::LogSeverity severity_;
  const std::string fname_;
  int line_ = -1;  // -1 when no source location was supplied.
  const std::string message_;
};
602
+
603
// Interface for consumers of log messages; register instances with
// TFAddLogSink (declared below). Implementations must be thread-safe: a
// single instance is called from whichever thread performs the logging.
class TFLogSink {
 public:
  virtual ~TFLogSink() = default;

  // `Send` is called synchronously during the log statement. The logging
  // module guarantees not to call `Send` concurrently on the same log sink.
  // Implementations should be careful not to call `LOG` or `CHECK` or take
  // any locks that might be held by the `LOG` caller, to avoid deadlock.
  //
  // `e` is guaranteed to remain valid until the subsequent call to
  // `WaitTillSent` completes, so implementations may store a pointer to or
  // copy of `e` (e.g. in a thread local variable) for use in `WaitTillSent`.
  virtual void Send(const TFLogEntry& entry) = 0;

  // `WaitTillSent` blocks the calling thread (the thread that generated a log
  // message) until the sink has finished processing the log message.
  // `WaitTillSent` is called once per log message, following the call to
  // `Send`. This may be useful when log messages are buffered or processed
  // asynchronously by an expensive log sink.
  // The default implementation returns immediately. Like `Send`,
  // implementations should be careful not to call `LOG` or `CHECK` or take
  // any locks that might be held by the `LOG` caller, to avoid deadlock.
  virtual void WaitTillSent() {}
};
627
+
628
// This is the default log sink. This log sink is used if there are no other
// log sinks registered. To disable the default log sink, set the
// "no_default_logger" Bazel config setting to true or define a
// NO_DEFAULT_LOGGER preprocessor symbol. This log sink will always log to
// stderr.
class TFDefaultLogSink : public TFLogSink {
 public:
  // Writes the formatted entry to stderr (see the class comment above).
  void Send(const TFLogEntry& entry) override;
};
637
+
638
+ // Add or remove a `LogSink` as a consumer of logging data. Thread-safe.
639
+ void TFAddLogSink(TFLogSink* sink);
640
+ void TFRemoveLogSink(TFLogSink* sink);
641
+
642
+ // Get all the log sinks. Thread-safe.
643
+ std::vector<TFLogSink*> TFGetLogSinks();
644
+
645
+ // Change verbose level of pre-defined files if envorionment
646
+ // variable `env_var` is defined. This is currently a no op.
647
+ void UpdateLogVerbosityIfDefined(const char* env_var);
648
+
649
+ } // namespace tsl
650
+
651
+ #endif // TENSORFLOW_TSL_PLATFORM_DEFAULT_LOGGING_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/mutex.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_MUTEX_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_DEFAULT_MUTEX_H_
18
+
19
+ // IWYU pragma: private, include "tsl/platform/mutex.h"
20
+ // IWYU pragma: friend third_party/tensorflow/tsl/platform/mutex.h
21
+
22
+ namespace tsl {
23
+
24
+ namespace internal {
25
+ std::cv_status wait_until_system_clock(
26
+ CVData *cv_data, MuData *mu_data,
27
+ const std::chrono::system_clock::time_point timeout_time);
28
+ } // namespace internal
29
+
30
// Waits on this condition variable for at most `dur`, converting the
// relative timeout into an absolute std::chrono::system_clock deadline and
// delegating to the platform implementation declared above. Returns the
// std::cv_status reported by wait_until_system_clock.
template <class Rep, class Period>
std::cv_status condition_variable::wait_for(
    mutex_lock &lock, std::chrono::duration<Rep, Period> dur) {
  return tsl::internal::wait_until_system_clock(
      &this->cv_, &lock.mutex()->mu_, std::chrono::system_clock::now() + dur);
}
36
+
37
+ } // namespace tsl
38
+
39
+ #endif // TENSORFLOW_TSL_PLATFORM_DEFAULT_MUTEX_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/mutex_data.h ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_MUTEX_DATA_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_DEFAULT_MUTEX_DATA_H_
18
+
19
+ namespace tsl {
20
+ namespace internal {
21
+
22
// The internal state of a mutex: opaque storage the size of two pointers.
// The actual layout is presumably defined by the platform implementation
// in the corresponding .cc file — confirm before relying on it.
struct MuData {
  void* space[2];
};

// The internal state of a condition_variable: opaque two-pointer storage,
// mirroring MuData above.
struct CVData {
  void* space[2];
};
31
+
32
+ } // namespace internal
33
+ } // namespace tsl
34
+
35
+ #endif // TENSORFLOW_TSL_PLATFORM_DEFAULT_MUTEX_DATA_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/posix_file_system.h ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_POSIX_FILE_SYSTEM_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_DEFAULT_POSIX_FILE_SYSTEM_H_
18
+
19
+ #include "tsl/platform/env.h"
20
+ #include "tsl/platform/path.h"
21
+
22
+ namespace tsl {
23
+
24
// FileSystem implementation backed by the local POSIX file API. All methods
// are defined out of line in the corresponding .cc file; this filesystem
// does not support transactions, so every TransactionToken* parameter
// exists only to satisfy the FileSystem interface.
class PosixFileSystem : public FileSystem {
 public:
  PosixFileSystem() {}

  ~PosixFileSystem() override {}

  TF_USE_FILESYSTEM_METHODS_WITH_NO_TRANSACTION_SUPPORT;

  // Opens `filename` for positioned (random-access) reads.
  Status NewRandomAccessFile(
      const string& filename, TransactionToken* token,
      std::unique_ptr<RandomAccessFile>* result) override;

  // Creates (or truncates) `fname` for writing.
  Status NewWritableFile(const string& fname, TransactionToken* token,
                         std::unique_ptr<WritableFile>* result) override;

  // Opens `fname` for appending, creating it if necessary.
  Status NewAppendableFile(const string& fname, TransactionToken* token,
                           std::unique_ptr<WritableFile>* result) override;

  // Maps `filename` into memory as a read-only region.
  Status NewReadOnlyMemoryRegionFromFile(
      const string& filename, TransactionToken* token,
      std::unique_ptr<ReadOnlyMemoryRegion>* result) override;

  Status FileExists(const string& fname, TransactionToken* token) override;

  // Lists the entries of directory `dir` into `*result`.
  Status GetChildren(const string& dir, TransactionToken* token,
                     std::vector<string>* result) override;

  Status Stat(const string& fname, TransactionToken* token,
              FileStatistics* stats) override;

  // Expands a glob-style `pattern` into matching paths.
  Status GetMatchingPaths(const string& pattern, TransactionToken* token,
                          std::vector<string>* results) override;

  Status DeleteFile(const string& fname, TransactionToken* token) override;

  Status CreateDir(const string& name, TransactionToken* token) override;

  Status DeleteDir(const string& name, TransactionToken* token) override;

  Status GetFileSize(const string& fname, TransactionToken* token,
                     uint64* size) override;

  Status RenameFile(const string& src, const string& target,
                    TransactionToken* token) override;

  Status CopyFile(const string& src, const string& target,
                  TransactionToken* token) override;
};
72
+
73
// PosixFileSystem variant whose name translation strips a URI down to its
// path component (scheme and host are parsed out and discarded), so
// URI-style names map onto plain local paths.
// NOTE(review): which URI scheme(s) this class is registered under is
// determined elsewhere — confirm against the registration site.
class LocalPosixFileSystem : public PosixFileSystem {
 public:
  string TranslateName(const string& name) const override {
    StringPiece scheme, host, path;
    io::ParseURI(name, &scheme, &host, &path);
    return string(path);
  }
};
81
+
82
+ } // namespace tsl
83
+
84
+ #endif // TENSORFLOW_TSL_PLATFORM_DEFAULT_POSIX_FILE_SYSTEM_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/stacktrace.h ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_STACKTRACE_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_DEFAULT_STACKTRACE_H_
18
+
19
+ // clang-format off
20
+ #include "tsl/platform/platform.h"
21
+ // clang-format on
22
+
23
+ #if !defined(IS_MOBILE_PLATFORM) && (defined(__clang__) || defined(__GNUC__))
24
+ #define TF_HAS_STACKTRACE
25
+ #endif
26
+
27
+ #if defined(TF_HAS_STACKTRACE)
28
+ #include <dlfcn.h>
29
+ #include <execinfo.h>
30
+ #include <stdio.h>
31
+ #include <string.h>
32
+ #include <unistd.h>
33
+ #endif // defined(TF_GENERATE_BACKTRACE)
34
+
35
+ #include <sstream>
36
+ #include <string>
37
+
38
+ #include "tsl/platform/abi.h"
39
+
40
+ namespace tsl {
41
+
42
+ // Function to create a pretty stacktrace.
43
+ inline std::string CurrentStackTrace() {
44
+ #if defined(TF_HAS_STACKTRACE)
45
+ std::stringstream ss("");
46
+ ss << "*** Begin stack trace ***" << std::endl;
47
+
48
+ // Get the mangled stack trace.
49
+ int buffer_size = 128;
50
+ void* trace[128];
51
+ buffer_size = backtrace(trace, buffer_size);
52
+
53
+ for (int i = 0; i < buffer_size; ++i) {
54
+ const char* symbol = "";
55
+ Dl_info info;
56
+ if (dladdr(trace[i], &info)) {
57
+ if (info.dli_sname != nullptr) {
58
+ symbol = info.dli_sname;
59
+ }
60
+ }
61
+
62
+ std::string demangled = port::MaybeAbiDemangle(symbol);
63
+ if (demangled.length()) {
64
+ ss << "\t" << demangled << std::endl;
65
+ } else {
66
+ ss << "\t" << symbol << std::endl;
67
+ }
68
+ }
69
+
70
+ ss << "*** End stack trace ***" << std::endl;
71
+ return ss.str();
72
+ #else
73
+ return std::string();
74
+ #endif // defined(TF_HAS_STACKTRACE)
75
+ }
76
+
77
+ inline void DebugWriteToString(const char* data, void* arg) {
78
+ reinterpret_cast<std::string*>(arg)->append(data);
79
+ }
80
+
81
+ // A dummy class that does nothing. Someday, add real support.
82
+ class SavedStackTrace {
83
+ public:
84
+ SavedStackTrace() {}
85
+
86
+ void CreateCurrent(int skip_count) {}
87
+
88
+ void Reset() {}
89
+
90
+ typedef void DebugWriter(const char*, void*);
91
+ void Dump(DebugWriter* writerfn, void* arg) const {}
92
+
93
+ int depth() const { return 0; }
94
+ void* const* stack() const { return stack_; }
95
+
96
+ private:
97
+ void* stack_[32];
98
+ };
99
+
100
+ } // namespace tsl
101
+
102
+ #endif // TENSORFLOW_TSL_PLATFORM_DEFAULT_STACKTRACE_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/status.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+ #ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_STATUS_H_
16
+ #define TENSORFLOW_TSL_PLATFORM_DEFAULT_STATUS_H_
17
+
18
+ #define MAYBE_ADD_SOURCE_LOCATION(status) \
19
+ {}
20
+
21
+ #define ADD_SOURCE_LOCATION(status) status
22
+
23
+ #endif // TENSORFLOW_TSL_PLATFORM_DEFAULT_STATUS_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/statusor.h ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+ #ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_STATUSOR_H_
16
+ #define TENSORFLOW_TSL_PLATFORM_DEFAULT_STATUSOR_H_
17
+
18
+ #include "absl/status/statusor.h"
19
+ #include "tsl/platform/macros.h"
20
+ #include "tsl/platform/status.h"
21
+
22
+ #define TF_ASSIGN_OR_RETURN(lhs, rexpr) \
23
+ TF_ASSIGN_OR_RETURN_IMPL( \
24
+ TF_STATUS_MACROS_CONCAT_NAME(_status_or_value, __COUNTER__), lhs, rexpr)
25
+
26
+ #define TF_ASSIGN_OR_RETURN_IMPL(statusor, lhs, rexpr) \
27
+ auto statusor = (rexpr); \
28
+ if (TF_PREDICT_FALSE(!statusor.ok())) { \
29
+ return statusor.status(); \
30
+ } \
31
+ lhs = std::move(statusor).value()
32
+
33
+ #endif // TENSORFLOW_TSL_PLATFORM_DEFAULT_STATUSOR_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/subprocess.h ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_SUBPROCESS_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_DEFAULT_SUBPROCESS_H_
18
+
19
+ #include <errno.h>
20
+ #include <unistd.h>
21
+
22
+ #include <string>
23
+ #include <vector>
24
+
25
+ #include "tsl/platform/macros.h"
26
+ #include "tsl/platform/mutex.h"
27
+ #include "tsl/platform/types.h"
28
+
29
+ namespace tsl {
30
+
31
+ class SubProcess {
32
+ public:
33
+ // SubProcess()
34
+ // nfds: The number of file descriptors to use.
35
+ explicit SubProcess(int nfds = 3);
36
+
37
+ // Virtual for backwards compatibility; do not create new subclasses.
38
+ // It is illegal to delete the SubProcess within its exit callback.
39
+ virtual ~SubProcess();
40
+
41
+ // SetChannelAction()
42
+ // Set how to handle a channel. The default action is ACTION_CLOSE.
43
+ // The action is set for all subsequent processes, until SetChannel()
44
+ // is called again.
45
+ //
46
+ // SetChannel may not be called while the process is running.
47
+ //
48
+ // chan: Which channel this applies to.
49
+ // action: What to do with the channel.
50
+ // Virtual for backwards compatibility; do not create new subclasses.
51
+ virtual void SetChannelAction(Channel chan, ChannelAction action);
52
+
53
+ // SetProgram()
54
+ // Set up a program and argument list for execution, with the full
55
+ // "raw" argument list passed as a vector of strings. argv[0]
56
+ // should be the program name, just as in execv().
57
+ //
58
+ // file: The file containing the program. This must be an absolute path
59
+ // name - $PATH is not searched.
60
+ // argv: The argument list.
61
+ virtual void SetProgram(const string& file, const std::vector<string>& argv);
62
+
63
+ // Start()
64
+ // Run the command that was previously set up with SetProgram().
65
+ // The following are fatal programming errors:
66
+ // * Attempting to start when a process is already running.
67
+ // * Attempting to start without first setting the command.
68
+ // Note, however, that Start() does not try to validate that the binary
69
+ // does anything reasonable (e.g. exists or can execute); as such, you can
70
+ // specify a non-existent binary and Start() will still return true. You
71
+ // will get a failure from the process, but only after Start() returns.
72
+ //
73
+ // Return true normally, or false if the program couldn't be started
74
+ // because of some error.
75
+ // Virtual for backwards compatibility; do not create new subclasses.
76
+ virtual bool Start();
77
+
78
+ // Kill()
79
+ // Send the given signal to the process.
80
+ // Return true normally, or false if we couldn't send the signal - likely
81
+ // because the process doesn't exist.
82
+ virtual bool Kill(int signal);
83
+
84
+ // Wait()
85
+ // Block until the process exits.
86
+ // Return true normally, or false if the process wasn't running.
87
+ virtual bool Wait();
88
+
89
+ // Communicate()
90
+ // Read from stdout and stderr and writes to stdin until all pipes have
91
+ // closed, then waits for the process to exit.
92
+ // Note: Do NOT call Wait() after calling Communicate as it will always
93
+ // fail, since Communicate calls Wait() internally.
94
+ // 'stdin_input', 'stdout_output', and 'stderr_output' may be NULL.
95
+ // If this process is not configured to send stdout or stderr to pipes,
96
+ // the output strings will not be modified.
97
+ // If this process is not configured to take stdin from a pipe, stdin_input
98
+ // will be ignored.
99
+ // Returns the command's exit status.
100
+ virtual int Communicate(const string* stdin_input, string* stdout_output,
101
+ string* stderr_output);
102
+
103
+ private:
104
+ static constexpr int kNFds = 3;
105
+ static bool chan_valid(int chan) { return ((chan >= 0) && (chan < kNFds)); }
106
+ static bool retry(int e) {
107
+ return ((e == EINTR) || (e == EAGAIN) || (e == EWOULDBLOCK));
108
+ }
109
+ void FreeArgs() TF_EXCLUSIVE_LOCKS_REQUIRED(data_mu_);
110
+ void ClosePipes() TF_EXCLUSIVE_LOCKS_REQUIRED(data_mu_);
111
+ bool WaitInternal(int* status);
112
+
113
+ // The separation between proc_mu_ and data_mu_ mutexes allows Kill() to be
114
+ // called by a thread while another thread is inside Wait() or Communicate().
115
+ mutable mutex proc_mu_;
116
+ bool running_ TF_GUARDED_BY(proc_mu_);
117
+ pid_t pid_ TF_GUARDED_BY(proc_mu_);
118
+
119
+ mutable mutex data_mu_ TF_ACQUIRED_AFTER(proc_mu_);
120
+ char* exec_path_ TF_GUARDED_BY(data_mu_);
121
+ char** exec_argv_ TF_GUARDED_BY(data_mu_);
122
+ ChannelAction action_[kNFds] TF_GUARDED_BY(data_mu_);
123
+ int parent_pipe_[kNFds] TF_GUARDED_BY(data_mu_);
124
+ int child_pipe_[kNFds] TF_GUARDED_BY(data_mu_);
125
+
126
+ SubProcess(const SubProcess&) = delete;
127
+ void operator=(const SubProcess&) = delete;
128
+ };
129
+
130
+ } // namespace tsl
131
+
132
+ #endif // TENSORFLOW_TSL_PLATFORM_DEFAULT_SUBPROCESS_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/tracing_impl.h ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_TRACING_IMPL_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_DEFAULT_TRACING_IMPL_H_
18
+
19
+ // Stub implementations of tracing functionality.
20
+
21
+ // Definitions that do nothing for platforms that don't have underlying thread
22
+ // tracing support.
23
+ #define TRACELITERAL(a) \
24
+ do { \
25
+ } while (0)
26
+ #define TRACESTRING(s) \
27
+ do { \
28
+ } while (0)
29
+ #define TRACEPRINTF(format, ...) \
30
+ do { \
31
+ } while (0)
32
+
33
+ namespace tsl {
34
+ namespace tracing {
35
+
36
+ inline bool EventCollector::IsEnabled() { return false; }
37
+
38
+ } // namespace tracing
39
+ } // namespace tsl
40
+
41
+ #endif // TENSORFLOW_TSL_PLATFORM_DEFAULT_TRACING_IMPL_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/default/unbounded_work_queue.h ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+ #ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_UNBOUNDED_WORK_QUEUE_H_
16
+ #define TENSORFLOW_TSL_PLATFORM_DEFAULT_UNBOUNDED_WORK_QUEUE_H_
17
+
18
+ #include <deque>
19
+ #include <memory>
20
+ #include <vector>
21
+
22
+ #include "tsl/platform/env.h"
23
+ #include "tsl/platform/mutex.h"
24
+ #include "tsl/platform/notification.h"
25
+
26
+ namespace tsl {
27
+
28
+ // An `UnboundedWorkQueue` provides a mechanism for temporally multiplexing a
29
+ // potentially large number of "logical" threads onto a smaller number of
30
+ // "physical" threads. The multiplexing is achieved by maintaining an internal
31
+ // pool of long-running "physical" threads that are used to execute the
32
+ // "logical" threads. Like a regular thread, a "logical" thread may block on
33
+ // other threads, and the size of the pool will increase to ensure that progress
34
+ // is made. This mechanism is recommended in situations where short-lived
35
+ // threads are created repeatedly, to avoid the overhead and memory
36
+ // fragmentation that can result from excessive thread creation.
37
+ class UnboundedWorkQueue {
38
+ public:
39
+ UnboundedWorkQueue(Env* env, const string& thread_name,
40
+ const ThreadOptions& thread_options = {});
41
+ ~UnboundedWorkQueue();
42
+
43
+ using WorkFunction = std::function<void()>;
44
+
45
+ // Schedule `fn` on a thread. `fn` may perform blocking work, so if all the
46
+ // existing threads are blocked or busy, this may spawn a new thread which
47
+ // will be added to the thread pool managed by this work queue.
48
+ void Schedule(WorkFunction fn);
49
+
50
+ private:
51
+ void PooledThreadFunc();
52
+
53
+ Env* const env_; // Not owned.
54
+ const string thread_name_;
55
+ const ThreadOptions thread_options_;
56
+ mutex work_queue_mu_;
57
+ condition_variable work_queue_cv_ TF_GUARDED_BY(work_queue_mu_);
58
+ size_t num_idle_threads_ TF_GUARDED_BY(work_queue_mu_) = 0;
59
+ bool cancelled_ TF_GUARDED_BY(work_queue_mu_) = false;
60
+ std::deque<WorkFunction> work_queue_ TF_GUARDED_BY(work_queue_mu_);
61
+ mutex thread_pool_mu_;
62
+ std::vector<std::unique_ptr<Thread>> thread_pool_
63
+ TF_GUARDED_BY(thread_pool_mu_);
64
+ };
65
+
66
+ } // namespace tsl
67
+
68
+ #endif // TENSORFLOW_TSL_PLATFORM_DEFAULT_UNBOUNDED_WORK_QUEUE_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/env_time.h ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+ #ifndef TENSORFLOW_TSL_PLATFORM_ENV_TIME_H_
16
+ #define TENSORFLOW_TSL_PLATFORM_ENV_TIME_H_
17
+
18
+ #include <stdint.h>
19
+
20
+ #include "tsl/platform/types.h"
21
+
22
+ namespace tsl {
23
+
24
+ /// \brief An interface used by the tsl implementation to
25
+ /// access timer related operations.
26
+ class EnvTime {
27
+ public:
28
+ static constexpr uint64 kMicrosToPicos = 1000ULL * 1000ULL;
29
+ static constexpr uint64 kMicrosToNanos = 1000ULL;
30
+ static constexpr uint64 kMillisToMicros = 1000ULL;
31
+ static constexpr uint64 kMillisToNanos = 1000ULL * 1000ULL;
32
+ static constexpr uint64 kNanosToPicos = 1000ULL;
33
+ static constexpr uint64 kSecondsToMillis = 1000ULL;
34
+ static constexpr uint64 kSecondsToMicros = 1000ULL * 1000ULL;
35
+ static constexpr uint64 kSecondsToNanos = 1000ULL * 1000ULL * 1000ULL;
36
+
37
+ EnvTime() = default;
38
+ virtual ~EnvTime() = default;
39
+
40
+ /// \brief Returns the number of nano-seconds since the Unix epoch.
41
+ static uint64 NowNanos();
42
+
43
+ /// \brief Returns the number of micro-seconds since the Unix epoch.
44
+ static uint64 NowMicros() { return NowNanos() / kMicrosToNanos; }
45
+
46
+ /// \brief Returns the number of seconds since the Unix epoch.
47
+ static uint64 NowSeconds() { return NowNanos() / kSecondsToNanos; }
48
+
49
+ /// \brief A version of NowNanos() that may be overridden by a subclass.
50
+ virtual uint64 GetOverridableNowNanos() const { return NowNanos(); }
51
+
52
+ /// \brief A version of NowMicros() that may be overridden by a subclass.
53
+ virtual uint64 GetOverridableNowMicros() const {
54
+ return GetOverridableNowNanos() / kMicrosToNanos;
55
+ }
56
+
57
+ /// \brief A version of NowSeconds() that may be overridden by a subclass.
58
+ virtual uint64 GetOverridableNowSeconds() const {
59
+ return GetOverridableNowNanos() / kSecondsToNanos;
60
+ }
61
+ };
62
+
63
+ } // namespace tsl
64
+
65
+ #endif // TENSORFLOW_TSL_PLATFORM_ENV_TIME_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/intrusive_ptr.h ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+ #ifndef TENSORFLOW_TSL_PLATFORM_INTRUSIVE_PTR_H_
16
+ #define TENSORFLOW_TSL_PLATFORM_INTRUSIVE_PTR_H_
17
+
18
+ #include <algorithm>
19
+ namespace tsl {
20
+ namespace core {
21
+
22
+ // A utility for managing the lifetime of ref-counted objects.
23
+ //
24
+ // Generally used for objects that derive from `tensorflow::RefCounted`.
25
+ template <class T>
26
+ class IntrusivePtr {
27
+ public:
28
+ // add_ref=false indicates that IntrusivePtr owns the underlying pointer.
29
+ //
30
+ // In most cases, we expect this to be called with add_ref=false, except in
31
+ // special circumstances where the lifetime of the underlying RefCounted
32
+ // object needs to be externally managed.
33
+ IntrusivePtr(T* h, bool add_ref) { reset(h, add_ref); }
34
+ IntrusivePtr(const IntrusivePtr& o) { reset(o.handle_, /*add_ref=*/true); }
35
+ IntrusivePtr(IntrusivePtr&& o) { *this = std::move(o); }
36
+ IntrusivePtr() {}
37
+ void reset(T* h, bool add_ref) {
38
+ if (h != handle_) {
39
+ if (add_ref && h) h->Ref();
40
+ if (handle_) handle_->Unref();
41
+ handle_ = h;
42
+ }
43
+ }
44
+ IntrusivePtr& operator=(const IntrusivePtr& o) {
45
+ reset(o.handle_, /*add_ref=*/true);
46
+ return *this;
47
+ }
48
+ IntrusivePtr& operator=(IntrusivePtr&& o) {
49
+ if (handle_ != o.handle_) {
50
+ // Must clear o.handle_ before calling reset to capture the case where
51
+ // handle_->member == o. In this case, calling handle_->Unref first would
52
+ // delete o.handle_ so we clear it out first.
53
+ reset(o.detach(), /*add_ref=*/false);
54
+ }
55
+ return *this;
56
+ }
57
+ bool operator==(const IntrusivePtr& o) const { return handle_ == o.handle_; }
58
+ T* operator->() const { return handle_; }
59
+ T& operator*() const { return *handle_; }
60
+ explicit operator bool() const noexcept { return get(); }
61
+ T* get() const { return handle_; }
62
+ // Releases ownership of the pointer without unreffing. Caller is responsible
63
+ // for calling Unref on the returned pointer.
64
+ T* detach() {
65
+ T* handle = handle_;
66
+ handle_ = nullptr;
67
+ return handle;
68
+ }
69
+
70
+ ~IntrusivePtr() {
71
+ if (handle_) handle_->Unref();
72
+ }
73
+
74
+ private:
75
+ T* handle_ = nullptr;
76
+ };
77
+
78
+ } // namespace core
79
+ } // namespace tsl
80
+
81
+ #endif // TENSORFLOW_TSL_PLATFORM_INTRUSIVE_PTR_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/platform.h ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_PLATFORM_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_PLATFORM_H_
18
+
19
+ // Set one PLATFORM_* macro and set IS_MOBILE_PLATFORM if the platform is for
20
+ // mobile.
21
+
22
+ #if !defined(PLATFORM_POSIX) && !defined(PLATFORM_GOOGLE) && \
23
+ !defined(PLATFORM_POSIX_ANDROID) && !defined(PLATFORM_GOOGLE_ANDROID) && \
24
+ !defined(PLATFORM_WINDOWS)
25
+
26
+ // Choose which platform we are on.
27
+ #if defined(ANDROID) || defined(__ANDROID__)
28
+ #define PLATFORM_POSIX_ANDROID
29
+ #define IS_MOBILE_PLATFORM
30
+
31
+ #elif defined(__APPLE__)
32
+ #include "TargetConditionals.h"
33
+ #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
34
+ #define PLATFORM_POSIX_IOS
35
+ #define IS_MOBILE_PLATFORM
36
+ #else
37
+ // If no platform specified, use:
38
+ #define PLATFORM_POSIX
39
+ #endif
40
+
41
+ #elif defined(_WIN32)
42
+ #define PLATFORM_WINDOWS
43
+
44
+ #elif defined(__EMSCRIPTEN__)
45
+ #define PLATFORM_PORTABLE_GOOGLE
46
+ #define PLATFORM_POSIX
47
+ // EMSCRIPTEN builds are considered "mobile" for the sake of portability.
48
+ #define IS_MOBILE_PLATFORM
49
+
50
+ #elif defined(__TF_CHROMIUMOS__)
51
+ #define PLATFORM_PORTABLE_GOOGLE
52
+ #define PLATFORM_POSIX
53
+ #define PLATFORM_CHROMIUMOS
54
+
55
+ #elif defined(__Fuchsia__)
56
+ #define PLATFORM_FUCHSIA
57
+ // PLATFORM_GOOGLE needs to be defined by default to get the right header
58
+ // files.
59
+ #define PLATFORM_GOOGLE
60
+
61
+ #else
62
+ // If no platform specified, use:
63
+ #define PLATFORM_POSIX
64
+
65
+ #endif
66
+ #endif
67
+
68
+ // Look for both gcc/clang and Visual Studio macros indicating we're compiling
69
+ // for an x86 device.
70
+ #if defined(__x86_64__) || defined(__amd64__) || defined(_M_IX86) || \
71
+ defined(_M_X64)
72
+ #define PLATFORM_IS_X86
73
+ #endif
74
+
75
+ // Check if we are compmiling for an arm device.
76
+ #if defined(__arm__) || defined(__aarch64__)
77
+ #define PLATFORM_IS_ARM
78
+ #if defined(__aarch64__)
79
+ #define PLATFORM_IS_ARM64
80
+ #else
81
+ #define PLATFORM_IS_ARM32
82
+ #endif
83
+ #endif
84
+
85
+ #define TSL_IS_IN_OSS 1
86
+
87
+ #endif // TENSORFLOW_TSL_PLATFORM_PLATFORM_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/platform_strings_computed.h ADDED
@@ -0,0 +1,735 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+ // Generated from platform_strings.h.
16
+
17
+ #ifndef TENSORFLOW_TSL_PLATFORM_PLATFORM_STRINGS_COMPUTED_H_
18
+ #define TENSORFLOW_TSL_PLATFORM_PLATFORM_STRINGS_COMPUTED_H_
19
+
20
+ #if defined(_M_IX86_FP)
21
+ #define TF_PLAT_STR__M_IX86_FP TF_PLAT_STR_(_M_IX86_FP)
22
+ #else
23
+ #define TF_PLAT_STR__M_IX86_FP
24
+ #endif
25
+ #if defined(_NO_PREFETCHW)
26
+ #define TF_PLAT_STR__NO_PREFETCHW TF_PLAT_STR_(_NO_PREFETCHW)
27
+ #else
28
+ #define TF_PLAT_STR__NO_PREFETCHW
29
+ #endif
30
+ #if defined(__3dNOW_A__)
31
+ #define TF_PLAT_STR___3dNOW_A__ TF_PLAT_STR_(__3dNOW_A__)
32
+ #else
33
+ #define TF_PLAT_STR___3dNOW_A__
34
+ #endif
35
+ #if defined(__3dNOW__)
36
+ #define TF_PLAT_STR___3dNOW__ TF_PLAT_STR_(__3dNOW__)
37
+ #else
38
+ #define TF_PLAT_STR___3dNOW__
39
+ #endif
40
+ #if defined(__ABM__)
41
+ #define TF_PLAT_STR___ABM__ TF_PLAT_STR_(__ABM__)
42
+ #else
43
+ #define TF_PLAT_STR___ABM__
44
+ #endif
45
+ #if defined(__ADX__)
46
+ #define TF_PLAT_STR___ADX__ TF_PLAT_STR_(__ADX__)
47
+ #else
48
+ #define TF_PLAT_STR___ADX__
49
+ #endif
50
+ #if defined(__AES__)
51
+ #define TF_PLAT_STR___AES__ TF_PLAT_STR_(__AES__)
52
+ #else
53
+ #define TF_PLAT_STR___AES__
54
+ #endif
55
+ #if defined(__AVX2__)
56
+ #define TF_PLAT_STR___AVX2__ TF_PLAT_STR_(__AVX2__)
57
+ #else
58
+ #define TF_PLAT_STR___AVX2__
59
+ #endif
60
+ #if defined(__AVX512BW__)
61
+ #define TF_PLAT_STR___AVX512BW__ TF_PLAT_STR_(__AVX512BW__)
62
+ #else
63
+ #define TF_PLAT_STR___AVX512BW__
64
+ #endif
65
+ #if defined(__AVX512CD__)
66
+ #define TF_PLAT_STR___AVX512CD__ TF_PLAT_STR_(__AVX512CD__)
67
+ #else
68
+ #define TF_PLAT_STR___AVX512CD__
69
+ #endif
70
+ #if defined(__AVX512DQ__)
71
+ #define TF_PLAT_STR___AVX512DQ__ TF_PLAT_STR_(__AVX512DQ__)
72
+ #else
73
+ #define TF_PLAT_STR___AVX512DQ__
74
+ #endif
75
+ #if defined(__AVX512ER__)
76
+ #define TF_PLAT_STR___AVX512ER__ TF_PLAT_STR_(__AVX512ER__)
77
+ #else
78
+ #define TF_PLAT_STR___AVX512ER__
79
+ #endif
80
+ #if defined(__AVX512F__)
81
+ #define TF_PLAT_STR___AVX512F__ TF_PLAT_STR_(__AVX512F__)
82
+ #else
83
+ #define TF_PLAT_STR___AVX512F__
84
+ #endif
85
+ #if defined(__AVX512IFMA__)
86
+ #define TF_PLAT_STR___AVX512IFMA__ TF_PLAT_STR_(__AVX512IFMA__)
87
+ #else
88
+ #define TF_PLAT_STR___AVX512IFMA__
89
+ #endif
90
+ #if defined(__AVX512PF__)
91
+ #define TF_PLAT_STR___AVX512PF__ TF_PLAT_STR_(__AVX512PF__)
92
+ #else
93
+ #define TF_PLAT_STR___AVX512PF__
94
+ #endif
95
+ #if defined(__AVX512VBMI__)
96
+ #define TF_PLAT_STR___AVX512VBMI__ TF_PLAT_STR_(__AVX512VBMI__)
97
+ #else
98
+ #define TF_PLAT_STR___AVX512VBMI__
99
+ #endif
100
+ #if defined(__AVX512VL__)
101
+ #define TF_PLAT_STR___AVX512VL__ TF_PLAT_STR_(__AVX512VL__)
102
+ #else
103
+ #define TF_PLAT_STR___AVX512VL__
104
+ #endif
105
+ #if defined(__AVX__)
106
+ #define TF_PLAT_STR___AVX__ TF_PLAT_STR_(__AVX__)
107
+ #else
108
+ #define TF_PLAT_STR___AVX__
109
+ #endif
110
+ #if defined(__BMI2__)
111
+ #define TF_PLAT_STR___BMI2__ TF_PLAT_STR_(__BMI2__)
112
+ #else
113
+ #define TF_PLAT_STR___BMI2__
114
+ #endif
115
+ #if defined(__BMI__)
116
+ #define TF_PLAT_STR___BMI__ TF_PLAT_STR_(__BMI__)
117
+ #else
118
+ #define TF_PLAT_STR___BMI__
119
+ #endif
120
+ #if defined(__CLFLUSHOPT__)
121
+ #define TF_PLAT_STR___CLFLUSHOPT__ TF_PLAT_STR_(__CLFLUSHOPT__)
122
+ #else
123
+ #define TF_PLAT_STR___CLFLUSHOPT__
124
+ #endif
125
+ #if defined(__CLZERO__)
126
+ #define TF_PLAT_STR___CLZERO__ TF_PLAT_STR_(__CLZERO__)
127
+ #else
128
+ #define TF_PLAT_STR___CLZERO__
129
+ #endif
130
+ #if defined(__F16C__)
131
+ #define TF_PLAT_STR___F16C__ TF_PLAT_STR_(__F16C__)
132
+ #else
133
+ #define TF_PLAT_STR___F16C__
134
+ #endif
135
+ #if defined(__FMA4__)
136
+ #define TF_PLAT_STR___FMA4__ TF_PLAT_STR_(__FMA4__)
137
+ #else
138
+ #define TF_PLAT_STR___FMA4__
139
+ #endif
140
+ #if defined(__FMA__)
141
+ #define TF_PLAT_STR___FMA__ TF_PLAT_STR_(__FMA__)
142
+ #else
143
+ #define TF_PLAT_STR___FMA__
144
+ #endif
145
+ #if defined(__FP_FAST_FMA)
146
+ #define TF_PLAT_STR___FP_FAST_FMA TF_PLAT_STR_(__FP_FAST_FMA)
147
+ #else
148
+ #define TF_PLAT_STR___FP_FAST_FMA
149
+ #endif
150
+ #if defined(__FP_FAST_FMAF)
151
+ #define TF_PLAT_STR___FP_FAST_FMAF TF_PLAT_STR_(__FP_FAST_FMAF)
152
+ #else
153
+ #define TF_PLAT_STR___FP_FAST_FMAF
154
+ #endif
155
+ #if defined(__FSGSBASE__)
156
+ #define TF_PLAT_STR___FSGSBASE__ TF_PLAT_STR_(__FSGSBASE__)
157
+ #else
158
+ #define TF_PLAT_STR___FSGSBASE__
159
+ #endif
160
+ #if defined(__FXSR__)
161
+ #define TF_PLAT_STR___FXSR__ TF_PLAT_STR_(__FXSR__)
162
+ #else
163
+ #define TF_PLAT_STR___FXSR__
164
+ #endif
165
+ #if defined(__LWP__)
166
+ #define TF_PLAT_STR___LWP__ TF_PLAT_STR_(__LWP__)
167
+ #else
168
+ #define TF_PLAT_STR___LWP__
169
+ #endif
170
+ #if defined(__LZCNT__)
171
+ #define TF_PLAT_STR___LZCNT__ TF_PLAT_STR_(__LZCNT__)
172
+ #else
173
+ #define TF_PLAT_STR___LZCNT__
174
+ #endif
175
+ #if defined(__MMX__)
176
+ #define TF_PLAT_STR___MMX__ TF_PLAT_STR_(__MMX__)
177
+ #else
178
+ #define TF_PLAT_STR___MMX__
179
+ #endif
180
+ #if defined(__MWAITX__)
181
+ #define TF_PLAT_STR___MWAITX__ TF_PLAT_STR_(__MWAITX__)
182
+ #else
183
+ #define TF_PLAT_STR___MWAITX__
184
+ #endif
185
+ #if defined(__PCLMUL__)
186
+ #define TF_PLAT_STR___PCLMUL__ TF_PLAT_STR_(__PCLMUL__)
187
+ #else
188
+ #define TF_PLAT_STR___PCLMUL__
189
+ #endif
190
+ #if defined(__PKU__)
191
+ #define TF_PLAT_STR___PKU__ TF_PLAT_STR_(__PKU__)
192
+ #else
193
+ #define TF_PLAT_STR___PKU__
194
+ #endif
195
+ #if defined(__POPCNT__)
196
+ #define TF_PLAT_STR___POPCNT__ TF_PLAT_STR_(__POPCNT__)
197
+ #else
198
+ #define TF_PLAT_STR___POPCNT__
199
+ #endif
200
+ #if defined(__PRFCHW__)
201
+ #define TF_PLAT_STR___PRFCHW__ TF_PLAT_STR_(__PRFCHW__)
202
+ #else
203
+ #define TF_PLAT_STR___PRFCHW__
204
+ #endif
205
+ #if defined(__RDRND__)
206
+ #define TF_PLAT_STR___RDRND__ TF_PLAT_STR_(__RDRND__)
207
+ #else
208
+ #define TF_PLAT_STR___RDRND__
209
+ #endif
210
+ #if defined(__RDSEED__)
211
+ #define TF_PLAT_STR___RDSEED__ TF_PLAT_STR_(__RDSEED__)
212
+ #else
213
+ #define TF_PLAT_STR___RDSEED__
214
+ #endif
215
+ #if defined(__RTM__)
216
+ #define TF_PLAT_STR___RTM__ TF_PLAT_STR_(__RTM__)
217
+ #else
218
+ #define TF_PLAT_STR___RTM__
219
+ #endif
220
+ #if defined(__SHA__)
221
+ #define TF_PLAT_STR___SHA__ TF_PLAT_STR_(__SHA__)
222
+ #else
223
+ #define TF_PLAT_STR___SHA__
224
+ #endif
225
+ #if defined(__SSE2_MATH__)
226
+ #define TF_PLAT_STR___SSE2_MATH__ TF_PLAT_STR_(__SSE2_MATH__)
227
+ #else
228
+ #define TF_PLAT_STR___SSE2_MATH__
229
+ #endif
230
+ #if defined(__SSE2__)
231
+ #define TF_PLAT_STR___SSE2__ TF_PLAT_STR_(__SSE2__)
232
+ #else
233
+ #define TF_PLAT_STR___SSE2__
234
+ #endif
235
+ #if defined(__SSE_MATH__)
236
+ #define TF_PLAT_STR___SSE_MATH__ TF_PLAT_STR_(__SSE_MATH__)
237
+ #else
238
+ #define TF_PLAT_STR___SSE_MATH__
239
+ #endif
240
+ #if defined(__SSE__)
241
+ #define TF_PLAT_STR___SSE__ TF_PLAT_STR_(__SSE__)
242
+ #else
243
+ #define TF_PLAT_STR___SSE__
244
+ #endif
245
+ #if defined(__SSE3__)
246
+ #define TF_PLAT_STR___SSE3__ TF_PLAT_STR_(__SSE3__)
247
+ #else
248
+ #define TF_PLAT_STR___SSE3__
249
+ #endif
250
+ #if defined(__SSE4A__)
251
+ #define TF_PLAT_STR___SSE4A__ TF_PLAT_STR_(__SSE4A__)
252
+ #else
253
+ #define TF_PLAT_STR___SSE4A__
254
+ #endif
255
+ #if defined(__SSE4_1__)
256
+ #define TF_PLAT_STR___SSE4_1__ TF_PLAT_STR_(__SSE4_1__)
257
+ #else
258
+ #define TF_PLAT_STR___SSE4_1__
259
+ #endif
260
+ #if defined(__SSE4_2__)
261
+ #define TF_PLAT_STR___SSE4_2__ TF_PLAT_STR_(__SSE4_2__)
262
+ #else
263
+ #define TF_PLAT_STR___SSE4_2__
264
+ #endif
265
+ #if defined(__SSSE3__)
266
+ #define TF_PLAT_STR___SSSE3__ TF_PLAT_STR_(__SSSE3__)
267
+ #else
268
+ #define TF_PLAT_STR___SSSE3__
269
+ #endif
270
+ #if defined(__TBM__)
271
+ #define TF_PLAT_STR___TBM__ TF_PLAT_STR_(__TBM__)
272
+ #else
273
+ #define TF_PLAT_STR___TBM__
274
+ #endif
275
+ #if defined(__XOP__)
276
+ #define TF_PLAT_STR___XOP__ TF_PLAT_STR_(__XOP__)
277
+ #else
278
+ #define TF_PLAT_STR___XOP__
279
+ #endif
280
+ #if defined(__XSAVEC__)
281
+ #define TF_PLAT_STR___XSAVEC__ TF_PLAT_STR_(__XSAVEC__)
282
+ #else
283
+ #define TF_PLAT_STR___XSAVEC__
284
+ #endif
285
+ #if defined(__XSAVEOPT__)
286
+ #define TF_PLAT_STR___XSAVEOPT__ TF_PLAT_STR_(__XSAVEOPT__)
287
+ #else
288
+ #define TF_PLAT_STR___XSAVEOPT__
289
+ #endif
290
+ #if defined(__XSAVES__)
291
+ #define TF_PLAT_STR___XSAVES__ TF_PLAT_STR_(__XSAVES__)
292
+ #else
293
+ #define TF_PLAT_STR___XSAVES__
294
+ #endif
295
+ #if defined(__XSAVE__)
296
+ #define TF_PLAT_STR___XSAVE__ TF_PLAT_STR_(__XSAVE__)
297
+ #else
298
+ #define TF_PLAT_STR___XSAVE__
299
+ #endif
300
+ #if defined(_SOFT_DOUBLE)
301
+ #define TF_PLAT_STR__SOFT_DOUBLE TF_PLAT_STR_(_SOFT_DOUBLE)
302
+ #else
303
+ #define TF_PLAT_STR__SOFT_DOUBLE
304
+ #endif
305
+ #if defined(_SOFT_FLOAT)
306
+ #define TF_PLAT_STR__SOFT_FLOAT TF_PLAT_STR_(_SOFT_FLOAT)
307
+ #else
308
+ #define TF_PLAT_STR__SOFT_FLOAT
309
+ #endif
310
+ #if defined(__ALTIVEC__)
311
+ #define TF_PLAT_STR___ALTIVEC__ TF_PLAT_STR_(__ALTIVEC__)
312
+ #else
313
+ #define TF_PLAT_STR___ALTIVEC__
314
+ #endif
315
+ #if defined(__APPLE_ALTIVEC__)
316
+ #define TF_PLAT_STR___APPLE_ALTIVEC__ TF_PLAT_STR_(__APPLE_ALTIVEC__)
317
+ #else
318
+ #define TF_PLAT_STR___APPLE_ALTIVEC__
319
+ #endif
320
+ #if defined(__CRYPTO__)
321
+ #define TF_PLAT_STR___CRYPTO__ TF_PLAT_STR_(__CRYPTO__)
322
+ #else
323
+ #define TF_PLAT_STR___CRYPTO__
324
+ #endif
325
+ #if defined(__FLOAT128_HARDWARE__)
326
+ #define TF_PLAT_STR___FLOAT128_HARDWARE__ TF_PLAT_STR_(__FLOAT128_HARDWARE__)
327
+ #else
328
+ #define TF_PLAT_STR___FLOAT128_HARDWARE__
329
+ #endif
330
+ #if defined(__FLOAT128_TYPE__)
331
+ #define TF_PLAT_STR___FLOAT128_TYPE__ TF_PLAT_STR_(__FLOAT128_TYPE__)
332
+ #else
333
+ #define TF_PLAT_STR___FLOAT128_TYPE__
334
+ #endif
335
+ #if defined(__FP_FAST_FMA)
336
+ #define TF_PLAT_STR___FP_FAST_FMA TF_PLAT_STR_(__FP_FAST_FMA)
337
+ #else
338
+ #define TF_PLAT_STR___FP_FAST_FMA
339
+ #endif
340
+ #if defined(__FP_FAST_FMAF)
341
+ #define TF_PLAT_STR___FP_FAST_FMAF TF_PLAT_STR_(__FP_FAST_FMAF)
342
+ #else
343
+ #define TF_PLAT_STR___FP_FAST_FMAF
344
+ #endif
345
+ #if defined(__HTM__)
346
+ #define TF_PLAT_STR___HTM__ TF_PLAT_STR_(__HTM__)
347
+ #else
348
+ #define TF_PLAT_STR___HTM__
349
+ #endif
350
+ #if defined(__NO_FPRS__)
351
+ #define TF_PLAT_STR___NO_FPRS__ TF_PLAT_STR_(__NO_FPRS__)
352
+ #else
353
+ #define TF_PLAT_STR___NO_FPRS__
354
+ #endif
355
+ #if defined(__NO_LWSYNC__)
356
+ #define TF_PLAT_STR___NO_LWSYNC__ TF_PLAT_STR_(__NO_LWSYNC__)
357
+ #else
358
+ #define TF_PLAT_STR___NO_LWSYNC__
359
+ #endif
360
+ #if defined(__POWER8_VECTOR__)
361
+ #define TF_PLAT_STR___POWER8_VECTOR__ TF_PLAT_STR_(__POWER8_VECTOR__)
362
+ #else
363
+ #define TF_PLAT_STR___POWER8_VECTOR__
364
+ #endif
365
+ #if defined(__POWER9_VECTOR__)
366
+ #define TF_PLAT_STR___POWER9_VECTOR__ TF_PLAT_STR_(__POWER9_VECTOR__)
367
+ #else
368
+ #define TF_PLAT_STR___POWER9_VECTOR__
369
+ #endif
370
+ #if defined(__PPC405__)
371
+ #define TF_PLAT_STR___PPC405__ TF_PLAT_STR_(__PPC405__)
372
+ #else
373
+ #define TF_PLAT_STR___PPC405__
374
+ #endif
375
+ #if defined(__QUAD_MEMORY_ATOMIC__)
376
+ #define TF_PLAT_STR___QUAD_MEMORY_ATOMIC__ TF_PLAT_STR_(__QUAD_MEMORY_ATOMIC__)
377
+ #else
378
+ #define TF_PLAT_STR___QUAD_MEMORY_ATOMIC__
379
+ #endif
380
+ #if defined(__RECIPF__)
381
+ #define TF_PLAT_STR___RECIPF__ TF_PLAT_STR_(__RECIPF__)
382
+ #else
383
+ #define TF_PLAT_STR___RECIPF__
384
+ #endif
385
+ #if defined(__RECIP_PRECISION__)
386
+ #define TF_PLAT_STR___RECIP_PRECISION__ TF_PLAT_STR_(__RECIP_PRECISION__)
387
+ #else
388
+ #define TF_PLAT_STR___RECIP_PRECISION__
389
+ #endif
390
+ #if defined(__RECIP__)
391
+ #define TF_PLAT_STR___RECIP__ TF_PLAT_STR_(__RECIP__)
392
+ #else
393
+ #define TF_PLAT_STR___RECIP__
394
+ #endif
395
+ #if defined(__RSQRTEF__)
396
+ #define TF_PLAT_STR___RSQRTEF__ TF_PLAT_STR_(__RSQRTEF__)
397
+ #else
398
+ #define TF_PLAT_STR___RSQRTEF__
399
+ #endif
400
+ #if defined(__RSQRTE__)
401
+ #define TF_PLAT_STR___RSQRTE__ TF_PLAT_STR_(__RSQRTE__)
402
+ #else
403
+ #define TF_PLAT_STR___RSQRTE__
404
+ #endif
405
+ #if defined(__TM_FENCE__)
406
+ #define TF_PLAT_STR___TM_FENCE__ TF_PLAT_STR_(__TM_FENCE__)
407
+ #else
408
+ #define TF_PLAT_STR___TM_FENCE__
409
+ #endif
410
+ #if defined(__UPPER_REGS_DF__)
411
+ #define TF_PLAT_STR___UPPER_REGS_DF__ TF_PLAT_STR_(__UPPER_REGS_DF__)
412
+ #else
413
+ #define TF_PLAT_STR___UPPER_REGS_DF__
414
+ #endif
415
+ #if defined(__UPPER_REGS_SF__)
416
+ #define TF_PLAT_STR___UPPER_REGS_SF__ TF_PLAT_STR_(__UPPER_REGS_SF__)
417
+ #else
418
+ #define TF_PLAT_STR___UPPER_REGS_SF__
419
+ #endif
420
+ #if defined(__VEC__)
421
+ #define TF_PLAT_STR___VEC__ TF_PLAT_STR_(__VEC__)
422
+ #else
423
+ #define TF_PLAT_STR___VEC__
424
+ #endif
425
+ #if defined(__VSX__)
426
+ #define TF_PLAT_STR___VSX__ TF_PLAT_STR_(__VSX__)
427
+ #else
428
+ #define TF_PLAT_STR___VSX__
429
+ #endif
430
+ #if defined(__ARM_ARCH)
431
+ #define TF_PLAT_STR___ARM_ARCH TF_PLAT_STR_(__ARM_ARCH)
432
+ #else
433
+ #define TF_PLAT_STR___ARM_ARCH
434
+ #endif
435
+ #if defined(__ARM_FEATURE_CLZ)
436
+ #define TF_PLAT_STR___ARM_FEATURE_CLZ TF_PLAT_STR_(__ARM_FEATURE_CLZ)
437
+ #else
438
+ #define TF_PLAT_STR___ARM_FEATURE_CLZ
439
+ #endif
440
+ #if defined(__ARM_FEATURE_CRC32)
441
+ #define TF_PLAT_STR___ARM_FEATURE_CRC32 TF_PLAT_STR_(__ARM_FEATURE_CRC32)
442
+ #else
443
+ #define TF_PLAT_STR___ARM_FEATURE_CRC32
444
+ #endif
445
+ #if defined(__ARM_FEATURE_CRC32)
446
+ #define TF_PLAT_STR___ARM_FEATURE_CRC32 TF_PLAT_STR_(__ARM_FEATURE_CRC32)
447
+ #else
448
+ #define TF_PLAT_STR___ARM_FEATURE_CRC32
449
+ #endif
450
+ #if defined(__ARM_FEATURE_CRYPTO)
451
+ #define TF_PLAT_STR___ARM_FEATURE_CRYPTO TF_PLAT_STR_(__ARM_FEATURE_CRYPTO)
452
+ #else
453
+ #define TF_PLAT_STR___ARM_FEATURE_CRYPTO
454
+ #endif
455
+ #if defined(__ARM_FEATURE_DIRECTED_ROUNDING)
456
+ #define TF_PLAT_STR___ARM_FEATURE_DIRECTED_ROUNDING \
457
+ TF_PLAT_STR_(__ARM_FEATURE_DIRECTED_ROUNDING)
458
+ #else
459
+ #define TF_PLAT_STR___ARM_FEATURE_DIRECTED_ROUNDING
460
+ #endif
461
+ #if defined(__ARM_FEATURE_DSP)
462
+ #define TF_PLAT_STR___ARM_FEATURE_DSP TF_PLAT_STR_(__ARM_FEATURE_DSP)
463
+ #else
464
+ #define TF_PLAT_STR___ARM_FEATURE_DSP
465
+ #endif
466
+ #if defined(__ARM_FEATURE_FMA)
467
+ #define TF_PLAT_STR___ARM_FEATURE_FMA TF_PLAT_STR_(__ARM_FEATURE_FMA)
468
+ #else
469
+ #define TF_PLAT_STR___ARM_FEATURE_FMA
470
+ #endif
471
+ #if defined(__ARM_FEATURE_IDIV)
472
+ #define TF_PLAT_STR___ARM_FEATURE_IDIV TF_PLAT_STR_(__ARM_FEATURE_IDIV)
473
+ #else
474
+ #define TF_PLAT_STR___ARM_FEATURE_IDIV
475
+ #endif
476
+ #if defined(__ARM_FEATURE_LDREX)
477
+ #define TF_PLAT_STR___ARM_FEATURE_LDREX TF_PLAT_STR_(__ARM_FEATURE_LDREX)
478
+ #else
479
+ #define TF_PLAT_STR___ARM_FEATURE_LDREX
480
+ #endif
481
+ #if defined(__ARM_FEATURE_NUMERIC_MAXMIN)
482
+ #define TF_PLAT_STR___ARM_FEATURE_NUMERIC_MAXMIN \
483
+ TF_PLAT_STR_(__ARM_FEATURE_NUMERIC_MAXMIN)
484
+ #else
485
+ #define TF_PLAT_STR___ARM_FEATURE_NUMERIC_MAXMIN
486
+ #endif
487
+ #if defined(__ARM_FEATURE_QBIT)
488
+ #define TF_PLAT_STR___ARM_FEATURE_QBIT TF_PLAT_STR_(__ARM_FEATURE_QBIT)
489
+ #else
490
+ #define TF_PLAT_STR___ARM_FEATURE_QBIT
491
+ #endif
492
+ #if defined(__ARM_FEATURE_QRDMX)
493
+ #define TF_PLAT_STR___ARM_FEATURE_QRDMX TF_PLAT_STR_(__ARM_FEATURE_QRDMX)
494
+ #else
495
+ #define TF_PLAT_STR___ARM_FEATURE_QRDMX
496
+ #endif
497
+ #if defined(__ARM_FEATURE_SAT)
498
+ #define TF_PLAT_STR___ARM_FEATURE_SAT TF_PLAT_STR_(__ARM_FEATURE_SAT)
499
+ #else
500
+ #define TF_PLAT_STR___ARM_FEATURE_SAT
501
+ #endif
502
+ #if defined(__ARM_FEATURE_SIMD32)
503
+ #define TF_PLAT_STR___ARM_FEATURE_SIMD32 TF_PLAT_STR_(__ARM_FEATURE_SIMD32)
504
+ #else
505
+ #define TF_PLAT_STR___ARM_FEATURE_SIMD32
506
+ #endif
507
+ #if defined(__ARM_FEATURE_UNALIGNED)
508
+ #define TF_PLAT_STR___ARM_FEATURE_UNALIGNED \
509
+ TF_PLAT_STR_(__ARM_FEATURE_UNALIGNED)
510
+ #else
511
+ #define TF_PLAT_STR___ARM_FEATURE_UNALIGNED
512
+ #endif
513
+ #if defined(__ARM_FP)
514
+ #define TF_PLAT_STR___ARM_FP TF_PLAT_STR_(__ARM_FP)
515
+ #else
516
+ #define TF_PLAT_STR___ARM_FP
517
+ #endif
518
+ #if defined(__ARM_NEON_FP)
519
+ #define TF_PLAT_STR___ARM_NEON_FP TF_PLAT_STR_(__ARM_NEON_FP)
520
+ #else
521
+ #define TF_PLAT_STR___ARM_NEON_FP
522
+ #endif
523
+ #if defined(__ARM_NEON__)
524
+ #define TF_PLAT_STR___ARM_NEON__ TF_PLAT_STR_(__ARM_NEON__)
525
+ #else
526
+ #define TF_PLAT_STR___ARM_NEON__
527
+ #endif
528
+ #if defined(__ARM_WMMX)
529
+ #define TF_PLAT_STR___ARM_WMMX TF_PLAT_STR_(__ARM_WMMX)
530
+ #else
531
+ #define TF_PLAT_STR___ARM_WMMX
532
+ #endif
533
+ #if defined(__IWMMXT2__)
534
+ #define TF_PLAT_STR___IWMMXT2__ TF_PLAT_STR_(__IWMMXT2__)
535
+ #else
536
+ #define TF_PLAT_STR___IWMMXT2__
537
+ #endif
538
+ #if defined(__IWMMXT__)
539
+ #define TF_PLAT_STR___IWMMXT__ TF_PLAT_STR_(__IWMMXT__)
540
+ #else
541
+ #define TF_PLAT_STR___IWMMXT__
542
+ #endif
543
+ #if defined(__VFP_FP__)
544
+ #define TF_PLAT_STR___VFP_FP__ TF_PLAT_STR_(__VFP_FP__)
545
+ #else
546
+ #define TF_PLAT_STR___VFP_FP__
547
+ #endif
548
+ #if defined(TARGET_IPHONE_SIMULATOR)
549
+ #define TF_PLAT_STR_TARGET_IPHONE_SIMULATOR \
550
+ TF_PLAT_STR_(TARGET_IPHONE_SIMULATOR)
551
+ #else
552
+ #define TF_PLAT_STR_TARGET_IPHONE_SIMULATOR
553
+ #endif
554
+ #if defined(TARGET_OS_IOS)
555
+ #define TF_PLAT_STR_TARGET_OS_IOS TF_PLAT_STR_(TARGET_OS_IOS)
556
+ #else
557
+ #define TF_PLAT_STR_TARGET_OS_IOS
558
+ #endif
559
+ #if defined(TARGET_OS_IPHONE)
560
+ #define TF_PLAT_STR_TARGET_OS_IPHONE TF_PLAT_STR_(TARGET_OS_IPHONE)
561
+ #else
562
+ #define TF_PLAT_STR_TARGET_OS_IPHONE
563
+ #endif
564
+ #if defined(_MSC_VER)
565
+ #define TF_PLAT_STR__MSC_VER TF_PLAT_STR_(_MSC_VER)
566
+ #else
567
+ #define TF_PLAT_STR__MSC_VER
568
+ #endif
569
+ #if defined(_M_ARM)
570
+ #define TF_PLAT_STR__M_ARM TF_PLAT_STR_(_M_ARM)
571
+ #else
572
+ #define TF_PLAT_STR__M_ARM
573
+ #endif
574
+ #if defined(_M_ARM64)
575
+ #define TF_PLAT_STR__M_ARM64 TF_PLAT_STR_(_M_ARM64)
576
+ #else
577
+ #define TF_PLAT_STR__M_ARM64
578
+ #endif
579
+ #if defined(_M_ARM_ARMV7VE)
580
+ #define TF_PLAT_STR__M_ARM_ARMV7VE TF_PLAT_STR_(_M_ARM_ARMV7VE)
581
+ #else
582
+ #define TF_PLAT_STR__M_ARM_ARMV7VE
583
+ #endif
584
+ #if defined(_M_ARM_FP)
585
+ #define TF_PLAT_STR__M_ARM_FP TF_PLAT_STR_(_M_ARM_FP)
586
+ #else
587
+ #define TF_PLAT_STR__M_ARM_FP
588
+ #endif
589
+ #if defined(_M_IX86)
590
+ #define TF_PLAT_STR__M_IX86 TF_PLAT_STR_(_M_IX86)
591
+ #else
592
+ #define TF_PLAT_STR__M_IX86
593
+ #endif
594
+ #if defined(_M_X64)
595
+ #define TF_PLAT_STR__M_X64 TF_PLAT_STR_(_M_X64)
596
+ #else
597
+ #define TF_PLAT_STR__M_X64
598
+ #endif
599
+ #if defined(_WIN32)
600
+ #define TF_PLAT_STR__WIN32 TF_PLAT_STR_(_WIN32)
601
+ #else
602
+ #define TF_PLAT_STR__WIN32
603
+ #endif
604
+ #if defined(_WIN64)
605
+ #define TF_PLAT_STR__WIN64 TF_PLAT_STR_(_WIN64)
606
+ #else
607
+ #define TF_PLAT_STR__WIN64
608
+ #endif
609
+ #if defined(__ANDROID__)
610
+ #define TF_PLAT_STR___ANDROID__ TF_PLAT_STR_(__ANDROID__)
611
+ #else
612
+ #define TF_PLAT_STR___ANDROID__
613
+ #endif
614
+ #if defined(__APPLE__)
615
+ #define TF_PLAT_STR___APPLE__ TF_PLAT_STR_(__APPLE__)
616
+ #else
617
+ #define TF_PLAT_STR___APPLE__
618
+ #endif
619
+ #if defined(__BYTE_ORDER__)
620
+ #define TF_PLAT_STR___BYTE_ORDER__ TF_PLAT_STR_(__BYTE_ORDER__)
621
+ #else
622
+ #define TF_PLAT_STR___BYTE_ORDER__
623
+ #endif
624
+ #if defined(__CYGWIN__)
625
+ #define TF_PLAT_STR___CYGWIN__ TF_PLAT_STR_(__CYGWIN__)
626
+ #else
627
+ #define TF_PLAT_STR___CYGWIN__
628
+ #endif
629
+ #if defined(__FreeBSD__)
630
+ #define TF_PLAT_STR___FreeBSD__ TF_PLAT_STR_(__FreeBSD__)
631
+ #else
632
+ #define TF_PLAT_STR___FreeBSD__
633
+ #endif
634
+ #if defined(__LITTLE_ENDIAN__)
635
+ #define TF_PLAT_STR___LITTLE_ENDIAN__ TF_PLAT_STR_(__LITTLE_ENDIAN__)
636
+ #else
637
+ #define TF_PLAT_STR___LITTLE_ENDIAN__
638
+ #endif
639
+ #if defined(__NetBSD__)
640
+ #define TF_PLAT_STR___NetBSD__ TF_PLAT_STR_(__NetBSD__)
641
+ #else
642
+ #define TF_PLAT_STR___NetBSD__
643
+ #endif
644
+ #if defined(__OpenBSD__)
645
+ #define TF_PLAT_STR___OpenBSD__ TF_PLAT_STR_(__OpenBSD__)
646
+ #else
647
+ #define TF_PLAT_STR___OpenBSD__
648
+ #endif
649
+ #if defined(____MSYS__)
650
+ #define TF_PLAT_STR_____MSYS__ TF_PLAT_STR_(____MSYS__)
651
+ #else
652
+ #define TF_PLAT_STR_____MSYS__
653
+ #endif
654
+ #if defined(__aarch64__)
655
+ #define TF_PLAT_STR___aarch64__ TF_PLAT_STR_(__aarch64__)
656
+ #else
657
+ #define TF_PLAT_STR___aarch64__
658
+ #endif
659
+ #if defined(__alpha__)
660
+ #define TF_PLAT_STR___alpha__ TF_PLAT_STR_(__alpha__)
661
+ #else
662
+ #define TF_PLAT_STR___alpha__
663
+ #endif
664
+ #if defined(__arm__)
665
+ #define TF_PLAT_STR___arm__ TF_PLAT_STR_(__arm__)
666
+ #else
667
+ #define TF_PLAT_STR___arm__
668
+ #endif
669
+ #if defined(__i386__)
670
+ #define TF_PLAT_STR___i386__ TF_PLAT_STR_(__i386__)
671
+ #else
672
+ #define TF_PLAT_STR___i386__
673
+ #endif
674
+ #if defined(__i686__)
675
+ #define TF_PLAT_STR___i686__ TF_PLAT_STR_(__i686__)
676
+ #else
677
+ #define TF_PLAT_STR___i686__
678
+ #endif
679
+ #if defined(__ia64__)
680
+ #define TF_PLAT_STR___ia64__ TF_PLAT_STR_(__ia64__)
681
+ #else
682
+ #define TF_PLAT_STR___ia64__
683
+ #endif
684
+ #if defined(__linux__)
685
+ #define TF_PLAT_STR___linux__ TF_PLAT_STR_(__linux__)
686
+ #else
687
+ #define TF_PLAT_STR___linux__
688
+ #endif
689
+ #if defined(__mips32__)
690
+ #define TF_PLAT_STR___mips32__ TF_PLAT_STR_(__mips32__)
691
+ #else
692
+ #define TF_PLAT_STR___mips32__
693
+ #endif
694
+ #if defined(__mips64__)
695
+ #define TF_PLAT_STR___mips64__ TF_PLAT_STR_(__mips64__)
696
+ #else
697
+ #define TF_PLAT_STR___mips64__
698
+ #endif
699
+ #if defined(__powerpc64__)
700
+ #define TF_PLAT_STR___powerpc64__ TF_PLAT_STR_(__powerpc64__)
701
+ #else
702
+ #define TF_PLAT_STR___powerpc64__
703
+ #endif
704
+ #if defined(__powerpc__)
705
+ #define TF_PLAT_STR___powerpc__ TF_PLAT_STR_(__powerpc__)
706
+ #else
707
+ #define TF_PLAT_STR___powerpc__
708
+ #endif
709
+ #if defined(__riscv___)
710
+ #define TF_PLAT_STR___riscv___ TF_PLAT_STR_(__riscv___)
711
+ #else
712
+ #define TF_PLAT_STR___riscv___
713
+ #endif
714
+ #if defined(__s390x__)
715
+ #define TF_PLAT_STR___s390x__ TF_PLAT_STR_(__s390x__)
716
+ #else
717
+ #define TF_PLAT_STR___s390x__
718
+ #endif
719
+ #if defined(__sparc64__)
720
+ #define TF_PLAT_STR___sparc64__ TF_PLAT_STR_(__sparc64__)
721
+ #else
722
+ #define TF_PLAT_STR___sparc64__
723
+ #endif
724
+ #if defined(__sparc__)
725
+ #define TF_PLAT_STR___sparc__ TF_PLAT_STR_(__sparc__)
726
+ #else
727
+ #define TF_PLAT_STR___sparc__
728
+ #endif
729
+ #if defined(__x86_64__)
730
+ #define TF_PLAT_STR___x86_64__ TF_PLAT_STR_(__x86_64__)
731
+ #else
732
+ #define TF_PLAT_STR___x86_64__
733
+ #endif
734
+
735
+ #endif // TENSORFLOW_TSL_PLATFORM_PLATFORM_STRINGS_COMPUTED_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/png.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_PNG_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_PNG_H_
18
+
19
+ #include "tsl/platform/platform.h"
20
+
21
+ #if defined(PLATFORM_GOOGLE) && !defined(IS_MOBILE_PLATFORM)
22
+ #include "png.h" // from @png // IWYU pragma: export
23
+ #elif defined(PLATFORM_POSIX) || defined(PLATFORM_WINDOWS) || \
24
+ defined(PLATFORM_POSIX_ANDROID) || defined(IS_MOBILE_PLATFORM)
25
+ #include <png.h> // IWYU pragma: export
26
+ #else
27
+ #error Define the appropriate PLATFORM_<foo> macro for this platform
28
+ #endif
29
+
30
+ #endif // TENSORFLOW_TSL_PLATFORM_PNG_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/profile_utils/android_armv7a_cpu_utils_helper.h ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_PROFILE_UTILS_ANDROID_ARMV7A_CPU_UTILS_HELPER_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_PROFILE_UTILS_ANDROID_ARMV7A_CPU_UTILS_HELPER_H_
18
+
19
+ #include <sys/types.h>
20
+
21
+ #include "tsl/platform/macros.h"
22
+ #include "tsl/platform/profile_utils/i_cpu_utils_helper.h"
23
+ #include "tsl/platform/types.h"
24
+
25
+ #if defined(__ANDROID__) && (__ANDROID_API__ >= 21) && \
26
+ (defined(__ARM_ARCH_7A__) || defined(__aarch64__))
27
+
28
+ struct perf_event_attr;
29
+
30
+ namespace tsl {
31
+ namespace profile_utils {
32
+
33
+ // Implementation of CpuUtilsHelper for Android armv7a
34
+ class AndroidArmV7ACpuUtilsHelper : public ICpuUtilsHelper {
35
+ public:
36
+ AndroidArmV7ACpuUtilsHelper() = default;
37
+ void ResetClockCycle() final;
38
+ uint64 GetCurrentClockCycle() final;
39
+ void EnableClockCycleProfiling() final;
40
+ void DisableClockCycleProfiling() final;
41
+ int64 CalculateCpuFrequency() final;
42
+
43
+ private:
44
+ static constexpr int INVALID_FD = -1;
45
+ static constexpr int64 INVALID_CPU_FREQUENCY = -1;
46
+
47
+ void InitializeInternal();
48
+
49
+ // syscall __NR_perf_event_open with arguments
50
+ int OpenPerfEvent(perf_event_attr *const hw_event, const pid_t pid,
51
+ const int cpu, const int group_fd,
52
+ const unsigned long flags);
53
+
54
+ int64 ReadCpuFrequencyFile(const int cpu_id, const char *const type);
55
+
56
+ bool is_initialized_{false};
57
+ int fd_{INVALID_FD};
58
+
59
+ AndroidArmV7ACpuUtilsHelper(const AndroidArmV7ACpuUtilsHelper &) = delete;
60
+ void operator=(const AndroidArmV7ACpuUtilsHelper &) = delete;
61
+ };
62
+
63
+ } // namespace profile_utils
64
+ } // namespace tsl
65
+
66
+ #endif // defined(__ANDROID__) && (__ANDROID_API__ >= 21) &&
67
+ // (defined(__ARM_ARCH_7A__) || defined(__aarch64__))
68
+
69
+ #endif // TENSORFLOW_TSL_PLATFORM_PROFILE_UTILS_ANDROID_ARMV7A_CPU_UTILS_HELPER_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/profile_utils/clock_cycle_profiler.h ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PLATFORM_PROFILE_UTILS_CLOCK_CYCLE_PROFILER_H_
17
+ #define TENSORFLOW_TSL_PLATFORM_PROFILE_UTILS_CLOCK_CYCLE_PROFILER_H_
18
+
19
+ #include <algorithm>
20
+
21
+ #include "tsl/platform/logging.h"
22
+ #include "tsl/platform/macros.h"
23
+ #include "tsl/platform/profile_utils/cpu_utils.h"
24
+
25
+ namespace tsl {
26
+
27
+ class ClockCycleProfiler {
28
+ public:
29
+ ClockCycleProfiler() = default;
30
+
31
+ // Start counting clock cycle.
32
+ inline void Start() {
33
+ CHECK(!IsStarted()) << "Profiler has been already started.";
34
+ start_clock_ = GetCurrentClockCycleInternal();
35
+ }
36
+
37
+ // Stop counting clock cycle.
38
+ inline void Stop() {
39
+ CHECK(IsStarted()) << "Profiler is not started yet.";
40
+ AccumulateClockCycle();
41
+ }
42
+
43
+ // Get how many times Start() is called.
44
+ inline double GetCount() {
45
+ CHECK(!IsStarted());
46
+ return count_;
47
+ }
48
+
49
+ // Get average clock cycle.
50
+ inline double GetAverageClockCycle() {
51
+ CHECK(!IsStarted());
52
+ return average_clock_cycle_;
53
+ }
54
+
55
+ // TODO(satok): Support more statistics (e.g. standard deviation)
56
+ // Get worst clock cycle.
57
+ inline double GetWorstClockCycle() {
58
+ CHECK(!IsStarted());
59
+ return worst_clock_cycle_;
60
+ }
61
+
62
+ // Dump statistics
63
+ void DumpStatistics(const string& tag);
64
+
65
+ private:
66
+ inline uint64 GetCurrentClockCycleInternal() {
67
+ const uint64 clockCycle = profile_utils::CpuUtils::GetCurrentClockCycle();
68
+ if (clockCycle <= 0) {
69
+ if (valid_) {
70
+ LOG(WARNING) << "GetCurrentClockCycle is not implemented."
71
+ << " Return 1 instead.";
72
+ valid_ = false;
73
+ }
74
+ return 1;
75
+ } else {
76
+ return clockCycle;
77
+ }
78
+ }
79
+
80
+ inline bool IsStarted() const { return start_clock_ > 0; }
81
+
82
+ inline void AccumulateClockCycle() {
83
+ const uint64 now = GetCurrentClockCycleInternal();
84
+ const double clock_diff = static_cast<double>(now - start_clock_);
85
+ const double next_count = count_ + 1.0;
86
+ const double next_count_inv = 1.0 / next_count;
87
+ const double next_ave_cpu_clock =
88
+ next_count_inv * (average_clock_cycle_ * count_ + clock_diff);
89
+ count_ = next_count;
90
+ average_clock_cycle_ = next_ave_cpu_clock;
91
+ worst_clock_cycle_ = std::max(worst_clock_cycle_, clock_diff);
92
+ start_clock_ = 0;
93
+ }
94
+
95
+ uint64 start_clock_{0};
96
+ double count_{0.0};
97
+ double average_clock_cycle_{0.0};
98
+ double worst_clock_cycle_{0.0};
99
+ bool valid_{true};
100
+
101
+ ClockCycleProfiler(const ClockCycleProfiler&) = delete;
102
+ void operator=(const ClockCycleProfiler&) = delete;
103
+ };
104
+
105
+ } // namespace tsl
106
+
107
+ #endif // TENSORFLOW_TSL_PLATFORM_PROFILE_UTILS_CLOCK_CYCLE_PROFILER_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/profile_utils/cpu_utils.h ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+ // This class is designed to get accurate profile for programs.
16
+
17
+ #ifndef TENSORFLOW_TSL_PLATFORM_PROFILE_UTILS_CPU_UTILS_H_
18
+ #define TENSORFLOW_TSL_PLATFORM_PROFILE_UTILS_CPU_UTILS_H_
19
+
20
+ #include <chrono>
21
+ #include <memory>
22
+
23
+ #include "tsl/platform/macros.h"
24
+ #include "tsl/platform/profile_utils/i_cpu_utils_helper.h"
25
+ #include "tsl/platform/types.h"
26
+
27
+ #if defined(ARMV6) || defined(__ARM_ARCH_7A__)
28
+ #include <sys/time.h>
29
+ #endif
30
+
31
+ #if defined(_WIN32)
32
+ #include <intrin.h>
33
+ #endif
34
+
35
+ namespace tsl {
36
+
37
+ namespace profile_utils {
38
+
39
+ // CpuUtils is a profiling tool with static functions
40
+ // designed to be called from multiple classes.
41
+ // A dedicated class which inherits ICpuUtilsHelper is
42
+ // stored as a function-local static variable which inherits
43
+ // GetCpuUtilsHelperSingletonInstance that caches CPU information,
44
+ // because loading CPU information may take a long time.
45
+ // Users must call EnableClockCycleProfiling before using CpuUtils.
46
+ class CpuUtils {
47
+ public:
48
+ // Constant for invalid frequency.
49
+ // This value is returned when the frequency is not obtained somehow.
50
+ static constexpr int64_t INVALID_FREQUENCY = -1;
51
+ static constexpr uint64 DUMMY_CYCLE_CLOCK = 1;
52
+
53
+ // Return current clock cycle. This function is designed to
54
+ // minimize the overhead to get clock and maximize the accuracy of
55
+ // time for profile.
56
+ // This returns unsigned int because there is no guarantee that rdtsc
57
+ // is less than 2 ^ 61.
58
+ static inline uint64 GetCurrentClockCycle() {
59
+ #if defined(__ANDROID__)
60
+ return GetCpuUtilsHelperSingletonInstance().GetCurrentClockCycle();
61
+ // ----------------------------------------------------------------
62
+ #elif defined(_WIN32)
63
+ return __rdtsc();
64
+ // ----------------------------------------------------------------
65
+ #elif defined(__x86_64__) || defined(__amd64__)
66
+ uint64_t high, low;
67
+ __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
68
+ return (high << 32) | low;
69
+ // ----------------------------------------------------------------
70
+ #elif defined(__aarch64__)
71
+ // System timer of ARMv8 runs at a different frequency than the CPU's.
72
+ // The frequency is fixed, typically in the range 1-50MHz. It can because
73
+ // read at CNTFRQ special register. We assume the OS has set up
74
+ // the virtual timer properly.
75
+ uint64_t virtual_timer_value;
76
+ asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
77
+ return virtual_timer_value;
78
+ // ----------------------------------------------------------------
79
+ // V6 is the earliest arm that has a standard cyclecount
80
+ #elif defined(ARMV6) || defined(__ARM_ARCH_7A__)
81
+ uint32_t pmccntr;
82
+ uint32_t pmuseren;
83
+ uint32_t pmcntenset;
84
+ // Read the user mode perf monitor counter access permissions.
85
+ asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
86
+ if (pmuseren & 1) { // Allows reading perfmon counters for user mode code.
87
+ asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
88
+ if (pmcntenset & 0x80000000ul) { // Is it counting?
89
+ asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
90
+ // The counter is set up to count every 64th cyclecount
91
+ return static_cast<uint64>(pmccntr) * 64; // Should optimize to << 64
92
+ }
93
+ }
94
+ // Returning dummy clock when can't access to the counter
95
+ return DUMMY_CYCLE_CLOCK;
96
+ #elif defined(__powerpc64__) || defined(__ppc64__)
97
+ uint64 __t;
98
+ __asm__ __volatile__("mfspr %0,268" : "=r"(__t));
99
+ return __t;
100
+
101
+ #elif defined(__powerpc__) || defined(__ppc__)
102
+ uint64 upper, lower, tmp;
103
+ __asm__ volatile(
104
+ "0: \n"
105
+ "\tmftbu %0 \n"
106
+ "\tmftb %1 \n"
107
+ "\tmftbu %2 \n"
108
+ "\tcmpw %2,%0 \n"
109
+ "\tbne 0b \n"
110
+ : "=r"(upper), "=r"(lower), "=r"(tmp));
111
+ return ((static_cast<uint64>(upper) << 32) | lower);
112
+ #elif defined(__s390x__)
113
+ // TOD Clock of s390x runs at a different frequency than the CPU's.
114
+ // The stepping is 244 picoseconds (~4Ghz).
115
+ uint64 t;
116
+ __asm__ __volatile__("stckf %0" : "=Q"(t));
117
+ return t;
118
+ #else
119
+ // TODO(satok): Support generic way to emulate clock count.
120
+ // TODO(satok): Support other architectures if wanted.
121
+ // Returning dummy clock when can't access to the counter
122
+ return DUMMY_CYCLE_CLOCK;
123
+ #endif
124
+ }
125
+
126
+ // Return cycle counter frequency.
127
+ // As this method caches the cpu frequency internally,
128
+ // the first call will incur overhead, but not subsequent calls.
129
+ #if (defined(__powerpc__) || \
130
+ defined(__ppc__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) || \
131
+ (defined(__s390x__))
132
+ static uint64 GetCycleCounterFrequency();
133
+ #else
134
+ static int64_t GetCycleCounterFrequency();
135
+ #endif
136
+
137
+ // Return micro second per each clock
138
+ // As this method caches the cpu frequency internally,
139
+ // the first call will incur overhead, but not subsequent calls.
140
+ static double GetMicroSecPerClock();
141
+
142
+ // Reset clock cycle
143
+ // Resetting clock cycle is recommended to prevent
144
+ // clock cycle counters from overflowing on some platforms.
145
+ static void ResetClockCycle();
146
+
147
+ // Enable/Disable clock cycle profile
148
+ // You can enable / disable profile if it's supported by the platform
149
+ static void EnableClockCycleProfiling();
150
+ static void DisableClockCycleProfiling();
151
+
152
+ // Return chrono::duration per each clock
153
+ static std::chrono::duration<double> ConvertClockCycleToTime(
154
+ const int64_t clock_cycle);
155
+
156
+ private:
157
+ class DefaultCpuUtilsHelper : public ICpuUtilsHelper {
158
+ public:
159
+ DefaultCpuUtilsHelper() = default;
160
+ void ResetClockCycle() final {}
161
+ uint64 GetCurrentClockCycle() final { return DUMMY_CYCLE_CLOCK; }
162
+ void EnableClockCycleProfiling() final {}
163
+ void DisableClockCycleProfiling() final {}
164
+ int64_t CalculateCpuFrequency() final { return INVALID_FREQUENCY; }
165
+
166
+ private:
167
+ DefaultCpuUtilsHelper(const DefaultCpuUtilsHelper&) = delete;
168
+ void operator=(const DefaultCpuUtilsHelper&) = delete;
169
+ };
170
+
171
+ // Return cpu frequency.
172
+ // CAVEAT: as this method calls system call and parse the message,
173
+ // this call may be slow. This is why this class caches the value by
174
+ // StaticVariableInitializer.
175
+ static int64_t GetCycleCounterFrequencyImpl();
176
+
177
+ // Return a singleton of ICpuUtilsHelper
178
+ // ICpuUtilsHelper is declared as a function-local static variable
179
+ // for the following two reasons:
180
+ // 1. Avoid passing instances to all classes which want
181
+ // to use profiling tools in CpuUtils
182
+ // 2. Minimize the overhead of acquiring ICpuUtilsHelper
183
+ static ICpuUtilsHelper& GetCpuUtilsHelperSingletonInstance();
184
+
185
+ CpuUtils(const CpuUtils&) = delete;
186
+ void operator=(const CpuUtils&) = delete;
187
+ };
188
+
189
+ } // namespace profile_utils
190
+
191
+ } // namespace tsl
192
+
193
+ #endif // TENSORFLOW_TSL_PLATFORM_PROFILE_UTILS_CPU_UTILS_H_