ZTWHHH commited on
Commit
7f7ad8b
·
verified ·
1 Parent(s): fd52fe2

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/call_options.h +82 -0
  2. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/coordination/coordination_client.h +146 -0
  3. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/coordination/coordination_service.h +253 -0
  4. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/coordination/coordination_service_rpc_handler.h +102 -0
  5. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/preemption/preemption_notifier.h +147 -0
  6. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_call.h +521 -0
  7. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_channel.h +100 -0
  8. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_channel_common.h +103 -0
  9. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_client_cq_tag.h +41 -0
  10. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_util.h +130 -0
  11. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/allocator.h +430 -0
  12. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/allocator_registry.h +154 -0
  13. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/allocator_retry.h +60 -0
  14. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/bfc_allocator.h +629 -0
  15. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/cancellation.h +217 -0
  16. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/contraction/eigen_contraction_kernel.h +905 -0
  17. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/convolution/eigen_convolution_helpers.h +87 -0
  18. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/convolution/eigen_spatial_convolutions-inl.h +1772 -0
  19. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/convolution/eigen_spatial_convolutions.h +445 -0
  20. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/device_id.h +89 -0
  21. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/device_id_manager.h +53 -0
  22. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/device_id_utils.h +72 -0
  23. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/device_type.h +50 -0
  24. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/FixedPoint.h +53 -0
  25. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/MatMatProduct.h +363 -0
  26. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/MatMatProductAVX2.h +2314 -0
  27. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/MatMatProductNEON.h +316 -0
  28. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/MatVecProduct.h +151 -0
  29. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/PacketMathAVX.h +164 -0
  30. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/PacketMathAVX2.h +560 -0
  31. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/PacketMathAVX512.h +531 -0
  32. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/TypeCastingAVX2.h +108 -0
  33. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/TypeCastingAVX512.h +206 -0
  34. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint_types.h +354 -0
  35. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/metrics.h +30 -0
  36. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/numeric_types.h +74 -0
  37. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/shared_counter.h +35 -0
  38. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/tracking_allocator.h +137 -0
  39. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/type_traits.h +109 -0
  40. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/connected_traceme.h +118 -0
  41. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/context_types.h +59 -0
  42. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/nvtx_utils.h +105 -0
  43. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/profiler_factory.h +47 -0
  44. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/profiler_interface.h +49 -0
  45. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/profiler_lock.h +73 -0
  46. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/profiler_session.h +93 -0
  47. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/scoped_annotation.h +159 -0
  48. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/scoped_annotation_stack.h +118 -0
  49. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/scoped_memory_debug_annotation.h +112 -0
  50. videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/traceme.h +333 -0
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/call_options.h ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_CALL_OPTIONS_H_
17
+ #define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_CALL_OPTIONS_H_
18
+
19
+ #include <functional>
20
+
21
+ #include "tsl/platform/macros.h"
22
+ #include "tsl/platform/mutex.h"
23
+ #include "tsl/platform/thread_annotations.h"
24
+ #include "tsl/platform/types.h"
25
+
26
+ namespace tsl {
27
+
28
+ // Options passed to interface calls. This class provides portable
29
+ // functionality across different RPC systems on top of
30
+ // platform-specific mechanisms (for client and server contexts,
31
+ // cancellation, etc.).
32
+ //
33
+ // TODO(zhifengc): Maybe change all RPC methods to take CallOptions.
34
+ class CallOptions {
35
+ public:
36
+ CallOptions();
37
+
38
+ // Cancellation.
39
+ //
40
+ // The caller may call StartCancel() anytime as long as this
41
+ // CallOptions object is alive. The callee may or may not receive
42
+ // the cancellation notification depending on the rpc layer
43
+ // implementation.
44
+ void StartCancel();
45
+
46
+ // The callee (the rpc layer implementation) must set a cancellation
47
+ // notifier before its blocking operation and clear the notifier
48
+ // before the call returns.
49
+ //
50
+ // "cancel_func" may be called zero, once or more time. Therefore, it
51
+ // should _not_ be responsible for memory management of any objects.
52
+ //
53
+ // "cancel_func" must be very light-weight. It should not block on
54
+ // IO or locking. Typically, it just calls the rpc implementation
55
+ // layer's specific cancellation mechanism and does nothing else.
56
+ //
57
+ // NOTE: "cancel_func" itself is pass-by-value. Therefore, we do not
58
+ // worry about its ownership here.
59
+ typedef std::function<void()> CancelFunction;
60
+ void SetCancelCallback(CancelFunction cancel_func);
61
+ void ClearCancelCallback();
62
+
63
+ // Get and set operation timeout. Timeout value is in milliseconds.
64
+ //
65
+ // Default: 0. indicating there is no timeout for this call.
66
+ int64_t GetTimeout();
67
+ void SetTimeout(int64_t ms);
68
+
69
+ private:
70
+ mutex mu_;
71
+ CancelFunction cancel_func_ TF_GUARDED_BY(mu_);
72
+
73
+ // RPC operation timeout in milliseconds.
74
+ int64_t timeout_in_ms_ TF_GUARDED_BY(mu_) = 0;
75
+
76
+ CallOptions(const CallOptions&) = delete;
77
+ void operator=(const CallOptions&) = delete;
78
+ };
79
+
80
+ } // namespace tsl
81
+
82
+ #endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_CALL_OPTIONS_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/coordination/coordination_client.h ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_CLIENT_H_
17
+ #define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_CLIENT_H_
18
+
19
+ #include <memory>
20
+ #include <string>
21
+
22
+ #include "tsl/distributed_runtime/call_options.h"
23
+ #include "tsl/platform/status.h"
24
+ #include "tsl/protobuf/coordination_service.pb.h"
25
+
26
+ namespace tsl {
27
+ using tensorflow::BarrierRequest;
28
+ using tensorflow::BarrierResponse;
29
+ using tensorflow::CancelBarrierRequest;
30
+ using tensorflow::CancelBarrierResponse;
31
+ using tensorflow::DeleteKeyValueRequest;
32
+ using tensorflow::DeleteKeyValueResponse;
33
+ using tensorflow::GetKeyValueDirRequest;
34
+ using tensorflow::GetKeyValueDirResponse;
35
+ using tensorflow::GetKeyValueRequest;
36
+ using tensorflow::GetKeyValueResponse;
37
+ using tensorflow::GetTaskStateRequest;
38
+ using tensorflow::GetTaskStateResponse;
39
+ using tensorflow::HeartbeatRequest;
40
+ using tensorflow::HeartbeatResponse;
41
+ using tensorflow::InsertKeyValueRequest;
42
+ using tensorflow::InsertKeyValueResponse;
43
+ using tensorflow::RegisterTaskRequest;
44
+ using tensorflow::RegisterTaskResponse;
45
+ using tensorflow::ReportErrorToServiceRequest;
46
+ using tensorflow::ReportErrorToServiceResponse;
47
+ using tensorflow::ReportErrorToTaskRequest;
48
+ using tensorflow::ReportErrorToTaskResponse;
49
+ using tensorflow::ResetTaskRequest;
50
+ using tensorflow::ResetTaskResponse;
51
+ using tensorflow::ShutdownTaskRequest;
52
+ using tensorflow::ShutdownTaskResponse;
53
+ using tensorflow::TryGetKeyValueRequest;
54
+ using tensorflow::TryGetKeyValueResponse;
55
+ using tensorflow::WaitForAllTasksRequest;
56
+ using tensorflow::WaitForAllTasksResponse;
57
+
58
+ // Base class of client interface for communicating with coordination service.
59
+ // Can be implemented by a variety of transports such as gRPC.
60
+ class CoordinationClient {
61
+ public:
62
+ virtual ~CoordinationClient() = default;
63
+
64
+ virtual void RegisterTaskAsync(CallOptions* call_opts,
65
+ const RegisterTaskRequest* request,
66
+ RegisterTaskResponse* response,
67
+ StatusCallback done) = 0;
68
+
69
+ virtual void HeartbeatAsync(CallOptions* call_opts,
70
+ const HeartbeatRequest* request,
71
+ HeartbeatResponse* response,
72
+ StatusCallback done) = 0;
73
+
74
+ virtual void WaitForAllTasksAsync(const WaitForAllTasksRequest* request,
75
+ WaitForAllTasksResponse* response,
76
+ StatusCallback done) = 0;
77
+
78
+ virtual void ShutdownTaskAsync(CallOptions* call_opts,
79
+ const ShutdownTaskRequest* request,
80
+ ShutdownTaskResponse* response,
81
+ StatusCallback done) = 0;
82
+
83
+ virtual void ResetTaskAsync(const ResetTaskRequest* request,
84
+ ResetTaskResponse* response,
85
+ StatusCallback done) = 0;
86
+
87
+ virtual void ReportErrorToTaskAsync(CallOptions* call_opts,
88
+ const ReportErrorToTaskRequest* request,
89
+ ReportErrorToTaskResponse* response,
90
+ StatusCallback done) = 0;
91
+
92
+ virtual void ReportErrorToServiceAsync(
93
+ const ReportErrorToServiceRequest* request,
94
+ ReportErrorToServiceResponse* response, StatusCallback done) = 0;
95
+
96
+ virtual void GetTaskStateAsync(const GetTaskStateRequest* request,
97
+ GetTaskStateResponse* response,
98
+ StatusCallback done) = 0;
99
+
100
+ virtual void InsertKeyValueAsync(const InsertKeyValueRequest* request,
101
+ InsertKeyValueResponse* response,
102
+ StatusCallback done) = 0;
103
+
104
+ virtual void GetKeyValueAsync(CallOptions* call_opts,
105
+ const GetKeyValueRequest* request,
106
+ GetKeyValueResponse* response,
107
+ StatusCallback done) = 0;
108
+
109
+ virtual void TryGetKeyValueAsync(const TryGetKeyValueRequest* request,
110
+ TryGetKeyValueResponse* response,
111
+ StatusCallback done) = 0;
112
+
113
+ virtual void GetKeyValueDirAsync(const GetKeyValueDirRequest* request,
114
+ GetKeyValueDirResponse* response,
115
+ StatusCallback done) = 0;
116
+
117
+ virtual void DeleteKeyValueAsync(const DeleteKeyValueRequest* request,
118
+ DeleteKeyValueResponse* response,
119
+ StatusCallback done) = 0;
120
+
121
+ virtual void BarrierAsync(const BarrierRequest* request,
122
+ BarrierResponse* response, StatusCallback done) = 0;
123
+
124
+ virtual void CancelBarrierAsync(const CancelBarrierRequest* request,
125
+ CancelBarrierResponse* response,
126
+ StatusCallback done) = 0;
127
+ };
128
+
129
+ // Simple wrapper class that can be used to retrieve CoordinationClients.
130
+ class CoordinationClientCache {
131
+ public:
132
+ virtual ~CoordinationClientCache() = default;
133
+
134
+ // If the `target` names a remote task, returns a pointer of the
135
+ // CoordinationClient object wrapping that channel to the remote task.
136
+ virtual CoordinationClient* GetClient(const std::string& target) = 0;
137
+
138
+ // If the `target` names a remote task, returns an owned pointer of the
139
+ // CoordinationClient object wrapping that channel to the remote task.
140
+ virtual std::unique_ptr<CoordinationClient> GetOwnedClient(
141
+ const std::string& target) = 0;
142
+ };
143
+
144
+ } // namespace tsl
145
+
146
+ #endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_CLIENT_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/coordination/coordination_service.h ADDED
@@ -0,0 +1,253 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_SERVICE_H_
17
+ #define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_SERVICE_H_
18
+
19
+ #include <functional>
20
+ #include <memory>
21
+ #include <string>
22
+ #include <unordered_map>
23
+ #include <utility>
24
+ #include <vector>
25
+
26
+ #include "absl/strings/string_view.h"
27
+ #include "absl/time/time.h"
28
+ #include "tsl/distributed_runtime/coordination/coordination_client.h"
29
+ #include "tsl/platform/status.h"
30
+ #include "tsl/platform/statusor.h"
31
+ #include "tsl/protobuf/coordination_config.pb.h"
32
+
33
+ namespace tsl {
34
+ class Env;
35
+
36
+ // Static registration for coordination service implementations.
37
+ #define REGISTER_COORDINATION_SERVICE(service_type_name, factory_fn) \
38
+ REGISTER_COORDINATION_SERVICE_UNIQ_HELPER(__COUNTER__, service_type_name, \
39
+ factory_fn)
40
+ #define REGISTER_COORDINATION_SERVICE_UNIQ_HELPER(counter, service_type_name, \
41
+ factory_fn) \
42
+ static bool static_coordination_service_##counter TF_ATTRIBUTE_UNUSED = \
43
+ []() { \
44
+ ::tsl::CoordinationServiceInterface::RegisterCoordinationService( \
45
+ service_type_name, std::move(factory_fn)); \
46
+ return true; \
47
+ }()
48
+
49
+ // Coordination service is used for controlling and coordinating distributed
50
+ // execution in a cluster of multiple tasks.
51
+ //
52
+ // When enabled, the service keeps track of cluster configurations and the state
53
+ // of cluster members. TF runtime and libraries can use it to orchastrate
54
+ // cluster initialization, check the healthiness of tasks, and propagate error
55
+ // messages to the cluster.
56
+ //
57
+ // Normally, the service should first Start(), then perform the supported
58
+ // coordination operations, and finally Stop(). When service runs into error or
59
+ // SetError() is called, all subsequent operations will be in error state.
60
+ //
61
+ // CoordinationServiceInterface defines the service interface for distributed
62
+ // coordination. One instance of the service should be deployed in a cluster,
63
+ // handling various requests and stores configuration key-value data for the
64
+ // tasks. Each task interacts with the service through CoordinationServiceAgent.
65
+ class CoordinationServiceInterface {
66
+ public:
67
+ using CoordinationServiceFactory =
68
+ std::function<std::unique_ptr<CoordinationServiceInterface>(
69
+ Env* env, const tensorflow::CoordinationServiceConfig& config,
70
+ std::unique_ptr<CoordinationClientCache> cache)>;
71
+
72
+ using StatusOrValueCallback =
73
+ std::function<void(const StatusOr<std::string>&)>;
74
+
75
+ virtual ~CoordinationServiceInterface() = default;
76
+
77
+ static void RegisterCoordinationService(
78
+ const std::string& service_type_name,
79
+ CoordinationServiceFactory factory_fn) {
80
+ auto factories = GetCoordinationServiceFactories();
81
+ factories->emplace(service_type_name, factory_fn);
82
+ }
83
+
84
+ static std::unique_ptr<CoordinationServiceInterface>
85
+ EnableCoordinationService(Env* env,
86
+ const tensorflow::CoordinationServiceConfig& config,
87
+ std::unique_ptr<CoordinationClientCache> cache) {
88
+ const auto* factories = GetCoordinationServiceFactories();
89
+ auto factories_iter = factories->find(config.service_type());
90
+ if (factories_iter == factories->end()) {
91
+ LOG(ERROR) << "No coordination service factory found for service type "
92
+ << config.service_type();
93
+ return nullptr;
94
+ }
95
+ auto service = factories_iter->second(env, config, std::move(cache));
96
+ if (service != nullptr) {
97
+ *GetCoordinationServiceInstancePtr() = service.get();
98
+ }
99
+ return service;
100
+ }
101
+
102
+ static CoordinationServiceInterface* GetCoordinationServiceInstance() {
103
+ return *GetCoordinationServiceInstancePtr();
104
+ }
105
+
106
+ // This function is invoked after each task's local devices are appended in a
107
+ // deterministic order during WaitForAllTasks(). This is useful to convert the
108
+ // result into another message, or set global device ids.
109
+ virtual void SetDeviceAggregationFunction(
110
+ std::function<
111
+ tensorflow::DeviceInfo(const tensorflow::DeviceInfo& devices)>
112
+ post_aggregate_device_fn) = 0;
113
+
114
+ // Register a task to the service.
115
+ // Possible service errors:
116
+ // - InvalidArgument: Unexpected task request.
117
+ // - Aborted: (1) task is in error state, or (2) task is in connected state
118
+ // with a different incarnation, indicating that it restarted.
119
+ virtual Status RegisterTask(const tensorflow::CoordinatedTask& task,
120
+ uint64_t incarnation) = 0;
121
+
122
+ // Wait for all tasks to be up and running, and register local device
123
+ // info. The callback is invoked when all tasks are up and registered, or some
124
+ // error occurs.
125
+ // Each task's local devices will be appended in a deterministic order, and
126
+ // post-processed by the callback in SetDeviceAggregationFunction() (if set).
127
+ virtual void WaitForAllTasks(const tensorflow::CoordinatedTask& task,
128
+ const tensorflow::DeviceInfo& devices,
129
+ StatusCallback done) = 0;
130
+
131
+ // Disconnects task from the service. If `shutdown_barrier_timeout_in_ms` is
132
+ // specified in the config, blocks until all tasks reach the barrier before
133
+ // disconnecting together.
134
+ // Possible service errors:
135
+ // - InvalidArgument: Unexpected task request.
136
+ // - FailedPrecondition: task has already disconnected.
137
+ virtual void ShutdownTaskAsync(const tensorflow::CoordinatedTask& task,
138
+ StatusCallback done) = 0;
139
+
140
+ // Disconnects task from the service and cleans up its internal error state.
141
+ // Possible service errors:
142
+ // - InvalidArgument: Unexpected task request.
143
+ // - FailedPrecondition: task has already disconnected.
144
+ virtual Status ResetTask(const tensorflow::CoordinatedTask& task) = 0;
145
+
146
+ // Update the heartbeat timestamp of a task. This should only be invoked on
147
+ // the leader of the cluster.
148
+ virtual Status RecordHeartbeat(const tensorflow::CoordinatedTask& task,
149
+ uint64_t incarnation) = 0;
150
+
151
+ // Set a task in error state permanently.
152
+ virtual Status ReportTaskError(const tensorflow::CoordinatedTask& task,
153
+ Status error) = 0;
154
+
155
+ // Get the state and the error status of the tasks.
156
+ virtual std::vector<tensorflow::CoordinatedTaskStateInfo> GetTaskState(
157
+ const std::vector<tensorflow::CoordinatedTask>& task) = 0;
158
+
159
+ // Insert a configuration key-value in the coordination service.
160
+ // For now, a key-value can only be inserted once and cannot be updated.
161
+ // The key-values are not persisted and will be lost if the leader fails.
162
+ virtual Status InsertKeyValue(const std::string& key,
163
+ const std::string& value) = 0;
164
+
165
+ // Get a configuration key-value from the coordination service. The `done`
166
+ // callback is invoked when the key-value becomes available.
167
+ virtual void GetKeyValueAsync(const std::string& key,
168
+ StatusOrValueCallback done) = 0;
169
+
170
+ // Get a configuration key-value from the coordination service. If the key
171
+ // does not exist, return NotFound error.
172
+ virtual StatusOr<std::string> TryGetKeyValue(const std::string& key) = 0;
173
+
174
+ // Gets all values under a directory (key).
175
+ // A value is considered to be in the directory if its key is prefixed with
176
+ // the directory. This is not a blocking call. Agent does not need to be
177
+ // connected to utilize the distributed key-value store.
178
+ virtual std::vector<tensorflow::KeyValueEntry> GetKeyValueDir(
179
+ absl::string_view directory_key) = 0;
180
+
181
+ // Delete configuration key-value. If key is a directory, recursively clean
182
+ // up all key-values under the directory.
183
+ virtual Status DeleteKeyValue(const std::string& key) = 0;
184
+
185
+ // Blocks until all (or a subset of) tasks are at the barrier or the barrier
186
+ // fails.
187
+ //
188
+ // `barrier_id` should be unique across barriers. Once the barrier has passed
189
+ // or failed, subsequent calls will not block, and immediately respond with
190
+ // the previous response.
191
+ //
192
+ // The first WaitAtBarrier() call received by the service for a particular
193
+ // barrier id is special in that it determines the barrier deadline based on
194
+ // timeout duration.
195
+ // However, if subsequent calls by different agents specify a different set of
196
+ // `participating_tasks` for the same `barrier_id`, the barrier will fail
197
+ // instantly.
198
+ //
199
+ // If no tasks are specified (default), the barrier will block for all the
200
+ // connected tasks.
201
+ //
202
+ // Possible service errors:
203
+ // - DeadlineExceeded: Timed out waiting for specified tasks at the barrier.
204
+ // Deadline is determined by the server timestamp when it receives the
205
+ // first WaitAtBarrier() + timeout duration.
206
+ // - Cancelled: One of the tasks called CancelBarrier().
207
+ // - Aborted: Service is shutting down.
208
+ // - Internal: Any participating task is in ERROR state.
209
+ // - InvalidArgument: (1) Conflicting tasks specified by different agents
210
+ // for the same barrier, (2) one of the participating tasks is not in
211
+ // the cluster, or (3) task making the request is not included in the
212
+ // list of participating tasks.
213
+ // - FailedPrecondition: Agent is in UNINITIALIZED or ERROR state.
214
+ virtual void BarrierAsync(
215
+ const std::string& barrier_id, absl::Duration timeout,
216
+ const tensorflow::CoordinatedTask& task,
217
+ const std::vector<tensorflow::CoordinatedTask>& participating_tasks,
218
+ StatusCallback done) = 0;
219
+
220
+ // Aborts the barrier if it is ongoing.
221
+ // Current and future WaitAtBarrier() calls with the same id will return a
222
+ // CANCELLED error status.
223
+ // Possible service errors:
224
+ // - FailedPrecondition: Barrier has already been passed.
225
+ virtual Status CancelBarrier(const std::string& barrier_id,
226
+ const tensorflow::CoordinatedTask& task) = 0;
227
+
228
+ private:
229
+ friend class CoordinationServiceRpcHandler;
230
+ friend class CoordinationServiceTest_ListClusterDevices_TfDevice_Test;
231
+ friend class CoordinationServiceTest_ListClusterDevices_XlaDevice_Test;
232
+ friend class
233
+ CoordinationServiceTest_ListClusterDevices_DevicesAreNotAddedTwice_Test;
234
+
235
+ virtual const tensorflow::DeviceInfo& ListClusterDevices() = 0;
236
+ virtual uint64_t GetServiceIncarnation() = 0;
237
+
238
+ static std::unordered_map<std::string, CoordinationServiceFactory>*
239
+ GetCoordinationServiceFactories() {
240
+ static auto* coordination_service_factories =
241
+ new std::unordered_map<std::string, CoordinationServiceFactory>();
242
+ return coordination_service_factories;
243
+ }
244
+
245
+ static CoordinationServiceInterface** GetCoordinationServiceInstancePtr() {
246
+ static CoordinationServiceInterface* instance = nullptr;
247
+ return &instance;
248
+ }
249
+ };
250
+
251
+ } // namespace tsl
252
+
253
+ #endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_SERVICE_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/coordination/coordination_service_rpc_handler.h ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_SERVICE_RPC_HANDLER_H_
17
+ #define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_SERVICE_RPC_HANDLER_H_
18
+
19
+ #include "tsl/distributed_runtime/coordination/coordination_service.h"
20
+ #include "tsl/distributed_runtime/coordination/coordination_service_agent.h"
21
+ #include "tsl/platform/mutex.h"
22
+ #include "tsl/platform/status.h"
23
+ #include "tsl/platform/thread_annotations.h"
24
+ #include "tsl/protobuf/coordination_service.pb.h"
25
+
26
+ namespace tsl {
27
+ class CoordinationServiceRpcHandler {
28
+ public:
29
+ explicit CoordinationServiceRpcHandler() {}
30
+
31
+ void SetAgentInstance(CoordinationServiceAgent* agent);
32
+
33
+ void SetServiceInstance(CoordinationServiceInterface* service);
34
+
35
+ void RegisterTaskAsync(const tensorflow::RegisterTaskRequest* request,
36
+ tensorflow::RegisterTaskResponse* response,
37
+ StatusCallback done);
38
+
39
+ void HeartbeatAsync(const tensorflow::HeartbeatRequest* request,
40
+ tensorflow::HeartbeatResponse* response,
41
+ StatusCallback done);
42
+
43
+ void WaitForAllTasksAsync(const tensorflow::WaitForAllTasksRequest* request,
44
+ tensorflow::WaitForAllTasksResponse* response,
45
+ StatusCallback done);
46
+
47
+ void ShutdownTaskAsync(const tensorflow::ShutdownTaskRequest* request,
48
+ tensorflow::ShutdownTaskResponse* response,
49
+ StatusCallback done);
50
+
51
+ void ResetTaskAsync(const tensorflow::ResetTaskRequest* request,
52
+ tensorflow::ResetTaskResponse* response,
53
+ StatusCallback done);
54
+
55
+ void ReportErrorToTaskAsync(
56
+ const tensorflow::ReportErrorToTaskRequest* request,
57
+ tensorflow::ReportErrorToTaskResponse* response, StatusCallback done);
58
+
59
+ void ReportErrorToServiceAsync(
60
+ const tensorflow::ReportErrorToServiceRequest* request,
61
+ tensorflow::ReportErrorToServiceResponse* response, StatusCallback done);
62
+
63
+ void GetTaskStateAsync(const tensorflow::GetTaskStateRequest* request,
64
+ tensorflow::GetTaskStateResponse* response,
65
+ StatusCallback done);
66
+
67
+ void InsertKeyValueAsync(const tensorflow::InsertKeyValueRequest* request,
68
+ tensorflow::InsertKeyValueResponse* response,
69
+ StatusCallback done);
70
+
71
+ void GetKeyValueAsync(const tensorflow::GetKeyValueRequest* request,
72
+ tensorflow::GetKeyValueResponse* response,
73
+ StatusCallback done);
74
+
75
+ void TryGetKeyValueAsync(const tensorflow::TryGetKeyValueRequest* request,
76
+ tensorflow::TryGetKeyValueResponse* response,
77
+ StatusCallback done);
78
+
79
+ void GetKeyValueDirAsync(const tensorflow::GetKeyValueDirRequest* request,
80
+ tensorflow::GetKeyValueDirResponse* response,
81
+ StatusCallback done);
82
+
83
+ void DeleteKeyValueAsync(const tensorflow::DeleteKeyValueRequest* request,
84
+ tensorflow::DeleteKeyValueResponse* response,
85
+ StatusCallback done);
86
+
87
+ void BarrierAsync(const tensorflow::BarrierRequest* request,
88
+ tensorflow::BarrierResponse* response, StatusCallback done);
89
+
90
+ void CancelBarrierAsync(const tensorflow::CancelBarrierRequest* request,
91
+ tensorflow::CancelBarrierResponse* response,
92
+ StatusCallback done);
93
+
94
+ private:
95
+ mutex mu_;
96
+ CoordinationServiceAgent* agent_ TF_GUARDED_BY(mu_) = nullptr;
97
+ CoordinationServiceInterface* service_ TF_GUARDED_BY(mu_) = nullptr;
98
+ };
99
+
100
+ } // namespace tsl
101
+
102
+ #endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_SERVICE_RPC_HANDLER_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/preemption/preemption_notifier.h ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+ #ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_PREEMPTION_PREEMPTION_NOTIFIER_H_
16
+ #define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_PREEMPTION_PREEMPTION_NOTIFIER_H_
17
+
18
+ #include <functional>
19
+ #include <memory>
20
+ #include <string>
21
+ #include <unordered_map>
22
+ #include <utility>
23
+ #include <vector>
24
+
25
+ #include "absl/strings/str_join.h"
26
+ #include "absl/time/time.h"
27
+ #include "tsl/platform/env.h"
28
+ #include "tsl/platform/mutex.h"
29
+ #include "tsl/platform/statusor.h"
30
+
31
+ namespace tsl {
32
+
33
+ // Static registration for preemption notifiers.
34
+ #define REGISTER_PREEMPTION_NOTIFIER(notifier_type_name, factory_fn) \
35
+ REGISTER_PREEMPTION_NOTIFIER_UNIQ_HELPER(__COUNTER__, notifier_type_name, \
36
+ factory_fn)
37
+ #define REGISTER_PREEMPTION_NOTIFIER_UNIQ_HELPER(counter, notifier_type_name, \
38
+ factory_fn) \
39
+ static bool static_preemption_notifier_##counter TF_ATTRIBUTE_UNUSED = \
40
+ []() { \
41
+ ::tsl::PreemptionNotifier::RegisterPreemptionNotifier( \
42
+ notifier_type_name, factory_fn); \
43
+ return true; \
44
+ }()
45
+
46
+ // Base class for listening and propagating task preemption notices.
47
+ //
48
+ // This class provides common mechanism to block on waiting for preemption
49
+ // signals, or register callbacks that will be triggered upon preemption.
50
+ //
51
+ // Example:
52
+ //
53
+ // // Monitors the SIGTERM preemption signal
54
+ // notifier = PreemptionNotifier::CreatePreemptionNotifier("sigterm", env);
55
+ //
56
+ // // Register callback that will be invoked once preempted
57
+ // notifier->WillBePreemptedAtAsync(
58
+ // [](StatusOr<absl::Time> status_or_time) {
59
+ // if (status_or_time.ok()) {
60
+ // LOG(INFO) << "Preempted at time: " << status_or_time.value();
61
+ // } else {
62
+ // LOG(ERROR) << "Received error: " << status_or_time.status();
63
+ // }
64
+ // });
65
+ //
66
+ // // Block current thread until preemption
67
+ // absl::Time preempt_time = notifier->WillBePreemptedAt().value();
68
+ //
69
+ // Users can extend this class to support custom preemption signals, by subclass
70
+ // `PreemptionNotifier` with a custom constructor, register its creator (factory
71
+ // function) with `REGISTER_PREEMPTION_NOTIFIER`. The custom constructor should
72
+ // set up the communication with the cluster scheduler, and invoke the
73
+ // `NotifyRegisteredListeners` method once a preemption signal is received.
74
+ // See `SigtermNotifier` as an example.
75
+
76
+ class PreemptionNotifier {
77
+ public:
78
+ typedef std::function<void(StatusOr<absl::Time>)> PreemptTimeCallback;
79
+ using PreemptionNotifierFactory =
80
+ std::function<std::unique_ptr<PreemptionNotifier>(Env* env)>;
81
+
82
+ explicit PreemptionNotifier(Env* env) : env_(env) {}
83
+ virtual ~PreemptionNotifier() = default;
84
+
85
+ static void RegisterPreemptionNotifier(const std::string& notifier_type_name,
86
+ PreemptionNotifierFactory factory_fn) {
87
+ GetPreemptionNotifierFactories()->emplace(notifier_type_name,
88
+ std::move(factory_fn));
89
+ }
90
+
91
+ static std::unique_ptr<PreemptionNotifier> CreatePreemptionNotifier(
92
+ const std::string& notifier_type, Env* env) {
93
+ const auto* factories = GetPreemptionNotifierFactories();
94
+ auto it = factories->find(notifier_type);
95
+ if (it == factories->end()) {
96
+ std::vector<std::string> registered_types;
97
+ registered_types.reserve(factories->size());
98
+ for (auto& kv : *factories) {
99
+ registered_types.push_back(kv.first);
100
+ }
101
+ LOG(ERROR) << "No preemption notifier factory found for notifier type "
102
+ << notifier_type
103
+ << ". All registered preemption notifier types are: "
104
+ << absl::StrJoin(registered_types, ", ")
105
+ << ". Make sure the library is loaded to the program.";
106
+ return nullptr;
107
+ }
108
+ return it->second(env);
109
+ }
110
+
111
+ // This is a blocking call that returns a death time when preemption /
112
+ // termination will occur once the listener receives the preemption
113
+ // notification. If no death time is specified, absl::Now() is returned.
114
+ // Returns error::Cancelled if UnregisterListeners() is called.
115
+ StatusOr<absl::Time> WillBePreemptedAt();
116
+
117
+ // Registers a callback that takes the death time as input once the listener
118
+ // receives the preemption notification.
119
+ // If no death time is specified, absl::Now() is specified as input.
120
+ // Note: callback should be kept as simple and fast as possible (e.g. simply
121
+ // retrieve result). It should not wait for work done by another callback, and
122
+ // invoke ahy PreemptionNotifier method (e.g. Reset(), destructor).
123
+ void WillBePreemptedAtAsync(PreemptTimeCallback callback);
124
+
125
+ protected:
126
+ Env* GetEnv() { return env_; }
127
+ // Invokes all pending callbacks upon receipt of preemption notice with death
128
+ // time or errors (e.g. cancellation during shutdown).
129
+ void NotifyRegisteredListeners(StatusOr<absl::Time> death_time);
130
+
131
+ private:
132
+ static std::unordered_map<std::string, PreemptionNotifierFactory>*
133
+ GetPreemptionNotifierFactories() {
134
+ static auto* preemption_notifier_factories =
135
+ new std::unordered_map<std::string, PreemptionNotifierFactory>();
136
+ return preemption_notifier_factories;
137
+ }
138
+
139
+ Env* env_; // Not owned.
140
+ mutex mu_;
141
+ absl::Time death_time_ TF_GUARDED_BY(mu_) = absl::InfinitePast();
142
+ std::vector<PreemptTimeCallback> callbacks_ TF_GUARDED_BY(mu_);
143
+ };
144
+
145
+ } // namespace tsl
146
+
147
+ #endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_PREEMPTION_PREEMPTION_NOTIFIER_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_call.h ADDED
@@ -0,0 +1,521 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CALL_H_
17
+ #define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CALL_H_
18
+
19
+ #include "grpcpp/completion_queue.h"
20
+ #include "grpcpp/impl/service_type.h"
21
+ #include "grpcpp/server_builder.h"
22
+ #include "grpcpp/server_context.h"
23
+ #include "grpcpp/support/async_stream.h"
24
+ #include "grpcpp/support/async_unary_call.h"
25
+ #include "tsl/platform/mutex.h"
26
+ #include "tsl/platform/refcount.h"
27
+
28
+ namespace tsl {
29
+
30
+ // CALL STRUCTURES
31
+ // ===============
32
+ //
33
+ // Each pending (incoming) request corresponds to a call object that
34
+ // encapsulates the state of the call. Templates and
35
+ // pointers-to-member functions are used to avoid boilerplate and
36
+ // redundant closure creation. The class hierarchy is as follows:
37
+ //
38
+ // * `UntypedCall<Service>`: The base class represents a call that
39
+ // could be associated with any of the methods on a service of type
40
+ // `Service`. Also defines a `Tag` nested class that can be used as
41
+ // the tag in a `grpc::CompletionQueue`. Each class that
42
+ // instantiates `Service` should have a completion queue polling
43
+ // loop that knows about `UntypedCall<Service>::Tag` objects, and
44
+ // invokes their `OnCompleted()` method to continue processing.
45
+ //
46
+ // * `Call<Service, GrpcService, Req, Resp>`: This class extends
47
+ // `UntypedCall<Service>` and is additionally parameterized by the
48
+ // gRPC-generated asynchronous service class, and the request and
49
+ // response message types. It defines the state associated with a
50
+ // call (whose type depends on the message types), and stores a
51
+ // pointer to a `Service::HandleFoo()` handler method. Each
52
+ // `Service::HandleFoo()` method knows about the corresponding
53
+ // `Call` type, in order to access its state, and invoke its
54
+ // `SendResponse()` method.
55
+ //
56
+ // The lifecycle of a call object is as follows.
57
+ //
58
+ // 1. A `Service` creates a `Call` for a particular method and
59
+ // enqueues it in its completion queue (via an
60
+ // `UntypedCall<Service>::Tag`).
61
+ //
62
+ // 2. When the tag is returned from `cq_->Next()`, the
63
+ // `UntypedCall::RequestReceived()` method is invoked and takes
64
+ // ownership of the call object. This indirectly invokes the
65
+ // appropriate handler method on `Service`.
66
+ //
67
+ // 3. After the response has been written (perhaps in another thread),
68
+ // the `Call::SendResponse()` method is invoked. It transfers
69
+ // ownership of the call object back to the completion queue (via
70
+ // an `UntypedCall::Tag`).
71
+ //
72
+ // 4. When the response has been sent, the tag is returned from
73
+ // `cq_->Next()`, and the call object is deleted.
74
+ //
75
+
76
// Base interface for tags placed on a `::grpc::CompletionQueue`. The
// service's completion-queue polling loop invokes `OnCompleted()` when the
// event associated with the tag finishes.
template <class Service>
class GrpcCallTag {
 public:
  virtual ~GrpcCallTag() = default;

  // Calls the callback associated with this tag.
  //
  // `ok` is true if the event completed as a "regular event", false otherwise
  // (e.g. server shutdown or a dead call).
  virtual void OnCompleted(Service* service, bool ok) = 0;
};
84
+
85
+ // Represents a pending request with unknown message types.
86
+ template <class Service>
87
+ class UntypedCall : public core::RefCounted {
88
+ public:
89
+ virtual ~UntypedCall() {}
90
+
91
+ // The implementation of this method should use `service` to handle
92
+ // an incoming request, and (perhaps asynchronously) send the
93
+ // response.
94
+ //
95
+ // One reference on `this` is transferred to the callee, and the
96
+ // callee is responsible for releasing it (typically via
97
+ // `Call::SendResponse()`).
98
+ //
99
+ // `ok` is true if the request was received in a "regular event",
100
+ // otherwise false.
101
+ virtual void RequestReceived(Service* service, bool ok) = 0;
102
+
103
+ // This method will be called either (i) when the server is notified
104
+ // that the request has been canceled, or (ii) when the request completes
105
+ // normally. The implementation should distinguish these cases by querying
106
+ // the `grpc::ServerContext` associated with the request.
107
+ virtual void RequestCancelled(Service* service, bool ok) = 0;
108
+
109
+ // Associates a tag in a `::grpc::CompletionQueue` with a callback
110
+ // for an incoming RPC. An active Tag owns a reference on the corresponding
111
+ // Call object.
112
+ class Tag : public GrpcCallTag<Service> {
113
+ public:
114
+ // One enum value per supported callback.
115
+ enum Callback { kRequestReceived, kResponseSent, kCancelled };
116
+
117
+ Tag(UntypedCall* call, Callback cb) : call_(call), callback_(cb) {}
118
+
119
+ // Calls the callback associated with this tag.
120
+ //
121
+ // The callback takes ownership of `this->call_`.
122
+ void OnCompleted(Service* service, bool ok) override {
123
+ switch (callback_) {
124
+ case kRequestReceived:
125
+ call_->RequestReceived(service, ok);
126
+ break;
127
+ case kResponseSent:
128
+ // No special handling needed apart from the Unref below.
129
+ break;
130
+ case kCancelled:
131
+ call_->RequestCancelled(service, ok);
132
+ break;
133
+ }
134
+ call_->Unref(); // Ref acquired when tag handed to grpc.
135
+ }
136
+
137
+ private:
138
+ UntypedCall* const call_; // `this` owns one reference.
139
+ Callback callback_;
140
+ };
141
+ };
142
+
143
+ // Represents a pending call with known request and response message
144
+ // types, and a known request-handling method.
145
+ template <class Service, class GrpcService, class RequestMessage,
146
+ class ResponseMessage>
147
+ class Call : public UntypedCall<Service> {
148
+ public:
149
+ // Represents the generic signature of a generated
150
+ // `GrpcService::RequestFoo()` method, where `Foo` is the name of an
151
+ // RPC method.
152
+ using EnqueueFunction = void (GrpcService::*)(
153
+ ::grpc::ServerContext*, RequestMessage*,
154
+ ::grpc::ServerAsyncResponseWriter<ResponseMessage>*,
155
+ ::grpc::CompletionQueue*, ::grpc::ServerCompletionQueue*, void*);
156
+
157
+ // Represents the generic signature of a `Service::HandleFoo()`
158
+ // method, where `Foo` is the name of an RPC method.
159
+ using HandleRequestFunction = void (Service::*)(
160
+ Call<Service, GrpcService, RequestMessage, ResponseMessage>*);
161
+
162
+ Call(HandleRequestFunction handle_request_function)
163
+ : handle_request_function_(handle_request_function), responder_(&ctx_) {}
164
+
165
+ virtual ~Call() {}
166
+
167
+ void RequestReceived(Service* service, bool ok) override {
168
+ if (ok) {
169
+ this->Ref();
170
+ (service->*handle_request_function_)(this);
171
+ }
172
+ }
173
+
174
+ void SendResponse(::grpc::Status status) {
175
+ this->Ref(); // Ref for grpc; released in Tag callback.
176
+ responder_.Finish(response, status, &response_sent_tag_);
177
+ this->Unref();
178
+ }
179
+
180
+ void RequestCancelled(Service* service, bool ok) override {
181
+ if (ctx_.IsCancelled()) {
182
+ mutex_lock l(mu_);
183
+ if (cancel_callback_) {
184
+ cancel_callback_();
185
+ }
186
+ }
187
+ }
188
+
189
+ // Registers `callback` as the function that should be called if and when this
190
+ // call is canceled by the client.
191
+ void SetCancelCallback(std::function<void()> callback) {
192
+ mutex_lock l(mu_);
193
+ cancel_callback_ = std::move(callback);
194
+ }
195
+
196
+ // Clears any cancellation callback that has been registered for this call.
197
+ void ClearCancelCallback() {
198
+ mutex_lock l(mu_);
199
+ cancel_callback_ = nullptr;
200
+ }
201
+
202
+ // Enqueues a new request for the given service on the given
203
+ // completion queue, using the given `enqueue_function`.
204
+ //
205
+ // The request will be handled with the given
206
+ // `handle_request_function`.
207
+ static void EnqueueRequest(GrpcService* grpc_service,
208
+ ::grpc::ServerCompletionQueue* cq,
209
+ EnqueueFunction enqueue_function,
210
+ HandleRequestFunction handle_request_function,
211
+ bool supports_cancel) {
212
+ auto call = new Call<Service, GrpcService, RequestMessage, ResponseMessage>(
213
+ handle_request_function);
214
+ if (supports_cancel) {
215
+ call->RegisterCancellationHandler();
216
+ }
217
+
218
+ // Initial ref for call handed to grpc; released in Tag callback.
219
+ (grpc_service->*enqueue_function)(&call->ctx_, &call->request,
220
+ &call->responder_, cq, cq,
221
+ &call->request_received_tag_);
222
+ }
223
+
224
+ // Enqueues a new request for the given service on the given
225
+ // completion queue, using the given `method_id`.
226
+ //
227
+ // The request will be handled with the given
228
+ // `handle_request_function`.
229
+ static void EnqueueRequestForMethod(
230
+ GrpcService* grpc_service, ::grpc::ServerCompletionQueue* cq,
231
+ int method_id, HandleRequestFunction handle_request_function,
232
+ bool supports_cancel) {
233
+ auto call = new Call<Service, GrpcService, RequestMessage, ResponseMessage>(
234
+ handle_request_function);
235
+ if (supports_cancel) {
236
+ call->RegisterCancellationHandler();
237
+ }
238
+
239
+ // Initial ref for call handed to grpc; released in Tag callback.
240
+ grpc_service->RequestAsyncUnary(method_id, &call->ctx_, &call->request,
241
+ &call->responder_, cq, cq,
242
+ &call->request_received_tag_);
243
+ }
244
+
245
+ RequestMessage request;
246
+ ResponseMessage response;
247
+
248
+ const std::multimap<::grpc::string_ref, ::grpc::string_ref>& client_metadata()
249
+ const {
250
+ return ctx_.client_metadata();
251
+ }
252
+
253
+ private:
254
+ // Creates a completion queue tag for handling cancellation by the client.
255
+ // NOTE: This method must be called before this call is enqueued on a
256
+ // completion queue.
257
+ void RegisterCancellationHandler() {
258
+ this->Ref(); // Ref for grpc; released in Tag callback.
259
+ ctx_.AsyncNotifyWhenDone(&cancelled_tag_);
260
+ }
261
+
262
+ HandleRequestFunction handle_request_function_;
263
+ ::grpc::ServerContext ctx_;
264
+ ::grpc::ServerAsyncResponseWriter<ResponseMessage> responder_;
265
+
266
+ // Used as void* completion markers from grpc to indicate different
267
+ // events of interest for a Call.
268
+ typedef typename UntypedCall<Service>::Tag Tag;
269
+ Tag request_received_tag_{this, Tag::kRequestReceived};
270
+ Tag response_sent_tag_{this, Tag::kResponseSent};
271
+ Tag cancelled_tag_{this, Tag::kCancelled};
272
+
273
+ mutex mu_;
274
+ std::function<void()> cancel_callback_ TF_GUARDED_BY(mu_);
275
+ };
276
+
277
// Lifetime of a server-side bidirectional streaming call:
// - The call is created in the static EnqueueRequest method. It transfers
//   ownership to the kCallOpen tag pushed onto the completion queue.
// - If kCallOpen completes successfully, a read is requested and the
//   kRequestReceived tag takes ownership of the call. If kCallOpen fails,
//   e.g. server is shutdown, no further requests are pushed and the call is
//   destroyed (at the end of Tag::OnCompleted).
// - When the first request is received, we Ref() the call and invoke the
//   handler method thereby transferring ownership to the handler method.
//   The handler is responsible for calling SendResponse() or Finish() on this
//   call.
// - If the handler calls Finish(), e.g. the request was invalid, Finish()
//   transfers ownership from the handler to the kServerFinished tag that
//   it pushes on the completion queue. The ownership is transferred because
//   the ref count is not incremented before putting the tag on the queue.
// - If the handler calls SendResponse(), SendResponse() transfers ownership
//   to the kResponseSent tag.
// - When kResponseSent completes, we request a new read, which owns the call
//   now.
// - When the next request is received, it is handled the same way as the first
//   request.
//
// Because we request a read only after the write is sent, we can safely reuse
// the same request and response messages for the whole call.
template <class Service>
class ServerUntypedBidirectionalStreamingCall : public core::RefCounted {
 public:
  // Invoked when a request message has been read from the client.
  virtual void RequestReceived(Service* service) = 0;

  // Invoked when the call is opened; see the kCallOpen handling below.
  virtual void CallOpen() = 0;

  // Enqueues a request on the completion queue to read the next request.
  virtual void RequestRead() = 0;

  // Associates a tag in a `::grpc::CompletionQueue` with a callback.
  // An active Tag owns a reference on the corresponding Call object.
  class Tag : public GrpcCallTag<Service> {
   public:
    // One enum value per supported callback.
    enum class TagType {
      kCallOpen,
      kRequestReceived,
      kResponseSent,
      kServerFinished,
    };

    // `call` must outlive this tag; the tag holds one reference on it.
    Tag(ServerUntypedBidirectionalStreamingCall* call, TagType cb)
        : call_(call), callback_(cb) {}

    // Calls the callback associated with this tag and Unrefs this->call_.
    void OnCompleted(Service* service, bool ok) override {
      switch (callback_) {
        case TagType::kCallOpen:
          // Non-ok value indicates that the server has been shutdown before we
          // received a message for this call type. We do nothing to let this
          // call object be destroyed and avoid enqueuing request for another
          // call.
          if (ok) {
            call_->CallOpen();
          }
          break;
        case TagType::kRequestReceived:
          // Non-ok value from completion queue here means that we will not
          // receive any more messages from the client, e.g. the client called
          // WritesDone. There is nothing we need to do in this case. The call
          // will be Unref'ed and deleted. If the client wants to open a new
          // call, we have already enqueued a request for a new call in CallOpen
          // above.
          if (ok) {
            call_->RequestReceived(service);
          }
          break;
        case TagType::kResponseSent:
          if (ok) {
            // The obvious place to request a read would be at the end of
            // RequestReceived(). Unfortunately, this can result in multiple
            // outstanding write requests in the completion queue. This is
            // currently not supported by gRPC, which requires at most one
            // outstanding write request in the completion queue.
            // Requesting a read here, in ResponseSent, works because at
            // this point, the completion queue has no write requests
            // (kResponseSent happens when a write completes).
            // This might be synchronizing the processing more than strictly
            // necessary, but is probably fine because, AFAICT from gRPC docs,
            // the write request completes as soon as it can be written to
            // outgoing buffer.
            call_->RequestRead();
          }
          // ok == false means that the response is not going on the wire
          // because the call is already dead (i.e., canceled, deadline
          // expired, other side dropped the channel, etc). Since the call is
          // dead, there is nothing for us to do, we just let the call be
          // deleted.
          break;
        case TagType::kServerFinished:
          // Whether our finish request is successful or not (whether it went
          // on the wire towards the client), there is nothing for us to do.
          // In the current implementation, there can be no read or write
          // requests in the completion queue (see the comment in kResponseSent)
          // above. Even if there were pending requests, they would complete
          // with a non-ok status, we would not do anything, and let the call be
          // deleted.
          break;
      }
      call_->Unref();  // Ref acquired when tag was handed to grpc.
    }

   private:
    ServerUntypedBidirectionalStreamingCall* const
        call_;  // `this` owns one reference.
    // Which event this tag represents.
    TagType callback_;
  };
};
390
+
391
// Represents a pending call with known request and response message
// types, and a known request-handling method.
// Common usage pattern is to have a single thread waiting on events from
// completion queue and calling Tag::OnCompleted(), which invokes methods
// on this.
// This implementation assumes that the server will generate a single response
// message for each request message. More precisely, this class expects that
// each time it invokes handle_request_function_, the service implementation
// will either call SendResponse or Finish exactly once.
// Not thread-safe.
template <class Service, class GrpcService, class RequestMessage,
          class ResponseMessage>
class ServerBidirectionalStreamingCall
    : public ServerUntypedBidirectionalStreamingCall<Service> {
 public:
  // Represents the generic signature of a generated
  // `GrpcService::RequestFoo()` method, where `Foo` is the name of an
  // RPC method.
  using EnqueueFunction = void (GrpcService::*)(
      ::grpc::ServerContext*,
      ::grpc::ServerAsyncReaderWriter<ResponseMessage, RequestMessage>*,
      ::grpc::CompletionQueue*, ::grpc::ServerCompletionQueue*, void*);

  // Represents the generic signature of a `Service::HandleFoo()`
  // method, where `Foo` is the name of an RPC method.
  using HandleRequestFunction = void (Service::*)(
      ServerBidirectionalStreamingCall<Service, GrpcService, RequestMessage,
                                       ResponseMessage>*);

  // `grpc_service`, `cq` and `enqueue_function` are retained so that
  // CallOpen() can enqueue a fresh call of the same type; see CallOpen().
  ServerBidirectionalStreamingCall(
      HandleRequestFunction handle_request_function, GrpcService* grpc_service,
      ::grpc::ServerCompletionQueue* cq, EnqueueFunction enqueue_function)
      : handle_request_function_(handle_request_function),
        stream_(&ctx_),
        grpc_service_(grpc_service),
        cq_(cq),
        enqueue_function_(enqueue_function) {
    VLOG(3) << "Creating ServerBidirectionalStreamingCall " << this;
  }

  ~ServerBidirectionalStreamingCall() override {
    VLOG(3) << "Destroying ServerBidirectionalStreamingCall " << this;
  }

  void CallOpen() override {
    // Let gRPC know that we can accept another call.
    ServerBidirectionalStreamingCall<
        Service, GrpcService, RequestMessage,
        ResponseMessage>::EnqueueRequest(grpc_service_, cq_, enqueue_function_,
                                         handle_request_function_);
    RequestRead();
  }

  void RequestRead() override {
    // Ref for grpc; released in Tag callback (request_received_tag_).
    this->Ref();
    request_.Clear();
    stream_.Read(&request_, &request_received_tag_);
  }

  void RequestReceived(Service* service) override {
    // Ref for the handler; released by the tag pushed from SendResponse() or
    // Finish().
    this->Ref();
    // Request handling should result in a call to SendResponse or Finish.
    (service->*handle_request_function_)(this);
  }

  void SendResponse() {
    // Transferring ownership of this to the response_sent_tag_.
    stream_.Write(response_, &response_sent_tag_);
    // stream_.Write does not save references to response_. We are free to muck
    // around with it as soon as Write returns.
    // We clear the response_ to prepare it for the next response.
    response_.Clear();
  }

  void Finish(::grpc::Status status) {
    // Transferring ownership of this to the server_finished_tag_.
    stream_.Finish(status, &server_finished_tag_);
  }

  // Enqueues a new request for the given service on the given
  // completion queue, using the given `enqueue_function`.
  //
  // The request will be handled by the given `handle_request_function`.
  static void EnqueueRequest(GrpcService* grpc_service,
                             ::grpc::ServerCompletionQueue* cq,
                             EnqueueFunction enqueue_function,
                             HandleRequestFunction handle_request_function) {
    auto call =
        new ServerBidirectionalStreamingCall<Service, GrpcService,
                                             RequestMessage, ResponseMessage>(
            handle_request_function, grpc_service, cq, enqueue_function);

    // Initial ref for call handed to grpc; released in Tag callback.
    (grpc_service->*enqueue_function)(&call->ctx_, &call->stream_, cq, cq,
                                      &call->call_open_tag_);
  }

  // Most recently read request message; valid inside the handler.
  const RequestMessage& request() const { return request_; }
  // Response message to be filled in by the handler before SendResponse().
  ResponseMessage* mutable_response() { return &response_; }

 private:
  // Request and response messages are reused for each request/response exchange
  // between the client and the server.
  RequestMessage request_;
  ResponseMessage response_;
  ::grpc::ServerContext ctx_;

  HandleRequestFunction handle_request_function_;
  ::grpc::ServerAsyncReaderWriter<ResponseMessage, RequestMessage> stream_;

  // Used as void* completion markers from grpc to indicate different
  // events of interest for a ServerBidirectionalStreamingCall.
  typedef typename ServerUntypedBidirectionalStreamingCall<Service>::Tag Tag;
  // At most one tag of each kind may be given to gRPC at any one time.
  // Beyond semantic sanity, this is needed to ensure proper ref counting
  // of this call object.
  Tag call_open_tag_{this, Tag::TagType::kCallOpen};
  Tag request_received_tag_{this, Tag::TagType::kRequestReceived};
  Tag response_sent_tag_{this, Tag::TagType::kResponseSent};
  Tag server_finished_tag_{this, Tag::TagType::kServerFinished};

  // These fields are used only to spawn another instance of this to accept
  // more streaming calls.
  GrpcService* grpc_service_;
  ::grpc::ServerCompletionQueue* cq_;
  EnqueueFunction enqueue_function_;
};
518
+
519
+ } // namespace tsl
520
+
521
+ #endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CALL_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_channel.h ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CHANNEL_H_
17
+ #define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CHANNEL_H_
18
+
19
+ #include <map>
20
+ #include <memory>
21
+ #include <set>
22
+ #include <string>
23
+ #include <vector>
24
+
25
+ #include "grpcpp/grpcpp.h"
26
+ #include "tsl/distributed_runtime/rpc/grpc_util.h"
27
+ #include "tsl/protobuf/rpc_options.pb.h"
28
+
29
+ namespace tsl {
30
+ using tensorflow::RPCOptions;
31
+
32
// Consolidated parameter structure to ease use of generic interfaces.
//
// Each job_id requires:
// - a list of host:port (or sparse list of index:host:port)
// - the number of tasks per replica
class GrpcChannelSpec {
 public:
  // One named job and its (possibly sparse) task-index -> host:port mapping.
  struct HostPortsJob {
    HostPortsJob(const string& job_id, const std::map<int, string>& host_ports)
        : job_id(job_id), host_ports(host_ports) {}
    const string job_id;
    // Maps task index -> "host:port" address for that task.
    const std::map<int, string> host_ports;
  };

  // Adds a job with the given task-index -> host:port mapping.
  // NOTE(review): defined in the .cc; presumably returns an error for a
  // duplicate job_id (job_ids_ tracks seen ids) — confirm against the
  // implementation.
  Status AddHostPortsJob(const string& job_id,
                         const std::map<int, string>& host_ports);

  // Returns all jobs added so far, in insertion order.
  const std::vector<HostPortsJob>& host_ports_jobs() const {
    return host_ports_jobs_;
  }

 private:
  std::vector<HostPortsJob> host_ports_jobs_;
  // Set of job ids already added; used by AddHostPortsJob.
  std::set<string> job_ids_;
};
57
+
58
+ class GrpcChannelCache {
59
+ public:
60
+ virtual ~GrpcChannelCache() {}
61
+
62
+ // Populates *workers with names of all workers which this object
63
+ // was created to handle. Worker names are in the format
64
+ // /job:<job identifier>/task:<task id>
65
+ // e.g. /job:mnist/task:2
66
+ virtual void ListWorkers(std::vector<string>* workers) = 0;
67
+ virtual void ListWorkersInJob(const string& job_name,
68
+ std::vector<string>* workers) = 0;
69
+
70
+ // If found, returns a gRPC channel that is connected to the remote
71
+ // worker named by 'target'. 'target' is of the following
72
+ // format: /job:<job identifier>/task:<task id>
73
+ // E.g., /job:mnist/task:2
74
+ virtual SharedGrpcChannelPtr FindWorkerChannel(const string& target) = 0;
75
+
76
+ // Translates a string in the form `/job:X/task:Z` into a host_port.
77
+ virtual string TranslateTask(const string& task) = 0;
78
+ };
79
+
80
+ typedef std::function<SharedGrpcChannelPtr(string)> ChannelCreationFunction;
81
+
82
+ GrpcChannelCache* NewGrpcChannelCache(
83
+ const GrpcChannelSpec& channel_spec, ChannelCreationFunction channel_func,
84
+ const RPCOptions& rpc_options = RPCOptions());
85
+
86
+ // Below here are internal-only functions.
87
+
88
+ ::grpc::ChannelArguments GetChannelArguments(const RPCOptions* rpc_options);
89
+
90
+ ChannelCreationFunction ConvertToChannelCreationFunction(
91
+ const std::function<Status(string, const RPCOptions*,
92
+ SharedGrpcChannelPtr*)>& new_channel_func_ptr);
93
+
94
+ Status NewHostPortGrpcChannel(const string& target,
95
+ const RPCOptions* rpc_options,
96
+ SharedGrpcChannelPtr* channel_pointer);
97
+
98
+ } // namespace tsl
99
+
100
+ #endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CHANNEL_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_channel_common.h ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CHANNEL_COMMON_H_
17
+ #define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CHANNEL_COMMON_H_
18
+
19
+ #include <unordered_map>
20
+ #include <vector>
21
+
22
+ #include "absl/container/flat_hash_map.h"
23
+ #include "tsl/distributed_runtime/rpc/grpc_util.h"
24
+ #include "tsl/platform/logging.h"
25
+ #include "tsl/platform/mutex.h"
26
+
27
+ namespace tsl {
28
+
29
// GenericCachingChannelCache caches results of FindWorkerChannel() calls.
// To use, instantiate with the type of channel cache needed.
// GenericCachingChannelCache allows using multiple channels to communicate
// with the same target to provide throughput gains. When multiple channels
// exist for the same target they are chosen in a simple round robin fashion
// on each call to FindWorkerChannel.
//
// Thread-safety: all access to the channel map is guarded by mu_.
template <typename ChannelCacheT>
class GenericCachingChannelCache : public ChannelCacheT {
 public:
  // Values of `num_channels_per_target` < 1 are clamped to 1.
  explicit GenericCachingChannelCache(int num_channels_per_target)
      : num_channels_per_target_(
            num_channels_per_target > 0 ? num_channels_per_target : 1) {}

  ~GenericCachingChannelCache() override {}

  // Returns the next channel (round robin) for `target`, lazily creating
  // num_channels_per_target_ channels via FindChannelOnce() on first use.
  // Returns nullptr if any channel creation fails (nothing is cached then).
  SharedGrpcChannelPtr FindWorkerChannel(const string& target) override {
    {
      mutex_lock l(mu_);
      auto iter = channels_.find(target);
      if (iter != channels_.end()) {
        return GetNextChannelPtrAndUpdateState(iter->second);
      }
    }
    // Cache miss: create the channels outside the lock so that slow channel
    // creation does not block lookups for other targets.
    ChannelState new_chan_state;
    for (int indx = 0; indx < num_channels_per_target_; indx++) {
      auto ch = FindChannelOnce(target);
      if (!ch) return nullptr;
      new_chan_state.channels.push_back(ch);
    }
    // Initialize the cursor so the first round-robin pick is index 0.
    new_chan_state.last_used = num_channels_per_target_ - 1;

    {
      mutex_lock l(mu_);
      // If another thread raced us and already inserted channels for this
      // target, insert() keeps the existing entry and ours is discarded;
      // either way we round-robin over the entry that is in the map.
      typename absl::flat_hash_map<string, ChannelState>::iterator iter;
      bool was_inserted;
      std::tie(iter, was_inserted) = channels_.insert({target, new_chan_state});
      VLOG(2) << "Channel cache for target: " << target
              << " Size: " << new_chan_state.channels.size()
              << " insertion: " << was_inserted;
      return GetNextChannelPtrAndUpdateState(iter->second);
    }
  }

 protected:
  // Find the ClientChannel for "target". Only called when no channel was
  // found in the channels_ cache for "target". A non nullptr result will be
  // cached in channels_.
  virtual SharedGrpcChannelPtr FindChannelOnce(const string& target) = 0;

 private:
  // All channels for one target plus the index of the most recently used one.
  struct ChannelState {
    std::vector<SharedGrpcChannelPtr> channels;
    int last_used;
  };

  // Advances the round-robin cursor and returns the selected channel.
  // Should be called with mu_ held.
  SharedGrpcChannelPtr GetNextChannelPtrAndUpdateState(
      ChannelState& chan_state) {
    // Following statement is marked as Crash OK as this is an invariant of
    // code flow in this class.
    CHECK_EQ(chan_state.channels.size(), num_channels_per_target_);  // Crash OK
    chan_state.last_used =
        (chan_state.last_used + 1) % num_channels_per_target_;
    return chan_state.channels[chan_state.last_used];
  }

  const int num_channels_per_target_;
  // TODO(zhifengc): Eviction when the map becomes too big.
  mutex mu_;
  absl::flat_hash_map<string, ChannelState> channels_ TF_GUARDED_BY(mu_);
};
100
+
101
+ } // namespace tsl
102
+
103
+ #endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CHANNEL_COMMON_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_client_cq_tag.h ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CLIENT_CQ_TAG_H_
17
+ #define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CLIENT_CQ_TAG_H_
18
+
19
+ #include "tsl/platform/macros.h"
20
+
21
+ namespace tsl {
22
+
23
// Represents a pending asynchronous client call as a tag that can be
// stored in a `grpc::CompletionQueue`.
class GrpcClientCQTag {
 public:
  GrpcClientCQTag() = default;
  virtual ~GrpcClientCQTag() = default;

  // OnCompleted is invoked when the RPC has finished.
  // `ok` is the completion-queue success flag for the finished operation.
  // Implementations of OnCompleted can delete *this, so callers must not
  // touch the tag after invoking it.
  virtual void OnCompleted(bool ok) = 0;

 private:
  // Non-copyable: a tag identifies exactly one pending operation.
  GrpcClientCQTag(const GrpcClientCQTag&) = delete;
  void operator=(const GrpcClientCQTag&) = delete;
};
38
+
39
+ } // namespace tsl
40
+
41
+ #endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CLIENT_CQ_TAG_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_util.h ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_UTIL_H_
17
+ #define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_UTIL_H_
18
+
19
+ #include <memory>
20
+ #include <string>
21
+
22
+ #include "grpcpp/grpcpp.h"
23
+ #include "grpcpp/support/byte_buffer.h"
24
+ #include "absl/status/status.h"
25
+ #include "absl/strings/cord.h"
26
+ #include "tsl/platform/protobuf.h"
27
+ #include "tsl/platform/status.h"
28
+ #include "tsl/platform/stringpiece.h"
29
+ #include "tsl/platform/stringprintf.h"
30
+ #include "tsl/protobuf/distributed_runtime_payloads.pb.h"
31
+
32
+ namespace tsl {
33
+
34
+ // Proto: tensorflow::distributed_runtime::GrpcPayloadsLost
35
+ // Location: tsl/protobuf/distributed_runtime_payloads.proto
36
+ // Usage: Flags the Status to have lost payloads during GRPC conversion.
37
+ constexpr char kGrpcPayloadsLost[] =
38
+ "type.googleapis.com/tensorflow.distributed_runtime.GrpcPayloadsLost";
39
+
40
+ constexpr char kStreamRemovedMessage[] = "Stream removed";
41
+
42
+ // Identify if the given grpc::Status corresponds to an HTTP stream removed
43
+ // error (see chttp2_transport.cc).
44
+ //
45
+ // When auto-reconnecting to a remote worker after it restarts, gRPC can return
46
+ // an UNKNOWN error code with a "Stream removed" error message. This should not
47
+ // be treated as an unrecoverable error.
48
+ //
49
+ // N.B. This is dependent on the error message from grpc remaining consistent.
50
+ inline bool IsStreamRemovedError(const ::grpc::Status& s) {
51
+ return !s.ok() && s.error_code() == ::grpc::StatusCode::UNKNOWN &&
52
+ s.error_message() == kStreamRemovedMessage;
53
+ }
54
+
55
+ inline std::string SerializePayloads(const Status& s) {
56
+ tensorflow::distributed_runtime::GrpcPayloadContainer container;
57
+ s.ForEachPayload([&container](StringPiece key, const absl::Cord& value) {
58
+ (*container.mutable_payloads())[std::string(key)] = std::string(value);
59
+ });
60
+ return container.SerializeAsString();
61
+ }
62
+
63
// Unpacks payloads previously packed by SerializePayloads() and attaches
// them to `s`. If `payloads` cannot be parsed, `s` is instead tagged with
// kGrpcPayloadsLost so callers can detect that payloads were dropped during
// the gRPC round trip.
inline void InsertSerializedPayloads(Status& s, std::string payloads) {
  tensorflow::distributed_runtime::GrpcPayloadContainer container;
  if (container.ParseFromString(payloads)) {
    for (const auto& key_val : container.payloads()) {
      s.SetPayload(key_val.first, absl::Cord(key_val.second));
    }
  } else {
    // Parse failure: flag the status rather than silently losing payloads.
    s.SetPayload(kGrpcPayloadsLost,
                 absl::Cord(tensorflow::distributed_runtime::GrpcPayloadsLost()
                                .SerializeAsString()));
  }
}
75
+
76
+ inline Status FromGrpcStatus(const ::grpc::Status& s) {
77
+ if (s.ok()) {
78
+ return OkStatus();
79
+ } else {
80
+ Status converted;
81
+ // Convert "UNKNOWN" stream removed errors into unavailable, to allow
82
+ // for retry upstream.
83
+ if (IsStreamRemovedError(s)) {
84
+ converted = Status(absl::StatusCode::kUnavailable, s.error_message());
85
+ }
86
+ converted = Status(static_cast<absl::StatusCode>(s.error_code()),
87
+ s.error_message());
88
+ InsertSerializedPayloads(converted, s.error_details());
89
+ return converted;
90
+ }
91
+ }
92
+
93
// Converts a tsl Status into a ::grpc::Status, serializing its payloads into
// the gRPC error_details field. Error messages longer than 3k bytes are
// truncated to work around a gRPC limitation (b/62947679).
inline ::grpc::Status ToGrpcStatus(const Status& s) {
  if (s.ok()) {
    return ::grpc::Status::OK;
  } else {
    if (s.message().size() > 3072 /* 3k bytes */) {
      // TODO(b/62947679): Remove truncation once the gRPC issue is resolved.
      string scratch = strings::Printf("%.3072s ... [truncated]",
                                       tsl::NullTerminatedMessage(s));
      LOG(ERROR) << "Truncated error message: " << s;
      return ::grpc::Status(static_cast<::grpc::StatusCode>(s.code()), scratch,
                            SerializePayloads(s));
    }
    return ::grpc::Status(static_cast<::grpc::StatusCode>(s.code()),
                          std::string(s.message()), SerializePayloads(s));
  }
}
109
+
110
// Shared-ownership handle to a gRPC channel.
typedef std::shared_ptr<::grpc::Channel> SharedGrpcChannelPtr;

// Serialize src and store in *dst.
::grpc::Status GrpcMaybeUnparseProto(const protobuf::Message& src,
                                     ::grpc::ByteBuffer* dst);

// Parse contents of src and initialize *dst with them.
// Returns false on parse failure.
bool GrpcMaybeParseProto(::grpc::ByteBuffer* src, protobuf::Message* dst);

// Copy string src to grpc buffer *dst.
::grpc::Status GrpcMaybeUnparseProto(const string& src,
                                     ::grpc::ByteBuffer* dst);

// Copy grpc buffer src to string *dst. Returns false on failure.
bool GrpcMaybeParseProto(::grpc::ByteBuffer* src, string* dst);

// Copy grpc buffer src to tstring *dst. Returns false on failure.
bool GrpcMaybeParseProto(::grpc::ByteBuffer* src, tstring* dst);
128
+ } // namespace tsl
129
+
130
+ #endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_UTIL_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/allocator.h ADDED
@@ -0,0 +1,430 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_ALLOCATOR_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_ALLOCATOR_H_
18
+
19
+ #include <stdlib.h>
20
+
21
+ #include <functional>
22
+ #include <limits>
23
+ #include <optional>
24
+
25
+ #include "absl/strings/string_view.h"
26
+ #include "absl/types/optional.h"
27
+ #include "tsl/framework/numeric_types.h"
28
+ #include "tsl/framework/type_traits.h"
29
+ #include "tsl/platform/logging.h"
30
+ #include "tsl/platform/macros.h"
31
+ #include "tsl/platform/numa.h"
32
+ #include "tsl/platform/types.h"
33
+
34
+ namespace tsl {
35
+
36
// Attributes for a single allocation call. Different calls to the same
// allocator could potentially have different allocation attributes.
struct AllocationAttributes {
  AllocationAttributes() = default;

  // `freed_by_func` is stored as a raw, non-owning pointer; the caller must
  // keep the callable alive for the lifetime of this object.
  AllocationAttributes(bool retry_on_failure, bool allocation_will_be_logged,
                       std::function<uint64()>* freed_by_func)
      : retry_on_failure(retry_on_failure),
        allocation_will_be_logged(allocation_will_be_logged),
        freed_by_func(freed_by_func) {}

  // If the first attempt to allocate the memory fails, the allocation should
  // wait and retry (with a timeout).
  //
  // This is usually set to true, but we may set it to false in cases where a
  // failure has only performance impact (e.g. optional scratch space
  // allocation).
  bool retry_on_failure = true;
  // If a Tensor is allocated without the following set to true, then
  // it is logged as an unknown allocation. During execution Tensors
  // should be allocated through the OpKernelContext which records
  // which Op is performing the allocation, and sets this flag to
  // true.
  bool allocation_will_be_logged = false;
  // EXPERIMENTAL: If provided, then evaluates to a timing count such that only
  // a memory chunk whose freed_at_count is at this value or earlier may be
  // returned.
  std::function<uint64()>* freed_by_func = nullptr;  // Not owned.

  // Non-copyable: avoids accidental copies of the non-owned freed_by_func
  // pointer. Pass AllocationAttributes by (const) reference instead.
  AllocationAttributes(const AllocationAttributes&) = delete;
  void operator=(const AllocationAttributes&) = delete;
};
68
+
69
// Runtime statistics collected by an allocator. Exactly the same as
// stream_executor::AllocatorStats, but independently defined to preserve the
// mutual independence of StreamExecutor and TensorFlow.
struct AllocatorStats {
  int64_t num_allocs = 0;          // Number of allocations.
  int64_t bytes_in_use = 0;        // Number of bytes in use.
  int64_t peak_bytes_in_use = 0;   // The peak bytes in use.
  int64_t largest_alloc_size = 0;  // The largest single allocation seen.

  // The upper limit of bytes of user allocatable device memory, if such a
  // limit is known.
  std::optional<int64_t> bytes_limit;

  // Stats for reserved memory usage.
  int64_t bytes_reserved = 0;       // Number of bytes reserved.
  int64_t peak_bytes_reserved = 0;  // The peak number of bytes reserved.
  // The upper limit on the number bytes of reservable memory,
  // if such a limit is known.
  std::optional<int64_t> bytes_reservable_limit;

  int64_t largest_free_block_bytes = 0;  // Largest free block's size in heap.

  // Number of bytes of memory held by the allocator. This may be higher than
  // bytes_in_use if the allocator holds a pool of memory (e.g. BFCAllocator).
  std::optional<int64_t> pool_bytes;
  std::optional<int64_t> peak_pool_bytes;

  // All counters start at zero via the default member initializers above.
  AllocatorStats() = default;

  // Human-readable summary of the statistics (defined out of line).
  std::string DebugString() const;
};
107
+
108
// The type of the allocated memory.
enum class AllocatorMemoryType {
  kUnknown = 0,       // Memory type unknown.
  kDevice = 1,        // Memory on device.
  kHostPageable = 2,  // Memory on host and it is pageable.
  kHostPinned = 3,    // Memory on host and it is pinned.
};
115
+
116
// Allocator is an abstract interface for allocating and deallocating
// device memory.
class Allocator {
 public:
  // Align to 64 byte boundary.
  static constexpr size_t kAllocatorAlignment = 64;

  virtual ~Allocator();

  // Return a string identifying this allocator
  virtual std::string Name() = 0;

  // Return an uninitialized block of memory that is "num_bytes" bytes
  // in size. The returned pointer is guaranteed to be aligned to a
  // multiple of "alignment" bytes.
  // REQUIRES: "alignment" is a power of 2.
  virtual void* AllocateRaw(size_t alignment, size_t num_bytes) = 0;

  // Return an uninitialized block of memory that is "num_bytes" bytes
  // in size with specified allocation attributes. The returned pointer is
  // guaranteed to be aligned to a multiple of "alignment" bytes.
  // REQUIRES: "alignment" is a power of 2.
  virtual void* AllocateRaw(size_t alignment, size_t num_bytes,
                            const AllocationAttributes& allocation_attr) {
    // The default behavior is to use the implementation without any allocation
    // attributes.
    return AllocateRaw(alignment, num_bytes);
  }

  // Deallocate a block of memory pointer to by "ptr"
  // REQUIRES: "ptr" was previously returned by a call to AllocateRaw
  virtual void DeallocateRaw(void* ptr) = 0;

  // Returns true if this allocator tracks the sizes of allocations.
  // RequestedSize and AllocatedSize must be overridden if
  // TracksAllocationSizes is overridden to return true.
  virtual bool TracksAllocationSizes() const { return false; }

  // Returns true if this allocator allocates an opaque handle rather than the
  // requested number of bytes.
  //
  // This method returns false for most allocators, but may be used by
  // special-case allocators that track tensor usage. If this method returns
  // true, AllocateRaw() should be invoked for all values of `num_bytes`,
  // including 0.
  //
  // NOTE: It is the caller's responsibility to track whether an allocated
  // object is a buffer or an opaque handle. In particular, when this method
  // returns `true`, users of this allocator must not run any constructors or
  // destructors for complex objects, since there is no backing store for the
  // tensor in which to place their outputs.
  virtual bool AllocatesOpaqueHandle() const { return false; }

  // Returns the user-requested size of the data allocated at
  // 'ptr'. Note that the actual buffer allocated might be larger
  // than requested, but this function returns the size requested by
  // the user.
  //
  // REQUIRES: TracksAllocationSizes() is true.
  //
  // REQUIRES: 'ptr!=nullptr' and points to a buffer previously
  // allocated by this allocator.
  virtual size_t RequestedSize(const void* ptr) const {
    // Deliberate crash: calling this on an allocator that does not track
    // sizes is a programming error.
    CHECK(false) << "allocator doesn't track sizes";
    return size_t(0);
  }

  // Returns the allocated size of the buffer at 'ptr' if known,
  // otherwise returns RequestedSize(ptr). AllocatedSize(ptr) is
  // guaranteed to be >= RequestedSize(ptr).
  //
  // REQUIRES: TracksAllocationSizes() is true.
  //
  // REQUIRES: 'ptr!=nullptr' and points to a buffer previously
  // allocated by this allocator.
  virtual size_t AllocatedSize(const void* ptr) const {
    return RequestedSize(ptr);
  }

  // Returns either 0 or an identifier assigned to the buffer at 'ptr'
  // when the buffer was returned by AllocateRaw. If non-zero, the
  // identifier differs from every other ID assigned by this
  // allocator.
  //
  // REQUIRES: TracksAllocationSizes() is true.
  //
  // REQUIRES: 'ptr!=nullptr' and points to a buffer previously
  // allocated by this allocator.
  virtual int64_t AllocationId(const void* ptr) const { return 0; }

  // Returns the allocated size of the buffer at 'ptr' if known,
  // otherwise returns 0. This method can be called when
  // TracksAllocationSizes() is false, but can be extremely slow.
  //
  // REQUIRES: 'ptr!=nullptr' and points to a buffer previously
  // allocated by this allocator.
  virtual size_t AllocatedSizeSlow(const void* ptr) const {
    if (TracksAllocationSizes()) {
      return AllocatedSize(ptr);
    }
    return 0;
  }

  // Fills in 'stats' with statistics collected by this allocator.
  // Returns absl::nullopt if the allocator does not collect statistics.
  virtual absl::optional<AllocatorStats> GetStats() { return absl::nullopt; }

  // If implemented, clears the internal stats except for the `in_use` fields
  // and sets the `peak_bytes_in_use` to be equal to the `bytes_in_use`. Returns
  // true if implemented.
  //
  // REQUIRES: GetStats is overridden.
  virtual bool ClearStats() TF_MUST_USE_RESULT { return false; }

  // NOTE(review): semantics of `count` are defined by subclasses that use
  // timing counts (see freed_by_func in AllocationAttributes); the default
  // is a no-op.
  virtual void SetSafeFrontier(uint64 count) {}

  // For allocator that are stream aware, allow to specify the compute
  // stream this allocator is used for. This can also trigger memory
  // preallocation.
  virtual void SetStreamAndPreallocateMemory(void* stream) {}

  // Returns the type of the memory allocated by this allocator.
  virtual AllocatorMemoryType GetMemoryType() const {
    return AllocatorMemoryType::kUnknown;
  }
};
241
+
242
// An implementation of Allocator that delegates all calls to another Allocator.
//
// Useful to clients who want to override part of the functionality of another
// allocator.
//
// NOTE: GetStats(), ClearStats(), SetSafeFrontier() and
// SetStreamAndPreallocateMemory() are NOT forwarded here and fall back to
// Allocator's defaults; override them if the wrapped allocator's behavior is
// needed.
class AllocatorWrapper : public Allocator {
 public:
  // Does not take ownership of `wrapped`; it must outlive this wrapper.
  explicit AllocatorWrapper(Allocator* wrapped) : wrapped_(wrapped) {}

  ~AllocatorWrapper() override {}

  // Returns the wrapped allocator to which all calls are delegated.
  Allocator* wrapped() const { return wrapped_; }

  std::string Name() override { return wrapped_->Name(); }

  void* AllocateRaw(size_t alignment, size_t num_bytes) override {
    return wrapped_->AllocateRaw(alignment, num_bytes);
  }

  void* AllocateRaw(size_t alignment, size_t num_bytes,
                    const AllocationAttributes& allocation_attr) override {
    return wrapped_->AllocateRaw(alignment, num_bytes, allocation_attr);
  }

  void DeallocateRaw(void* ptr) override { wrapped_->DeallocateRaw(ptr); }

  bool TracksAllocationSizes() const override {
    return wrapped_->TracksAllocationSizes();
  }

  bool AllocatesOpaqueHandle() const override {
    return wrapped_->AllocatesOpaqueHandle();
  }

  size_t RequestedSize(const void* ptr) const override {
    return wrapped_->RequestedSize(ptr);
  }

  size_t AllocatedSize(const void* ptr) const override {
    return wrapped_->AllocatedSize(ptr);
  }

  int64_t AllocationId(const void* ptr) const override {
    return wrapped_->AllocationId(ptr);
  }

  size_t AllocatedSizeSlow(const void* ptr) const override {
    return wrapped_->AllocatedSizeSlow(ptr);
  }

  AllocatorMemoryType GetMemoryType() const override {
    return wrapped_->GetMemoryType();
  }

 private:
  Allocator* const wrapped_;  // Not owned.
};
299
+
300
// A tensorflow Op may need access to different kinds of memory that
// are not simply a function of the device to which the Op has been
// assigned. For example, an Op executing on a GPU may still need
// to allocate CPU RAM for some purpose. Internal to the tensorflow
// runtime we may choose to allocate CPU ram from special regions
// that have been prepared for higher performance in some use
// contexts, e.g. doing DMA with particular devices. For these
// reasons, the Device interface does not expose just one memory
// Allocator, but instead provides an accessor that takes a
// specification of the desired memory attributes in order to select
// an Allocator.
//
// Example use:
//  // Allocator for ordinary device memory:
//  Allocator* a = allocator(AllocatorAttributes());
// ...
//  // Allocator for CPU RAM, regardless of where Op is executing:
//  AllocatorAttributes attr;
//  attr.set_on_host(true);
//  Allocator* a = allocator(attr);
struct AllocatorAttributes {
  // NOTE: the setters below only OR their bit in; calling them with v=false
  // does not clear a previously set bit.
  void set_on_host(bool v) { value |= (static_cast<int>(v)); }
  bool on_host() const { return value & 0x1; }
  void set_nic_compatible(bool v) { value |= (static_cast<int>(v) << 1); }
  bool nic_compatible() const { return value & (0x1 << 1); }
  void set_gpu_compatible(bool v) { value |= (static_cast<int>(v) << 2); }
  bool gpu_compatible() const { return value & (0x1 << 2); }
  void set_use_pjrt_allocator(bool v) { value |= (static_cast<int>(v) << 3); }
  bool use_pjrt_allocator() const { return value & (0x1 << 3); }
  // ORs in other's flag bits and adopts its scope_id; crashes if both have
  // conflicting non-zero scope_ids.
  void Merge(AllocatorAttributes other) {
    value |= other.value;
    if (scope_id != other.scope_id) {
      CHECK(scope_id == 0 || other.scope_id == 0)
          << "At least one scope_id should be zero to merge "
             "AllocatorAttributes but found this.scope_id="
          << scope_id << " and other.scope_id=" << other.scope_id;
      scope_id = scope_id == 0 ? other.scope_id : scope_id;
    }
  }
  // Returns true if the fields set in *this is a subset of or equal to
  // those set in other. (Only compares `value`, not `scope_id`.)
  bool IsEqualOrLessRestrictiveThan(const AllocatorAttributes& other) const {
    return (value | other.value) == other.value;
  }

  // NOTE: The upper 8 bits of the value are reserved for
  // device-specific uses. Implementors of a device can interpret these
  // upper 8 bits in device-specific ways, and ops implemented for those
  // devices are responsible for setting those 8 bits appropriately.
  uint32 value = 0;
  // EXPERIMENTAL: If this is greater than zero, then allocation is delegated to
  // a named special-purpose allocator on the same device.
  int32 scope_id = 0;

  // Returns a human readable representation of this.
  std::string DebugString() const;
};
357
+
358
// Returns a trivial implementation of Allocator, which is a process singleton.
// Access through this function is only intended for use by restricted parts
// of the infrastructure.
Allocator* cpu_allocator_base();

// If available, calls ProcessState::GetCPUAllocator(numa_node).
// If not, falls back to cpu_allocator_base().
// Intended for use in contexts where ProcessState is not visible at
// compile time. Where ProcessState is visible, it's preferable to
// call it directly.
Allocator* cpu_allocator(int numa_node = port::kNUMANoAffinity);

// Enables AllocatorStats in the default CPU allocator implementation. By
// default, it's disabled.
void EnableCPUAllocatorStats();
// Disables AllocatorStats in the default CPU allocator implementation. By
// default, it's disabled.
void DisableCPUAllocatorStats();
// Returns whether stats collection is currently enabled.
bool CPUAllocatorStatsEnabled();

// Enables full statistics collection in the default CPU allocator
// implementation. By default, it's disabled.
void EnableCPUAllocatorFullStats();
// Returns whether full stats collection is currently enabled.
bool CPUAllocatorFullStatsEnabled();
382
+
383
// An object that does the underlying suballoc/free of memory for a higher-level
// allocator. The expectation is that the higher-level allocator is doing some
// kind of cache or pool management so that it will call SubAllocator::Alloc and
// Free relatively infrequently, compared to the number of times its own
// AllocateRaw and Free methods are called.
class SubAllocator {
 public:
  // Visitor gets called with a pointer to a memory area and its
  // size in bytes. The index value will be numa_node for a CPU
  // allocator and GPU id for a GPU allocator.
  typedef std::function<void(void*, int index, size_t)> Visitor;

  // Visitors are invoked on each allocation/deallocation (see VisitAlloc and
  // VisitFree below).
  SubAllocator(const std::vector<Visitor>& alloc_visitors,
               const std::vector<Visitor>& free_visitors);

  virtual ~SubAllocator() {}
  // Allocates at least num_bytes. Returns actual number of bytes allocated in
  // bytes_received. The caller can safely use the full bytes_received sized
  // buffer following the returned pointer.
  virtual void* Alloc(size_t alignment, size_t num_bytes,
                      size_t* bytes_received) = 0;
  // Frees a region previously returned by Alloc. num_bytes must match the
  // original request size.
  virtual void Free(void* ptr, size_t num_bytes) = 0;

  // Returns true if the BFC allocator can safely coalesce adjacent regions
  // returned by this allocator.
  virtual bool SupportsCoalescing() const = 0;

  // Returns the type of the memory allocated by this SubAllocator.
  virtual AllocatorMemoryType GetMemoryType() const {
    return AllocatorMemoryType::kUnknown;
  }

 protected:
  // Implementation of Alloc() method must call this on newly allocated
  // value.
  void VisitAlloc(void* ptr, int index, size_t num_bytes);

  // Implementation of Free() method must call this on value to be
  // freed immediately before deallocation.
  void VisitFree(void* ptr, int index, size_t num_bytes);

  const std::vector<Visitor> alloc_visitors_;
  const std::vector<Visitor> free_visitors_;
};
427
+
428
+ } // namespace tsl
429
+
430
+ #endif // TENSORFLOW_TSL_FRAMEWORK_ALLOCATOR_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/allocator_registry.h ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ // Classes to maintain a static registry of memory allocator factories.
17
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_ALLOCATOR_REGISTRY_H_
18
+ #define TENSORFLOW_TSL_FRAMEWORK_ALLOCATOR_REGISTRY_H_
19
+
20
+ #include <memory>
21
+ #include <string>
22
+ #include <vector>
23
+
24
+ #include "absl/base/thread_annotations.h"
25
+ #include "tsl/framework/allocator.h"
26
+ #include "tsl/platform/macros.h"
27
+ #include "tsl/platform/mutex.h"
28
+ #include "tsl/platform/numa.h"
29
+
30
+ namespace tensorflow {
31
+
32
+ class ProcessState;
33
+
34
+ }
35
+
36
+ namespace tsl {
37
+
38
+ class AllocatorFactory {
39
+ public:
40
+ virtual ~AllocatorFactory() {}
41
+
42
+ // Returns true if the factory will create a functionally different
43
+ // SubAllocator for different (legal) values of numa_node.
44
+ virtual bool NumaEnabled() { return false; }
45
+
46
+ // Create an Allocator.
47
+ virtual Allocator* CreateAllocator() = 0;
48
+
49
+ // Create a SubAllocator. If NumaEnabled() is true, then returned SubAllocator
50
+ // will allocate memory local to numa_node. If numa_node == kNUMANoAffinity
51
+ // then allocated memory is not specific to any NUMA node.
52
+ virtual SubAllocator* CreateSubAllocator(int numa_node) = 0;
53
+ };
54
+
55
+ // ProcessState is defined in a package that cannot be a dependency of
56
+ // framework. This definition allows us to access the one method we need.
57
+ class ProcessStateInterface {
58
+ public:
59
+ virtual ~ProcessStateInterface() {}
60
+ virtual Allocator* GetCPUAllocator(int numa_node) = 0;
61
+ };
62
+
63
+ // A singleton registry of AllocatorFactories.
64
+ //
65
+ // Allocators should be obtained through ProcessState or cpu_allocator()
66
+ // (deprecated), not directly through this interface. The purpose of this
67
+ // registry is to allow link-time discovery of multiple AllocatorFactories among
68
+ // which ProcessState will obtain the best fit at startup.
69
+ class AllocatorFactoryRegistry {
70
+ public:
71
+ AllocatorFactoryRegistry() {}
72
+ ~AllocatorFactoryRegistry() {}
73
+
74
+ void Register(const char* source_file, int source_line, const string& name,
75
+ int priority, AllocatorFactory* factory);
76
+
77
+ // Returns 'best fit' Allocator. Find the factory with the highest priority
78
+ // and return an allocator constructed by it. If multiple factories have
79
+ // been registered with the same priority, picks one by unspecified criteria.
80
+ Allocator* GetAllocator();
81
+
82
+ // Returns 'best fit' SubAllocator. First look for the highest priority
83
+ // factory that is NUMA-enabled. If none is registered, fall back to the
84
+ // highest priority non-NUMA-enabled factory. If NUMA-enabled, return a
85
+ // SubAllocator specific to numa_node, otherwise return a NUMA-insensitive
86
+ // SubAllocator.
87
+ SubAllocator* GetSubAllocator(int numa_node);
88
+
89
+ // Returns the singleton value.
90
+ static AllocatorFactoryRegistry* singleton();
91
+
92
+ ProcessStateInterface* process_state() const {
93
+ mutex_lock ml(mu_);
94
+ return process_state_;
95
+ }
96
+
97
+ protected:
98
+ friend class tensorflow::ProcessState;
99
+
100
+ void SetProcessState(ProcessStateInterface* interface) {
101
+ mutex_lock ml(mu_);
102
+ process_state_ = interface;
103
+ }
104
+
105
+ private:
106
+ mutable mutex mu_;
107
+ ProcessStateInterface* process_state_ ABSL_GUARDED_BY(mu_) = nullptr;
108
+ bool first_alloc_made_ = false;
109
+ struct FactoryEntry {
110
+ const char* source_file;
111
+ int source_line;
112
+ string name;
113
+ int priority;
114
+ std::unique_ptr<AllocatorFactory> factory;
115
+ std::unique_ptr<Allocator> allocator;
116
+ // Index 0 corresponds to kNUMANoAffinity, other indices are (numa_node +
117
+ // 1).
118
+ std::vector<std::unique_ptr<SubAllocator>> sub_allocators;
119
+ };
120
+ std::vector<FactoryEntry> factories_ ABSL_GUARDED_BY(mu_);
121
+
122
+ // Returns any FactoryEntry registered under 'name' and 'priority',
123
+ // or 'nullptr' if none found.
124
+ const FactoryEntry* FindEntry(const string& name, int priority) const
125
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
126
+
127
+ AllocatorFactoryRegistry(const AllocatorFactoryRegistry&) = delete;
128
+ void operator=(const AllocatorFactoryRegistry&) = delete;
129
+ };
130
+
131
+ class AllocatorFactoryRegistration {
132
+ public:
133
+ AllocatorFactoryRegistration(const char* file, int line, const string& name,
134
+ int priority, AllocatorFactory* factory) {
135
+ AllocatorFactoryRegistry::singleton()->Register(file, line, name, priority,
136
+ factory);
137
+ }
138
+ };
139
+
140
+ #define REGISTER_MEM_ALLOCATOR(name, priority, factory) \
141
+ REGISTER_MEM_ALLOCATOR_UNIQ_HELPER(__COUNTER__, __FILE__, __LINE__, name, \
142
+ priority, factory)
143
+
144
+ #define REGISTER_MEM_ALLOCATOR_UNIQ_HELPER(ctr, file, line, name, priority, \
145
+ factory) \
146
+ REGISTER_MEM_ALLOCATOR_UNIQ(ctr, file, line, name, priority, factory)
147
+
148
+ #define REGISTER_MEM_ALLOCATOR_UNIQ(ctr, file, line, name, priority, factory) \
149
+ static AllocatorFactoryRegistration allocator_factory_reg_##ctr( \
150
+ file, line, name, priority, new factory)
151
+
152
+ } // namespace tsl
153
+
154
+ #endif // TENSORFLOW_TSL_FRAMEWORK_ALLOCATOR_REGISTRY_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/allocator_retry.h ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_ALLOCATOR_RETRY_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_ALLOCATOR_RETRY_H_
18
+
19
+ #include "tsl/platform/env.h"
20
+ #include "tsl/platform/mutex.h"
21
+ #include "tsl/platform/types.h"
22
+
23
+ namespace tsl {
24
+
25
+ // A retrying wrapper for a memory allocator.
26
+ class AllocatorRetry {
27
+ public:
28
+ AllocatorRetry();
29
+
30
+ // Call 'alloc_func' to obtain memory. On first call,
31
+ // 'verbose_failure' will be false. If return value is nullptr,
32
+ // then wait up to 'max_millis_to_wait' milliseconds, retrying each
33
+ // time a call to DeallocateRaw() is detected, until either a good
34
+ // pointer is returned or the deadline is exhausted. If the
35
+ // deadline is exhausted, try one more time with 'verbose_failure'
36
+ // set to true. The value returned is either the first good pointer
37
+ // obtained from 'alloc_func' or nullptr.
38
+ void* AllocateRaw(std::function<void*(size_t alignment, size_t num_bytes,
39
+ bool verbose_failure)>
40
+ alloc_func,
41
+ int max_millis_to_wait, size_t alignment, size_t bytes);
42
+
43
+ // Called to notify clients that some memory was returned.
44
+ void NotifyDealloc();
45
+
46
+ private:
47
+ Env* env_;
48
+ mutex mu_;
49
+ condition_variable memory_returned_;
50
+ };
51
+
52
+ // Implementation details below
53
+ inline void AllocatorRetry::NotifyDealloc() {
54
+ mutex_lock l(mu_);
55
+ memory_returned_.notify_all();
56
+ }
57
+
58
+ } // namespace tsl
59
+
60
+ #endif // TENSORFLOW_TSL_FRAMEWORK_ALLOCATOR_RETRY_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/bfc_allocator.h ADDED
@@ -0,0 +1,629 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_BFC_ALLOCATOR_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_BFC_ALLOCATOR_H_
18
+
19
+ #include <array>
20
+ #include <deque>
21
+ #include <memory>
22
+ #include <string>
23
+ #include <unordered_map>
24
+ #include <vector>
25
+
26
+ #include "absl/container/flat_hash_set.h"
27
+ #include "tsl/framework/allocator.h"
28
+ #include "tsl/framework/allocator_retry.h"
29
+ #include "tsl/framework/shared_counter.h"
30
+ #include "tsl/platform/macros.h"
31
+ #include "tsl/platform/mutex.h"
32
+ #include "tsl/platform/numbers.h"
33
+ #include "tsl/platform/strcat.h"
34
+ #include "tsl/platform/thread_annotations.h"
35
+ #include "tsl/platform/types.h"
36
+
37
+ namespace tensorflow {
38
+ class MemoryDump;
39
+ }
40
+ namespace tsl {
41
+ using tensorflow::MemoryDump;
42
+
43
+ // A memory allocator that implements a 'best-fit with coalescing'
44
+ // algorithm. This is essentially a very simple version of Doug Lea's
45
+ // malloc (dlmalloc).
46
+ //
47
+ // The goal of this allocator is to support defragmentation via
48
+ // coalescing. One assumption we make is that the process using this
49
+ // allocator owns pretty much all of the memory, and that nearly
50
+ // all requests to allocate memory go through this interface.
51
+ class BFCAllocator : public Allocator {
52
+ public:
53
+ struct Options {
54
+ bool allow_growth = true;
55
+
56
+ // If true, the allocator may sleep for a period of time when it can't
57
+ // fulfill an allocation request, in the hopes that another thread will free
58
+ // up memory in the meantime.
59
+ //
60
+ // If false, the allocator will never sleep, even if
61
+ // AllocationAttributes::attr_retry_on_failure is true.
62
+ bool allow_retry_on_failure = true;
63
+
64
+ // Whether the allocator will deallocate free regions to avoid OOM due to
65
+ // memory fragmentation.
66
+ bool garbage_collection = false;
67
+
68
+ // Controls when a chunk should be split, if its size exceeds the requested
69
+ // allocation size.
70
+ double fragmentation_fraction = 0;
71
+ };
72
+ BFCAllocator(std::unique_ptr<SubAllocator> sub_allocator, size_t total_memory,
73
+ const string& name, const Options& opts);
74
+
75
+ ~BFCAllocator() override;
76
+
77
+ string Name() override { return name_; }
78
+
79
+ void* AllocateRaw(size_t alignment, size_t num_bytes) override {
80
+ return AllocateRaw(alignment, num_bytes, AllocationAttributes());
81
+ }
82
+
83
+ void* AllocateRaw(size_t alignment, size_t num_bytes,
84
+ const AllocationAttributes& allocation_attr) override;
85
+
86
+ void DeallocateRaw(void* ptr) override;
87
+
88
+ bool TracksAllocationSizes() const override;
89
+
90
+ size_t RequestedSize(const void* ptr) const override;
91
+
92
+ size_t AllocatedSize(const void* ptr) const override;
93
+
94
+ int64_t AllocationId(const void* ptr) const override;
95
+
96
+ absl::optional<AllocatorStats> GetStats() override;
97
+
98
+ bool ClearStats() override;
99
+
100
+ void SetTimingCounter(SharedCounter* sc) { timing_counter_ = sc; }
101
+
102
+ void SetSafeFrontier(uint64 count) override;
103
+
104
+ AllocatorMemoryType GetMemoryType() const override;
105
+
106
+ bool ShouldRecordOpName() const { return true; }
107
+
108
+ MemoryDump RecordMemoryMap();
109
+
110
+ private:
111
+ struct Bin;
112
+
113
+ void* AllocateRawInternal(size_t alignment, size_t num_bytes,
114
+ bool dump_log_on_failure,
115
+ uint64 freed_before_count);
116
+
117
+ void* AllocateRawInternalWithRetry(
118
+ size_t alignment, size_t num_bytes,
119
+ const AllocationAttributes& allocation_attr);
120
+
121
+ void DeallocateRawInternal(void* ptr);
122
+
123
+ // Chunks whose freed_at_count is later than the safe frontier value are kept
124
+ // on a special list and not subject to merging immediately upon being freed.
125
+ //
126
+ // This function sweeps that list looking for Chunks whose timestamp is now
127
+ // safe. When found their freed_at_count is set to 0 and we attempt to merge
128
+ // them with their neighbors.
129
+ //
130
+ // If required_bytes > 0 then this function is being called in the context of
131
+ // a need for this many bytes that could not be satisfied without merging
132
+ // unsafe chunks, so we go ahead and merge the unsafe chunks too, just up to
133
+ // the point that a free chunk of required_bytes is produced. Note that
134
+ // unsafe merged chunks adopt the most conservative timestamp from their
135
+ // constituents so they're only useful for allocations not requiring a
136
+ // particular timestamp.
137
+ bool MergeTimestampedChunks(size_t required_bytes)
138
+ TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
139
+
140
+ // Return the largest free chunk bytes from the largest bin in constant time.
141
+ // The free chunks are sorted by size (and then address) in a bin.
142
+ int64_t LargestFreeChunk() TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
143
+
144
+ // Add TraceMe (in memory allocation and deallocation) for memory stats
145
+ // profiling. The chunk_ptr is passed to get information such as address,
146
+ // chunk size and requested_size.
147
+ void AddTraceMe(absl::string_view traceme_name, const void* ptr)
148
+ TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
149
+
150
+ // Overloaded AddTraceMe function with chunk information.
151
+ void AddTraceMe(absl::string_view traceme_name, const void* chunk_ptr,
152
+ int64_t req_bytes, int64_t alloc_bytes)
153
+ TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
154
+
155
+ // A ChunkHandle is an index into the chunks_ vector in BFCAllocator
156
+ // kInvalidChunkHandle means an invalid chunk
157
+ typedef size_t ChunkHandle;
158
+ static constexpr ChunkHandle kInvalidChunkHandle = SIZE_MAX;
159
+
160
+ typedef int BinNum;
161
+ static constexpr int kInvalidBinNum = -1;
162
+ // The following means that the largest bin'd chunk size is 256 << 21 = 512MB.
163
+ static constexpr int kNumBins = 21;
164
+
165
+ // A Chunk points to a piece of memory that's either entirely free or entirely
166
+ // in use by one user memory allocation.
167
+ //
168
+ // An AllocationRegion's memory is split up into one or more disjoint Chunks,
169
+ // which together cover the whole region without gaps. Chunks participate in
170
+ // a doubly-linked list, and the prev/next pointers point to the physically
171
+ // adjacent chunks.
172
+ //
173
+ // Since a chunk cannot be partially in use, we may need to split a free chunk
174
+ // in order to service a user allocation. We always merge adjacent free
175
+ // chunks.
176
+ //
177
+ // Chunks contain information about whether they are in use or whether they
178
+ // are free, and contain a pointer to the bin they are in.
179
+ struct Chunk {
180
+ size_t size = 0; // Full size of buffer.
181
+
182
+ // We sometimes give chunks that are larger than needed to reduce
183
+ // fragmentation. requested_size keeps track of what the client
184
+ // actually wanted so we can understand whether our splitting
185
+ // strategy is efficient.
186
+ size_t requested_size = 0;
187
+
188
+ // allocation_id is set to -1 when the chunk is not in use. It is assigned a
189
+ // value greater than zero before the chunk is returned from
190
+ // AllocateRaw, and this value is unique among values assigned by
191
+ // the parent allocator.
192
+ int64_t allocation_id = -1;
193
+ void* ptr = nullptr; // pointer to granted subbuffer.
194
+
195
+ // If not kInvalidChunkHandle, the memory referred to by 'prev' is directly
196
+ // preceding the memory used by this chunk. E.g., It should start
197
+ // at 'ptr - prev->size'
198
+ ChunkHandle prev = kInvalidChunkHandle;
199
+
200
+ // If not kInvalidChunkHandle, the memory referred to by 'next' is directly
201
+ // following the memory used by this chunk. E.g., It should be at
202
+ // 'ptr + size'
203
+ ChunkHandle next = kInvalidChunkHandle;
204
+
205
+ // What bin are we in?
206
+ BinNum bin_num = kInvalidBinNum;
207
+
208
+ // Optional count when this chunk was most recently made free.
209
+ uint64 freed_at_count = 0;
210
+
211
+ bool in_use() const { return allocation_id != -1; }
212
+
213
+ #ifdef TENSORFLOW_MEM_DEBUG
214
+ // optional debugging info
215
+ const char* op_name = nullptr;
216
+ uint64 step_id = 0;
217
+ int64 action_count = 0;
218
+ #endif
219
+
220
+ string DebugString(BFCAllocator* a,
221
+ bool recurse) TF_NO_THREAD_SAFETY_ANALYSIS {
222
+ string dbg;
223
+ strings::StrAppend(
224
+ &dbg, " Size: ", strings::HumanReadableNumBytes(size),
225
+ " | Requested Size: ", strings::HumanReadableNumBytes(requested_size),
226
+ " | in_use: ", in_use(), " | bin_num: ", bin_num);
227
+ if (recurse && prev != BFCAllocator::kInvalidChunkHandle) {
228
+ Chunk* p = a->ChunkFromHandle(prev);
229
+ strings::StrAppend(&dbg, ", prev: ", p->DebugString(a, false));
230
+ }
231
+ if (recurse && next != BFCAllocator::kInvalidChunkHandle) {
232
+ Chunk* n = a->ChunkFromHandle(next);
233
+ strings::StrAppend(&dbg, ", next: ", n->DebugString(a, false));
234
+ }
235
+ #ifdef TENSORFLOW_MEM_DEBUG
236
+ strings::StrAppend(&dbg, ", for: ", op_name ? op_name : "UNKNOWN",
237
+ ", stepid: ", step_id,
238
+ ", last_action: ", action_count);
239
+ #endif
240
+ return dbg;
241
+ }
242
+ };
243
+
244
+ // A Bin is a collection of similar-sized free chunks.
245
+ // Allocated chunks are never in a Bin.
246
+ struct Bin {
247
+ // All chunks in this bin have >= bin_size memory.
248
+ size_t bin_size = 0;
249
+
250
+ class ChunkComparator {
251
+ public:
252
+ explicit ChunkComparator(BFCAllocator* allocator)
253
+ : allocator_(allocator) {}
254
+ // Sort first by size and then use pointer address as a tie breaker.
255
+ bool operator()(const ChunkHandle ha,
256
+ const ChunkHandle hb) const TF_NO_THREAD_SAFETY_ANALYSIS {
257
+ const Chunk* a = allocator_->ChunkFromHandle(ha);
258
+ const Chunk* b = allocator_->ChunkFromHandle(hb);
259
+ if (a->size != b->size) {
260
+ return a->size < b->size;
261
+ }
262
+ return a->ptr < b->ptr;
263
+ }
264
+
265
+ private:
266
+ BFCAllocator* allocator_; // The parent allocator
267
+ };
268
+
269
+ typedef std::set<ChunkHandle, ChunkComparator> FreeChunkSet;
270
+ // List of free chunks within the bin, sorted by chunk size.
271
+ // Chunk * not owned.
272
+ FreeChunkSet free_chunks;
273
+ Bin(BFCAllocator* allocator, size_t bs)
274
+ : bin_size(bs), free_chunks(ChunkComparator(allocator)) {}
275
+ };
276
+
277
+ static constexpr size_t kMinAllocationBits = 8;
278
+ static constexpr size_t kMinAllocationSize = 1 << kMinAllocationBits;
279
+
280
+ // BFCAllocator allocates memory into a collection of disjoint
281
+ // AllocationRegions. Each AllocationRegion corresponds to one call to
282
+ // SubAllocator::Alloc(). (Actually, if a subsequent call to
283
+ // SubAllocator::Alloc() returns another region immediately adjacent to the
284
+ // last, it will be used to extend the first AllocationRegion, not create a
285
+ // separate one.)
286
+ //
287
+ // An AllocationRegion contains one or more Chunks, covering all of its
288
+ // memory. Its primary job is to map pointers to ChunkHandles.
289
+ //
290
+ // This class is thread-compatible.
291
+ class AllocationRegion {
292
+ public:
293
+ AllocationRegion(void* ptr, size_t memory_size)
294
+ : ptr_(ptr),
295
+ memory_size_(memory_size),
296
+ end_ptr_(
297
+ static_cast<void*>(static_cast<char*>(ptr_) + memory_size_)) {
298
+ DCHECK_EQ(0, memory_size % kMinAllocationSize);
299
+ const size_t n_handles =
300
+ (memory_size + kMinAllocationSize - 1) / kMinAllocationSize;
301
+ handles_.resize(n_handles, kInvalidChunkHandle);
302
+ }
303
+
304
+ AllocationRegion() = default;
305
+ AllocationRegion(AllocationRegion&& other) { Swap(&other); }
306
+ AllocationRegion& operator=(AllocationRegion&& other) {
307
+ Swap(&other);
308
+ return *this;
309
+ }
310
+
311
+ void* ptr() const { return ptr_; }
312
+ void* end_ptr() const { return end_ptr_; }
313
+ size_t memory_size() const { return memory_size_; }
314
+ void extend(size_t size) {
315
+ memory_size_ += size;
316
+ DCHECK_EQ(0, memory_size_ % kMinAllocationSize);
317
+
318
+ end_ptr_ = static_cast<void*>(static_cast<char*>(end_ptr_) + size);
319
+ const size_t n_handles =
320
+ (memory_size_ + kMinAllocationSize - 1) / kMinAllocationSize;
321
+ handles_.resize(n_handles, kInvalidChunkHandle);
322
+ }
323
+ ChunkHandle get_handle(const void* p) const {
324
+ return handles_[IndexFor(p)];
325
+ }
326
+ void set_handle(const void* p, ChunkHandle h) { handles_[IndexFor(p)] = h; }
327
+ void erase(const void* p) { set_handle(p, kInvalidChunkHandle); }
328
+
329
+ private:
330
+ void Swap(AllocationRegion* other) {
331
+ std::swap(ptr_, other->ptr_);
332
+ std::swap(memory_size_, other->memory_size_);
333
+ std::swap(end_ptr_, other->end_ptr_);
334
+ std::swap(handles_, other->handles_);
335
+ }
336
+
337
+ size_t IndexFor(const void* p) const {
338
+ std::uintptr_t p_int = reinterpret_cast<std::uintptr_t>(p);
339
+ std::uintptr_t base_int = reinterpret_cast<std::uintptr_t>(ptr_);
340
+ DCHECK_GE(p_int, base_int);
341
+ DCHECK_LT(p_int, base_int + memory_size_);
342
+ return static_cast<size_t>(((p_int - base_int) >> kMinAllocationBits));
343
+ }
344
+
345
+ // Metadata about the allocation region.
346
+ void* ptr_ = nullptr;
347
+ size_t memory_size_ = 0;
348
+ void* end_ptr_ = nullptr;
349
+
350
+ // Array of size "memory_size / kMinAllocationSize". It is
351
+ // indexed by (p-base) / kMinAllocationSize, contains ChunkHandle
352
+ // for the memory allocation represented by "p"
353
+ std::vector<ChunkHandle> handles_;
354
+
355
+ AllocationRegion(const AllocationRegion&) = delete;
356
+ void operator=(const AllocationRegion&) = delete;
357
+ };
358
+
359
+ // RegionManager aggregates one or more "AllocationRegions" and provides
360
+ // a layer of indirection from pointers to the underlying ChunkHandle,
361
+ // allowing allocation across multiple discontiguous memory regions.
362
+ //
363
+ // This class is thread-compatible.
364
+ class RegionManager {
365
+ public:
366
+ RegionManager() {}
367
+ ~RegionManager() {}
368
+
369
+ void AddAllocationRegion(void* ptr, size_t memory_size) {
370
+ // Insert sorted by end_ptr.
371
+ auto entry =
372
+ std::upper_bound(regions_.begin(), regions_.end(), ptr, &Comparator);
373
+ regions_.insert(entry, AllocationRegion(ptr, memory_size));
374
+ }
375
+
376
+ // Adds an alloation region for the given ptr and size, potentially
377
+ // extending a region if ptr matches the end_ptr of an existing region.
378
+ // If a region is extended, returns a pointer to the extended region so that
379
+ // the BFC allocator can reason about chunkification.
380
+ AllocationRegion* AddOrExtendAllocationRegion(void* ptr,
381
+ size_t memory_size) {
382
+ // Insert sorted by end_ptr.
383
+ auto entry =
384
+ std::upper_bound(regions_.begin(), regions_.end(), ptr, &Comparator);
385
+ // Check if can be coalesced with preceding region.
386
+ if (entry != regions_.begin()) {
387
+ auto preceding_region = entry - 1;
388
+ if (preceding_region->end_ptr() == ptr) {
389
+ if (VLOG_IS_ON(1)) {
390
+ LOG(INFO) << "Extending region " << preceding_region->ptr()
391
+ << " of "
392
+ << strings::HumanReadableNumBytes(
393
+ preceding_region->memory_size())
394
+ << " by " << strings::HumanReadableNumBytes(memory_size)
395
+ << " bytes";
396
+ }
397
+ preceding_region->extend(memory_size);
398
+ return &*preceding_region;
399
+ }
400
+ }
401
+ VLOG(1) << "Inserting new region " << ptr << " of "
402
+ << strings::HumanReadableNumBytes(memory_size);
403
+ regions_.insert(entry, AllocationRegion(ptr, memory_size));
404
+ return nullptr;
405
+ }
406
+
407
+ std::vector<AllocationRegion>::iterator RemoveAllocationRegion(
408
+ std::vector<AllocationRegion>::iterator it) {
409
+ return regions_.erase(it);
410
+ }
411
+
412
+ ChunkHandle get_handle(const void* p) const {
413
+ return RegionFor(p)->get_handle(p);
414
+ }
415
+
416
+ void set_handle(const void* p, ChunkHandle h) {
417
+ return MutableRegionFor(p)->set_handle(p, h);
418
+ }
419
+ void erase(const void* p) { return MutableRegionFor(p)->erase(p); }
420
+
421
+ const std::vector<AllocationRegion>& regions() const { return regions_; }
422
+
423
+ private:
424
+ static bool Comparator(const void* ptr, const AllocationRegion& other) {
425
+ return ptr < other.end_ptr();
426
+ }
427
+
428
+ AllocationRegion* MutableRegionFor(const void* p) {
429
+ return const_cast<AllocationRegion*>(RegionFor(p));
430
+ }
431
+
432
+ const AllocationRegion* RegionFor(const void* p) const {
433
+ auto entry =
434
+ std::upper_bound(regions_.begin(), regions_.end(), p, &Comparator);
435
+
436
+ if (entry != regions_.end()) {
437
+ return &(*entry);
438
+ }
439
+
440
+ LOG(FATAL) << "Could not find Region for " << p;
441
+ return nullptr;
442
+ }
443
+
444
+ private:
445
+ std::vector<AllocationRegion> regions_;
446
+ };
447
+
448
+ // Returns 'bytes' rounded up to the next highest kMinAllocationSize.
449
+ static size_t RoundedBytes(size_t bytes);
450
+
451
+ // Try to add a new memory region that can satisfy an allocation of
452
+ // 'rounded_bytes' bytes. Returns true on success and false on
453
+ // failure.
454
+ bool Extend(size_t alignment, size_t rounded_bytes)
455
+ TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
456
+
457
+ // Deallocate free regions to give back the memory to suballocator, so that
458
+ // we can re-allocate a larger region. The main use scenario of this function
459
+ // is when OOM happens but we have free regions and the sum of sizes of free
460
+ // regions and unallocated bytes is larger than the requested size, implying
461
+ // (external) memory fragmentation. Returns true if any free regions are
462
+ // found and freed; false otherwise.
463
+ bool DeallocateFreeRegions(size_t rounded_bytes);
464
+
465
+ // Helper function to deallocate regions.
466
+ void DeallocateRegions(const absl::flat_hash_set<void*>& region_ptrs)
467
+ TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
468
+
469
+ // Returns a pointer to an underlying allocated chunk of size
470
+ // 'rounded_bytes'.
471
+ void* FindChunkPtr(BinNum bin_num, size_t rounded_bytes, size_t num_bytes,
472
+ uint64 freed_before) TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
473
+
474
+ // Splits the chunk specified by 'h' into two chunks, one at least
475
+ // of size 'num_bytes'.
476
+ void SplitChunk(ChunkHandle h, size_t num_bytes)
477
+ TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
478
+
479
+ // Merges the two chunk handles. Requires that the chunks are
480
+ // contiguous in their allocation.
481
+ void Merge(ChunkHandle h, ChunkHandle h2) TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
482
+
483
+ // Adds the chunk 'h' to the proper free bin.
484
+ void InsertFreeChunkIntoBin(ChunkHandle h) TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
485
+
486
+ // Removes the free chunk pointed to by 'c' from the set free_chunks.
487
+ void RemoveFreeChunkIterFromBin(Bin::FreeChunkSet* free_chunks,
488
+ const Bin::FreeChunkSet::iterator& c)
489
+ TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
490
+
491
+ // Removes a free chunk from the bin.
492
+ void RemoveFreeChunkFromBin(ChunkHandle h) TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
493
+ void MaybeRemoveFreeChunkFromBin(ChunkHandle h)
494
+ TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
495
+
496
+ // Removes the chunk metadata represented by 'h'.
497
+ void DeleteChunk(ChunkHandle h) TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
498
+
499
+ string RenderOccupancy() TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
500
+ void DumpMemoryLog(size_t num_bytes) TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
501
+ tensorflow::MemoryDump RecordMemoryMapInternal()
502
+ TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
503
+ void MaybeWriteMemoryMap() TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
504
+
505
+ ChunkHandle AllocateChunk() TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
506
+ void DeallocateChunk(ChunkHandle h) TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
507
+
508
+ Chunk* ChunkFromHandle(ChunkHandle h) TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
509
+ const Chunk* ChunkFromHandle(ChunkHandle h) const
510
+ TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
511
+
512
+ void MarkFree(ChunkHandle h) TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
513
+
514
+ ChunkHandle TryToCoalesce(ChunkHandle h, bool ignore_freed_at)
515
+ TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
516
+
517
+ // Fragmentation is calculated as the reverse ratio of the largest free chunk
518
+ // size over total free memory, and returns a value within [0, 1].
519
+ double GetFragmentation() TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
520
+
521
+ // Information about a Bin that is useful for debugging.
522
+ struct BinDebugInfo {
523
+ size_t total_bytes_in_use = 0;
524
+ size_t total_bytes_in_bin = 0;
525
+ size_t total_requested_bytes_in_use = 0;
526
+ size_t total_chunks_in_use = 0;
527
+ size_t total_chunks_in_bin = 0;
528
+ };
529
+
530
+ // Computes and returns a BinDebugInfo for each Bin.
531
+ std::array<BinDebugInfo, kNumBins> get_bin_debug_info()
532
+ TF_EXCLUSIVE_LOCKS_REQUIRED(lock_);
533
+
534
+ AllocatorRetry retry_helper_;
535
+
536
+ // Structures immutable after construction
537
+ size_t memory_limit_ = 0;
538
+
539
+ inline int Log2FloorNonZeroSlow(uint64 n) {
540
+ int r = 0;
541
+ while (n > 0) {
542
+ r++;
543
+ n >>= 1;
544
+ }
545
+ return r - 1;
546
+ }
547
+
548
+ // Returns floor(log2(n)).
549
+ inline int Log2FloorNonZero(uint64 n) {
550
+ #if defined(__GNUC__)
551
+ return 63 ^ __builtin_clzll(n);
552
+ #elif defined(PLATFORM_WINDOWS) && (_WIN64)
553
+ unsigned long index;
554
+ _BitScanReverse64(&index, n);
555
+ return index;
556
+ #else
557
+ return Log2FloorNonZeroSlow(n);
558
+ #endif
559
+ }
560
+
561
+ // Map from bin size to Bin
562
+ Bin* BinFromIndex(BinNum index) {
563
+ return reinterpret_cast<Bin*>(&(bins_space_[index * sizeof(Bin)]));
564
+ }
565
+ size_t BinNumToSize(BinNum index) {
566
+ return static_cast<size_t>(256) << index;
567
+ }
568
+ BinNum BinNumForSize(size_t bytes) {
569
+ uint64 v = std::max<size_t>(bytes, 256) >> kMinAllocationBits;
570
+ int b = std::min(kNumBins - 1, Log2FloorNonZero(v));
571
+ return b;
572
+ }
573
+ Bin* BinForSize(size_t bytes) { return BinFromIndex(BinNumForSize(bytes)); }
574
+
575
+ char bins_space_[sizeof(Bin) * kNumBins];
576
+
577
+ const Options opts_;
578
+
579
+ // The size of the current region allocation.
580
+ size_t curr_region_allocation_bytes_;
581
+
582
+ // An indicator that expansion of a region has hit the limits
583
+ // of the available memory.
584
+ bool started_backpedal_ = false;
585
+
586
+ // Whether the allocator will coalesce adjacent sub allocator provided
587
+ // AllocationRegions. This may be disabled if discrete sub allocator
588
+ // regions can't be treated as contiguous (e.g. if the allocation refers to
589
+ // device visible memory which is not adjacent to the other region in the
590
+ // device's address space).
591
+ const bool coalesce_regions_;
592
+
593
+ std::unique_ptr<SubAllocator> sub_allocator_;
594
+ string name_;
595
+ SharedCounter* timing_counter_ = nullptr;
596
+ std::deque<ChunkHandle> timestamped_chunks_;
597
+
598
+ std::atomic<uint64> safe_frontier_ = {0};
599
+
600
+ // Structures mutable after construction
601
+ mutable mutex lock_;
602
+ RegionManager region_manager_ TF_GUARDED_BY(lock_);
603
+
604
+ std::vector<Chunk> chunks_ TF_GUARDED_BY(lock_);
605
+
606
+ // Pointer to head of linked list of free Chunks
607
+ ChunkHandle free_chunks_list_ TF_GUARDED_BY(lock_);
608
+
609
+ // Counter containing the next unique identifier to assign to a
610
+ // newly-created chunk.
611
+ int64_t next_allocation_id_ TF_GUARDED_BY(lock_);
612
+
613
+ // Stats.
614
+ AllocatorStats stats_ TF_GUARDED_BY(lock_);
615
+ #ifdef TENSORFLOW_MEM_DEBUG
616
+ int64 action_counter_ TF_GUARDED_BY(lock_) = 0;
617
+ #define MEM_DEBUG_SIZE_HISTORY_SIZE 4096
618
+ int64 size_history_[MEM_DEBUG_SIZE_HISTORY_SIZE];
619
+ #endif
620
+
621
+ friend class GPUBFCAllocatorPrivateMethodsTest;
622
+ friend class GPUBFCAllocatorPrivateMethodsTest_SubAllocatorSpecific;
623
+ BFCAllocator(const BFCAllocator&) = delete;
624
+ void operator=(const BFCAllocator&) = delete;
625
+ };
626
+
627
+ } // namespace tsl
628
+
629
+ #endif // TENSORFLOW_TSL_FRAMEWORK_BFC_ALLOCATOR_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/cancellation.h ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_CANCELLATION_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_CANCELLATION_H_
18
+
19
+ #include <atomic>
20
+ #include <functional>
21
+
22
+ #include "tsl/lib/gtl/flatmap.h"
23
+ #include "tsl/platform/hash.h"
24
+ #include "tsl/platform/mutex.h"
25
+ #include "tsl/platform/notification.h"
26
+ #include "tsl/platform/status.h"
27
+ #include "tsl/platform/stringpiece.h"
28
+ #include "tsl/platform/thread_annotations.h"
29
+ #include "tsl/platform/types.h"
30
+
31
namespace tsl {

// A token that can be used to register and deregister a
// CancelCallback with a CancellationManager.
//
// CancellationToken values must be created by a call to
// CancellationManager::get_cancellation_token.
typedef int64_t CancellationToken;

// A callback that is invoked when a step is canceled.
//
// NOTE(mrry): See caveats about CancelCallback implementations in the
// comment for CancellationManager::RegisterCallback.
typedef std::function<void()> CancelCallback;

// Tracks a set of CancelCallbacks and invokes them when cancellation is
// requested, optionally propagating cancellation from a parent manager to
// its children.
//
// This class should never simultaneously be used as the cancellation manager
// for two separate sets of executions (i.e two separate steps, or two separate
// function executions).
class CancellationManager {
 public:
  // A value that won't be returned by get_cancellation_token().
  static const CancellationToken kInvalidToken;

  CancellationManager();

  // Constructs a new CancellationManager that is a "child" of `*parent`.
  //
  // If `*parent` is cancelled, `*this` will be cancelled. `*parent` must
  // outlive the created CancellationManager.
  explicit CancellationManager(CancellationManager* parent);

  ~CancellationManager();

  // Run all callbacks associated with this manager.
  void StartCancel();

  // Run all callbacks associated with this manager with a status.
  // Currently the status is for logging purpose only. See also
  // CancellationManager::RegisterCallbackWithErrorLogging.
  void StartCancelWithStatus(const Status& status);

  // Returns true iff StartCancel() has been called.
  bool IsCancelled() { return is_cancelled_.load(std::memory_order_acquire); }

  // Returns a token that must be used in calls to RegisterCallback
  // and DeregisterCallback. Each call yields a distinct token.
  CancellationToken get_cancellation_token() {
    return next_cancellation_token_.fetch_add(1);
  }

  // Attempts to register the given callback to be invoked when this
  // manager is cancelled. Returns true if the callback was
  // registered; returns false if this manager was already cancelled,
  // and the callback was not registered.
  //
  // If this method returns false, it is the caller's responsibility
  // to perform any cancellation cleanup.
  //
  // This method is tricky to use correctly. The following usage pattern
  // is recommended:
  //
  // class ObjectWithCancellableOperation {
  //   mutex mu_;
  //   void CancellableOperation(CancellationManager* cm,
  //                             std::function<void(Status)> callback) {
  //     bool already_cancelled;
  //     CancellationToken token = cm->get_cancellation_token();
  //     {
  //       mutex_lock(mu_);
  //       already_cancelled = !cm->RegisterCallback(
  //           [this, token]() { Cancel(token); });
  //       if (!already_cancelled) {
  //         // Issue asynchronous operation. Associate the pending operation
  //         // with `token` in some object state, or provide another way for
  //         // the Cancel method to look up the operation for cancellation.
  //         // Ensure that `cm->DeregisterCallback(token)` is called without
  //         // holding `mu_`, before `callback` is invoked.
  //         // ...
  //       }
  //     }
  //     if (already_cancelled) {
  //       callback(errors::Cancelled("Operation was cancelled"));
  //     }
  //   }
  //
  //   void Cancel(CancellationToken token) {
  //     mutex_lock(mu_);
  //     // Take action to cancel the operation with the given cancellation
  //     // token.
  //   }
  //
  // NOTE(mrry): The caller should take care that (i) the calling code
  // is robust to `callback` being invoked asynchronously (e.g. from
  // another thread), (ii) `callback` is deregistered by a call to
  // this->DeregisterCallback(token) when the operation completes
  // successfully, and (iii) `callback` does not invoke any method
  // on this cancellation manager. Furthermore, it is important that
  // the eventual caller of the complementary DeregisterCallback does not
  // hold any mutexes that are required by `callback`.
  bool RegisterCallback(CancellationToken token, CancelCallback callback);

  // Similar to RegisterCallback, but if the cancellation manager starts a
  // cancellation with an error status, it will log the error status before
  // invoking the callback. `callback_name` is a human-readable name of the
  // callback, which will be displayed on the log.
  bool RegisterCallbackWithErrorLogging(CancellationToken token,
                                        CancelCallback callback,
                                        tsl::StringPiece callback_name);

  // Deregister the callback that, when registered, was associated
  // with the given cancellation token. Returns true iff the callback
  // was deregistered and will not be invoked; otherwise returns false
  // after the callback has been invoked, blocking if necessary.
  //
  // NOTE(mrry): This method may block if cancellation is in progress.
  // The caller of this method must not hold any mutexes that are required
  // to invoke any cancellation callback that has been registered with this
  // cancellation manager.
  bool DeregisterCallback(CancellationToken token);

  // Deregister the callback that, when registered, was associated
  // with the given cancellation token. Returns true iff the callback
  // was deregistered and will not be invoked; otherwise returns false
  // immediately, with no guarantee that the callback has completed.
  //
  // This method is guaranteed to return true if StartCancel has not been
  // called.
  bool TryDeregisterCallback(CancellationToken token);

  // Returns true iff cancellation is in progress.
  bool IsCancelling();

 private:
  // A registered callback plus the metadata needed to optionally log an
  // error status when it is invoked during cancellation.
  struct CallbackConfiguration {
    CancelCallback callback;
    // Human-readable callback name; used for logging when `log_error` is set.
    std::string name;
    bool log_error = false;
  };

  // Mutable bookkeeping for registered callbacks and child managers.
  struct State {
    // Lets DeregisterCallback() block until cancellation has finished
    // running callbacks (see the blocking contract on DeregisterCallback).
    Notification cancelled_notification;
    gtl::FlatMap<CancellationToken, CallbackConfiguration> callbacks;

    // If this CancellationManager has any children, this member points to the
    // head of a doubly-linked list of its children.
    CancellationManager* first_child = nullptr;  // Not owned.
  };

  // Registers `config` under `token`; shared implementation behind the
  // public RegisterCallback* methods.
  bool RegisterCallbackConfig(CancellationToken token,
                              CallbackConfiguration config);

  // Links/unlinks `child` in this manager's doubly-linked list of children.
  bool RegisterChild(CancellationManager* child);
  void DeregisterChild(CancellationManager* child);

  // True while cancellation is in progress (see IsCancelling()).
  bool is_cancelling_;
  // Set once StartCancel()/StartCancelWithStatus() has been called.
  std::atomic_bool is_cancelled_;
  // Source of unique values for get_cancellation_token().
  std::atomic<CancellationToken> next_cancellation_token_;

  CancellationManager* const parent_ = nullptr;  // Not owned.

  // If this CancellationManager is associated with a parent, this member will
  // be set to `true` after this is removed from the parent's list of children.
  bool is_removed_from_parent_ TF_GUARDED_BY(parent_->mu_) = false;

  // If this CancellationManager is associated with a parent, these members form
  // a doubly-linked list of that parent's children.
  //
  // These fields are valid only when `this->is_removed_from_parent_` is false.
  CancellationManager* prev_sibling_ TF_GUARDED_BY(parent_->mu_) =
      nullptr;  // Not owned.
  CancellationManager* next_sibling_ TF_GUARDED_BY(parent_->mu_) =
      nullptr;  // Not owned.

  // Protects `state_` (callback map, child list, notification).
  mutex mu_;
  std::unique_ptr<State> state_ TF_GUARDED_BY(mu_);
};

// Registers the given cancellation callback, returning a function that can be
// used to deregister the callback. If `cancellation_manager` is NULL, no
// registration occurs and `deregister_fn` will be a no-op.
Status RegisterCancellationCallback(CancellationManager* cancellation_manager,
                                    std::function<void()> callback,
                                    std::function<void()>* deregister_fn);

}  // namespace tsl
216
+
217
+ #endif // TENSORFLOW_TSL_FRAMEWORK_CANCELLATION_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/contraction/eigen_contraction_kernel.h ADDED
@@ -0,0 +1,905 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_CONTRACTION_EIGEN_CONTRACTION_KERNEL_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_CONTRACTION_EIGEN_CONTRACTION_KERNEL_H_
18
+
19
+ // Depending on a build configuration this header provides custom kernel for
20
+ // Eigen tensor contractions (small matrix multiplication kernel used to
21
+ // multiple together blocks of the original tensors).
22
+ //
23
+ // 1) --define tensorflow_mkldnn_contraction_kernel=1
24
+ // Use Mkldnn single threaded sgemm. The mkldnn kernels are generated at
25
+ // runtime and use avx/avx2/fma/avx512 based on cpu status registers
26
+ // (https://en.wikipedia.org/wiki/CPUID).
27
+ //
28
+ // If you use `tensor.contract(other_tensor)` in your code, you must include
29
+ // this header to get the benefit of custom contraction kernel:
30
+ //
31
+ // #if defined(TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL)
32
+ // #include
33
+ // "third_party/tensorflow/tsl/framework/contraction/eigen_contraction_kernel.h"
34
+ // #endif
35
+
36
+ #include "unsupported/Eigen/CXX11/Tensor" // from @eigen_archive
37
+ #include "tsl/framework/fixedpoint/FixedPoint.h"
38
+
39
+ #if defined(TENSORFLOW_USE_MKLDNN_CONTRACTION_KERNEL)
40
+ #include "dnnl.h"
41
+ #endif
42
+
43
+ #include "tsl/platform/dynamic_annotations.h"
44
+
45
+ namespace Eigen {
46
+ namespace internal {
47
+
48
+ #if defined(TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL)
49
+ // Returns `true` iff we can use custom contraction kernels. This is a runtime
50
+ // check, that uses environment variables.
51
+ EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE bool UseCustomContractionKernels();
52
+
53
+ // Pack a 2D block of a Tensor expression into contiguous block of memory with
54
+ // col-major storage order. We do not have access to the underlying Tensor
55
+ // expression, we only have a DataMapper (TensorContractionInputMapper for
56
+ // tensor contractions, or blas_data_mapper for plain tensors), that provides a
57
+ // two-dimensional view into the Tensor expression.
58
+ //
59
+ // Default Eigen gemm_pack_rhs and gemm_pack_lhs pack blocks of tensor
60
+ // expressions into the packed format described in "Anatomy of High-Performance
61
+ // Matrix Multiplication" paper (1). Eigen::internal::gebp_kernel relies on this
62
+ // packing format for efficient micro-panel multiplication.
63
+ //
64
+ // This simple packing can be used with any '?gemm' function from BLAS
65
+ // libraries, that work with col-major matrices.
66
+ //
67
+ // (1) http://www.cs.utexas.edu/~flame/pubs/GotoTOMS_revision.pdf
68
+ //
69
+ // IMPORTANT: `gemm_pack_colmajor_block` always packs the block in column major
70
+ // order, DataMapperStorageOrder specifies the storage order of the underlying
71
+ // Tensor expression.
72
template <typename Scalar, typename IndexType, typename DataMapper,
          int DataMapperStorageOrder>
struct gemm_pack_colmajor_block;

// gemm_pack_colmajor_block for ColMajor storage order.
template <typename Scalar, typename IndexType, typename DataMapper>
struct gemm_pack_colmajor_block<Scalar, IndexType, DataMapper,
                                /*DataMapperStorageOrder*/ ColMajor> {
  typedef typename internal::packet_traits<Scalar>::type Packet;
  typedef typename DataMapper::LinearMapper LinearMapper;

  enum { PacketSize = internal::packet_traits<Scalar>::size };

  // Copies the `rows` x `cols` view exposed by `data_mapper` into `block`,
  // stored contiguously in column-major order. Each column is processed in
  // three phases: a 4x-unrolled packet loop, a single-packet loop, and a
  // scalar tail.
  EIGEN_DONT_INLINE
  void operator()(Scalar* block, const DataMapper& data_mapper, IndexType rows,
                  IndexType cols) {
    // Largest row indices at which the unrolled (4 packets) and vectorized
    // (1 packet) loops can still load a full packet within the column.
    const IndexType unrolled_rows = rows - 4 * PacketSize;
    const IndexType vectorized_rows = rows - PacketSize;

    for (IndexType col = 0; col < cols; ++col) {
      LinearMapper lm = data_mapper.getLinearMapper(0, col);

      IndexType row = 0;
      // Give compiler a strong possibility to unroll the loop.
      for (; row <= unrolled_rows; row += 4 * PacketSize) {
        for (IndexType j = 0; j < 4; ++j) {
          const Packet p = lm.template loadPacket<Packet>(row + j * PacketSize);
          internal::pstoreu(block + j * PacketSize, p);
        }
        block += 4 * PacketSize;
      }
      // Process remaining rows with packets.
      for (; row <= vectorized_rows; row += PacketSize) {
        const Packet p = lm.template loadPacket<Packet>(row);
        internal::pstoreu(block, p);
        block += PacketSize;
      }
      // Finalize with coefficients.
      for (; row < rows; ++row) {
        *block = lm(row);
        ++block;
      }
    }
  }
};
117
+
118
+ #endif // TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL
119
+
120
+ // Enabled by build option: "--define tensorflow_mkldnn_contraction_kernel=1"
121
+ #if defined(TENSORFLOW_USE_MKLDNN_CONTRACTION_KERNEL)
122
+
123
template <typename Scalar, typename IndexType, typename OutputMapper,
          bool ConjugateLhs = false, bool ConjugateRhs = false>
struct dnnl_gemm_kernel;

// dnnl_gemm_kernel for floats defined as a thin layer on top of mkldnn_sgemm.
template <typename IndexType, typename OutputMapper, bool ConjugateLhs,
          bool ConjugateRhs>
struct dnnl_gemm_kernel</*Scalar*/ float, IndexType, OutputMapper, ConjugateLhs,
                        ConjugateRhs> {
  static_assert(!ConjugateLhs, "DNNL kernel doesn't support ConjugateLhs");
  static_assert(!ConjugateRhs, "DNNL kernel doesn't support ConjugateRhs");

  // Sentinel for ldA/ldB: derive the leading dimension from the block
  // dimensions (m for A, k for B) instead of taking it from the caller.
  static constexpr int kComputeStrideFromBlockDimensions = -1;

  using LhsScalar = float;
  using RhsScalar = float;
  using ResScalar = float;

  // Computes `output = alpha * blockA * blockB + beta * output` for
  // column-major blocks by delegating to dnnl_sgemm.
  EIGEN_DONT_INLINE
  void operator()(const OutputMapper& output, const LhsScalar* blockA,
                  const RhsScalar* blockB, const IndexType rows,
                  const IndexType depth, const IndexType cols, float alpha,
                  float beta, int ldA = kComputeStrideFromBlockDimensions,
                  int ldB = kComputeStrideFromBlockDimensions,
                  char transposeA = 'N', char transposeB = 'N') {
    // DNNL expresses all dimensions/strides as `int`; verify that the
    // 64-bit sizes fit before narrowing.
    static const int max_index = (std::numeric_limits<int>::max)();

    eigen_assert(max_index >= rows);
    eigen_assert(max_index >= cols);
    eigen_assert(max_index >= depth);
    eigen_assert(max_index >= output.stride());

    const int m = static_cast<int>(rows);
    const int n = static_cast<int>(cols);
    const int k = static_cast<int>(depth);

    ldA = ldA == kComputeStrideFromBlockDimensions ? m : ldA;
    ldB = ldB == kComputeStrideFromBlockDimensions ? k : ldB;
    const int ldC = static_cast<int>(output.stride());

    // DNNL takes row-major matrices. Our packed column-major matrices can be
    // viewed as a transposed row-major matrix, i.e.,
    //   C_colmajor = C_rowmajor^T = (A_rowmajor * B_rowmajor)^T
    //                             = B_rowmajor^T * A_rowmajor^T
    //                             = B_colmajor * A_colmajor
    // So we can just swap the input matrices A and B for DNNL.
    // TODO(penporn): Switch to row-major packing instead.
    dnnl_status_t st =
        dnnl_sgemm(transposeB, transposeA, n, m, k, alpha, blockB, ldB, blockA,
                   ldA, beta, const_cast<ResScalar*>(output.data()), ldC);
    eigen_assert(st == 0);

#if DYNAMIC_ANNOTATIONS_ENABLED == 1 || defined(MEMORY_SANITIZER)
    // Tell sanitizers the output block was initialized (written by DNNL,
    // which MSAN cannot see into).
    for (IndexType col = 0; col < cols; ++col) {
      ResScalar* row_base = &output(0, col);
      EIGEN_UNUSED_VARIABLE(row_base);  // Suppress unused variable error.
      TF_ANNOTATE_MEMORY_IS_INITIALIZED(row_base, sizeof(ResScalar) * rows);
    }
#endif

    // eigen_assert is a no-op in optimized mode so we add these to avoid
    // compiler's unused-variable errors.
    EIGEN_UNUSED_VARIABLE(max_index);
    EIGEN_UNUSED_VARIABLE(st);
  }
};
189
+
190
// Quantized GEMM kernel: int8 LHS x uint8 RHS -> int32 result, implemented as
// a thin layer on top of DNNL's dnnl_gemm_u8s8s32.
template <typename IndexType, typename OutputMapper, bool ConjugateLhs = false,
          bool ConjugateRhs = false>
struct mkldnn_gemm_s8u8s32_kernel {
  static_assert(!ConjugateLhs, "DNNL kernel doesn't support ConjugateLhs");
  static_assert(!ConjugateRhs, "DNNL kernel doesn't support ConjugateRhs");

  // Sentinel for ldA/ldB: derive the leading dimension from the block
  // dimensions (m for A, k for B) instead of taking it from the caller.
  static constexpr int kComputeStrideFromBlockDimensions = -1;

  using LhsScalar = Eigen::QInt8;
  using RhsScalar = Eigen::QUInt8;
  using ResScalar = Eigen::QInt32;

  // Computes `output = alpha * blockA * blockB + beta * output` for
  // column-major quantized blocks by delegating to dnnl_gemm_u8s8s32.
  EIGEN_DONT_INLINE
  void operator()(const OutputMapper& output, const LhsScalar* blockA,
                  const RhsScalar* blockB, const IndexType rows,
                  const IndexType depth, const IndexType cols, float alpha,
                  float beta, int ldA = kComputeStrideFromBlockDimensions,
                  int ldB = kComputeStrideFromBlockDimensions,
                  char transposeA = 'N', char transposeB = 'N') {
    // DNNL expresses all dimensions/strides as `int`; verify that the
    // 64-bit sizes fit before narrowing.
    static const int max_index = (std::numeric_limits<int>::max)();

    eigen_assert(max_index >= rows);
    eigen_assert(max_index >= cols);
    eigen_assert(max_index >= depth);
    eigen_assert(max_index >= output.stride());

    const int m = static_cast<int>(rows);
    const int n = static_cast<int>(cols);
    const int k = static_cast<int>(depth);

    ldA = ldA == kComputeStrideFromBlockDimensions ? m : ldA;
    ldB = ldB == kComputeStrideFromBlockDimensions ? k : ldB;
    const int ldC = static_cast<int>(output.stride());

    // Currently we support only symmetric quantization with zero point at 0.
    const int8_t ao = 0;
    const int8_t bo = 0;

    // Don't add any offset to the result C.
    const char offsetc = 'F';
    const int32_t co = 0;

    // Reinterpret the Eigen quantized wrapper types as the raw integer
    // pointers that the DNNL C API expects.
    const auto* A = reinterpret_cast<const int8_t*>(blockA);
    const auto* B = reinterpret_cast<const uint8_t*>(blockB);
    auto* C = reinterpret_cast<int32_t*>(const_cast<ResScalar*>(output.data()));

    // DNNL takes row-major matrices. Our packed column-major matrices can be
    // viewed as a transposed row-major matrix, i.e., C_colmajor = C_rowmajor^T.
    //   C_colmajor = C_rowmajor^T = (A_rowmajor * B_rowmajor)^T
    //                             = B_rowmajor^T * A_rowmajor^T
    //                             = B_colmajor * A_colmajor
    // So we can just swap the input matrices A and B for DNNL.
    // TODO(penporn): Switch to row-major packing instead.
    dnnl_status_t st = dnnl_gemm_u8s8s32(transposeB, transposeA, offsetc,  //
                                         n, m, k,                          //
                                         alpha,                            //
                                         B, ldB, bo,                       //
                                         A, ldA, ao,                       //
                                         beta,                             //
                                         C, ldC, &co);
    eigen_assert(st == 0);

#if DYNAMIC_ANNOTATIONS_ENABLED == 1 || defined(MEMORY_SANITIZER)
    // Tell sanitizers the output block was initialized (written by DNNL,
    // which MSAN cannot see into).
    for (IndexType col = 0; col < cols; ++col) {
      ResScalar* row_base = &output(0, col);
      EIGEN_UNUSED_VARIABLE(row_base);  // Suppress unused variable error.
      TF_ANNOTATE_MEMORY_IS_INITIALIZED(row_base, sizeof(ResScalar) * rows);
    }
#endif

    // eigen_assert is a no-op in optimized mode so we add these to avoid
    // compiler's unused-variable errors.
    EIGEN_UNUSED_VARIABLE(max_index);
    EIGEN_UNUSED_VARIABLE(st);
  }
};
266
+
267
// For mkldnn_sgemm having the right dimensions (especially for small matrices)
// is more important than fitting all the working set in L1/L2 caches.
// TODO(ezhulenev): Do better heuristics.
template <typename StorageIndex, int sharding_type>
class TensorContractionBlocking<float, float, float, StorageIndex,
                                sharding_type> {
  // For now mkldnn has only mkldnn_sgemm (gemm for floats).
  using Scalar = float;

  // Adjust the block sizes to work well with mkldnn kernels.

  // Multiply default choice of block size along M and N dimensions.
  // TODO(ezhulenev): Explore if this can work in general (kScaleM=2.0 worked
  // well in some of models).
  static constexpr float kScaleM = 1.5;
  static constexpr float kScaleN = 1.0;

  // Mkldnn Avx/Avx2/Avx512 unroll factors are: 8/16/48.
  static constexpr StorageIndex kUnrollM = 48;

  // Mkldnn Avx/Avx2/Avx512 unroll factors are: 6/6/8.
  static constexpr StorageIndex kUnrollN = 24;

 public:
  // Chooses block sizes (kc, mc, nc) for the given contraction dimensions
  // and thread count, starting from Eigen's defaults and then refining them
  // for the DNNL sgemm kernel when custom kernels are enabled.
  TensorContractionBlocking(StorageIndex k, StorageIndex m, StorageIndex n,
                            StorageIndex num_threads = 1)
      : kc_(k), mc_(m), nc_(n) {
    // 1. Compute block sizes using default Eigen heuristics.
    if (sharding_type == ShardByCol) {
      computeProductBlockingSizes<Scalar, Scalar, 1>(kc_, mc_, nc_,
                                                     num_threads);
    } else {
      computeProductBlockingSizes<Scalar, Scalar, 1>(kc_, nc_, mc_,
                                                     num_threads);
    }

    // If dimensions do not pass basic sanity checks return immediately.
    if (kc_ <= 0 || mc_ <= 0 || nc_ <= 0) return;

    // If we are using default Eigen gebp kernel there is no need to adjust the
    // block sizes for DNNL.
    if (!UseCustomContractionKernels()) return;

    // 2. And refine them to work well with mkldnn sgemm. Blocks are scaled
    // by kScaleM/kScaleN and rounded up to the kernel unroll factors,
    // clamped to the full dimension.
    mc_ = (std::min)(
        m, Eigen::divup(static_cast<StorageIndex>(mc_ * kScaleM), kUnrollM) *
               kUnrollM);
    nc_ = (std::min)(
        n, Eigen::divup(static_cast<StorageIndex>(nc_ * kScaleN), kUnrollN) *
               kUnrollN);

    // We split Kth dimensions in roughly equal slices.
    StorageIndex target_k_slices =
        (std::max)(StorageIndex(1), Eigen::divup(k, kc_));
    StorageIndex packet_size = internal::packet_traits<Scalar>::size;
    if (packet_size < 8) packet_size = 8;
    StorageIndex target_bk =
        Eigen::divup(k / target_k_slices, packet_size) * packet_size;
    kc_ = (std::min)(k, target_bk);
  }

  // Chosen block sizes along the k, m and n dimensions.
  EIGEN_ALWAYS_INLINE StorageIndex kc() const { return kc_; }
  EIGEN_ALWAYS_INLINE StorageIndex mc() const { return mc_; }
  EIGEN_ALWAYS_INLINE StorageIndex nc() const { return nc_; }

 private:
  StorageIndex kc_;
  StorageIndex mc_;
  StorageIndex nc_;
};
337
+
338
template <typename StorageIndex, int sharding_type>
class TensorContractionBlocking<Eigen::QInt32, Eigen::QInt8, Eigen::QUInt8,
                                StorageIndex, sharding_type> {
  // TODO(ezhulenev): Define proper gebp_traits in Eigen for quantized types?

  // Default Eigen block heuristics for `QInt8xQUInt8 -> QInt32` are wrong.
  // Mostly because gebp_traits are not correctly defined. But we know that we
  // are going to use s8u8s32_gemm from DNNL, so we use float heuristics, and
  // adjust them to work well with DNNL.
  using LhsScalar = Eigen::QInt8;
  using RhsScalar = Eigen::QUInt8;
  using ResScalar = Eigen::QInt32;

  // Multiply default choice of block size along M, N and K dimensions.
  // NOTE(review): these scale factors are not referenced by the constructor
  // below, which picks fixed block sizes instead — possibly leftovers.
  static constexpr float kScaleM = 1.5;
  static constexpr float kScaleN = 1.5;
  static constexpr float kScaleK = 1.5;

 public:
  // Chooses fixed block sizes for quantized contractions, clamped to the
  // actual contraction dimensions. `num_threads` is currently unused here.
  TensorContractionBlocking(StorageIndex k, StorageIndex m, StorageIndex n,
                            StorageIndex num_threads = 1)
      : kc_(k), mc_(m), nc_(n) {
    // Each dimension is a multiple of 32 (fits into _m256i).
    mc_ = (std::min)(m, static_cast<StorageIndex>(192));
    nc_ = (std::min)(n, static_cast<StorageIndex>(288));
    kc_ = (std::min)(k, static_cast<StorageIndex>(320));
  }

  // Chosen block sizes along the k, m and n dimensions.
  EIGEN_ALWAYS_INLINE StorageIndex kc() const { return kc_; }
  EIGEN_ALWAYS_INLINE StorageIndex mc() const { return mc_; }
  EIGEN_ALWAYS_INLINE StorageIndex nc() const { return nc_; }

 private:
  StorageIndex kc_;
  StorageIndex mc_;
  StorageIndex nc_;
};
375
+
376
// If the Lhs or Rhs Tensor expressions are already evaluated and have access to
// raw data, we can skip packing step and setup pointers and a stride to the
// underlying memory buffer and pass them directly to Gemm.
template <typename Scalar, typename StorageIndex>
struct ColMajorBlock {
  // If true, the block is described by `raw_data`/`stride`/`transpose`;
  // otherwise `packed_data` points to a packed copy of the block.
  bool is_direct_access;

  // Valid iff `is_direct_access == false`
  Scalar* packed_data;

  // Valid iff `is_direct_access == true`
  Scalar* raw_data;
  StorageIndex stride;
  char transpose;  // Transpose flag forwarded to the GEMM call ('N' = none).
};
391
+
392
// Primary template: no direct access into the underlying tensor expression.
// `value == false` signals that callers must pack; block() must never be
// reached. Specializations for supported expressions are generated below by
// REGISTER_DIRECT_COL_MAJOR_ACCESS.
template <typename DataMapper>
struct DirectColMajorAccess {
  enum { value = false };

  template <typename Scalar, typename StorageIndex>
  static bool block(const typename DataMapper::SubMapper& data_mapper,
                    const StorageIndex rows, const StorageIndex cols,
                    const StorageIndex num_kernels,
                    ColMajorBlock<Scalar, StorageIndex>* block) {
    eigen_assert(false && "Not implemented");
    return false;
  }
};
405
+
406
// If we have an access to raw memory of the contraction input, we can safely
// skip packing if:
// (1) Packing is a no-op.
// (2) Packed block will be used just once.
//
// If a packed block is used many times, it's more efficient to pack it into
// contiguous block of memory to reduce pressure on TLB.
//
// TODO(ezhulenev): Add support for more tensor expressions that matters.
//
// The macro below specializes DirectColMajorAccess for a
// TensorContractionInputMapper over the given TENSOR_EXPR; its block()
// fills a ColMajorBlock with a direct pointer/stride when packing can be
// skipped, returning true, and returns false when packing is required.
#define REGISTER_DIRECT_COL_MAJOR_ACCESS(TENSOR_EXPR)                          \
  template <typename Scalar, typename StorageIndex, int Side, typename Device, \
            typename nocontract_t, typename contract_t, int packet_size,       \
            int Alignment>                                                     \
  struct DirectColMajorAccess<TensorContractionInputMapper<                    \
      Scalar, StorageIndex, Side, TensorEvaluator<TENSOR_EXPR, Device>,        \
      nocontract_t, contract_t, packet_size, /*inner_dim_contiguous=*/true,    \
      /*inner_dim_reordered=*/false, Alignment>> {                             \
    enum { value = true };                                                     \
                                                                               \
    using DataMapper = TensorContractionInputMapper<                           \
        Scalar, StorageIndex, Side, TensorEvaluator<TENSOR_EXPR, Device>,      \
        nocontract_t, contract_t, packet_size, /*inner_dim_contiguous=*/true,  \
        /*inner_dim_reordered=*/false, Alignment>;                             \
                                                                               \
    static bool block(const typename DataMapper::SubMapper& data_mapper,       \
                      const StorageIndex rows, const StorageIndex cols,        \
                      const StorageIndex num_kernels,                          \
                      ColMajorBlock<Scalar, StorageIndex>* block) {            \
      static_assert(DataMapper::DirectOffsets == true,                         \
                    "DataMapper must support direct offsets");                 \
                                                                               \
      const StorageIndex vert_offset = data_mapper.vert_offset();              \
      const StorageIndex horiz_offset = data_mapper.horiz_offset();            \
      const StorageIndex stride =                                              \
          Side == Lhs ? data_mapper.base_mapper().stride()                     \
                      : data_mapper.base_mapper().nocontract_strides()[0];     \
      const Scalar* data = data_mapper.base_mapper().tensor().data();          \
      data = Side == Lhs ? data : data + vert_offset + horiz_offset * stride;  \
                                                                               \
      const bool is_no_op_packing = stride == rows;                            \
      const StorageIndex addressable_mem = (stride * cols * sizeof(Scalar));   \
      const bool use_direct_access =                                           \
          is_no_op_packing || num_kernels == 1 /* used once */ ||              \
          ((num_kernels == 2) &&                                               \
           (addressable_mem < (256 << 10) /* 256 kb */));                      \
                                                                               \
      if (use_direct_access) {                                                 \
        block->is_direct_access = true;                                        \
        block->raw_data = const_cast<Scalar*>(data);                           \
        block->stride = stride;                                                \
        block->transpose = 'N';                                                \
        return true;                                                           \
      }                                                                        \
      return false;                                                            \
    }                                                                          \
  }

// Shorthand aliases for the tensor expression types registered below.
#define SIMPLE_TENSOR const Tensor<Scalar, 2, Eigen::ColMajor, StorageIndex>

#define TENSOR_MAP_ROWMAJOR                                               \
  const TensorMap<Tensor<const Scalar, 2, Eigen::RowMajor, StorageIndex>, \
                  Eigen::Aligned>

#define TENSOR_MAP_COLMAJOR                                               \
  const TensorMap<Tensor<const Scalar, 2, Eigen::ColMajor, StorageIndex>, \
                  Eigen::Aligned>

#define TENSOR_MAP_CONST_ROWMAJOR                                   \
  const TensorMap<Tensor<Scalar, 2, Eigen::RowMajor, StorageIndex>, \
                  Eigen::Aligned>

#define TENSOR_MAP_CONST_COLMAJOR                                   \
  const TensorMap<Tensor<Scalar, 2, Eigen::ColMajor, StorageIndex>, \
                  Eigen::Aligned>

// This is reshaped convolution filter from `eigen_spatial_convolutions.h`.
#define TENSOR_RESHAPE                                                        \
  const TensorReshapingOp<                                                    \
      const Eigen::DSizes<StorageIndex, 2>,                                   \
      const TensorMap<Tensor<const Scalar, 4, Eigen::RowMajor, StorageIndex>, \
                      Eigen::Aligned>>

// Instantiate DirectColMajorAccess for each supported expression type.
REGISTER_DIRECT_COL_MAJOR_ACCESS(SIMPLE_TENSOR);
REGISTER_DIRECT_COL_MAJOR_ACCESS(TENSOR_MAP_ROWMAJOR);
REGISTER_DIRECT_COL_MAJOR_ACCESS(TENSOR_MAP_COLMAJOR);
REGISTER_DIRECT_COL_MAJOR_ACCESS(TENSOR_MAP_CONST_ROWMAJOR);
REGISTER_DIRECT_COL_MAJOR_ACCESS(TENSOR_MAP_CONST_COLMAJOR);
REGISTER_DIRECT_COL_MAJOR_ACCESS(TENSOR_RESHAPE);

// The helper macros are local to this header; clean them up.
#undef SIMPLE_TENSOR
#undef TENSOR_MAP_ROWMAJOR
#undef TENSOR_MAP_COLMAJOR
#undef TENSOR_MAP_CONST_ROWMAJOR
#undef TENSOR_MAP_CONST_COLMAJOR
#undef TENSOR_RESHAPE
#undef REGISTER_DIRECT_COL_MAJOR_ACCESS
502
+
503
// Maps a (result, lhs, rhs) scalar-type triple to a custom GEMM kernel.
// Primary template: no kernel is registered for this combination. `Defined`
// is checked by a static_assert in the TensorContractionKernel
// specializations below, so an unsupported triple fails at compile time.
template <typename ResScalar, typename LhsScalar, typename RhsScalar,
          typename StorageIndex, typename OutputMapper>
struct GemmKernelProvider {
  enum { Defined = 0 };
  using GemmKernel = void;  // placeholder; never instantiated when Defined==0
};
509
+
510
// Single precision (float x float -> float): dispatch to the DNNL sgemm
// kernel wrapper.
template <typename StorageIndex, typename OutputMapper>
struct GemmKernelProvider<float, float, float, StorageIndex, OutputMapper> {
  enum { Defined = 1 };
  using GemmKernel = dnnl_gemm_kernel<float, StorageIndex, OutputMapper>;
};
515
+
516
// Quantized contraction (QInt8 x QUInt8 -> QInt32): dispatch to the
// MKL-DNN gemm_s8u8s32 kernel wrapper.
template <typename StorageIndex, typename OutputMapper>
struct GemmKernelProvider<Eigen::QInt32, Eigen::QInt8, Eigen::QUInt8,
                          StorageIndex, OutputMapper> {
  enum { Defined = 1 };
  using GemmKernel = mkldnn_gemm_s8u8s32_kernel<StorageIndex, OutputMapper>;
};
522
+
523
+ // NOTE: 'std::enable_if' doesn't work for template specializations. See
524
+ // "default template argument in a class template partial specialization".
525
+
526
// Tensor contraction kernel that can fall back on the Eigen gebp_kernel at
// runtime. The macro expands to a TensorContractionKernel partial
// specialization for the given (result, lhs, rhs) scalar triple. At runtime,
// `UseCustomContractionKernels()` selects between the custom GEMM kernel
// (with optional zero-copy "direct access" to the input blocks) and the
// default Eigen pack + gebp path.
#define REGISTER_TENSOR_CONTRACTION_KERNEL_WITH_FALLBACK(                      \
    RES_SCALAR, LHS_SCALAR, RHS_SCALAR)                                        \
                                                                               \
  template <typename StorageIndex, typename OutputMapper, typename LhsMapper,  \
            typename RhsMapper>                                                \
  struct TensorContractionKernel<RES_SCALAR, LHS_SCALAR, RHS_SCALAR,           \
                                 StorageIndex, OutputMapper, LhsMapper,        \
                                 RhsMapper> {                                  \
    /* m/k/n are full contraction dimensions; bm/bk/bn are block sizes. */     \
    TensorContractionKernel(StorageIndex m, StorageIndex k, StorageIndex n,    \
                            StorageIndex bm, StorageIndex bk, StorageIndex bn) \
        : m(m), k(k), n(n), bm(bm), bk(bk), bn(bn) {}                          \
                                                                               \
    /* Custom kernels support output scaling with beta. */                     \
    enum { HasBeta = true };                                                   \
                                                                               \
    using ResScalar = RES_SCALAR;                                              \
    using LhsScalar = LHS_SCALAR;                                              \
    using RhsScalar = RHS_SCALAR;                                              \
                                                                               \
    using Traits = typename internal::gebp_traits<LhsScalar, RhsScalar>;       \
                                                                               \
    using LhsBlock = ColMajorBlock<LhsScalar, StorageIndex>;                   \
    using RhsBlock = ColMajorBlock<RhsScalar, StorageIndex>;                   \
                                                                               \
    using DirectLhsAccess = DirectColMajorAccess<LhsMapper>;                   \
    using DirectRhsAccess = DirectColMajorAccess<RhsMapper>;                   \
                                                                               \
    /* Packed Lhs/Rhs block memory allocator.*/                                \
    typedef TensorContractionBlockMemAllocator<LhsScalar, RhsScalar>           \
        BlockMemAllocator;                                                     \
    typedef typename BlockMemAllocator::BlockMemHandle BlockMemHandle;         \
                                                                               \
    using LhsPacker =                                                          \
        gemm_pack_colmajor_block<LhsScalar, StorageIndex,                      \
                                 typename LhsMapper::SubMapper, ColMajor>;     \
    using RhsPacker =                                                          \
        gemm_pack_colmajor_block<RhsScalar, StorageIndex,                      \
                                 typename RhsMapper::SubMapper, ColMajor>;     \
                                                                               \
    using GemmKernelProviderType =                                             \
        GemmKernelProvider<ResScalar, LhsScalar, RhsScalar, StorageIndex,      \
                           OutputMapper>;                                      \
    static_assert(                                                             \
        GemmKernelProviderType::Defined,                                       \
        "Custom GEMM kernel is not registered for given scalar types");        \
    using GemmKernel = typename GemmKernelProviderType::GemmKernel;            \
                                                                               \
    /* Fallback on default Eigen pack and GEBP kernel if custom contraction */ \
    /* kernels disabled at runtime. */                                         \
    using EigenLhsPacker =                                                     \
        gemm_pack_lhs<LhsScalar, StorageIndex, typename LhsMapper::SubMapper,  \
                      Traits::mr, Traits::LhsProgress,                         \
                      typename Traits::LhsPacket4Packing, ColMajor>;           \
    using EigenRhsPacker =                                                     \
        gemm_pack_rhs<RhsScalar, StorageIndex, typename RhsMapper::SubMapper,  \
                      Traits::nr, ColMajor>;                                   \
    using GebpKernel =                                                         \
        gebp_kernel<LhsScalar, RhsScalar, StorageIndex, OutputMapper,          \
                    Traits::mr, Traits::nr, /*ConjugateLhs*/ false,            \
                    /*ConjugateRhs*/ false>;                                   \
                                                                               \
    /* Allocates one Lhs and one Rhs packed block. */                          \
    template <typename Device>                                                 \
    EIGEN_DEVICE_FUNC BlockMemHandle allocate(Device& d, LhsBlock* lhs_block,  \
                                              RhsBlock* rhs_block) {           \
      return BlockMemAllocator::allocate(                                      \
          d, bm, bk, bn, &lhs_block->packed_data, &rhs_block->packed_data);    \
    }                                                                          \
                                                                               \
    /* Allocates `num_slices` groups of (num_lhs, num_rhs) packed blocks in */ \
    /* one shot and wires the returned pointers into the block structs. */     \
    template <typename Device>                                                 \
    EIGEN_DEVICE_FUNC BlockMemHandle                                           \
    allocateSlices(Device& d, const int num_lhs, const int num_rhs,            \
                   const int num_slices, std::vector<LhsBlock>* lhs_blocks,    \
                   std::vector<RhsBlock>* rhs_blocks) {                        \
      eigen_assert(num_slices > 0);                                            \
      std::vector<std::vector<LhsScalar*>> lhs_mem(num_slices);                \
      std::vector<std::vector<RhsScalar*>> rhs_mem(num_slices);                \
                                                                               \
      BlockMemHandle block_mem = BlockMemAllocator::allocateSlices(            \
          d, bm, bk, bn, num_lhs, num_rhs, num_slices, lhs_mem.data(),         \
          rhs_mem.data());                                                     \
                                                                               \
      for (Index x = 0; x < num_slices; x++) {                                 \
        if (num_lhs > 0) lhs_blocks[x].resize(num_lhs);                        \
        for (Index m = 0; m < num_lhs; m++) {                                  \
          lhs_blocks[x][m].packed_data = lhs_mem[x][m];                        \
        }                                                                      \
        if (num_rhs > 0) rhs_blocks[x].resize(num_rhs);                        \
        for (Index n = 0; n < num_rhs; n++) {                                  \
          rhs_blocks[x][n].packed_data = rhs_mem[x][n];                        \
        }                                                                      \
      }                                                                        \
                                                                               \
      return block_mem;                                                        \
    }                                                                          \
                                                                               \
    template <typename Device>                                                 \
    EIGEN_DEVICE_FUNC static void deallocate(Device& d,                        \
                                             BlockMemHandle handle) {          \
      BlockMemAllocator::deallocate(d, handle);                                \
    }                                                                          \
                                                                               \
    /* Packs an Lhs block, or records direct access when the mapper allows */  \
    /* zero-copy use of the underlying tensor memory. */                       \
    EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packLhs(                          \
        LhsBlock* lhsBlock, const typename LhsMapper::SubMapper& data_mapper,  \
        const StorageIndex depth, const StorageIndex rows) {                   \
      if (UseCustomContractionKernels()) {                                     \
        const bool is_direct_access =                                          \
            DirectLhsAccess::value &&                                          \
            DirectLhsAccess::block(data_mapper, rows, depth,                   \
                                   bn > 0 ? divup(n, bn) : 0, lhsBlock);       \
                                                                               \
        if (!is_direct_access) {                                               \
          lhsBlock->is_direct_access = false;                                  \
          LhsPacker()(lhsBlock->packed_data, data_mapper, rows, depth);        \
        }                                                                      \
      } else {                                                                 \
        lhsBlock->is_direct_access = false;                                    \
        EigenLhsPacker()(lhsBlock->packed_data, data_mapper, depth, rows,      \
                         /*stride*/ 0, /*offset*/ 0);                          \
      }                                                                        \
    }                                                                          \
                                                                               \
    EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packRhs(                          \
        RhsBlock* rhsBlock, const typename RhsMapper::SubMapper& data_mapper,  \
        const StorageIndex depth, const StorageIndex cols) {                   \
      if (UseCustomContractionKernels()) {                                     \
        const bool is_direct_access =                                          \
            DirectRhsAccess::value &&                                          \
            DirectRhsAccess::block(data_mapper, depth, cols,                   \
                                   bm > 0 ? divup(m, bm) : 0, rhsBlock);       \
                                                                               \
        if (!is_direct_access) {                                               \
          rhsBlock->is_direct_access = false;                                  \
          RhsPacker()(rhsBlock->packed_data, data_mapper, depth, cols);        \
        }                                                                      \
      } else {                                                                 \
        rhsBlock->is_direct_access = false;                                    \
        EigenRhsPacker()(rhsBlock->packed_data, data_mapper, depth, cols);     \
      }                                                                        \
    }                                                                          \
                                                                               \
    /* Runs one GEMM on the given blocks; picks one of four custom-kernel */   \
    /* call forms depending on which side(s) use direct access. */             \
    EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void invoke(                           \
        const OutputMapper& output_mapper, const LhsBlock& lhsBlock,           \
        const RhsBlock& rhsBlock, const StorageIndex rows,                     \
        const StorageIndex depth, const StorageIndex cols, const float alpha,  \
        const float beta) {                                                    \
      if (UseCustomContractionKernels()) {                                     \
        if ((DirectLhsAccess::value && lhsBlock.is_direct_access) &&           \
            (DirectRhsAccess::value && rhsBlock.is_direct_access)) {           \
          GemmKernel()(output_mapper, lhsBlock.raw_data, rhsBlock.raw_data,    \
                       rows, depth, cols, alpha, beta,                         \
                       /*ldA=*/lhsBlock.stride, /*ldB=*/rhsBlock.stride,       \
                       /*transposeA=*/lhsBlock.transpose,                      \
                       /*transposeB=*/rhsBlock.transpose);                     \
                                                                               \
        } else if (DirectLhsAccess::value && lhsBlock.is_direct_access) {      \
          GemmKernel()(output_mapper, lhsBlock.raw_data, rhsBlock.packed_data, \
                       rows, depth, cols, alpha, beta,                         \
                       /*ldA=*/lhsBlock.stride,                                \
                       /*ldB=*/GemmKernel::kComputeStrideFromBlockDimensions,  \
                       /*transposeA=*/lhsBlock.transpose, /*transposeB=*/'N'); \
                                                                               \
        } else if (DirectRhsAccess::value && rhsBlock.is_direct_access) {      \
          GemmKernel()(output_mapper, lhsBlock.packed_data, rhsBlock.raw_data, \
                       rows, depth, cols, alpha, beta,                         \
                       /*ldA=*/GemmKernel::kComputeStrideFromBlockDimensions,  \
                       /*ldB=*/rhsBlock.stride, /*transposeA=*/'N',            \
                       /*transposeB=*/rhsBlock.transpose);                     \
                                                                               \
        } else {                                                               \
          GemmKernel()(output_mapper, lhsBlock.packed_data,                    \
                       rhsBlock.packed_data, rows, depth, cols, alpha, beta);  \
        }                                                                      \
      } else {                                                                 \
        /* Gebp kernel does not support beta, so we have to clear memory in */ \
        /* the output mapper manually. */                                      \
        /* WARNING(ezhulenev): This is optimized into a memset in a loop, */   \
        /* could be much slower for small matrices. Currently this code */     \
        /* path used only for testing, and performance does not matter. */     \
        if (beta == 0.0) {                                                     \
          for (StorageIndex col = 0; col < cols; ++col) {                      \
            ResScalar* output_base = &output_mapper(0, col);                   \
            typedef Array<ResScalar, Dynamic, 1> OutputRow;                    \
            typedef Map<OutputRow, 0, InnerStride<1>> OutputRowMap;            \
            OutputRowMap(output_base, rows).setZero();                         \
          }                                                                    \
        }                                                                      \
                                                                               \
        GebpKernel()(                                                          \
            output_mapper, lhsBlock.packed_data, rhsBlock.packed_data, rows,   \
            depth, cols, alpha,                                                \
            /*strideA*/ GemmKernel::kComputeStrideFromBlockDimensions,         \
            /*strideB*/ GemmKernel::kComputeStrideFromBlockDimensions,         \
            /*offsetA*/ 0, /*offsetB*/ 0);                                     \
      }                                                                        \
    }                                                                          \
                                                                               \
   private:                                                                    \
    /* These are dimensions of the original Tensors, and selected block */     \
    /* sizes. The actual block sizes passed to all function above might be */  \
    /* smaller because of the partial blocks at the end. */                    \
    const StorageIndex m;                                                      \
    const StorageIndex k;                                                      \
    const StorageIndex n;                                                      \
    const StorageIndex bm;                                                     \
    const StorageIndex bk;                                                     \
    const StorageIndex bn;                                                     \
  }
733
+
734
// Tensor contraction kernel that does not fall back on Eigen: the custom
// GEMM kernel is used unconditionally. Needed because not all data types are
// supported by Eigen data packing and the default gebp_kernel (e.g. the
// quantized QInt8/QUInt8 path below).
#define REGISTER_TENSOR_CONTRACTION_KERNEL_NO_FALLBACK(RES_SCALAR, LHS_SCALAR, \
                                                       RHS_SCALAR)             \
                                                                               \
  template <typename StorageIndex, typename OutputMapper, typename LhsMapper,  \
            typename RhsMapper>                                                \
  struct TensorContractionKernel<RES_SCALAR, LHS_SCALAR, RHS_SCALAR,           \
                                 StorageIndex, OutputMapper, LhsMapper,        \
                                 RhsMapper> {                                  \
    /* m/k/n are full contraction dimensions; bm/bk/bn are block sizes. */     \
    TensorContractionKernel(StorageIndex m, StorageIndex k, StorageIndex n,    \
                            StorageIndex bm, StorageIndex bk, StorageIndex bn) \
        : m(m), k(k), n(n), bm(bm), bk(bk), bn(bn) {}                          \
                                                                               \
    /* Custom kernels support output scaling with beta. */                     \
    enum { HasBeta = true };                                                   \
                                                                               \
    using ResScalar = RES_SCALAR;                                              \
    using LhsScalar = LHS_SCALAR;                                              \
    using RhsScalar = RHS_SCALAR;                                              \
                                                                               \
    using Traits = typename internal::gebp_traits<LhsScalar, RhsScalar>;       \
                                                                               \
    using LhsBlock = ColMajorBlock<LhsScalar, StorageIndex>;                   \
    using RhsBlock = ColMajorBlock<RhsScalar, StorageIndex>;                   \
                                                                               \
    using DirectLhsAccess = DirectColMajorAccess<LhsMapper>;                   \
    using DirectRhsAccess = DirectColMajorAccess<RhsMapper>;                   \
                                                                               \
    /* Packed Lhs/Rhs block memory allocator.*/                                \
    typedef TensorContractionBlockMemAllocator<LhsScalar, RhsScalar>           \
        BlockMemAllocator;                                                     \
    typedef typename BlockMemAllocator::BlockMemHandle BlockMemHandle;         \
                                                                               \
    using LhsPacker =                                                          \
        gemm_pack_colmajor_block<LhsScalar, StorageIndex,                      \
                                 typename LhsMapper::SubMapper, ColMajor>;     \
    using RhsPacker =                                                          \
        gemm_pack_colmajor_block<RhsScalar, StorageIndex,                      \
                                 typename RhsMapper::SubMapper, ColMajor>;     \
                                                                               \
    using GemmKernelProviderType =                                             \
        GemmKernelProvider<ResScalar, LhsScalar, RhsScalar, StorageIndex,      \
                           OutputMapper>;                                      \
    static_assert(                                                             \
        GemmKernelProviderType::Defined,                                       \
        "Custom GEMM kernel is not registered for given scalar types");        \
    using GemmKernel = typename GemmKernelProviderType::GemmKernel;            \
                                                                               \
    /* Allocates one Lhs and one Rhs packed block. */                          \
    template <typename Device>                                                 \
    EIGEN_DEVICE_FUNC BlockMemHandle allocate(Device& d, LhsBlock* lhs_block,  \
                                              RhsBlock* rhs_block) {           \
      return BlockMemAllocator::allocate(                                      \
          d, bm, bk, bn, &lhs_block->packed_data, &rhs_block->packed_data);    \
    }                                                                          \
                                                                               \
    /* Allocates `num_slices` groups of (num_lhs, num_rhs) packed blocks in */ \
    /* one shot and wires the returned pointers into the block structs. */     \
    template <typename Device>                                                 \
    EIGEN_DEVICE_FUNC BlockMemHandle                                           \
    allocateSlices(Device& d, const int num_lhs, const int num_rhs,            \
                   const int num_slices, std::vector<LhsBlock>* lhs_blocks,    \
                   std::vector<RhsBlock>* rhs_blocks) {                        \
      eigen_assert(num_slices > 0);                                            \
      std::vector<std::vector<LhsScalar*>> lhs_mem(num_slices);                \
      std::vector<std::vector<RhsScalar*>> rhs_mem(num_slices);                \
                                                                               \
      BlockMemHandle block_mem = BlockMemAllocator::allocateSlices(            \
          d, bm, bk, bn, num_lhs, num_rhs, num_slices, lhs_mem.data(),         \
          rhs_mem.data());                                                     \
                                                                               \
      for (Index x = 0; x < num_slices; x++) {                                 \
        if (num_lhs > 0) lhs_blocks[x].resize(num_lhs);                        \
        for (Index m = 0; m < num_lhs; m++) {                                  \
          lhs_blocks[x][m].packed_data = lhs_mem[x][m];                        \
        }                                                                      \
        if (num_rhs > 0) rhs_blocks[x].resize(num_rhs);                        \
        for (Index n = 0; n < num_rhs; n++) {                                  \
          rhs_blocks[x][n].packed_data = rhs_mem[x][n];                        \
        }                                                                      \
      }                                                                        \
                                                                               \
      return block_mem;                                                        \
    }                                                                          \
                                                                               \
    template <typename Device>                                                 \
    EIGEN_DEVICE_FUNC static void deallocate(Device& d,                        \
                                             BlockMemHandle handle) {          \
      BlockMemAllocator::deallocate(d, handle);                                \
    }                                                                          \
                                                                               \
    /* Packs an Lhs block, or records direct access when the mapper allows */  \
    /* zero-copy use of the underlying tensor memory. */                       \
    EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packLhs(                          \
        LhsBlock* lhsBlock, const typename LhsMapper::SubMapper& data_mapper,  \
        const StorageIndex depth, const StorageIndex rows) {                   \
      const bool is_direct_access =                                            \
          DirectLhsAccess::value &&                                            \
          DirectLhsAccess::block(data_mapper, rows, depth,                     \
                                 bn > 0 ? divup(n, bn) : 0, lhsBlock);         \
                                                                               \
      if (!is_direct_access) {                                                 \
        lhsBlock->is_direct_access = false;                                    \
        LhsPacker()(lhsBlock->packed_data, data_mapper, rows, depth);          \
      }                                                                        \
    }                                                                          \
                                                                               \
    EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packRhs(                          \
        RhsBlock* rhsBlock, const typename RhsMapper::SubMapper& data_mapper,  \
        const StorageIndex depth, const StorageIndex cols) {                   \
      const bool is_direct_access =                                            \
          DirectRhsAccess::value &&                                            \
          DirectRhsAccess::block(data_mapper, depth, cols,                     \
                                 bm > 0 ? divup(m, bm) : 0, rhsBlock);         \
                                                                               \
      if (!is_direct_access) {                                                 \
        rhsBlock->is_direct_access = false;                                    \
        RhsPacker()(rhsBlock->packed_data, data_mapper, depth, cols);          \
      }                                                                        \
    }                                                                          \
                                                                               \
    /* Runs one GEMM on the given blocks; picks one of four call forms */      \
    /* depending on which side(s) use direct access. */                        \
    EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void invoke(                           \
        const OutputMapper& output_mapper, const LhsBlock& lhsBlock,           \
        const RhsBlock& rhsBlock, const StorageIndex rows,                     \
        const StorageIndex depth, const StorageIndex cols, const float alpha,  \
        const float beta) {                                                    \
      if ((DirectLhsAccess::value && lhsBlock.is_direct_access) &&             \
          (DirectRhsAccess::value && rhsBlock.is_direct_access)) {             \
        GemmKernel()(output_mapper, lhsBlock.raw_data, rhsBlock.raw_data,      \
                     rows, depth, cols, alpha, beta, /*ldA=*/lhsBlock.stride,  \
                     /*ldB=*/rhsBlock.stride,                                  \
                     /*transposeA=*/lhsBlock.transpose,                        \
                     /*transposeB=*/rhsBlock.transpose);                       \
                                                                               \
      } else if (DirectLhsAccess::value && lhsBlock.is_direct_access) {        \
        GemmKernel()(output_mapper, lhsBlock.raw_data, rhsBlock.packed_data,   \
                     rows, depth, cols, alpha, beta, /*ldA=*/lhsBlock.stride,  \
                     /*ldB=*/GemmKernel::kComputeStrideFromBlockDimensions,    \
                     /*transposeA=*/lhsBlock.transpose, /*transposeB=*/'N');   \
                                                                               \
      } else if (DirectRhsAccess::value && rhsBlock.is_direct_access) {        \
        GemmKernel()(output_mapper, lhsBlock.packed_data, rhsBlock.raw_data,   \
                     rows, depth, cols, alpha, beta,                           \
                     /*ldA=*/GemmKernel::kComputeStrideFromBlockDimensions,    \
                     /*ldB=*/rhsBlock.stride, /*transposeA=*/'N',              \
                     /*transposeB=*/rhsBlock.transpose);                       \
                                                                               \
      } else {                                                                 \
        GemmKernel()(output_mapper, lhsBlock.packed_data,                      \
                     rhsBlock.packed_data, rows, depth, cols, alpha, beta);    \
      }                                                                        \
    }                                                                          \
                                                                               \
   private:                                                                    \
    /* These are dimensions of the original Tensors, and selected block */     \
    /* sizes. The actual block sizes passed to all function above might be */  \
    /* smaller because of the partial blocks at the end. */                    \
    const StorageIndex m;                                                      \
    const StorageIndex k;                                                      \
    const StorageIndex n;                                                      \
    const StorageIndex bm;                                                     \
    const StorageIndex bk;                                                     \
    const StorageIndex bn;                                                     \
  }
893
+
894
+ REGISTER_TENSOR_CONTRACTION_KERNEL_WITH_FALLBACK(float, float, float);
895
+ REGISTER_TENSOR_CONTRACTION_KERNEL_NO_FALLBACK(Eigen::QInt32, Eigen::QInt8,
896
+ Eigen::QUInt8);
897
+
898
+ #undef REGISTER_TENSOR_CONTRACTION_KERNEL
899
+
900
+ #endif // defined(TENSORFLOW_USE_MKLDNN_CONTRACTION_KERNEL)
901
+
902
+ } // namespace internal
903
+ } // namespace Eigen
904
+
905
+ #endif // TENSORFLOW_TSL_FRAMEWORK_CONTRACTION_EIGEN_CONTRACTION_KERNEL_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/convolution/eigen_convolution_helpers.h ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_CONVOLUTION_EIGEN_CONVOLUTION_HELPERS_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_CONVOLUTION_EIGEN_CONVOLUTION_HELPERS_H_
18
+
19
+ #include "unsupported/Eigen/CXX11/Tensor" // from @eigen_archive
20
+
21
+ namespace Eigen {
22
+ namespace internal {
23
+
24
+ // TensorEvaluatorHasPartialPacket<TensorEvaluatorType, PacketType, IndexType>
25
+ // provides `value` that is true if TensorEvaluatorType has `PacketType
26
+ // partialPacket<PacketType>(IndexType, unpacket_traits<PacketType>::mask_t)
27
+ // const` and if the PacketType supports masked load.
28
+ //
29
+ // Partial packets are used to:
30
+ //
31
+ // 1) Split the packet over two columns in eigen based spatial convolution and
32
+ // use partial loads for each individual part before combining them to get the
33
+ // required packet. This class is used to pick the correct implementation of
34
+ // loadPacketStandard function.
35
+ //
36
+ // 2) Split the packet over two rows (within the same column) in eigen based
37
+ // cuboid convolution and use partial loads for each individual part before
38
+ // combining them to get the required packet. This class is used to pick the
39
+ // correct implementation of loadPacketStandard function. This usage is similar
40
+ // to the usage in eigen based spatial convolution described above.
41
+ //
42
+ // 3) Finalize packing of columns in gemm_pack_colmajor after processing
43
+ // vectorized part with full packets (see eigen_spatial_convolutions.h).
44
template <typename TensorEvaluatorType, typename PacketType, typename IndexType>
class TensorEvaluatorHasPartialPacket {
 public:
  // SFINAE probe: this overload is viable only when (a) PacketType supports
  // masked loads and (b) the evaluator exposes
  // `PacketType partialPacket<PacketType>(IndexType, mask_t) const`
  // returning exactly PacketType.
  template <typename TensorEvaluatorT, typename PacketT, typename IndexT>
  static auto functionExistsSfinae(
      typename std::enable_if<
          unpacket_traits<PacketT>::masked_load_available &&
          std::is_same<PacketT,
                       decltype(std::declval<const TensorEvaluatorT>()
                                    .template partialPacket<PacketT>(
                                        std::declval<IndexT>(),
                                        std::declval<typename unpacket_traits<
                                            PacketT>::mask_t>()))>::value>::
          type*) -> std::true_type;

  // Fallback overload, selected when the probe above is ill-formed.
  template <typename TensorEvaluatorT, typename PacketT, typename IndexT>
  static auto functionExistsSfinae(...) -> std::false_type;

  // std::true_type or std::false_type depending on which overload was chosen
  // during overload resolution with a nullptr argument.
  typedef decltype(functionExistsSfinae<TensorEvaluatorType, PacketType,
                                        IndexType>(nullptr)) status;

  static constexpr bool value = status::value;
};
67
+
68
+ // Compute a mask for loading/storing coefficients in/from a packet in a
69
+ // [from, to) range. If the mask bit is 1, element will be loaded/stored.
70
+ template <typename Packet>
71
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
72
+ typename std::enable_if<unpacket_traits<Packet>::masked_load_available,
73
+ typename unpacket_traits<Packet>::mask_t>::type
74
+ mask(int from, int to) {
75
+ const Index packet_size = internal::unpacket_traits<Packet>::size;
76
+ eigen_assert(0 <= from && to <= (packet_size + 1) && from < to);
77
+
78
+ using Mask = typename internal::unpacket_traits<Packet>::mask_t;
79
+ const Mask mask_max = std::numeric_limits<Mask>::max();
80
+
81
+ return (mask_max >> (packet_size - to)) ^ (mask_max >> (packet_size - from));
82
+ }
83
+
84
+ } // namespace internal
85
+ } // namespace Eigen
86
+
87
+ #endif // TENSORFLOW_TSL_FRAMEWORK_CONVOLUTION_EIGEN_CONVOLUTION_HELPERS_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/convolution/eigen_spatial_convolutions-inl.h ADDED
@@ -0,0 +1,1772 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_CONVOLUTION_EIGEN_SPATIAL_CONVOLUTIONS_INL_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_CONVOLUTION_EIGEN_SPATIAL_CONVOLUTIONS_INL_H_
18
+
19
+ #include "tsl/framework/convolution/eigen_convolution_helpers.h"
20
+
21
+ // Note this header is used in both TF and TFLite.
22
+ namespace Eigen {
23
+
24
+ namespace internal {
25
+
26
+ #if !EIGEN_ALTIVEC_USE_CUSTOM_PACK
27
+ // WARNING: Most of the code here implicitly assumes that the matrix is in
28
+ // ColMajor layout. This is guaranteed by the tensor contraction (see
29
+ // TensorContraction.h).
30
+ //
31
+ // Inside Eigen a tensor contraction is represented by a matrix multiplication.
32
+ // We don't want to actually extract image patches and reshape the result into
33
+ // a matrix (this involves allocating huge extra memory), so the patch
34
+ // extraction and reshape operations are implicit.
35
+ //
36
+ // TensorContractionInputMapper takes a matrix index and returns the coefficient
37
+ // (or the packet) of the "virtual tensor", that would be at that index if we
38
+ // were to actually reshape the result of patch extraction.
39
+ //
40
+ // TensorContractionSubMapper provides a similar view into the "virtual matrix"
41
+ // at the given vertical and horizontal offsets.
42
+ //
43
+ // "Virtual matrix" dimensions:
44
+ // *0: kernelChannels * kernelRows * kernelCols;
45
+ // 1: out_height * out_width; * OTHERS (e.g batches, etc...)
46
+ //
47
+ // *) extracted patches are continuous in memory (innermost dimension assuming
48
+ // col major layout)
49
+ //
50
+ // With this dimensions:
51
+ // row - offset within a single patch (in code: patchId)
52
+ // col - index of the extracted patch (in code: patchIndex)
53
+ // patchIndex ∈ [0..num_patches * OTHERS] (batch and other dimensions)
54
+ //
55
+ // TODO(ezhulenev): Consolidate this part of the code with the image patch
56
+ // extraction code since they are both very similar.
57
+
58
+ template <typename NewDimension, Index Rows, Index Cols, typename ArgType,
59
+ typename Device, typename Scalar_, typename Index,
60
+ typename nocontract_t, typename contract_t, int Side, int packet_size,
61
+ bool inner_dim_contiguous, bool inner_dim_reordered, int Alignment>
62
+ class TensorContractionInputMapper<
63
+ Scalar_, Index, Side,
64
+ TensorEvaluator<
65
+ const TensorReshapingOp<NewDimension,
66
+ const TensorImagePatchOp<Rows, Cols, ArgType> >,
67
+ Device>,
68
+ nocontract_t, contract_t, packet_size, inner_dim_contiguous,
69
+ inner_dim_reordered, Alignment> {
70
+ public:
71
+ typedef Scalar_ Scalar;
72
+
73
+ typedef TensorContractionInputMapper<
74
+ Scalar, Index, Side,
75
+ TensorEvaluator<
76
+ const TensorReshapingOp<
77
+ NewDimension, const TensorImagePatchOp<Rows, Cols, ArgType> >,
78
+ Device>,
79
+ nocontract_t, contract_t, packet_size, inner_dim_contiguous,
80
+ inner_dim_reordered, Alignment>
81
+ Self;
82
+
83
+ typedef TensorContractionSubMapper<
84
+ Scalar, Index, Side,
85
+ TensorEvaluator<
86
+ const TensorReshapingOp<
87
+ NewDimension, const TensorImagePatchOp<Rows, Cols, ArgType> >,
88
+ Device>,
89
+ nocontract_t, contract_t, packet_size, inner_dim_contiguous,
90
+ inner_dim_reordered, Alignment>
91
+ SubMapper;
92
+
93
+ typedef SubMapper VectorMapper;
94
+ typedef SubMapper LinearMapper;
95
+ typedef typename packet_traits<Scalar>::type Packet;
96
+
97
+ typedef TensorEvaluator<ArgType, Device> TensorEvaluatorT;
98
+
99
  // Builds the input mapper from the evaluator of reshape(image_patch(input)).
  // All patch geometry (depth/rows/cols, strides, padding, output sizes) is
  // cached here so that coefficient/packet loads reduce to plain index
  // arithmetic. The nocontract/contract arguments are unused for this
  // specialization.
  EIGEN_DEVICE_FUNC
  TensorContractionInputMapper(
      const TensorEvaluator<
          const TensorReshapingOp<
              NewDimension, const TensorImagePatchOp<Rows, Cols, ArgType> >,
          Device>& tensor,
      const nocontract_t&, const nocontract_t&, const contract_t&,
      const contract_t&)
      : m_impl(tensor.impl().impl()) {
    // Patch dimensions come from the TensorImagePatchOp evaluator; their
    // position in dimensions() depends on the data layout.
    Index patch_rows;
    Index patch_depth;
    if (internal::traits<ArgType>::Layout == ColMajor) {
      patch_depth = tensor.impl().dimensions()[0];
      patch_rows = tensor.impl().dimensions()[1];
      m_patch_cols = tensor.impl().dimensions()[2];
      m_num_patches = tensor.impl().dimensions()[3];
    } else {
      const size_t NumDims = tensor.impl().dimensions().size();
      patch_depth = tensor.impl().dimensions()[NumDims - 1];
      patch_rows = tensor.impl().dimensions()[NumDims - 2];
      m_patch_cols = tensor.impl().dimensions()[NumDims - 3];
      m_num_patches = tensor.impl().dimensions()[NumDims - 4];
    }

    // Strides for navigating through the single patch.
    m_patch_row_stride = patch_depth;
    m_patch_col_stride = patch_rows * m_patch_row_stride;

    m_patch_row_inflate_strides = tensor.impl().rowInflateStride();
    m_patch_col_inflate_strides = tensor.impl().colInflateStride();

    m_colStride = patch_rows;

    m_outputRows = tensor.impl().outputRows();
    m_outputCols = tensor.impl().outputCols();
    m_row_strides = tensor.impl().userRowStride();
    m_col_strides = tensor.impl().userColStride();

    m_in_row_strides = tensor.impl().userInRowStride();
    m_in_col_strides = tensor.impl().userInColStride();

    // Dimensions of the underlying (pre-patch) input tensor.
    if (internal::traits<ArgType>::Layout == ColMajor) {
      m_inputRows = tensor.impl().impl().dimensions()[1];
      m_inputCols = tensor.impl().impl().dimensions()[2];
    } else {
      const int NumDims = tensor.impl().impl().dimensions().size();
      m_inputRows = tensor.impl().impl().dimensions()[NumDims - 2];
      m_inputCols = tensor.impl().impl().dimensions()[NumDims - 3];
    }

    // Strides (in coefficients) for navigating the input tensor.
    m_rowInputStride = patch_depth;
    m_colInputStride = patch_depth * m_inputRows;
    m_patchInputStride = patch_depth * m_inputRows * m_inputCols;

    m_rowPaddingTop = tensor.impl().rowPaddingTop();
    m_colPaddingLeft = tensor.impl().colPaddingLeft();

    // Precomputed fast integer divisors for the hot index computations in the
    // loadCoeff/loadPacket paths below.
    m_fastPatchRowStride =
        internal::TensorIntDivisor<Index>(m_patch_row_stride);
    m_fastPatchColStride =
        internal::TensorIntDivisor<Index>(m_patch_col_stride);
    m_fastInputRowStride =
        internal::TensorIntDivisor<Index>(m_patch_row_inflate_strides);
    m_fastInputColStride =
        internal::TensorIntDivisor<Index>(m_patch_col_inflate_strides);
    m_fastNumPatches = internal::TensorIntDivisor<Index>(m_num_patches);
    m_fastColStride = internal::TensorIntDivisor<Index>(m_colStride);
    m_fastOutputRows = internal::TensorIntDivisor<Index>(m_outputRows);
    m_fastDimZero = internal::TensorIntDivisor<Index>(patch_depth);
  }
  // Copy constructor: plain member-wise copy of all cached geometry.
  // Mappers may be copied by the contraction framework, so this must not
  // share mutable state with `base_mapper`.
  EIGEN_DEVICE_FUNC
  TensorContractionInputMapper(const TensorContractionInputMapper& base_mapper)
      : m_impl(base_mapper.m_impl) {
    m_patch_cols = base_mapper.m_patch_cols;
    m_num_patches = base_mapper.m_num_patches;

    m_patch_row_stride = base_mapper.m_patch_row_stride;
    m_patch_col_stride = base_mapper.m_patch_col_stride;

    m_patch_row_inflate_strides = base_mapper.m_patch_row_inflate_strides;
    m_patch_col_inflate_strides = base_mapper.m_patch_col_inflate_strides;

    m_colStride = base_mapper.m_colStride;

    m_rowInputStride = base_mapper.m_rowInputStride;
    m_colInputStride = base_mapper.m_colInputStride;
    m_patchInputStride = base_mapper.m_patchInputStride;

    m_inputRows = base_mapper.m_inputRows;
    m_inputCols = base_mapper.m_inputCols;

    m_outputRows = base_mapper.m_outputRows;
    m_outputCols = base_mapper.m_outputCols;
    m_row_strides = base_mapper.m_row_strides;
    m_col_strides = base_mapper.m_col_strides;

    m_in_row_strides = base_mapper.m_in_row_strides;
    m_in_col_strides = base_mapper.m_in_col_strides;

    m_rowPaddingTop = base_mapper.m_rowPaddingTop;
    m_colPaddingLeft = base_mapper.m_colPaddingLeft;

    m_fastPatchRowStride = base_mapper.m_fastPatchRowStride;
    m_fastPatchColStride = base_mapper.m_fastPatchColStride;
    m_fastInputRowStride = base_mapper.m_fastInputRowStride;
    m_fastInputColStride = base_mapper.m_fastInputColStride;
    m_fastNumPatches = base_mapper.m_fastNumPatches;
    m_fastColStride = base_mapper.m_fastColStride;
    m_fastOutputRows = base_mapper.m_fastOutputRows;
    m_fastDimZero = base_mapper.m_fastDimZero;
  }
+ // If true, turns off some optimizations for loading packets since the image
213
+ // patches are "non-standard" such as there are non-trivial strides or
214
+ // inflations in the input.
215
+ EIGEN_DEVICE_FUNC
216
+ EIGEN_ALWAYS_INLINE bool nonStandardPatches() const {
217
+ return m_in_row_strides != 1 || m_in_col_strides != 1 ||
218
+ m_patch_row_inflate_strides != 1 || m_patch_col_inflate_strides != 1;
219
+ }
220
  // Returns a sub mapper anchored at (depth offset i, column offset j).
  EIGEN_DEVICE_FUNC
  EIGEN_STRONG_INLINE SubMapper getSubMapper(Index i, Index j) const {
    return SubMapper(*this, i, j);
  }

  // LinearMapper is the same type as SubMapper for this specialization.
  EIGEN_DEVICE_FUNC
  EIGEN_STRONG_INLINE LinearMapper getLinearMapper(Index i, Index j) const {
    return LinearMapper(*this, i, j);
  }

  // Loads a single coefficient at `row` within patch 0.
  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE Scalar operator()(Index row) const {
    Index rowIndex, colIndex, otherIndex;
    computeBaseIndices(0, rowIndex, colIndex, otherIndex);
    return loadCoeff(row, rowIndex, colIndex, otherIndex);
  }

  // Load the coefficient at the patchIndex location instead of the usual
  // m_rowIndex, m_colIndex, m_otherIndex. This is currently only used by the
  // gpu code.
  EIGEN_DEVICE_FUNC
  EIGEN_STRONG_INLINE Scalar operator()(Index row, Index patchIndex) const {
    Index rowIndex, colIndex, otherIndex;
    computeBaseIndices(patchIndex, rowIndex, colIndex, otherIndex);
    return loadCoeff(row, rowIndex, colIndex, otherIndex);
  }

  // Loads a full packet starting at `row` within patch 0.
  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE Packet loadPacket(Index row) const {
    Index rowIndex, colIndex, otherIndex;
    computeBaseIndices(0, rowIndex, colIndex, otherIndex);
    return loadPacket(row, rowIndex, colIndex, otherIndex);
  }

  // Load the packet at the patchIndex location instead of the usual m_rowIndex,
  // m_colIndex, m_otherIndex. This is currently only used by the gpu code.
  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE Packet loadPacket(Index row, Index patchIndex) const {
    Index rowIndex, colIndex, otherIndex;
    computeBaseIndices(patchIndex, rowIndex, colIndex, otherIndex);
    return loadPacket(row, rowIndex, colIndex, otherIndex);
  }

  // Evaluator of the underlying input expression.
  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE const TensorEvaluator<ArgType, Device>& impl() const {
    return m_impl;
  }

  // Patch geometry accessors. Note: patch depth equals the input row stride
  // and patch rows equals the intra-patch column stride, so the cached stride
  // members double as these dimensions (register-pressure optimization).
  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE Index patchDepth() const { return m_rowInputStride; }
  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE Index patchRows() const { return m_colStride; }
  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE Index patchCols() const { return m_patch_cols; }
 private:
  // The sub mapper needs direct access to the cached geometry members and to
  // the private loadCoeff*/loadPacket* helpers below.
  friend class TensorContractionSubMapper<
      Scalar, Index, Side,
      TensorEvaluator<
          const TensorReshapingOp<
              NewDimension, const TensorImagePatchOp<Rows, Cols, ArgType> >,
          Device>,
      nocontract_t, contract_t, packet_size, inner_dim_contiguous,
      inner_dim_reordered, Alignment>;
  // Load coefficient from a patch specified by the "within patch offset"
  // (patchId) and the precomputed indices of the first element of the patch.
  // Handles arbitrary input strides and inflation (dilation): coordinates
  // that fall into padding or between inflated input elements yield zero.
  EIGEN_DEVICE_FUNC
  EIGEN_STRONG_INLINE Scalar loadCoeff(Index patchId, Index rowIndex,
                                       Index colIndex, Index otherIndex) const {
    // Find the offset of the element wrt the location of the first element.
    const Index patchOffset = patchId / m_fastDimZero;

    const Index colOffset = patchOffset / m_fastColStride;
    const Index inputCol = colIndex + colOffset * m_in_col_strides;
    // With inflation, only input columns that are exact multiples of the
    // inflate stride map to real input elements.
    const Index origInputCol =
        (m_patch_col_inflate_strides == 1)
            ? inputCol
            : ((inputCol >= 0) ? (inputCol / m_fastInputColStride) : 0);

    const Index rowOffset = patchOffset - colOffset * m_colStride;
    const Index inputRow = rowIndex + rowOffset * m_in_row_strides;
    const Index origInputRow =
        (m_patch_row_inflate_strides == 1)
            ? inputRow
            : ((inputRow >= 0) ? (inputRow / m_fastInputRowStride) : 0);
    // Out of bounds, or between inflated elements: implicit zero.
    if (origInputCol < 0 || origInputRow < 0 || origInputCol >= m_inputCols ||
        origInputRow >= m_inputRows ||
        (inputCol != origInputCol * m_patch_col_inflate_strides) ||
        (inputRow != origInputRow * m_patch_row_inflate_strides)) {
      return Scalar(0);
    }
    const Index depth = patchId - patchOffset * patchDepth();
    const Index inputIndex = depth + origInputRow * m_rowInputStride +
                             origInputCol * m_colInputStride + otherIndex;
    return m_impl.coeff(inputIndex);
  }
  // This is the same as loadCoeff(...), but optimized for all `inflate_strides`
  // and `in_strides` equal to 1 (template specialization without templates).
  EIGEN_DEVICE_FUNC
  EIGEN_STRONG_INLINE Scalar loadCoeffStandard(Index patchId, Index rowIndex,
                                               Index colIndex,
                                               Index otherIndex) const {
    eigen_assert(!nonStandardPatches());

    // Find the offset of the element wrt the location of the first element.
    const Index patchOffset = patchId / m_fastDimZero;
    const Index colOffset = patchOffset / m_fastColStride;
    const Index rowOffset = patchOffset - colOffset * m_colStride;
    const Index inputCol = colIndex + colOffset;
    const Index inputRow = rowIndex + rowOffset;
    // Out-of-bounds coordinates fall into (implicit zero) padding.
    if (inputCol < 0 || inputCol >= m_inputCols || inputRow < 0 ||
        inputRow >= m_inputRows) {
      return Scalar(0);
    }
    const Index depth = patchId - patchOffset * patchDepth();
    const Index inputIndex = depth + inputRow * m_rowInputStride +
                             inputCol * m_colInputStride + otherIndex;
    return m_impl.coeff(inputIndex);
  }
  // Load packet from a patch specified by the "within patch offset"
  // (patchId) and the precomputed indices of the first element of the patch.
  // Dispatches to the slow element-wise path for non-standard patches and to
  // the standard (possibly masked) path otherwise.
  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE Packet loadPacket(Index patchId, Index rowIndex,
                                        Index colIndex,
                                        Index otherIndex) const {
    const Index packetSize = internal::unpacket_traits<Packet>::size;
    EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(patchId < patchDepth() * patchRows() * m_patch_cols);

    if (nonStandardPatches()) {
      return packetWithPossibleZero(patchId, rowIndex, colIndex, otherIndex);
    }
    // decltype(m_impl) is const-qualified, selecting the loadPacketStandard
    // overload matching this evaluator's partial-packet capability.
    typedef decltype(m_impl) TensorEvaluatorT;
    return loadPacketStandard<Packet, TensorEvaluatorT>(patchId, rowIndex,
                                                        colIndex, otherIndex);
  }
  // Helper function to load a 'partial' packet - this is the single column
  // part of a packet that is split across two columns. In the 'partial' packet,
  // the elements corresponding to the column (specified through colOffset) are
  // loaded and the rest of the elements are zero-filled into the 'partial'
  // packet. This function is called from loadPacketStandardFromTwoColumns().
  // This code path is exercised only when the packet type supports masked load
  // and when the partial packet load is available in the TensorEvaluator.
  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE Packet loadPartialPacketStandard(
      Index rowIndex, Index colIndex, Index otherIndex, Index patchId,
      const Index span[], const Index patchOffsets[], Index colOffset) const {
    const Index inputCol = colIndex + colOffset;
    const Index rowOffsets[2] = {patchOffsets[0] - colOffset * m_colStride,
                                 patchOffsets[1] - colOffset * m_colStride};
    const Index inputRows[2] = {rowIndex + rowOffsets[0],
                                rowIndex + rowOffsets[1]};

    if (inputRows[0] >= m_inputRows || inputRows[1] < 0 ||
        inputCol >= m_inputCols || inputCol < 0) {
      // Partial packet is all zeros
      return internal::pset1<Packet>(Scalar(0));
    } else if (inputRows[0] >= 0 && inputRows[1] < m_inputRows) {
      // From inputIndex-span[0], we need to load elements starting from index
      // span[0] all the way upto (and including) span[1].
      const Index depth = patchId - patchOffsets[0] * patchDepth();
      const Index inputIndex = depth + inputRows[0] * m_rowInputStride +
                               inputCol * m_colInputStride + otherIndex;
      return m_impl.template partialPacket<Packet>(
          inputIndex - span[0], mask<Packet>(span[0], span[1] + 1));
    } else {
      // Using slow path for this partial packet.
      // We need to load elements starting from index span[0] all the way upto
      // (and including) span[1]. We split this load into 3 parts:
      //   0 : span[0]-1           - Zeros will be loaded for these indices
      //   span[0] : span[1]       - Elements will be loaded here for these
      //                             indices
      //   span[1]+1 : packetSize-1 - Zeros will be loaded for these indices
      const Index packetSize = internal::unpacket_traits<Packet>::size;
      EIGEN_ALIGN_MAX
      std::remove_const_t<Scalar> values[packetSize];
      for (int i = 0; i < span[0]; ++i) values[i] = Scalar(0);
      for (int i = span[0]; i < span[1] + 1; ++i)
        values[i] =
            loadCoeff(patchId - span[0] + i, rowIndex, colIndex, otherIndex);
      for (int i = span[1] + 1; i < packetSize; ++i) values[i] = Scalar(0);
      return internal::pload<Packet>(values);
    }
  }
  // Helper function to load a packet that is split across two columns.
  // If required, this function is called from loadPacketStandard() when the
  // packet type supports masked load and when the partial packet load is
  // available in the TensorEvaluator.
  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE Packet loadPacketStandardFromTwoColumns(
      Index patchId, Index rowIndex, Index colIndex, Index otherIndex,
      const Index patchOffsets[], const Index colOffsets[]) const {
    eigen_assert(colOffsets[1] == colOffsets[0] + 1);
    const Index packetSize = internal::unpacket_traits<Packet>::size;

    // Packet to load will be split into 2 parts where each part spans a single
    // column. First determine where to split: patchIdSplit is the last
    // element that still belongs to the first column.
    const Index patchIdSplit =
        ((colOffsets[1] * m_colStride) * m_rowInputStride) - 1;
    const Index patchOffsetSplit = patchIdSplit / m_fastDimZero;

    // patchIds[i]:          patchId corresponding to partial packet i
    // spans[i]:             Start and end indices corresponding to the elements
    //                       to be loaded for partial packet i
    // patchOffsets2Cols[i]: patchOffsets corresponding to partial packet i
    const Index patchIds[2] = {patchId, patchIdSplit + 1};
    const Index spans[2][2] = {{0, patchIdSplit - patchId},
                               {patchIdSplit - patchId + 1, packetSize - 1}};
    const Index patchOffsets2Cols[2][2] = {
        {patchOffsets[0], patchOffsetSplit},
        {patchOffsetSplit + 1, patchOffsets[1]}};

    // Load partial packets and do bit-wise OR to generate required packet
    // (each partial packet is zero outside its own span, so OR merges them).
    return internal::por<Packet>(
        loadPartialPacketStandard(rowIndex, colIndex, otherIndex, patchIds[0],
                                  spans[0], patchOffsets2Cols[0],
                                  colOffsets[0]),
        loadPartialPacketStandard(rowIndex, colIndex, otherIndex, patchIds[1],
                                  spans[1], patchOffsets2Cols[1],
                                  colOffsets[1]));
  }
  // Helper function to load a packet that lies entirely within a single
  // column. If required, this function is called from loadPacketStandard().
  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE Packet loadPacketStandardFromSingleColumn(
      Index patchId, Index rowIndex, Index colIndex, Index otherIndex,
      const Index patchOffsets[], const Index colOffsets[],
      const Index inputCols[]) const {
    eigen_assert(colOffsets[0] == colOffsets[1]);
    const Index rowOffsets[2] = {patchOffsets[0] - colOffsets[0] * m_colStride,
                                 patchOffsets[1] - colOffsets[1] * m_colStride};
    eigen_assert(rowOffsets[0] <= rowOffsets[1]);
    const Index inputRows[2] = {rowIndex + rowOffsets[0],
                                rowIndex + rowOffsets[1]};

    if (inputRows[0] >= m_inputRows || inputRows[1] < 0) {
      // all zeros
      return internal::pset1<Packet>(Scalar(0));  // all zeros
    }

    if (inputRows[0] >= 0 && inputRows[1] < m_inputRows) {
      // no padding: the whole packet is contiguous in the input.
      const Index depth = patchId - patchOffsets[0] * patchDepth();
      const Index inputIndex = depth + inputRows[0] * m_rowInputStride +
                               inputCols[0] * m_colInputStride + otherIndex;
      return m_impl.template packet<Unaligned>(inputIndex);
    }
    // Packet straddles a padding boundary: fall back to element-wise load.
    return packetWithPossibleZero(patchId, rowIndex, colIndex, otherIndex);
  }
  // Load standard packet from a patch specified by the "within patch offset"
  // (patchId) and the precomputed indices of the first element of the patch.
  // This function will be called if partial packet loading is not available
  // for the TensorEvaluator or if the packet type does not support masked
  // load.
  template <typename PacketT, typename TensorEvaluatorT>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename std::enable_if<
      !TensorEvaluatorHasPartialPacket<TensorEvaluatorT, PacketT, Index>::value,
      PacketT>::type
  loadPacketStandard(Index patchId, Index rowIndex, Index colIndex,
                     Index otherIndex) const {
    // NOTE(review): this overload uses the class-level `Packet` typedef
    // rather than PacketT; it appears to be instantiated only with
    // PacketT == Packet — confirm before adding new packet types.
    const Index packetSize = internal::unpacket_traits<Packet>::size;
    EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(patchId < patchDepth() * patchRows() * m_patch_cols);

    eigen_assert(!nonStandardPatches());

    // If patch depth is a multiple of the packet size, a packet can never
    // straddle a row boundary within the patch: take the fast path.
    if ((patchDepth() % packetSize) == 0) {
      return loadPacketFast(patchId, rowIndex, colIndex, otherIndex);
    }

    // Offsets and input calculation here are identical to
    // loadCoeffStandard(...), but repeated twice (for the first and the last
    // element of the packet).
    const Index patchOffsets[2] = {patchId / m_fastDimZero,
                                   (patchId + packetSize - 1) / m_fastDimZero};
    const Index colOffsets[2] = {patchOffsets[0] / m_fastColStride,
                                 patchOffsets[1] / m_fastColStride};
    const Index inputCols[2] = {colIndex + colOffsets[0],
                                colIndex + colOffsets[1]};

    if (inputCols[0] >= m_inputCols || inputCols[1] < 0) {
      // all zeros
      return internal::pset1<Packet>(Scalar(0));
    }
    if (inputCols[0] == inputCols[1]) {
      return loadPacketStandardFromSingleColumn(patchId, rowIndex, colIndex,
                                                otherIndex, patchOffsets,
                                                colOffsets, inputCols);
    }
    // Packet spans multiple columns: no masked load available, go slow.
    return packetWithPossibleZero(patchId, rowIndex, colIndex, otherIndex);
  }
  // Load standard packet from a patch specified by the "within patch offset"
  // (patchId) and the precomputed indices of the first element of the patch.
  // This function will be called if partial packet loading is available for
  // the TensorEvaluator and if the packet type supports masked load.
  // The only difference between this and the other case is that if the packet
  // to load is split across two columns, then in this case instead of going to
  // the slow (element-by-element) load, we load two packets - each containing
  // elements from one of the columns (rest of the elements of the packets are
  // zeroes), and then combine these two packets to generate the required
  // packet. The idea is to enable fast load (if possible) of these 'partial'
  // packets.
  template <typename PacketT, typename TensorEvaluatorT>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename std::enable_if<
      TensorEvaluatorHasPartialPacket<TensorEvaluatorT, PacketT, Index>::value,
      PacketT>::type
  loadPacketStandard(Index patchId, Index rowIndex, Index colIndex,
                     Index otherIndex) const {
    const Index packetSize = internal::unpacket_traits<PacketT>::size;
    EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(patchId < patchDepth() * patchRows() * m_patch_cols);

    eigen_assert(!nonStandardPatches());

    // If patch depth is a multiple of the packet size, a packet can never
    // straddle a row boundary within the patch: take the fast path.
    if ((patchDepth() % packetSize) == 0) {
      return loadPacketFast(patchId, rowIndex, colIndex, otherIndex);
    }

    // Offsets and input calculation here are identical to
    // loadCoeffStandard(...), but repeated twice (for the first and the last
    // element of the packet).
    const Index patchOffsets[2] = {patchId / m_fastDimZero,
                                   (patchId + packetSize - 1) / m_fastDimZero};
    const Index colOffsets[2] = {patchOffsets[0] / m_fastColStride,
                                 patchOffsets[1] / m_fastColStride};
    const Index inputCols[2] = {colIndex + colOffsets[0],
                                colIndex + colOffsets[1]};

    if (inputCols[0] >= m_inputCols || inputCols[1] < 0) {
      // all zeros
      return internal::pset1<PacketT>(Scalar(0));
    }
    if (inputCols[0] == inputCols[1]) {
      return loadPacketStandardFromSingleColumn(patchId, rowIndex, colIndex,
                                                otherIndex, patchOffsets,
                                                colOffsets, inputCols);
    }
    if (inputCols[1] == inputCols[0] + 1) {
      // Split across two adjacent columns: combine two masked partial loads.
      return loadPacketStandardFromTwoColumns(
          patchId, rowIndex, colIndex, otherIndex, patchOffsets, colOffsets);
    }
    return packetWithPossibleZero(patchId, rowIndex, colIndex, otherIndex);
  }
  // Fast path: requires standard patches and patch depth divisible by the
  // packet size, so the whole packet lies at a single (row, col) of the patch
  // and is contiguous in the input.
  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE Packet loadPacketFast(Index patchId, Index rowIndex,
                                            Index colIndex,
                                            Index otherIndex) const {
    const Index packetSize = internal::unpacket_traits<Packet>::size;
    EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(patchId < patchDepth() * patchRows() * m_patch_cols);

    eigen_assert(!nonStandardPatches());
    eigen_assert((patchDepth() % packetSize) == 0);
    // Find the offset of the element wrt the location of the first element.
    const Index patchOffset = patchId / m_fastDimZero;
    // The whole packet must map to the same patch offset.
    eigen_assert((patchId + packetSize - 1) / m_fastDimZero == patchOffset);

    const Index colOffset = patchOffset / m_fastColStride;
    const Index rowOffset = patchOffset - colOffset * m_colStride;
    const Index inputCol = colIndex + colOffset;
    const Index inputRow = rowIndex + rowOffset;
    if (inputCol < 0 || inputRow < 0 || inputCol >= m_inputCols ||
        inputRow >= m_inputRows) {
      // all zeros
      return internal::pset1<Packet>(Scalar(0));
    }
    // no padding
    const Index depth = patchId - patchOffset * patchDepth();
    const Index inputIndex = depth + inputRow * m_rowInputStride +
                             inputCol * m_colInputStride + otherIndex;
    return m_impl.template packet<Unaligned>(inputIndex);
  }
+
601
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet packetWithPossibleZero(
602
+ Index patchId, Index rowIndex, Index colIndex, Index otherIndex) const {
603
+ const int packetSize = internal::unpacket_traits<Packet>::size;
604
+ EIGEN_ALIGN_MAX
605
+ std::remove_const_t<Scalar> values[packetSize];
606
+ for (int i = 0; i < packetSize; ++i) {
607
+ values[i] = loadCoeff(patchId + i, rowIndex, colIndex, otherIndex);
608
+ }
609
+ Packet rslt = internal::pload<Packet>(values);
610
+ return rslt;
611
+ }
612
  // Decomposes a linear patch index into the input-space coordinates of the
  // patch's first element: rowIndex/colIndex (already shifted by user strides
  // and padding) and otherIndex (offset into the non-spatial dimension,
  // pre-multiplied by the per-patch input stride).
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void computeBaseIndices(
      Index patchIndex, Index& rowIndex, Index& colIndex,
      Index& otherIndex) const {
    const size_t NumInputDims = array_size<
        typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
    // With 3 input dims there is no batch dimension, so otherIndex is 0.
    otherIndex = (NumInputDims == 3) ? 0 : patchIndex / m_fastNumPatches;
    const Index patch2DIndex = (NumInputDims == 3)
                                   ? patchIndex
                                   : (patchIndex - otherIndex * m_num_patches);
    otherIndex *= m_patchInputStride;
    colIndex = patch2DIndex / m_fastOutputRows;
    rowIndex = patch2DIndex - colIndex * m_outputRows;
    colIndex = colIndex * m_col_strides - m_colPaddingLeft;
    rowIndex = rowIndex * m_row_strides - m_rowPaddingTop;
  }
  Index m_patch_cols;   // number of columns in the patch
  Index m_num_patches;  // number of patches to extract.

  // Strides for navigating through the single patch.
  Index m_patch_row_stride;
  Index m_patch_col_stride;
  internal::TensorIntDivisor<Index> m_fastPatchRowStride;
  internal::TensorIntDivisor<Index> m_fastPatchColStride;

  Index m_patch_row_inflate_strides;  // the strides for row inflation in the
                                      // image patch
  Index m_patch_col_inflate_strides;  // the strides for col inflation in the
                                      // image patch
  // Fast representation of inflation strides.
  internal::TensorIntDivisor<Index> m_fastInputRowStride;
  internal::TensorIntDivisor<Index> m_fastInputColStride;

  // NOTE(review): m_otherStride is never initialized or read in the code
  // visible here — confirm whether it can be removed.
  Index m_otherStride;
  Index m_colStride;  // intra-patch column stride (== patch rows)
  internal::TensorIntDivisor<Index> m_fastNumPatches;
  internal::TensorIntDivisor<Index> m_fastColStride;

  Index m_rowInputStride;    // row stride in the input tensor
  Index m_colInputStride;    // col stride in the input tensor
  Index m_patchInputStride;  // patch stride in the input tensor

  Index m_inputRows;  // Number of rows in the input tensor
  Index m_inputCols;  // Number of cols in the input tensor

  Index m_outputRows;  // Number of convolution output rows
  Index m_outputCols;  // Number of convolution output column

  Index m_row_strides;  // User specified row stride
  Index m_col_strides;  // User specified col stride

  Index m_in_row_strides;  // User specified input row stride
  Index m_in_col_strides;  // User specified input col stride

  Index m_rowPaddingTop;   // Row padding
  Index m_colPaddingLeft;  // Column padding

  internal::TensorIntDivisor<Index> m_fastOutputRows;
  internal::TensorIntDivisor<Index> m_fastDimZero;  // divisor by patch depth

  // Evaluator of the underlying (pre-patch) input expression.
  const TensorEvaluator<ArgType, Device> m_impl;
};
// Sub mapper over the image-patch input mapper: anchors loads at a fixed
// (depth offset, column offset) pair whose base input coordinates are
// precomputed once at construction.
template <typename NewDimension, Index Rows, Index Cols, typename ArgType,
          typename Device, typename Scalar, typename Index,
          typename nocontract_t, typename contract_t, int Side, int packet_size,
          bool inner_dim_contiguous, bool inner_dim_reordered, int Alignment>
class TensorContractionSubMapper<
    Scalar, Index, Side,
    TensorEvaluator<
        const TensorReshapingOp<NewDimension,
                                const TensorImagePatchOp<Rows, Cols, ArgType> >,
        Device>,
    nocontract_t, contract_t, packet_size, inner_dim_contiguous,
    inner_dim_reordered, Alignment> {
 public:
  typedef typename packet_traits<Scalar>::type Packet;
  typedef typename packet_traits<Scalar>::half HalfPacket;

  // The input mapper this sub mapper delegates to.
  typedef TensorContractionInputMapper<
      Scalar, Index, Side,
      TensorEvaluator<
          const TensorReshapingOp<
              NewDimension, const TensorImagePatchOp<Rows, Cols, ArgType> >,
          Device>,
      nocontract_t, contract_t, packet_size, inner_dim_contiguous,
      inner_dim_reordered, Alignment>
      ParentMapper;

  typedef TensorContractionSubMapper<
      Scalar, Index, Side,
      TensorEvaluator<
          const TensorReshapingOp<
              NewDimension, const TensorImagePatchOp<Rows, Cols, ArgType> >,
          Device>,
      nocontract_t, contract_t, packet_size, inner_dim_contiguous,
      inner_dim_reordered, Alignment>
      Self;

  typedef Self LinearMapper;

  typedef typename ParentMapper::TensorEvaluatorT TensorEvaluatorT;
  // Anchors the sub mapper at (vert_offset, horiz_offset) relative to the
  // parent mapper and caches the base input coordinates of that column.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorContractionSubMapper(
      const ParentMapper& base_mapper, Index vert_offset, Index horiz_offset)
      : m_depth_offset(vert_offset),
        m_col_offset(horiz_offset),
        m_base_mapper(base_mapper) {
    m_base_mapper.computeBaseIndices(m_col_offset, m_rowIndex, m_colIndex,
                                     m_otherIndex);
  }
  // Anchors relative to another sub mapper: offsets accumulate.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorContractionSubMapper(
      const Self& base_mapper, Index vert_offset, Index horiz_offset)
      : m_depth_offset(vert_offset + base_mapper.m_depth_offset),
        m_col_offset(horiz_offset + base_mapper.m_col_offset),
        m_base_mapper(base_mapper.m_base_mapper) {
    m_base_mapper.computeBaseIndices(m_col_offset, m_rowIndex, m_colIndex,
                                     m_otherIndex);
  }
  // Coefficient load at depth i within the anchored column.
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar operator()(Index i) const {
    return m_base_mapper.loadCoeff(i + m_depth_offset, m_rowIndex, m_colIndex,
                                   m_otherIndex);
  }
  // Coefficient load at (depth i, column j) relative to this sub mapper.
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar operator()(Index i,
                                                          Index j) const {
    return m_base_mapper(i + m_depth_offset, j + m_col_offset);
  }

  // Packet load at depth i within the anchored column.
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacket(Index i) const {
    return m_base_mapper.loadPacket(i + m_depth_offset, m_rowIndex, m_colIndex,
                                    m_otherIndex);
  }
  // Packet load at (depth i, column j) relative to this sub mapper.
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacket(Index i,
                                                          Index j) const {
    return m_base_mapper.template loadPacket<Alignment>(i + m_depth_offset,
                                                        j + m_col_offset);
  }
  // Scalar load assuming standard (no inflation / unit in-stride) patches.
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar
  loadCoeffStandard(Index i) const {
    return m_base_mapper.loadCoeffStandard(i + m_depth_offset, m_rowIndex,
                                           m_colIndex, m_otherIndex);
  }

  // Packet load assuming patch depth divisible by the packet size.
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacketFast(Index i) const {
    return m_base_mapper.loadPacketFast(i + m_depth_offset, m_rowIndex,
                                        m_colIndex, m_otherIndex);
  }
  // Packet load assuming standard patches (selects the masked or unmasked
  // overload based on the evaluator's partial-packet capability).
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet
  loadPacketStandard(Index i) const {
    typedef decltype(m_base_mapper.m_impl) TensorEvaluatorT;
    return m_base_mapper.template loadPacketStandard<Packet, TensorEvaluatorT>(
        i + m_depth_offset, m_rowIndex, m_colIndex, m_otherIndex);
  }
  // Alignment can never be guaranteed for patch loads.
  template <typename Packet>
  EIGEN_DEVICE_FUNC bool aligned(Index) const {
    return false;
  }

  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE bool nonStandardPatches() const {
    return m_base_mapper.nonStandardPatches();
  }
  // Max(Col|Row|Depth): compute the upper limit for the column, row and depth
  // index respectively that fits into the peeled_k elements starting at
  // m_depth_offset.

  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE Index maxCol(const Index peeled_k) const {
    // Index of the last element covered by peeled_k (peeled_k == 0 guards the
    // off-by-one), converted to a patch column, clamped to patchCols().
    const Index max_col =
        (m_depth_offset + (peeled_k == 0 ? 0 : peeled_k - 1)) /
        fastPatchColStride();
    return std::min<Index>(1 + max_col, patchCols());
  }

  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE Index maxRow(const Index peeled_k,
                                   const Index col) const {
    // Same as maxCol, but within the given column.
    const Index max_row = (m_depth_offset + (peeled_k == 0 ? 0 : peeled_k - 1) -
                           col * patchColStride()) /
                          fastPatchRowStride();
    return std::min<Index>(1 + max_row, patchRows());
  }

  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE Index maxDepth(const Index peeled_k, const Index col,
                                     Index row) const {
    // Remaining elements of peeled_k after subtracting the (col, row) base,
    // clamped to the patch depth.
    const Index max_depth = m_depth_offset + peeled_k -  //
                            col * patchColStride() -     //
                            row * patchRowStride();
    return std::min<Index>(max_depth, patchDepth());
  }

  // MaxDepth uses only the remaining number of elements in the peeled_k.
  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE Index maxDepth(const Index num_elements,
                                     const Index start_depth) const {
    return std::min<Index>(start_depth + num_elements, patchDepth());
  }
  // Every register matters in this code, so sometimes to prevent register
  // spilling, instead of the variable that you would expect to see, we use
  // another one, that is guaranteed to have the same value. E.g. patch depth is
  // always the same as input depth, and it's also the same as input row stride.
  // Bunch of other parameters have similar relations.

  typedef internal::TensorIntDivisor<Index> IndexDivisor;

  // Patch depth (== input depth == input row stride, see note above).
  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE Index patchDepth() const {
    return m_base_mapper.m_rowInputStride;
  }
  // Patch rows (== intra-patch column stride of the parent mapper).
  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE Index patchRows() const {
    return m_base_mapper.m_colStride;
  }
  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE Index patchCols() const {
    return m_base_mapper.m_patch_cols;
  }
  // Intra-patch row stride; asserted to equal the patch depth so the cached
  // depth value can be reused (register-pressure optimization, see above).
  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE Index patchRowStride() const {
    eigen_assert(patchDepth() == m_base_mapper.m_patch_row_stride &&
                 "Patch depth must be equal to patch row stride.");
    return patchDepth();
  }
  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE Index patchColStride() const {
    return m_base_mapper.m_patch_col_stride;
  }

  // Fast-division counterparts of the strides above.
  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE IndexDivisor fastPatchRowStride() const {
    eigen_assert(patchDepth() == m_base_mapper.m_patch_row_stride &&
                 "Patch depth must be equal to patch row stride.");
    return m_base_mapper.m_fastDimZero;  // patch_depth
  }
  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE IndexDivisor fastPatchColStride() const {
    return m_base_mapper.m_fastPatchColStride;
  }
+
856
+ EIGEN_DEVICE_FUNC
857
+ EIGEN_ALWAYS_INLINE Packet packetNoPadding(const Index depth,
858
+ const Index baseIndex) const {
859
+ const Index inputIndex = depth + baseIndex;
860
+ return m_base_mapper.m_impl.template packet<Unaligned>(inputIndex);
861
+ }
862
+ EIGEN_DEVICE_FUNC
863
+ EIGEN_ALWAYS_INLINE Scalar coeffNoPadding(const Index depth,
864
+ const Index baseIndex) const {
865
+ const Index inputIndex = depth + baseIndex;
866
+ return m_base_mapper.m_impl.coeff(inputIndex);
867
+ }
868
  // Load only the first `num_coeffs` lanes of a packet starting at
  // depth + baseIndex; the remaining lanes are masked out. Available only
  // when the underlying tensor evaluator supports partial packet loads
  // (checked via TensorEvaluatorHasPartialPacket).
  template <typename PacketT = Packet>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename std::enable_if<
      TensorEvaluatorHasPartialPacket<TensorEvaluatorT, PacketT, Index>::value,
      PacketT>::type
  partialPacketNoPadding(const Index depth, const Index baseIndex,
                         Index num_coeffs) const {
    const Index inputIndex = depth + baseIndex;
    return m_base_mapper.m_impl.template partialPacket<PacketT>(
        inputIndex, mask<PacketT>(0, num_coeffs));
  }
878
  // Returns true if any element of any patch may fall into a padded region
  // of the input. Conservatively returns true for non-standard patches.
  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE bool hasPadding() const {
    // TODO(ezhulenev): It does seem that for an inflated filter it's still
    // possible to guarantee "no padding or skipping" for non-standard packing.
    if (nonStandardPatches()) return true;

    // Non zero padding before.
    if (m_base_mapper.m_rowPaddingTop > 0) return true;
    if (m_base_mapper.m_colPaddingLeft > 0) return true;

    // Non zero padding after in rows: check if the last patch row placed at
    // the last output row reaches past the input.
    const Index last_row =
        (m_base_mapper.m_outputRows - 1) * m_base_mapper.m_row_strides;
    if (last_row + (patchRows() - 1) >= m_base_mapper.m_inputRows) return true;

    // Non zero padding after in cols: same check for the column dimension.
    const Index last_col =
        (m_base_mapper.m_outputCols - 1) * m_base_mapper.m_col_strides;
    if (last_col + (patchCols() - 1) >= m_base_mapper.m_inputCols) return true;

    return false;
  }
900
+ EIGEN_DEVICE_FUNC
901
+ EIGEN_ALWAYS_INLINE bool padRow(const Index row) const {
902
+ const Index r = m_rowIndex + row;
903
+ return r < 0 || r >= m_base_mapper.m_inputRows;
904
+ }
905
+ EIGEN_DEVICE_FUNC
906
+ EIGEN_ALWAYS_INLINE bool padAnyRow(const Index first_row,
907
+ const Index last_row) const {
908
+ return m_rowIndex + first_row < 0 ||
909
+ m_rowIndex + last_row >= m_base_mapper.m_inputRows;
910
+ }
911
  // Resolve patch row `row` to a row in the original (non-inflated) input.
  // On return *orig_row holds that row; the result is true if the position
  // must be zero-padded — either out of bounds, or falling between inflated
  // rows (input_row not a multiple of the inflate stride). Only used for
  // non-standard patches.
  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE bool padOrSkipRow(const Index row,
                                        Index* orig_row) const {
    eigen_assert(nonStandardPatches());

    const Index input_row = m_rowIndex + row * m_base_mapper.m_in_row_strides;
    // Fast path when there is no inflation; otherwise divide by the inflate
    // stride (negative rows are padded anyway, so 0 is a safe placeholder).
    *orig_row = (m_base_mapper.m_patch_row_inflate_strides == 1)
                    ? input_row
                    : ((input_row >= 0)
                           ? (input_row / m_base_mapper.m_fastInputRowStride)
                           : 0);

    return (*orig_row < 0 || *orig_row >= m_base_mapper.m_inputRows) ||
           (input_row != *orig_row * m_base_mapper.m_patch_row_inflate_strides);
  }
926
+ EIGEN_DEVICE_FUNC
927
+ EIGEN_ALWAYS_INLINE bool padCol(const Index col) const {
928
+ const Index c = m_colIndex + col;
929
+ return c < 0 || c >= m_base_mapper.m_inputCols;
930
+ }
931
  // Column analogue of padOrSkipRow: resolve patch column `col` to a column
  // in the original (non-inflated) input, returning true if the position
  // must be zero-padded. Only used for non-standard patches.
  EIGEN_DEVICE_FUNC
  EIGEN_ALWAYS_INLINE bool padOrSkipCol(const Index col,
                                        Index* orig_col) const {
    eigen_assert(nonStandardPatches());

    const Index input_col = m_colIndex + col * m_base_mapper.m_in_col_strides;
    // Fast path when there is no inflation; otherwise divide by the inflate
    // stride (negative cols are padded anyway, so 0 is a safe placeholder).
    *orig_col = (m_base_mapper.m_patch_col_inflate_strides == 1)
                    ? input_col
                    : ((input_col >= 0)
                           ? (input_col / m_base_mapper.m_fastInputColStride)
                           : 0);

    return (*orig_col < 0 || *orig_col >= m_base_mapper.m_inputCols) ||
           (input_col != *orig_col * m_base_mapper.m_patch_col_inflate_strides);
  }
946
+ EIGEN_DEVICE_FUNC
947
+ EIGEN_ALWAYS_INLINE Index baseIndex(const Index row, const Index col) const {
948
+ const Index r = m_rowIndex + row;
949
+ const Index c = m_colIndex + col;
950
+ return r * m_base_mapper.m_rowInputStride +
951
+ c * m_base_mapper.m_colInputStride + m_otherIndex;
952
+ }
953
+ // Compute a base index when original input row and column were precomputed
954
+ // using padOrSkipRow and padOrSkipCol. Used only for non standard patches.
955
+ EIGEN_DEVICE_FUNC
956
+ EIGEN_ALWAYS_INLINE Index origBaseIndex(const Index orig_row,
957
+ const Index orig_col) const {
958
+ return orig_row * m_base_mapper.m_rowInputStride +
959
+ orig_col * m_base_mapper.m_colInputStride + m_otherIndex;
960
+ }
961
+
962
+ EIGEN_DEVICE_FUNC
963
+ EIGEN_ALWAYS_INLINE Index rowStride() const {
964
+ return m_base_mapper.m_row_strides;
965
+ }
966
+ EIGEN_DEVICE_FUNC
967
+ EIGEN_ALWAYS_INLINE Index colStride() const {
968
+ return m_base_mapper.m_col_strides;
969
+ }
970
+
971
+ EIGEN_DEVICE_FUNC
972
+ EIGEN_ALWAYS_INLINE Index rowOffset() const {
973
+ const Index patchOffset = m_depth_offset / m_base_mapper.m_fastDimZero;
974
+ const Index colOffset = patchOffset / m_base_mapper.m_fastColStride;
975
+ return patchOffset - colOffset * m_base_mapper.m_colStride;
976
+ }
977
+
978
+ EIGEN_DEVICE_FUNC
979
+ EIGEN_ALWAYS_INLINE Index colOffset() const {
980
+ const Index patchOffset = m_depth_offset / m_base_mapper.m_fastDimZero;
981
+ const Index colOffset = patchOffset / m_base_mapper.m_fastColStride;
982
+ return colOffset;
983
+ }
984
+
985
+ EIGEN_DEVICE_FUNC
986
+ EIGEN_ALWAYS_INLINE Index depthOffset() const {
987
+ return m_depth_offset % patchDepth();
988
+ }
989
+
990
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE LinearMapper
991
+ getLinearMapper(Index i, Index j) const {
992
+ return LinearMapper(m_base_mapper, i + m_depth_offset, j + m_col_offset);
993
+ }
994
+
995
+ private:
996
+ Index m_depth_offset; // First row in the input matrix
997
+ Index m_col_offset; // First col in the input matrix
998
+
999
+ // Knowing that: col_offset == patchIndex * OTHERS, we keep precomputed base
1000
+ // indices for the first element in a patch specified by col_offset
1001
+ // (see computeBaseIndices(...) for details).
1002
+ Index m_rowIndex;
1003
+ Index m_colIndex;
1004
+ Index m_otherIndex;
1005
+
1006
+ const ParentMapper m_base_mapper; // Keeping a copy instead of a reference
1007
+ // performs better in benchmarks.
1008
+ };
1009
+
1010
// Arrange a block of the right input matrix (in our case it's always a
// "virtual matrix" constructed from extracted image patches) in contiguous
// memory.
//
// Given column major input (A0 beside A1 in memory):
// A0 B0 C0 D0 E0 F0 G0 H0 ... Z0
// A1 B1 C1 D1 E1 F1 G1 H1 ... Z1
// A2 B2 C2 D2 E2 F2 G2 H2 ... Z2
// A3 B3 C3 D3 E3 F3 G3 H3 ... Z3
// A4 B4 C4 D4 E4 F4 G4 H4 ... Z4
// A5 B5 C5 D5 E5 F5 G5 H5 ... Z5
// A6 B6 C6 D6 E6 F6 G6 H6 ... Z6
// A7 B7 C7 D7 E7 F7 G7 H7 ... Z7
// A8 ...
// ...
//
// *) A, B, C, ... - patches extracted from the original input.
// *) A0, A1, A2 ... - values from the same patch at different offsets.
//
// The traversal (packed rhs memory) order (B0 besides A0 in memory):
// A0 B0 C0 D0 A1 B1 C1 D1 ...
// E0 F0 G0 H0 E1 F1 G1 H1 ...
// ...
// Z0 Z1 Z2 Z3 Z4 Z5 Z6 Z7 ... <- doesn't belong to any block (nr = 4)
//
// This traversal order must be the same as in default gemm_pack_rhs defined in
// GeneralBlockPanelKernel.h.
//
// *) nr - number of registers along the 'n' dimension.
//    See GeneralBlockPanelKernel.h and "Anatomy of High-Performance Matrix
//    Multiplication" paper.
template <typename NewDimension, Index Rows, Index Cols, typename ArgType,
          typename Device, typename Scalar, typename Index,
          typename nocontract_t, typename contract_t, int packet_size,
          bool inner_dim_contiguous, bool inner_dim_reordered, int Alignment,
          int nr>
struct gemm_pack_rhs<
    Scalar, Index,
    TensorContractionSubMapper<
        Scalar, Index, Rhs,
        TensorEvaluator<
            const TensorReshapingOp<
                NewDimension, const TensorImagePatchOp<Rows, Cols, ArgType> >,
            Device>,
        nocontract_t, contract_t, packet_size, inner_dim_contiguous,
        inner_dim_reordered, Alignment>,
    nr, ColMajor, false, false> {
  typedef TensorContractionSubMapper<
      Scalar, Index, Rhs,
      TensorEvaluator<
          const TensorReshapingOp<
              NewDimension, const TensorImagePatchOp<Rows, Cols, ArgType> >,
          Device>,
      nocontract_t, contract_t, packet_size, inner_dim_contiguous,
      inner_dim_reordered, Alignment>
      SubMapper;
  typedef SubMapper DataMapper;
  typedef typename packet_traits<Scalar>::type Packet;

  EIGEN_STATIC_ASSERT((nr == 4), YOU_MADE_A_PROGRAMMING_MISTAKE)

  // Packs a `depth x cols` block of the image-patch matrix `rhs` into
  // `block`, interleaving groups of 4 columns (nr == 4), transposing
  // packet_size x 4 tiles with ptranspose so depth becomes contiguous.
  EIGEN_DEVICE_FUNC
  EIGEN_DONT_INLINE void operator()(Scalar* block, const DataMapper& rhs,
                                    Index depth, Index cols, Index stride = 0,
                                    Index offset = 0) const {
    eigen_assert(stride == 0);
    eigen_assert(offset == 0);

    // Number of columns that can be processed in groups of 4, and the number
    // of depth elements that can be processed in whole packets.
    const Index packet_cols4 = (cols / 4) * 4;
    const Index peeled_k = (depth / packet_size) * packet_size;
    const bool non_standard_patches = rhs.nonStandardPatches();

    for (Index j2 = 0; j2 < packet_cols4; j2 += 4) {
      const SubMapper dm0 = rhs.getLinearMapper(0, j2 + 0);
      const SubMapper dm1 = rhs.getLinearMapper(0, j2 + 1);
      const SubMapper dm2 = rhs.getLinearMapper(0, j2 + 2);
      const SubMapper dm3 = rhs.getLinearMapper(0, j2 + 3);

      Index k = 0;
      if ((packet_size % 4) == 0 && !non_standard_patches) {
        // FAST PATH:
        // Iterate over patch columns and rows, if we know that a single
        // packet does not span across multiple rows or columns.
        if ((rhs.patchDepth() % packet_size) == 0) {
          const Index start_col = rhs.colOffset();
          const Index max_col = rhs.maxCol(peeled_k);

          for (Index c = start_col; c < max_col; ++c) {
            eigen_assert(k <= peeled_k);

            const Index start_row = (c == start_col) ? rhs.rowOffset() : 0;
            const Index max_row = rhs.maxRow(peeled_k, c);

            const bool pad_col0 = dm0.padCol(c);
            const bool pad_col1 = dm1.padCol(c);
            const bool pad_col2 = dm2.padCol(c);
            const bool pad_col3 = dm3.padCol(c);

            // Check if we can squeeze reads along the `row` and `depth`
            // dimensions (two innermost dimensions).
            if (!pad_col0 && !pad_col1 && !pad_col2 && !pad_col3 &&    //
                !dm0.padRow(start_row) && !dm0.padRow(max_row - 1) &&  //
                !dm1.padRow(start_row) && !dm1.padRow(max_row - 1) &&  //
                !dm2.padRow(start_row) && !dm2.padRow(max_row - 1) &&  //
                !dm3.padRow(start_row) && !dm3.padRow(max_row - 1)) {
              // Compute how many elements we can squeeze read.
              const Index start_depth =
                  (c == start_col) ? rhs.depthOffset() : 0;

              // Upper bound for the number of elements in the depth dimension
              // that we can squeeze read.
              const Index squeeze_length =
                  (max_row - start_row) * rhs.patchDepth() - start_depth;

              // Do not overshoot beyond the block size.
              const Index max_depth =
                  start_depth + std::min<Index>(peeled_k - k, squeeze_length);
              eigen_assert((max_depth - start_depth) % packet_size == 0);

              const Index idx0 = dm0.baseIndex(start_row, c);
              const Index idx1 = dm1.baseIndex(start_row, c);
              const Index idx2 = dm2.baseIndex(start_row, c);
              const Index idx3 = dm3.baseIndex(start_row, c);

              for (Index d = start_depth; d < max_depth; d += packet_size) {
                eigen_assert(k < peeled_k);
                // Transpose a packet_size x 4 tile so that values from the
                // same depth of the 4 columns are adjacent in `block`.
                PacketBlock<Packet, 4> kernel;
                kernel.packet[0] = rhs.packetNoPadding(d, idx0);
                kernel.packet[1] = rhs.packetNoPadding(d, idx1);
                kernel.packet[2] = rhs.packetNoPadding(d, idx2);
                kernel.packet[3] = rhs.packetNoPadding(d, idx3);
                ptranspose(kernel);
                pstoreu(block + 0 * packet_size, kernel.packet[0]);
                pstoreu(block + 1 * packet_size, kernel.packet[1]);
                pstoreu(block + 2 * packet_size, kernel.packet[2]);
                pstoreu(block + 3 * packet_size, kernel.packet[3]);
                block += 4 * packet_size;
                k += packet_size;
              }

              // Go to the next column.
              continue;
            }

            // If we can't squeeze reads, process rows one by one.
            for (Index r = start_row; r < max_row; ++r) {
              eigen_assert(k <= peeled_k);

              const bool pad0 = pad_col0 || dm0.padRow(r);
              const bool pad1 = pad_col1 || dm1.padRow(r);
              const bool pad2 = pad_col2 || dm2.padRow(r);
              const bool pad3 = pad_col3 || dm3.padRow(r);

              const Index idx0 = dm0.baseIndex(r, c);
              const Index idx1 = dm1.baseIndex(r, c);
              const Index idx2 = dm2.baseIndex(r, c);
              const Index idx3 = dm3.baseIndex(r, c);

              const Index start_depth = ((c == start_col) && (r == start_row))
                                            ? rhs.depthOffset()
                                            : 0;
              const Index max_depth = rhs.maxDepth(peeled_k - k, start_depth);
              eigen_assert((max_depth - start_depth) % packet_size == 0);

              for (Index d = start_depth; d < max_depth; d += packet_size) {
                eigen_assert(k < peeled_k);
                // Padded positions contribute zeros to the packed block.
                PacketBlock<Packet, 4> kernel;
                kernel.packet[0] = pad0 ? pset1<Packet>(Scalar(0))
                                        : rhs.packetNoPadding(d, idx0);
                kernel.packet[1] = pad1 ? pset1<Packet>(Scalar(0))
                                        : rhs.packetNoPadding(d, idx1);
                kernel.packet[2] = pad2 ? pset1<Packet>(Scalar(0))
                                        : rhs.packetNoPadding(d, idx2);
                kernel.packet[3] = pad3 ? pset1<Packet>(Scalar(0))
                                        : rhs.packetNoPadding(d, idx3);
                ptranspose(kernel);
                pstoreu(block + 0 * packet_size, kernel.packet[0]);
                pstoreu(block + 1 * packet_size, kernel.packet[1]);
                pstoreu(block + 2 * packet_size, kernel.packet[2]);
                pstoreu(block + 3 * packet_size, kernel.packet[3]);
                block += 4 * packet_size;
                k += packet_size;
              }
            }
          }

          // The loop above should fill peeled_k elements.
          eigen_assert(peeled_k == k);

        } else {
          // Patch depth is not a multiple of the packet size: fall back to
          // per-packet loads that may cross row/column boundaries.
          for (; k < peeled_k; k += packet_size) {
            PacketBlock<Packet, 4> kernel;
            kernel.packet[0] = dm0.loadPacketStandard(k);
            kernel.packet[1] = dm1.loadPacketStandard(k);
            kernel.packet[2] = dm2.loadPacketStandard(k);
            kernel.packet[3] = dm3.loadPacketStandard(k);
            ptranspose(kernel);
            pstoreu(block + 0 * packet_size, kernel.packet[0]);
            pstoreu(block + 1 * packet_size, kernel.packet[1]);
            pstoreu(block + 2 * packet_size, kernel.packet[2]);
            pstoreu(block + 3 * packet_size, kernel.packet[3]);
            block += 4 * packet_size;
          }
        }
      }

      // Copy the remaining coefficients of the column block after the peeled_k.
      if (!rhs.nonStandardPatches()) {
        for (; k < depth; k++) {
          block[0] = dm0.loadCoeffStandard(k);
          block[1] = dm1.loadCoeffStandard(k);
          block[2] = dm2.loadCoeffStandard(k);
          block[3] = dm3.loadCoeffStandard(k);
          block += 4;
        }
      } else {
        for (; k < depth; k++) {
          block[0] = dm0(k);
          block[1] = dm1(k);
          block[2] = dm2(k);
          block[3] = dm3(k);
          block += 4;
        }
      }
    }

    // Copy the remaining columns one at a time (nr==1).
    for (Index j2 = packet_cols4; j2 < cols; ++j2) {
      const SubMapper dm0 = rhs.getLinearMapper(0, j2);
      for (Index k = 0; k < depth; k++) {
        *block = dm0(k);
        block += 1;
      }
    }
  }
};
1245
+
1246
// Template specialization for packet_size = 2. We must special-case packet
// blocks with nr > packet_size, e.g. PacketBlock<Packet2d, 4>: here two
// PacketBlock<Packet, 2> transposes replace the single 4-wide transpose of
// the generic specialization.
template <typename NewDimension, Index Rows, Index Cols, typename ArgType,
          typename Device, typename Scalar, typename Index,
          typename nocontract_t, typename contract_t, bool inner_dim_contiguous,
          bool inner_dim_reordered, int Alignment, int nr>
struct gemm_pack_rhs<
    Scalar, Index,
    TensorContractionSubMapper<
        Scalar, Index, Rhs,
        TensorEvaluator<
            const TensorReshapingOp<
                NewDimension, const TensorImagePatchOp<Rows, Cols, ArgType> >,
            Device>,
        nocontract_t, contract_t, 2, inner_dim_contiguous, inner_dim_reordered,
        Alignment>,
    nr, ColMajor, false, false> {
  typedef TensorContractionSubMapper<
      Scalar, Index, Rhs,
      TensorEvaluator<
          const TensorReshapingOp<
              NewDimension, const TensorImagePatchOp<Rows, Cols, ArgType> >,
          Device>,
      nocontract_t, contract_t, 2, inner_dim_contiguous, inner_dim_reordered,
      Alignment>
      SubMapper;
  typedef SubMapper DataMapper;
  typedef typename packet_traits<Scalar>::type Packet;

  EIGEN_STATIC_ASSERT((nr == 4), YOU_MADE_A_PROGRAMMING_MISTAKE)

  // Packs a `depth x cols` block of the image-patch matrix `rhs` into
  // `block`, 4 columns at a time, using pairs of 2-wide packet transposes.
  EIGEN_DEVICE_FUNC
  EIGEN_DONT_INLINE void operator()(Scalar* block, const DataMapper& rhs,
                                    Index depth, Index cols, Index stride = 0,
                                    Index offset = 0) const {
    eigen_assert(stride == 0);
    eigen_assert(offset == 0);

    const int packet_size = 2;
    const Index packet_cols4 = (cols / 4) * 4;
    const Index peeled_k = (depth / packet_size) * packet_size;
    const bool non_standard_patches = rhs.nonStandardPatches();

    for (Index j2 = 0; j2 < packet_cols4; j2 += 4) {
      const SubMapper dm0 = rhs.getLinearMapper(0, j2 + 0);
      const SubMapper dm1 = rhs.getLinearMapper(0, j2 + 1);
      const SubMapper dm2 = rhs.getLinearMapper(0, j2 + 2);
      const SubMapper dm3 = rhs.getLinearMapper(0, j2 + 3);

      Index k = 0;
      if (!non_standard_patches) {
        // FAST PATH:
        // Iterate over patch columns and rows if we know that a single
        // packet does not span across multiple rows or columns.
        if ((rhs.patchDepth() % packet_size) == 0) {
          const Index start_col = rhs.colOffset();
          const Index max_col = rhs.maxCol(peeled_k);

          for (Index c = start_col; c < max_col; ++c) {
            eigen_assert(k <= peeled_k);

            const Index start_row = (c == start_col) ? rhs.rowOffset() : 0;
            const Index max_row = rhs.maxRow(peeled_k, c);

            const bool pad_col0 = dm0.padCol(c);
            const bool pad_col1 = dm1.padCol(c);
            const bool pad_col2 = dm2.padCol(c);
            const bool pad_col3 = dm3.padCol(c);

            // We can squeeze reads along the `row` and `depth` dimensions if
            // the row stride is `1`, which means that `row` and `depth`
            // dimensions are contiguous (two innermost dimensions).
            if (rhs.rowStride() == 1 &&                                //
                !pad_col0 && !pad_col1 && !pad_col2 && !pad_col3 &&    //
                !dm0.padRow(start_row) && !dm0.padRow(max_row - 1) &&  //
                !dm1.padRow(start_row) && !dm1.padRow(max_row - 1) &&  //
                !dm2.padRow(start_row) && !dm2.padRow(max_row - 1) &&  //
                !dm3.padRow(start_row) && !dm3.padRow(max_row - 1)) {
              // Compute how many elements we can squeeze read.
              const Index start_depth =
                  (c == start_col) ? rhs.depthOffset() : 0;

              // Upper bound for the number of elements in the depth dimension
              // that we can squeeze read.
              const Index squeeze_length =
                  (max_row - start_row) * rhs.patchDepth() - start_depth;

              // Do not overshoot beyond the block size.
              const Index max_depth =
                  start_depth + std::min<Index>(peeled_k - k, squeeze_length);
              eigen_assert((max_depth - start_depth) % packet_size == 0);

              const Index idx0 = dm0.baseIndex(start_row, c);
              const Index idx1 = dm1.baseIndex(start_row, c);
              const Index idx2 = dm2.baseIndex(start_row, c);
              const Index idx3 = dm3.baseIndex(start_row, c);

              for (Index d = start_depth; d < max_depth; d += packet_size) {
                // Two 2x2 transposes emulate one packet_size x 4 transpose;
                // note the interleaved store order below.
                PacketBlock<Packet, 2> kernel0;
                PacketBlock<Packet, 2> kernel1;
                kernel0.packet[0] = rhs.packetNoPadding(d, idx0);
                kernel0.packet[1] = rhs.packetNoPadding(d, idx1);
                kernel1.packet[0] = rhs.packetNoPadding(d, idx2);
                kernel1.packet[1] = rhs.packetNoPadding(d, idx3);
                ptranspose(kernel0);
                ptranspose(kernel1);
                pstoreu(block + 0 * packet_size, kernel0.packet[0]);
                pstoreu(block + 1 * packet_size, kernel1.packet[0]);
                pstoreu(block + 2 * packet_size, kernel0.packet[1]);
                pstoreu(block + 3 * packet_size, kernel1.packet[1]);
                block += 4 * packet_size;
                k += packet_size;
              }

              // Go to the next column.
              continue;
            }

            // If we can't squeeze reads, process rows one by one.
            for (Index r = start_row; r < max_row; ++r) {
              eigen_assert(k <= peeled_k);

              const bool pad0 = pad_col0 || dm0.padRow(r);
              const bool pad1 = pad_col1 || dm1.padRow(r);
              const bool pad2 = pad_col2 || dm2.padRow(r);
              const bool pad3 = pad_col3 || dm3.padRow(r);

              const Index idx0 = dm0.baseIndex(r, c);
              const Index idx1 = dm1.baseIndex(r, c);
              const Index idx2 = dm2.baseIndex(r, c);
              const Index idx3 = dm3.baseIndex(r, c);

              const Index start_depth = ((c == start_col) && (r == start_row))
                                            ? rhs.depthOffset()
                                            : 0;
              const Index max_depth = rhs.maxDepth(peeled_k - k, start_depth);
              eigen_assert((max_depth - start_depth) % packet_size == 0);

              for (Index d = start_depth; d < max_depth; d += packet_size) {
                eigen_assert(k < peeled_k);
                // Padded positions contribute zeros to the packed block.
                PacketBlock<Packet, 2> kernel0;
                PacketBlock<Packet, 2> kernel1;
                kernel0.packet[0] = pad0 ? pset1<Packet>(Scalar(0))
                                         : rhs.packetNoPadding(d, idx0);
                kernel0.packet[1] = pad1 ? pset1<Packet>(Scalar(0))
                                         : rhs.packetNoPadding(d, idx1);
                kernel1.packet[0] = pad2 ? pset1<Packet>(Scalar(0))
                                         : rhs.packetNoPadding(d, idx2);
                kernel1.packet[1] = pad3 ? pset1<Packet>(Scalar(0))
                                         : rhs.packetNoPadding(d, idx3);
                ptranspose(kernel0);
                ptranspose(kernel1);
                pstoreu(block + 0 * packet_size, kernel0.packet[0]);
                pstoreu(block + 1 * packet_size, kernel1.packet[0]);
                pstoreu(block + 2 * packet_size, kernel0.packet[1]);
                pstoreu(block + 3 * packet_size, kernel1.packet[1]);
                block += 4 * packet_size;
                k += packet_size;
              }
            }
          }

          // The loop above should fill peeled_k elements.
          eigen_assert(peeled_k == k);

        } else {
          // Packet can span multiple rows or columns, so we have to go
          // though the slower "standard" path.
          for (; k < peeled_k; k += packet_size) {
            PacketBlock<Packet, 2> kernel0;
            PacketBlock<Packet, 2> kernel1;
            kernel0.packet[0] = dm0.loadPacketStandard(k);
            kernel0.packet[1] = dm1.loadPacketStandard(k);
            kernel1.packet[0] = dm2.loadPacketStandard(k);
            kernel1.packet[1] = dm3.loadPacketStandard(k);
            ptranspose(kernel0);
            ptranspose(kernel1);
            pstoreu(block + 0 * packet_size, kernel0.packet[0]);
            pstoreu(block + 1 * packet_size, kernel1.packet[0]);
            pstoreu(block + 2 * packet_size, kernel0.packet[1]);
            pstoreu(block + 3 * packet_size, kernel1.packet[1]);
            block += 4 * packet_size;
          }
        }
      }

      // Copy the remaining coefficients of the column block after the peeled_k.
      if (!non_standard_patches) {
        for (; k < depth; k++) {
          block[0] = dm0.loadCoeffStandard(k);
          block[1] = dm1.loadCoeffStandard(k);
          block[2] = dm2.loadCoeffStandard(k);
          block[3] = dm3.loadCoeffStandard(k);
          block += 4;
        }
      } else {
        for (; k < depth; k++) {
          block[0] = dm0(k);
          block[1] = dm1(k);
          block[2] = dm2(k);
          block[3] = dm3(k);
          block += 4;
        }
      }
    }

    // Copy the remaining columns one at a time (nr==1).
    for (Index j2 = packet_cols4; j2 < cols; ++j2) {
      const SubMapper dm0 = rhs.getLinearMapper(0, j2);
      for (Index k = 0; k < depth; k++) {
        *block = dm0(k);
        block += 1;
      }
    }
  }
};
1462
+
1463
// Special case for non-vectorized types such as float16 (packet_size == 1):
// no packet loads or transposes, only scalar copies.
template <typename NewDimension, Index Rows, Index Cols, typename ArgType,
          typename Device, typename Scalar, typename Index,
          typename nocontract_t, typename contract_t, bool inner_dim_contiguous,
          bool inner_dim_reordered, int Alignment, int nr>
struct gemm_pack_rhs<
    Scalar, Index,
    TensorContractionSubMapper<
        Scalar, Index, Rhs,
        TensorEvaluator<
            const TensorReshapingOp<
                NewDimension, const TensorImagePatchOp<Rows, Cols, ArgType> >,
            Device>,
        nocontract_t, contract_t, 1, inner_dim_contiguous, inner_dim_reordered,
        Alignment>,
    nr, ColMajor, false, false> {
  typedef TensorContractionSubMapper<
      Scalar, Index, Rhs,
      TensorEvaluator<
          const TensorReshapingOp<
              NewDimension, const TensorImagePatchOp<Rows, Cols, ArgType> >,
          Device>,
      nocontract_t, contract_t, 1, inner_dim_contiguous, inner_dim_reordered,
      Alignment>
      SubMapper;
  typedef SubMapper DataMapper;

  EIGEN_STATIC_ASSERT((nr == 4), YOU_MADE_A_PROGRAMMING_MISTAKE)

  // Packs a `depth x cols` block of the image-patch matrix `rhs` into
  // `block`, interleaving 4 columns at a time with scalar loads only.
  EIGEN_DEVICE_FUNC
  EIGEN_DONT_INLINE void operator()(Scalar* block, const DataMapper& rhs,
                                    Index depth, Index cols, Index stride = 0,
                                    Index offset = 0) const {
    eigen_assert(stride == 0);
    eigen_assert(offset == 0);

    const Index packet_cols4 = (cols / 4) * 4;

    for (Index j2 = 0; j2 < packet_cols4; j2 += 4) {
      const SubMapper dm0 = rhs.getLinearMapper(0, j2 + 0);
      const SubMapper dm1 = rhs.getLinearMapper(0, j2 + 1);
      const SubMapper dm2 = rhs.getLinearMapper(0, j2 + 2);
      const SubMapper dm3 = rhs.getLinearMapper(0, j2 + 3);

      if (!rhs.nonStandardPatches()) {
        for (Index k = 0; k < depth; k++) {
          block[0] = dm0.loadCoeffStandard(k);
          block[1] = dm1.loadCoeffStandard(k);
          block[2] = dm2.loadCoeffStandard(k);
          block[3] = dm3.loadCoeffStandard(k);
          block += 4;
        }
      } else {
        for (Index k = 0; k < depth; k++) {
          block[0] = dm0(k);
          block[1] = dm1(k);
          block[2] = dm2(k);
          block[3] = dm3(k);
          block += 4;
        }
      }
    }

    // Copy the remaining columns one at a time (nr==1).
    for (Index j2 = packet_cols4; j2 < cols; ++j2) {
      const SubMapper dm0 = rhs.getLinearMapper(0, j2);
      for (Index k = 0; k < depth; k++) {
        *block = dm0(k);
        block += 1;
      }
    }
  }
};
1536
+ #endif
1537
+ } // end namespace internal
1538
+
1539
+ /** SpatialConvolution
1540
+ * \ingroup CXX11_NeuralNetworks_Module
1541
+ *
1542
+ * \brief Applies a 2D convolution over a multichannel input image.
1543
+ *
1544
+ * The input parameter is expected to be a tensor with a rank of 3 or more
1545
+ * (channels, height, width, and optionally others)
1546
+ * The kernel parameter is expected to be a 4D tensor (filters, channels,
1547
+ * kernel_height, kernel_width)
1548
+ * The input and the kernel must both be in col-major layout. The result will
1549
+ * also be in col-major layout.
1550
+ *
1551
+ * If col_in_stride, row_in_stride > 1, then applies convolution with holes
1552
+ * (aka atrous convolution), sampling every col_in_stride, row_in_stride input
1553
+ * pixels.
1554
+ *
1555
+ * If padding_top, padding_bottom, padding_left, or padding_right is specified,
1556
+ * then those paddings will be used to pad the input, and padding_type must be
1557
+ * PADDING_VALID.
1558
+ *
1559
+ * The result can be assigned to a tensor of rank equal to the rank of the
1560
+ * input. The dimensions of the result will be filters, height, width (and
1561
+ * others if applicable).
1562
+ *
1563
+ * It is possible to swap the order of the width and height dimensions provided
1564
+ * that the same order is used in the input, the kernel, and the output.
1565
+ *
1566
+ * It is also possible to add an output kernel to the contraction, output
1567
+ * kernel is called by Eigen when it "finalizes" the block of an output tensor.
1568
+ *
1569
+ */
1570
+ template <typename Input, typename Kernel,
1571
+ typename OutputKernel = const NoOpOutputKernel>
1572
+ EIGEN_ALWAYS_INLINE static const std::conditional_t<
1573
+ internal::traits<Input>::Layout == ColMajor,
1574
+ TensorReshapingOp<
1575
+ const DSizes<typename internal::traits<Input>::Index,
1576
+ internal::traits<Input>::NumDimensions>,
1577
+ const TensorContractionOp<
1578
+ const array<IndexPair<typename internal::traits<Input>::Index>, 1>,
1579
+ const TensorReshapingOp<
1580
+ const DSizes<typename internal::traits<Input>::Index, 2>,
1581
+ const Kernel>,
1582
+ const TensorReshapingOp<
1583
+ const DSizes<typename internal::traits<Input>::Index, 2>,
1584
+ const TensorImagePatchOp<Dynamic, Dynamic, const Input> >,
1585
+ const OutputKernel> >,
1586
+ TensorReshapingOp<
1587
+ const DSizes<typename internal::traits<Input>::Index,
1588
+ internal::traits<Input>::NumDimensions>,
1589
+ const TensorContractionOp<
1590
+ const array<IndexPair<typename internal::traits<Input>::Index>, 1>,
1591
+ const TensorReshapingOp<
1592
+ const DSizes<typename internal::traits<Input>::Index, 2>,
1593
+ const TensorImagePatchOp<Dynamic, Dynamic, const Input> >,
1594
+ const TensorReshapingOp<
1595
+ const DSizes<typename internal::traits<Input>::Index, 2>,
1596
+ const Kernel>,
1597
+ const OutputKernel> > >
1598
+ SpatialConvolution(const Input& input, const Kernel& kernel,
1599
+ const Index row_stride = 1, const Index col_stride = 1,
1600
+ const PaddingType padding_type = PADDING_SAME,
1601
+ const Index row_in_stride = 1, const Index col_in_stride = 1,
1602
+ const OutputKernel& output_kernel = OutputKernel(),
1603
+ Index padding_top = 0, Index padding_bottom = 0,
1604
+ Index padding_left = 0, Index padding_right = 0) {
1605
+ typedef typename internal::traits<Input>::Index TensorIndex;
1606
+ typedef typename internal::traits<Input>::Scalar InputScalar;
1607
+ TensorRef<Tensor<InputScalar, internal::traits<Input>::NumDimensions,
1608
+ internal::traits<Input>::Layout, TensorIndex> >
1609
+ in(input);
1610
+ TensorRef<Tensor<typename internal::traits<Kernel>::Scalar,
1611
+ internal::traits<Kernel>::NumDimensions,
1612
+ internal::traits<Kernel>::Layout, TensorIndex> >
1613
+ kern(kernel);
1614
+
1615
+ EIGEN_STATIC_ASSERT(
1616
+ internal::traits<Input>::Layout == internal::traits<Kernel>::Layout,
1617
+ YOU_MADE_A_PROGRAMMING_MISTAKE)
1618
+ const bool isColMajor = (internal::traits<Input>::Layout == ColMajor);
1619
+
1620
+ const int NumDims = internal::traits<Input>::NumDimensions;
1621
+
1622
+ // Number of filters to apply. This is the same as the output depth of the
1623
+ // result
1624
+ const TensorIndex kernelFilters =
1625
+ isColMajor ? kern.dimensions()[0] : kern.dimensions()[3];
1626
+ // Number of channels. This is the same as the input depth.
1627
+ const TensorIndex kernelChannels =
1628
+ isColMajor ? kern.dimensions()[1] : kern.dimensions()[2];
1629
+ const TensorIndex kernelRows =
1630
+ isColMajor ? kern.dimensions()[2] : kern.dimensions()[1];
1631
+ const TensorIndex kernelCols =
1632
+ isColMajor ? kern.dimensions()[3] : kern.dimensions()[0];
1633
+
1634
+ const Index kernelRowsEff =
1635
+ kernelRows + (kernelRows - 1) * (row_in_stride - 1);
1636
+ const Index kernelColsEff =
1637
+ kernelCols + (kernelCols - 1) * (col_in_stride - 1);
1638
+
1639
+ array<IndexPair<TensorIndex>, 1> contract_dims;
1640
+ contract_dims[0] = IndexPair<TensorIndex>(1, 0);
1641
+
1642
+ const TensorIndex InputRows =
1643
+ isColMajor ? in.dimension(1) : in.dimension(NumDims - 2);
1644
+ const TensorIndex InputCols =
1645
+ isColMajor ? in.dimension(2) : in.dimension(NumDims - 3);
1646
+ const bool padding_explicit =
1647
+ (padding_top || padding_bottom || padding_left || padding_right);
1648
+
1649
+ TensorIndex out_height;
1650
+ TensorIndex out_width;
1651
+ switch (padding_type) {
1652
+ case PADDING_VALID: {
1653
+ const TensorIndex InputRowsEff = InputRows + padding_top + padding_bottom;
1654
+ const TensorIndex InputColsEff = InputCols + padding_left + padding_right;
1655
+ out_height = divup(InputRowsEff - kernelRowsEff + 1, row_stride);
1656
+ out_width = divup(InputColsEff - kernelColsEff + 1, col_stride);
1657
+ break;
1658
+ }
1659
+ case PADDING_SAME: {
1660
+ eigen_assert(!padding_explicit);
1661
+ out_height = divup(InputRows, row_stride);
1662
+ out_width = divup(InputCols, col_stride);
1663
+ break;
1664
+ }
1665
+ default: {
1666
+ // Initialize unused variables to avoid a compiler warning
1667
+ out_height = 0;
1668
+ out_width = 0;
1669
+ eigen_assert(false && "unexpected padding");
1670
+ }
1671
+ }
1672
+
1673
+ // Molds the output of the patch extraction code into a 2d tensor:
1674
+ // - the first dimension (dims[0]): the patch values to be multiplied with the
1675
+ // kernels
1676
+ // - the second dimension (dims[1]): everything else
1677
+ DSizes<TensorIndex, 2> pre_contract_dims;
1678
+ if (isColMajor) {
1679
+ pre_contract_dims[0] = kernelChannels * kernelRows * kernelCols;
1680
+ pre_contract_dims[1] = out_height * out_width;
1681
+ for (int i = 3; i < NumDims; ++i) {
1682
+ pre_contract_dims[1] *= in.dimension(i);
1683
+ }
1684
+ } else {
1685
+ pre_contract_dims[1] = kernelChannels * kernelRows * kernelCols;
1686
+ pre_contract_dims[0] = out_height * out_width;
1687
+ for (int i = 0; i < NumDims - 3; ++i) {
1688
+ pre_contract_dims[0] *= in.dimension(i);
1689
+ }
1690
+ }
1691
+
1692
+ // Molds the output of the contraction into the shape expected by the used
1693
+ // (assuming this is ColMajor):
1694
+ // - 1st dim: kernel filters
1695
+ // - 2nd dim: output height
1696
+ // - 3rd dim: output width
1697
+ // - 4th dim and beyond: everything else including batch size
1698
+ DSizes<TensorIndex, NumDims> post_contract_dims;
1699
+ if (isColMajor) {
1700
+ post_contract_dims[0] = kernelFilters;
1701
+ post_contract_dims[1] = out_height;
1702
+ post_contract_dims[2] = out_width;
1703
+ for (int i = 3; i < NumDims; ++i) {
1704
+ post_contract_dims[i] = in.dimension(i);
1705
+ }
1706
+ } else {
1707
+ post_contract_dims[NumDims - 1] = kernelFilters;
1708
+ post_contract_dims[NumDims - 2] = out_height;
1709
+ post_contract_dims[NumDims - 3] = out_width;
1710
+ for (int i = 0; i < NumDims - 3; ++i) {
1711
+ post_contract_dims[i] = in.dimension(i);
1712
+ }
1713
+ }
1714
+
1715
+ DSizes<TensorIndex, 2> kernel_dims;
1716
+ if (isColMajor) {
1717
+ kernel_dims[0] = kernelFilters;
1718
+ kernel_dims[1] = kernelChannels * kernelRows * kernelCols;
1719
+ } else {
1720
+ kernel_dims[0] = kernelChannels * kernelRows * kernelCols;
1721
+ kernel_dims[1] = kernelFilters;
1722
+ }
1723
+ if (padding_explicit) {
1724
+ return choose(
1725
+ Cond<internal::traits<Input>::Layout == ColMajor>(),
1726
+ kernel.reshape(kernel_dims)
1727
+ .contract(input
1728
+ .extract_image_patches(
1729
+ kernelRows, kernelCols, row_stride, col_stride,
1730
+ row_in_stride, col_in_stride,
1731
+ /*row_inflate_stride=*/1,
1732
+ /*col_inflate_stride=*/1, padding_top,
1733
+ padding_bottom, padding_left, padding_right,
1734
+ /*padding_value=*/static_cast<InputScalar>(0))
1735
+ .reshape(pre_contract_dims),
1736
+ contract_dims, output_kernel)
1737
+ .reshape(post_contract_dims),
1738
+ input
1739
+ .extract_image_patches(
1740
+ kernelRows, kernelCols, row_stride, col_stride, row_in_stride,
1741
+ col_in_stride,
1742
+ /*row_inflate_stride=*/1,
1743
+ /*col_inflate_stride=*/1, padding_top, padding_bottom,
1744
+ padding_left, padding_right,
1745
+ /*padding_value=*/static_cast<InputScalar>(0))
1746
+ .reshape(pre_contract_dims)
1747
+ .contract(kernel.reshape(kernel_dims), contract_dims, output_kernel)
1748
+ .reshape(post_contract_dims));
1749
+ } else {
1750
+ return choose(
1751
+ Cond<internal::traits<Input>::Layout == ColMajor>(),
1752
+ kernel.reshape(kernel_dims)
1753
+ .contract(input
1754
+ .extract_image_patches(
1755
+ kernelRows, kernelCols, row_stride, col_stride,
1756
+ row_in_stride, col_in_stride, padding_type)
1757
+ .reshape(pre_contract_dims),
1758
+ contract_dims, output_kernel)
1759
+ .reshape(post_contract_dims),
1760
+ input
1761
+ .extract_image_patches(kernelRows, kernelCols, row_stride,
1762
+ col_stride, row_in_stride, col_in_stride,
1763
+ padding_type)
1764
+ .reshape(pre_contract_dims)
1765
+ .contract(kernel.reshape(kernel_dims), contract_dims, output_kernel)
1766
+ .reshape(post_contract_dims));
1767
+ }
1768
+ }
1769
+
1770
+ } // end namespace Eigen
1771
+
1772
+ #endif // TENSORFLOW_TSL_FRAMEWORK_CONVOLUTION_EIGEN_SPATIAL_CONVOLUTIONS_INL_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/convolution/eigen_spatial_convolutions.h ADDED
@@ -0,0 +1,445 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_CONVOLUTION_EIGEN_SPATIAL_CONVOLUTIONS_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_CONVOLUTION_EIGEN_SPATIAL_CONVOLUTIONS_H_
18
+
19
+ #include "unsupported/Eigen/CXX11/Tensor" // from @eigen_archive
20
+
21
+ // Note the following header is used in both TF and TFLite. Particularly, it's
22
+ // used for float TFLite Conv2D.
23
+ #include "tsl/framework/convolution/eigen_spatial_convolutions-inl.h"
24
+
25
+ #if defined(TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL)
26
+ #include "tsl/framework/contraction/eigen_contraction_kernel.h"
27
+
28
+ namespace Eigen {
29
+ namespace internal {
30
+
31
+ // After we vectorized all loads from the underlying tensor using Packet ops, we
32
+ // have to finalize coefficients that do not fit into a packet.
33
+ template <typename Scalar, typename DataMapper, int packet_size,
34
+ bool masked_load_store>
35
+ struct FinalizeDataMapperCoeffs {
36
+ EIGEN_ALWAYS_INLINE static Index finalize(Scalar* block,
37
+ const DataMapper& rhs,
38
+ Index base_idx, Index depth,
39
+ Index max_depth, bool pad = false) {
40
+ const Index num_coeffs = max_depth - depth;
41
+ eigen_assert(num_coeffs <= packet_size);
42
+
43
+ for (; depth < max_depth; ++depth) {
44
+ *block = pad ? Scalar(0) : rhs.coeffNoPadding(depth, base_idx);
45
+ ++block;
46
+ }
47
+
48
+ return num_coeffs;
49
+ }
50
+ };
51
+
52
// Specialization used when the tensor evaluator supports partial packet loads
// AND the packet type supports masked stores: the sub-packet tail is handled
// with a single masked load/store pair instead of a scalar loop.
template <typename Scalar, typename DataMapper, int packet_size>
struct FinalizeDataMapperCoeffs<Scalar, DataMapper, packet_size,
                                /*masked_load_store=*/true> {
  // Writes the trailing [depth, max_depth) coefficients into `block` with one
  // masked packet store; returns the number of coefficients written.
  // `pad == true` stores zeros instead of reading from `rhs`.
  EIGEN_ALWAYS_INLINE static Index finalize(Scalar* block,
                                            const DataMapper& rhs,
                                            Index base_idx, Index depth,
                                            Index max_depth, bool pad = false) {
    Index num_coeffs = max_depth - depth;
    eigen_assert(num_coeffs <= packet_size);
    // Nothing to finalize: avoid issuing a masked store with an empty mask.
    if (num_coeffs == 0) return 0;

    using Packet = typename packet_traits<Scalar>::type;
    // Load only `num_coeffs` lanes (or broadcast zero when padding) ...
    Packet p = pad ? pset1<Packet>(Scalar(0))
                   : rhs.partialPacketNoPadding(depth, base_idx, num_coeffs);
    // ... and store exactly those lanes; lanes >= num_coeffs are masked off so
    // bytes past the end of the block are never touched.
    internal::pstoreu(block, p, mask<Packet>(0, num_coeffs));

    return num_coeffs;
  }
};
71
+
72
// Pack a block of the right input matrix (in our case it's always a
// "virtual matrix" constructed from extracted image patches) in contiguous
// block in column-major storage order. Knowing the properties of the
// original patch op we can do it more efficient than the default
// gemm_pack_colmajor_block.
//
// The packed block is consumed by the custom contraction (GEMM) kernel; each
// output column of the block corresponds to one column of the reshaped
// image-patch "matrix", and rows enumerate (depth, patch_row, patch_col).
template <typename NewDimension, Index Rows, Index Cols, typename ArgType,
          typename Device, typename Scalar, typename StorageIndex,
          typename nocontract_t, typename contract_t, int packet_size,
          bool inner_dim_contiguous, bool inner_dim_reordered, int Alignment>
struct gemm_pack_colmajor_block<
    Scalar, StorageIndex,
    TensorContractionSubMapper<
        Scalar, StorageIndex, Rhs,
        TensorEvaluator<
            const TensorReshapingOp<
                NewDimension, const TensorImagePatchOp<Rows, Cols, ArgType> >,
            Device>,
        nocontract_t, contract_t, packet_size, inner_dim_contiguous,
        inner_dim_reordered, Alignment>,
    ColMajor> {
  typedef TensorContractionSubMapper<
      Scalar, StorageIndex, Rhs,
      TensorEvaluator<
          const TensorReshapingOp<
              NewDimension, const TensorImagePatchOp<Rows, Cols, ArgType> >,
          Device>,
      nocontract_t, contract_t, packet_size, inner_dim_contiguous,
      inner_dim_reordered, Alignment>
      SubMapper;

  typedef SubMapper DataMapper;
  typedef typename packet_traits<Scalar>::type Packet;

  // Chooses the masked-store tail finalizer only when both the evaluator
  // supports partial packet loads and the packet type supports masked stores.
  using CoeffFinalizer = FinalizeDataMapperCoeffs<
      Scalar, DataMapper, packet_size,
      TensorEvaluatorHasPartialPacket<typename DataMapper::TensorEvaluatorT,
                                      Packet, Index>::value &&
          unpacket_traits<Packet>::masked_store_available>;

  // Packs a `rows` x `cols` block starting at the submapper origin into
  // `block`, dispatching to the specialization matching the patch properties.
  // NOTE(review): `rows`/`cols` are assumed to be the block shape chosen by
  // TensorContractionBlocking — confirm against the contraction caller.
  EIGEN_DONT_INLINE
  void operator()(Scalar* block, const DataMapper& rhs, StorageIndex rows,
                  StorageIndex cols) {
    const bool standard_patches = !rhs.nonStandardPatches();

    if (standard_patches && (rhs.patchDepth() % packet_size == 0)) {
      // Single packet always belongs to a single patch (row, col).
      if (rhs.hasPadding()) {
        packStandardPatches</*patch_depth_is_multiple_of_packet_size=*/true,
                            /*has_padding=*/true>(block, rhs, rows, cols);
      } else {
        packStandardPatches</*patch_depth_is_multiple_of_packet_size=*/true,
                            /*has_padding=*/false>(block, rhs, rows, cols);
      }

    } else if (standard_patches) {
      // Single packet can span across multiple patch rows or columns.
      if (rhs.hasPadding()) {
        packStandardPatches</*patch_depth_is_multiple_of_packet_size=*/false,
                            /*has_padding=*/true>(block, rhs, rows, cols);
      } else {
        packStandardPatches</*patch_depth_is_multiple_of_packet_size=*/false,
                            /*has_padding=*/false>(block, rhs, rows, cols);
      }

    } else if (rhs.patchDepth() % packet_size == 0) {
      // Single packet always belongs to a single patch (row, col).
      packNonStandardPatches</*patch_depth_is_multiple_of_packet_size*/
                             true>(block, rhs, rows, cols);

    } else {
      // Single packet can span across multiple patch rows or columns.
      packNonStandardPatches</*patch_depth_is_multiple_of_packet_size*/
                             false>(block, rhs, rows, cols);
    }
  }

 private:
  // (A) Standard image patches:
  //
  //  (1) patch_row_inflate_strides == 1    AND
  //  (2) patch_col_inflate_strides == 1
  //
  // Standard patches guarantee that two inner most dimensions (depth and rows)
  // are contiguous in memory and we can try to squeeze reads from them.
  //
  // (B) Non standard image patches: in_row/in_col and patch_row/patch_col
  // strides can be not equal to 1, and for each [row, col] inside a patch we
  // have to do additional computations to find corresponding row and col in the
  // input tensor. Also we can no longer squeeze reads from inner dimensions.
  //
  // Additional parameters:
  // - patch_depth_is_multiple_of_packet_size=true: We are guaranteed to have
  //   depth dimension size to be a multiple of packet size, so we can skip all
  //   non vectorized loads and checks, because it's guaranteed that block size
  //   will be a multiple of a packet size (see TensorContractionBlocking).
  //
  // - has_padding: Input tensor has non-zero padding. In this case for each
  //   patch col and row we need to check that it doesn't correspond to the
  //   padded region of original input.
  template <bool patch_depth_is_multiple_of_packet_size, bool has_padding>
  EIGEN_ALWAYS_INLINE void packStandardPatches(Scalar* __restrict block,
                                               const DataMapper& rhs,
                                               StorageIndex rows,
                                               StorageIndex cols) {
    eigen_assert(!rhs.nonStandardPatches());

    // Give vectorized_rows the name used in all other gemm_pack_rhs above.
    // peeled_k rows are filled with packet stores; the tail uses scalar loads.
    const StorageIndex peeled_k = (rows / packet_size) * packet_size;

    const StorageIndex start_col = rhs.colOffset();
    const StorageIndex max_col = rhs.maxCol(peeled_k);
    const StorageIndex rhs_depth_offset = rhs.depthOffset();

    for (StorageIndex col = 0; col < cols; ++col) {
      SubMapper lm = rhs.getLinearMapper(0, col);

      // `k` tracks how many coefficients of this output column are packed.
      StorageIndex k = 0;
      for (Index c = start_col; c < max_col; ++c) {
        eigen_assert(k <= peeled_k);

        const StorageIndex start_row = (c == start_col) ? rhs.rowOffset() : 0;
        const StorageIndex max_row = rhs.maxRow(peeled_k, c);
        const bool pad_col = has_padding && lm.padCol(c);

        eigen_assert(has_padding || !lm.padCol(c));
        eigen_assert(has_padding || !lm.padAnyRow(start_row, max_row - 1));

        // We can squeeze reads for all rows in [start_row, max_row) range.
        if (!has_padding ||
            (!pad_col && !lm.padAnyRow(start_row, max_row - 1))) {
          const StorageIndex start_depth =
              (c == start_col) ? rhs_depth_offset : 0;

          // Depth range is capped both by the block budget (peeled_k - k) and
          // by the number of contiguous coefficients in the squeezed rows.
          const StorageIndex max_depth =
              std::min<StorageIndex>(start_depth + (peeled_k - k),
                                     (max_row - start_row) * rhs.patchDepth());

          const StorageIndex base_idx = lm.baseIndex(start_row, c);

          if (patch_depth_is_multiple_of_packet_size) {
            // If patch depth is a multiple of packet size, it's guaranteed that
            // we can process all values in depth dimension with packets.
            eigen_assert((max_depth - start_depth) % packet_size == 0);
            StorageIndex d = start_depth;

            // 4x unrolled packet copy for the bulk of the depth range.
            const StorageIndex unrolled_depth = max_depth - 4 * packet_size;
            for (; d <= unrolled_depth; d += 4 * packet_size) {
              eigen_assert(k < peeled_k);

              Packet p0 = rhs.packetNoPadding(d + 0 * packet_size, base_idx);
              Packet p1 = rhs.packetNoPadding(d + 1 * packet_size, base_idx);
              Packet p2 = rhs.packetNoPadding(d + 2 * packet_size, base_idx);
              Packet p3 = rhs.packetNoPadding(d + 3 * packet_size, base_idx);

              internal::pstoreu(block + 0 * packet_size, p0);
              internal::pstoreu(block + 1 * packet_size, p1);
              internal::pstoreu(block + 2 * packet_size, p2);
              internal::pstoreu(block + 3 * packet_size, p3);

              block += 4 * packet_size;
              k += 4 * packet_size;
            }

            // Remaining full packets.
            for (; d < max_depth; d += packet_size) {
              eigen_assert(k < peeled_k);
              internal::pstoreu(block, rhs.packetNoPadding(d, base_idx));
              block += packet_size;
              k += packet_size;
            }

          } else {
            StorageIndex d = start_depth;

            // 4x unrolled packet copy for the bulk of the depth range.
            const StorageIndex unrolled_depth = max_depth - 4 * packet_size;
            for (; d <= unrolled_depth; d += 4 * packet_size) {
              eigen_assert(k < peeled_k);

              Packet p0 = rhs.packetNoPadding(d + 0 * packet_size, base_idx);
              Packet p1 = rhs.packetNoPadding(d + 1 * packet_size, base_idx);
              Packet p2 = rhs.packetNoPadding(d + 2 * packet_size, base_idx);
              Packet p3 = rhs.packetNoPadding(d + 3 * packet_size, base_idx);

              internal::pstoreu(block + 0 * packet_size, p0);
              internal::pstoreu(block + 1 * packet_size, p1);
              internal::pstoreu(block + 2 * packet_size, p2);
              internal::pstoreu(block + 3 * packet_size, p3);

              block += 4 * packet_size;
              k += 4 * packet_size;
            }

            // Remaining full packets.
            const StorageIndex vectorized_depth = max_depth - packet_size;
            for (; d <= vectorized_depth; d += packet_size) {
              eigen_assert(k < peeled_k);
              internal::pstoreu(block, rhs.packetNoPadding(d, base_idx));
              block += packet_size;
              k += packet_size;
            }

            // Sub-packet tail is handled by the (possibly masked) finalizer.
            eigen_assert(k <= peeled_k);
            const Index num_coeffs =
                CoeffFinalizer::finalize(block, rhs, base_idx, d, max_depth);

            k += num_coeffs;
            block += num_coeffs;
            eigen_assert(k <= peeled_k);
          }

          // Go to the next column.
          continue;
        }

        // If we are not allowed to squeeze reads along the `row` and `depth`
        // dimensions, we must process rows one by one.
        for (StorageIndex r = start_row; r < max_row; ++r) {
          eigen_assert(k <= peeled_k);

          const StorageIndex start_depth =
              ((c == start_col) && (r == start_row)) ? rhs_depth_offset : 0;
          const StorageIndex max_depth =
              rhs.maxDepth(peeled_k - k, start_depth);

          // A padded row/col emits zeros instead of reading the input.
          const bool pad = has_padding && (pad_col || lm.padRow(r));
          eigen_assert(has_padding || !lm.padRow(r));

          const StorageIndex base_idx = lm.baseIndex(r, c);

          if (patch_depth_is_multiple_of_packet_size) {
            // If patch depth is a multiple of packet size, it's guaranteed that
            // we can process all values in depth dimension with packets.
            eigen_assert((max_depth - start_depth) % packet_size == 0);
            StorageIndex d = start_depth;

            for (; d < max_depth; d += packet_size) {
              eigen_assert(k < peeled_k);
              const Packet p = (has_padding && pad)
                                   ? pset1<Packet>(Scalar(0))
                                   : rhs.packetNoPadding(d, base_idx);
              internal::pstoreu(block, p);
              block += packet_size;
              k += packet_size;
            }

          } else {
            StorageIndex d = start_depth;

            const StorageIndex vectorized_depth = max_depth - packet_size;
            for (; d <= vectorized_depth; d += packet_size) {
              eigen_assert(k < peeled_k);
              const Packet p = (has_padding && pad)
                                   ? pset1<Packet>(Scalar(0))
                                   : rhs.packetNoPadding(d, base_idx);
              internal::pstoreu(block, p);
              block += packet_size;
              k += packet_size;
            }

            // Sub-packet tail (zero-filled when the position is padded).
            eigen_assert(k <= peeled_k);
            const Index num_coeffs = CoeffFinalizer::finalize(
                block, rhs, base_idx, d, max_depth, has_padding && pad);

            k += num_coeffs;
            block += num_coeffs;
            eigen_assert(k <= peeled_k);
          }
        }
      }

      // The loop above should fill peeled_k elements.
      eigen_assert(peeled_k == k);

      // Fill remaining elements using loadCoeffStandard.
      for (; k < rows; ++k) {
        *block = lm.loadCoeffStandard(k);
        ++block;
      }
    }
  }

  // Slow path for patches with non-unit inflate strides: every (row, col)
  // inside a patch needs an explicit mapping back to the original input
  // coordinates, and inner-dimension reads cannot be squeezed together.
  template <bool patch_depth_is_multiple_of_packet_size>
  EIGEN_ALWAYS_INLINE void packNonStandardPatches(Scalar* __restrict block,
                                                  const DataMapper& rhs,
                                                  StorageIndex rows,
                                                  StorageIndex cols) {
    eigen_assert(rhs.nonStandardPatches());

    // Give vectorized_rows the name used in all other gemm_pack_rhs above.
    const StorageIndex peeled_k = (rows / packet_size) * packet_size;

    const StorageIndex start_col = rhs.colOffset();
    const StorageIndex max_col = rhs.maxCol(peeled_k);
    const StorageIndex rhs_depth_offset = rhs.depthOffset();

    // Original input column and row after applying all non-standard strides and
    // dilations. Computed by padOrSkip{Row,Col}.
    Index orig_c = 0;
    Index orig_r = 0;

    for (StorageIndex col = 0; col < cols; ++col) {
      SubMapper lm = rhs.getLinearMapper(0, col);

      // `k` tracks how many coefficients of this output column are packed.
      StorageIndex k = 0;
      for (Index c = start_col; c < max_col; ++c) {
        eigen_assert(k <= peeled_k);

        const StorageIndex start_row = (c == start_col) ? rhs.rowOffset() : 0;
        const StorageIndex max_row = rhs.maxRow(peeled_k, c);
        // True when this patch column is padding OR falls between input
        // columns (skipped by the inflate stride).
        const bool pad_or_skip_col = lm.padOrSkipCol(c, &orig_c);

        for (StorageIndex r = start_row; r < max_row; ++r) {
          eigen_assert(k <= peeled_k);

          const StorageIndex start_depth =
              ((c == start_col) && (r == start_row)) ? rhs_depth_offset : 0;
          const StorageIndex max_depth =
              rhs.maxDepth(peeled_k - k, start_depth);

          const bool pad_or_skip =
              pad_or_skip_col || lm.padOrSkipRow(r, &orig_r);
          const StorageIndex base_idx = lm.origBaseIndex(orig_r, orig_c);

          if (patch_depth_is_multiple_of_packet_size) {
            // If patch depth is a multiple of packet size, it's guaranteed that
            // we can process all values in depth dimension with packets.
            eigen_assert((max_depth - start_depth) % packet_size == 0);
            StorageIndex d = start_depth;

            for (; d < max_depth; d += packet_size) {
              eigen_assert(k < peeled_k);
              const Packet p = pad_or_skip ? pset1<Packet>(Scalar(0))
                                           : rhs.packetNoPadding(d, base_idx);
              internal::pstoreu(block, p);
              block += packet_size;
              k += packet_size;
            }

          } else {
            const StorageIndex vectorized_depth = max_depth - packet_size;
            StorageIndex d = start_depth;
            for (; d <= vectorized_depth; d += packet_size) {
              eigen_assert(k < peeled_k);
              const Packet p = pad_or_skip ? pset1<Packet>(Scalar(0))
                                           : rhs.packetNoPadding(d, base_idx);
              internal::pstoreu(block, p);
              block += packet_size;
              k += packet_size;
            }

            // Sub-packet tail (zero-filled when padded/skipped).
            eigen_assert(k <= peeled_k);
            const Index num_coeffs = CoeffFinalizer::finalize(
                block, rhs, base_idx, d, max_depth, pad_or_skip);

            k += num_coeffs;
            block += num_coeffs;
            eigen_assert(k <= peeled_k);
          }
        }
      }

      // The loop above should fill peeled_k elements.
      eigen_assert(peeled_k == k);

      // Fill remaining elements using loadCoeff.
      for (; k < rows; ++k) {
        *block = lm(k);
        ++block;
      }
    }
  }
};
442
+ } // namespace internal
443
+ } // namespace Eigen
444
+ #endif // defined(TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL)
445
+ #endif // TENSORFLOW_TSL_FRAMEWORK_CONVOLUTION_EIGEN_SPATIAL_CONVOLUTIONS_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/device_id.h ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_DEVICE_ID_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_DEVICE_ID_H_
18
+
19
+ #include "tsl/lib/gtl/int_type.h"
20
+
21
+ namespace tsl {
22
+
23
+ // There are three types of device ids:
24
+ // - *physical* device id: this is the integer index of a device in the
25
+ // physical machine, it can be filtered (for e.g. using environment variable
26
+ // CUDA_VISIBLE_DEVICES when using CUDA). Note that this id is not visible to
27
+ // Tensorflow, but result after filtering is visible to TF and is called
28
+ // platform device id as below.
29
+ // For CUDA, see
30
+ // http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#env-vars
31
+ // for more details.
32
+ // - *platform* device id (also called *visible* device id in
33
+ // third_party/tensorflow/core/protobuf/config.proto): this is the id that is
34
+ // visible to Tensorflow after filtering (for e.g. by CUDA_VISIBLE_DEVICES).
35
+ // For CUDA, this id is generated by the CUDA GPU driver. It starts from 0
36
+ // and is used for CUDA API calls like cuDeviceGet().
37
+ // - TF device id (also called *virtual* device id in
38
+ // third_party/tensorflow/core/protobuf/config.proto): this is the id that
39
+ // Tensorflow generates and exposes to its users. It is the id in the <id>
40
+ // field of the device name "/device:GPU:<id>", and is also the identifier of
41
+ // a BaseGPUDevice. Note that the configuration allows us to create multiple
42
+ // BaseGPUDevice per GPU hardware in order to use multi CUDA streams on the
43
+ // hardware, so the mapping between TF GPU id and platform GPU id is not a 1:1
44
+ // mapping, see the example below.
45
+ //
46
+ // For example, assuming that in the machine we have GPU device with index 0, 1,
47
+ // 2 and 3 (physical GPU id). Setting "CUDA_VISIBLE_DEVICES=1,2,3" will create
48
+ // the following mapping between platform GPU id and physical GPU id:
49
+ //
50
+ // platform GPU id -> physical GPU id
51
+ // 0 -> 1
52
+ // 1 -> 2
53
+ // 2 -> 3
54
+ //
55
+ // Note that physical GPU id 0 is invisible to TF so there is no mapping entry
56
+ // for it.
57
+ //
58
+ // Assuming we configure the Session to create one BaseGPUDevice per GPU
59
+ // hardware, then setting GPUOptions::visible_device_list to "2,0" will create
60
+ // the following mapping between TF device id and platform device id:
61
+ //
62
+ // TF GPU id -> platform GPU ID
63
+ // 0 (i.e. /device:GPU:0) -> 2
64
+ // 1 (i.e. /device:GPU:1) -> 0
65
+ //
66
+ // Note that platform device id 1 is filtered out by
67
+ // GPUOptions::visible_device_list, so it won't be used by the TF process.
68
+ //
69
+ // On the other hand, if we configure it to create 2 BaseGPUDevice per GPU
70
+ // hardware, then setting GPUOptions::visible_device_list to "2,0" will create
71
+ // the following mapping between TF device id and platform device id:
72
+ //
73
+ // TF GPU id -> platform GPU ID
74
+ // 0 (i.e. /device:GPU:0) -> 2
75
+ // 1 (i.e. /device:GPU:1) -> 2
76
+ // 2 (i.e. /device:GPU:2) -> 0
77
+ // 3 (i.e. /device:GPU:3) -> 0
78
+ //
79
+ // We create strong-typed integer classes for both TF device id and platform
80
+ // device id to minimize programming errors and improve code readability. Except
81
+ // for the StreamExecutor interface (as we don't change its API), whenever we
82
+ // need a TF device id (or platform device id) we should use TfDeviceId (or
83
+ // PlatformDeviceId) instead of a raw integer.
84
// Strongly-typed int for the TF-visible ("virtual") device id, i.e. the <id>
// in "/device:GPU:<id>".
TSL_LIB_GTL_DEFINE_INT_TYPE(TfDeviceId, int32);
// Strongly-typed int for the platform ("visible") device id generated by the
// underlying driver after environment-level filtering.
TSL_LIB_GTL_DEFINE_INT_TYPE(PlatformDeviceId, int32);
86
+
87
+ } // namespace tsl
88
+
89
+ #endif // TENSORFLOW_TSL_FRAMEWORK_DEVICE_ID_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/device_id_manager.h ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_DEVICE_ID_MANAGER_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_DEVICE_ID_MANAGER_H_
18
+
19
+ #include <vector>
20
+
21
+ #include "tsl/framework/device_id.h"
22
+ #include "tsl/framework/device_type.h"
23
+ #include "tsl/platform/status.h"
24
+ #include "tsl/platform/statusor.h"
25
+
26
+ namespace tsl {
27
+
28
// Class that maintains a map from TfDeviceId to PlatformDeviceId, and manages
// the translation between them. All members are static: the mapping is a
// process-wide registry keyed by device type.
// NOTE(review): thread-safety of the underlying map is not visible from this
// header — confirm in the implementation before calling concurrently.
class DeviceIdManager {
 public:
  // Adds a mapping from tf_device_id to platform_device_id.
  static Status InsertTfPlatformDeviceIdPair(
      const DeviceType& type, TfDeviceId tf_device_id,
      PlatformDeviceId platform_device_id);

  // Gets the platform_device_id associated with tf_device_id. Returns OK if
  // found; `platform_device_id` is written only on success.
  static Status TfToPlatformDeviceId(const DeviceType& type,
                                     TfDeviceId tf_device_id,
                                     PlatformDeviceId* platform_device_id);

  // Gets all tf_device_ids that are on the platform with `platform_device_id`.
  // (The mapping may be many-to-one; see tsl/framework/device_id.h.)
  static StatusOr<std::vector<TfDeviceId>> GetTfDevicesOnPlatform(
      const DeviceType& type, PlatformDeviceId platform_device_id);

  // Clears the map. Used in unit tests only.
  static void TestOnlyReset();
};
50
+
51
+ } // namespace tsl
52
+
53
+ #endif // TENSORFLOW_TSL_FRAMEWORK_DEVICE_ID_MANAGER_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/device_id_utils.h ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_DEVICE_ID_UTILS_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_DEVICE_ID_UTILS_H_
18
+
19
+ #include <string>
20
+ #include <vector>
21
+
22
+ #include "absl/container/flat_hash_map.h"
23
+ #include "tsl/framework/device_id.h"
24
+ #include "tsl/framework/device_type.h"
25
+ #include "tsl/platform/status.h"
26
+ #include "tsl/platform/statusor.h"
27
+ #include "tsl/util/device_name_utils.h"
28
+
29
+ namespace tsl {
30
+
31
+ // Utility methods for translation between TensorFlow device ids and platform
32
+ // device ids.
33
+
34
+ // Verify that the platform_device_id associated with a TfDeviceId is
35
+ // legitimate.
36
+ void CheckValidTfDeviceId(const DeviceType& type, int visible_device_count,
37
+ TfDeviceId tf_device_id);
38
+
39
+ // Parse `visible_device_list` into a list of platform Device ids.
40
+ Status ParseVisibleDeviceList(
41
+ const std::string& visible_device_list, int visible_device_count,
42
+ std::vector<PlatformDeviceId>* visible_device_order);
43
+
44
+ // Returns how many TF devices should be created, and generates the mapping
45
+ // between TfDeviceId and PlatformDeviceId. The number of TF devices is the
46
+ // minimum among the device count in `session_option_device_counts`,
47
+ // `visible_device_count` and the number of visible devices in
48
+ // `visible_device_list`. If `visible_device_list` is empty, the mapping
49
+ // between TfDeviceId and PlatformDeviceId is an identity mapping.
50
+ // Please refer to tensorflow/tsl/framework/device_id.h and
51
+ // tensorflow/core/protobuf/config.proto about the relationship between
52
+ // TfDeviceId and PlatformDeviceId, and how `visible_device_list` is used.
53
+ StatusOr<size_t> GetNumberTfDevicesAndConfigurePlatformDeviceId(
54
+ const absl::flat_hash_map<std::string, int64_t>&
55
+ session_option_device_counts,
56
+ absl::string_view device_type, absl::string_view visible_device_list,
57
+ int visible_device_count);
58
+
59
+ StatusOr<int> GetPlatformDeviceIdFromDeviceParsedName(
60
+ const DeviceNameUtils::ParsedName& device_name,
61
+ const DeviceType& device_type);
62
+
63
+ // TODO(b/293324740): support virtual devices.
64
+ // Returns the corresponding PlatformDeviceId if it is found. Otherwise returns
65
+ // the id in device_name.
66
+ StatusOr<int> GetDeviceIdFromDeviceParsedName(
67
+ const DeviceNameUtils::ParsedName& device_name,
68
+ const DeviceType& device_type);
69
+
70
+ } // namespace tsl
71
+
72
+ #endif // TENSORFLOW_TSL_FRAMEWORK_DEVICE_ID_UTILS_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/device_type.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_DEVICE_TYPE_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_DEVICE_TYPE_H_
18
+
19
+ #include <ostream>
20
+ #include <string>
21
+
22
+ #include "absl/strings/string_view.h"
23
+
24
+ namespace tsl {
25
+
26
// A DeviceType is just a string, but we wrap it up in a class to give
// some type checking as we're passing these around.
class DeviceType {
 public:
  // Implicit on purpose (hence the NOLINT): lets string literals be passed
  // wherever a DeviceType is expected.
  DeviceType(const char* type)  // NOLINT
      : type_(type) {}

  explicit DeviceType(absl::string_view type)
      : type_(type.data(), type.size()) {}

  // NUL-terminated type name; the pointer is only valid while this
  // DeviceType is alive.
  const char* type() const { return type_.c_str(); }
  const std::string& type_string() const { return type_; }

  // operator< and operator== are defined out of line (in the .cc).
  bool operator<(const DeviceType& other) const;
  bool operator==(const DeviceType& other) const;
  bool operator!=(const DeviceType& other) const { return !(*this == other); }

 private:
  std::string type_;  // The device type name.
};

// Stream insertion for logging/debugging; defined out of line.
std::ostream& operator<<(std::ostream& os, const DeviceType& d);
47
+
48
+ } // namespace tsl
49
+
50
+ #endif // TENSORFLOW_TSL_FRAMEWORK_DEVICE_TYPE_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/FixedPoint.h ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_FIXEDPOINT_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_FIXEDPOINT_H_
18
+
19
+ #include "unsupported/Eigen/CXX11/Tensor" // from @eigen_archive
20
+ #include "tsl/framework/fixedpoint_types.h"
21
+
22
+ // Use optimized implementations whenever available
23
+ #if defined(EIGEN_VECTORIZE_AVX512DQ) || defined(EIGEN_VECTORIZE_AVX512BW)
24
+ #include "tsl/framework/fixedpoint/PacketMathAVX512.h"
25
+ #include "tsl/framework/fixedpoint/TypeCastingAVX512.h"
26
+
27
+ #elif defined EIGEN_VECTORIZE_AVX2
28
+ #define EIGEN_USE_OPTIMIZED_INT8_UINT8_MAT_MAT_PRODUCT
29
+ #define EIGEN_USE_OPTIMIZED_INT16_INT16_MAT_MAT_PRODUCT
30
+ #include "tsl/framework/fixedpoint/PacketMathAVX2.h"
31
+ // Disable clang-format to prevent 'MatMatProductAVX2.h' header from being
32
+ // included before 'PacketMathAVX2' header on which it depends.
33
+ // clang-format off
34
+ #include "tsl/framework/fixedpoint/MatMatProductAVX2.h"
35
+ // clang-format on
36
+ #include "tsl/framework/fixedpoint/TypeCastingAVX2.h"
37
+
38
+ #elif defined EIGEN_VECTORIZE_AVX
39
+ #include "tsl/framework/fixedpoint/PacketMathAVX.h"
40
+
41
+ #elif defined EIGEN_VECTORIZE_NEON
42
+ #define EIGEN_USE_OPTIMIZED_INT8_INT8_MAT_MAT_PRODUCT
43
+ #define EIGEN_USE_OPTIMIZED_INT8_UINT8_MAT_MAT_PRODUCT
44
+ #define EIGEN_USE_OPTIMIZED_UINT8_INT8_MAT_MAT_PRODUCT
45
+ #define EIGEN_USE_OPTIMIZED_INT16_INT16_MAT_MAT_PRODUCT
46
+ #include "tsl/framework/fixedpoint/MatMatProductNEON.h"
47
+ #endif
48
+
49
+ // Use the default implementation when no optimized code is available
50
+ #include "tsl/framework/fixedpoint/MatMatProduct.h"
51
+ #include "tsl/framework/fixedpoint/MatVecProduct.h"
52
+
53
+ #endif // TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_FIXEDPOINT_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/MatMatProduct.h ADDED
@@ -0,0 +1,363 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATMATPRODUCT_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATMATPRODUCT_H_
18
+
19
+ namespace Eigen {
20
+ namespace internal {
21
+
22
+ // Accumulate the product of 2 QInt8 inputs on 32 bits to prevent
23
+ // overflows
24
+ template <>
25
+ struct scalar_product_traits<QInt8, QInt8> {
26
+ enum { Defined = 1 };
27
+ typedef QInt32 ReturnType;
28
+ };
29
+
30
+ // Accumulate the product of 2 QInt16 inputs on 32 bits to prevent
31
+ // overflows
32
+ template <>
33
+ struct scalar_product_traits<QInt16, QInt16> {
34
+ enum { Defined = 1 };
35
+ typedef QInt32 ReturnType;
36
+ };
37
+
38
+ // Accumulate the product of QInt8 inputs with QUint8 inputs on 32 bits
39
+ // to prevent overflows
40
+ template <>
41
+ struct scalar_product_traits<QInt8, QUInt8> {
42
+ enum { Defined = 1 };
43
+ typedef QInt32 ReturnType;
44
+ };
45
+
46
+ // Accumulate the product of QUInt8 inputs with Qint8 inputs on 32 bits
47
+ // to prevent overflows
48
+ template <>
49
+ struct scalar_product_traits<QUInt8, QInt8> {
50
+ enum { Defined = 1 };
51
+ typedef QInt32 ReturnType;
52
+ };
53
+
54
+ // Description of the product implementation. It's pretty simple now since
55
+ // nothing is vectorized yet.
56
+ // This definition tackle the case where both lhs and rhs are encoded using
57
+ // signed 8bit integers
58
+ #ifndef EIGEN_USE_OPTIMIZED_INT8_INT8_MAT_MAT_PRODUCT
59
+
60
+ template <bool _ConjLhs, bool _ConjRhs>
61
+ class gebp_traits<QInt8, QInt8, _ConjLhs, _ConjRhs> {
62
+ public:
63
+ typedef QInt8 LhsScalar;
64
+ typedef QInt8 RhsScalar;
65
+ typedef QInt32 ResScalar;
66
+
67
+ typedef typename packet_traits<LhsScalar>::type LhsPacket;
68
+ typedef LhsPacket LhsPacket4Packing;
69
+
70
+ enum {
71
+ // register block size along the M and N directions
72
+ // One for the current implementation
73
+ nr = 1,
74
+ mr = 1,
75
+ // Progress made at each iteration of the product loop
76
+ // also 1 for the current implementation
77
+ LhsProgress = 1,
78
+ RhsProgress = 1
79
+ };
80
+ };
81
+
82
+ // The signed 8bit Mat-Mat product itself.
83
+ template <typename Index, typename DataMapper, int mr, int nr,
84
+ bool ConjugateLhs, bool ConjugateRhs>
85
+ struct gebp_kernel<QInt8, QInt8, Index, DataMapper, mr, nr, ConjugateLhs,
86
+ ConjugateRhs> {
87
+ EIGEN_DONT_INLINE
88
+ void operator()(const DataMapper& res, const QInt8* blockA,
89
+ const QInt8* blockB, Index rows, Index depth, Index cols,
90
+ QInt32 alpha, Index strideA = -1, Index strideB = -1,
91
+ Index offsetA = 0, Index offsetB = 0);
92
+ };
93
+
94
+ template <typename Index, typename DataMapper, int mr, int nr,
95
+ bool ConjugateLhs, bool ConjugateRhs>
96
+ EIGEN_DONT_INLINE void
97
+ gebp_kernel<QInt8, QInt8, Index, DataMapper, mr, nr, ConjugateLhs,
98
+ ConjugateRhs>::operator()(const DataMapper& res,
99
+ const QInt8* blockA, const QInt8* blockB,
100
+ Index rows, Index depth, Index cols,
101
+ QInt32 alpha, Index strideA,
102
+ Index strideB, Index offsetA,
103
+ Index offsetB) {
104
+ EIGEN_STATIC_ASSERT(!ConjugateLhs, YOU_MADE_A_PROGRAMMING_MISTAKE);
105
+ EIGEN_STATIC_ASSERT(!ConjugateRhs, YOU_MADE_A_PROGRAMMING_MISTAKE);
106
+
107
+ eigen_assert(alpha.value == 1);
108
+ eigen_assert(strideA == -1);
109
+ eigen_assert(strideB == -1);
110
+ eigen_assert(offsetA == 0);
111
+ eigen_assert(offsetB == 0);
112
+
113
+ eigen_assert(rows > 0);
114
+ eigen_assert(cols > 0);
115
+ eigen_assert(depth > 0);
116
+ eigen_assert(blockA);
117
+ eigen_assert(blockB);
118
+
119
+ for (Index j = 0; j < cols; ++j) {
120
+ Index startB = j * depth;
121
+
122
+ for (Index i = 0; i < rows; ++i) {
123
+ Index startA = i * depth;
124
+
125
+ for (Index k = 0; k < depth; ++k) {
126
+ res(i, j) += blockA[startA + k] * blockB[startB + k];
127
+ }
128
+ }
129
+ }
130
+ }
131
+ #endif
132
+
133
+ // This definition tackle the case where the lhs is encoded using signed 8bit
134
+ // integers and the rhs using unsigned 8bit integers.
135
+ #ifndef EIGEN_USE_OPTIMIZED_INT8_UINT8_MAT_MAT_PRODUCT
136
+ template <bool _ConjLhs, bool _ConjRhs>
137
+ class gebp_traits<QInt8, QUInt8, _ConjLhs, _ConjRhs> {
138
+ public:
139
+ typedef QInt8 LhsScalar;
140
+ typedef QUInt8 RhsScalar;
141
+ typedef QInt32 ResScalar;
142
+
143
+ typedef typename packet_traits<LhsScalar>::type LhsPacket;
144
+ typedef LhsPacket LhsPacket4Packing;
145
+
146
+ enum {
147
+ // register block size along the M and N directions
148
+ // One for the current implementation
149
+ nr = 1,
150
+ mr = 1,
151
+ // Progress made at each iteration of the product loop
152
+ // also 1 for the current implementation
153
+ LhsProgress = 1,
154
+ RhsProgress = 1
155
+ };
156
+ };
157
+
158
+ // Mat-Mat product of a signed 8bit lhs with an unsigned 8bit rhs
159
+ template <typename Index, typename DataMapper, int mr, int nr,
160
+ bool ConjugateLhs, bool ConjugateRhs>
161
+ struct gebp_kernel<QInt8, QUInt8, Index, DataMapper, mr, nr, ConjugateLhs,
162
+ ConjugateRhs> {
163
+ EIGEN_DONT_INLINE
164
+ void operator()(const DataMapper& res, const QInt8* blockA,
165
+ const QUInt8* blockB, Index rows, Index depth, Index cols,
166
+ QInt32 alpha, Index strideA = -1, Index strideB = -1,
167
+ Index offsetA = 0, Index offsetB = 0);
168
+ };
169
+
170
+ template <typename Index, typename DataMapper, int mr, int nr,
171
+ bool ConjugateLhs, bool ConjugateRhs>
172
+ EIGEN_DONT_INLINE void
173
+ gebp_kernel<QInt8, QUInt8, Index, DataMapper, mr, nr, ConjugateLhs,
174
+ ConjugateRhs>::operator()(const DataMapper& res,
175
+ const QInt8* blockA, const QUInt8* blockB,
176
+ Index rows, Index depth, Index cols,
177
+ QInt32 alpha, Index strideA,
178
+ Index strideB, Index offsetA,
179
+ Index offsetB) {
180
+ EIGEN_STATIC_ASSERT(!ConjugateLhs, YOU_MADE_A_PROGRAMMING_MISTAKE);
181
+ EIGEN_STATIC_ASSERT(!ConjugateRhs, YOU_MADE_A_PROGRAMMING_MISTAKE);
182
+
183
+ eigen_assert(alpha.value == 1);
184
+ eigen_assert(strideA == -1);
185
+ eigen_assert(strideB == -1);
186
+ eigen_assert(offsetA == 0);
187
+ eigen_assert(offsetB == 0);
188
+
189
+ eigen_assert(rows > 0);
190
+ eigen_assert(cols > 0);
191
+ eigen_assert(depth > 0);
192
+ eigen_assert(blockA);
193
+ eigen_assert(blockB);
194
+
195
+ for (Index j = 0; j < cols; ++j) {
196
+ Index startB = j * depth;
197
+
198
+ for (Index i = 0; i < rows; ++i) {
199
+ Index startA = i * depth;
200
+
201
+ for (Index k = 0; k < depth; ++k) {
202
+ res(i, j) += blockA[startA + k] * blockB[startB + k];
203
+ }
204
+ }
205
+ }
206
+ }
207
+ #endif
208
+
209
+ // This definition tackle the case where the lhs is encoded using unsigned 8bit
210
+ // integers and the rhs using signed 8bit integers.
211
+ #ifndef EIGEN_USE_OPTIMIZED_UINT8_INT8_MAT_MAT_PRODUCT
212
+ template <bool _ConjLhs, bool _ConjRhs>
213
+ class gebp_traits<QUInt8, QInt8, _ConjLhs, _ConjRhs> {
214
+ public:
215
+ typedef QUInt8 LhsScalar;
216
+ typedef QInt8 RhsScalar;
217
+ typedef QInt32 ResScalar;
218
+
219
+ typedef typename packet_traits<LhsScalar>::type LhsPacket;
220
+ typedef LhsPacket LhsPacket4Packing;
221
+
222
+ enum {
223
+ // register block size along the M and N directions
224
+ // One for the current implementation
225
+ nr = 1,
226
+ mr = 1,
227
+ // Progress made at each iteration of the product loop
228
+ // also 1 for the current implementation
229
+ LhsProgress = 1,
230
+ RhsProgress = 1
231
+ };
232
+ };
233
+
234
+ // Mat-Mat product of an unsigned 8bit lhs with a signed 8bit rhs
235
+ template <typename Index, typename DataMapper, int mr, int nr,
236
+ bool ConjugateLhs, bool ConjugateRhs>
237
+ struct gebp_kernel<QUInt8, QInt8, Index, DataMapper, mr, nr, ConjugateLhs,
238
+ ConjugateRhs> {
239
+ EIGEN_DONT_INLINE
240
+ void operator()(const DataMapper& res, const QUInt8* blockA,
241
+ const QInt8* blockB, Index rows, Index depth, Index cols,
242
+ QInt32 alpha, Index strideA = -1, Index strideB = -1,
243
+ Index offsetA = 0, Index offsetB = 0);
244
+ };
245
+
246
+ template <typename Index, typename DataMapper, int mr, int nr,
247
+ bool ConjugateLhs, bool ConjugateRhs>
248
+ EIGEN_DONT_INLINE void
249
+ gebp_kernel<QUInt8, QInt8, Index, DataMapper, mr, nr, ConjugateLhs,
250
+ ConjugateRhs>::operator()(const DataMapper& res,
251
+ const QUInt8* blockA, const QInt8* blockB,
252
+ Index rows, Index depth, Index cols,
253
+ QInt32 alpha, Index strideA,
254
+ Index strideB, Index offsetA,
255
+ Index offsetB) {
256
+ EIGEN_STATIC_ASSERT(!ConjugateLhs, YOU_MADE_A_PROGRAMMING_MISTAKE);
257
+ EIGEN_STATIC_ASSERT(!ConjugateRhs, YOU_MADE_A_PROGRAMMING_MISTAKE);
258
+
259
+ eigen_assert(alpha.value == 1);
260
+ eigen_assert(strideA == -1);
261
+ eigen_assert(strideB == -1);
262
+ eigen_assert(offsetA == 0);
263
+ eigen_assert(offsetB == 0);
264
+
265
+ eigen_assert(rows > 0);
266
+ eigen_assert(cols > 0);
267
+ eigen_assert(depth > 0);
268
+ eigen_assert(blockA);
269
+ eigen_assert(blockB);
270
+
271
+ for (Index j = 0; j < cols; ++j) {
272
+ Index startB = j * depth;
273
+
274
+ for (Index i = 0; i < rows; ++i) {
275
+ Index startA = i * depth;
276
+
277
+ for (Index k = 0; k < depth; ++k) {
278
+ res(i, j) += blockA[startA + k] * blockB[startB + k];
279
+ }
280
+ }
281
+ }
282
+ }
283
+ #endif
284
+
285
+ #ifndef EIGEN_USE_OPTIMIZED_INT16_INT16_MAT_MAT_PRODUCT
286
+
287
+ template <bool _ConjLhs, bool _ConjRhs>
288
+ class gebp_traits<QInt16, QInt16, _ConjLhs, _ConjRhs> {
289
+ public:
290
+ typedef QInt16 LhsScalar;
291
+ typedef QInt16 RhsScalar;
292
+ typedef QInt32 ResScalar;
293
+
294
+ typedef typename packet_traits<LhsScalar>::type LhsPacket;
295
+ typedef LhsPacket LhsPacket4Packing;
296
+
297
+ enum {
298
+ // register block size along the M and N directions
299
+ // One for the current implementation
300
+ nr = 1,
301
+ mr = 1,
302
+ // Progress made at each iteration of the product loop
303
+ // also 1 for the current implementation
304
+ LhsProgress = 1,
305
+ RhsProgress = 1
306
+ };
307
+ };
308
+
309
+ // The signed 16bit Mat-Mat product itself.
310
+ template <typename Index, typename DataMapper, int mr, int nr,
311
+ bool ConjugateLhs, bool ConjugateRhs>
312
+ struct gebp_kernel<QInt16, QInt16, Index, DataMapper, mr, nr, ConjugateLhs,
313
+ ConjugateRhs> {
314
+ EIGEN_DONT_INLINE
315
+ void operator()(const DataMapper& res, const QInt16* blockA,
316
+ const QInt16* blockB, Index rows, Index depth, Index cols,
317
+ QInt32 alpha, Index strideA = -1, Index strideB = -1,
318
+ Index offsetA = 0, Index offsetB = 0);
319
+ };
320
+
321
+ template <typename Index, typename DataMapper, int mr, int nr,
322
+ bool ConjugateLhs, bool ConjugateRhs>
323
+ EIGEN_DONT_INLINE void
324
+ gebp_kernel<QInt16, QInt16, Index, DataMapper, mr, nr, ConjugateLhs,
325
+ ConjugateRhs>::operator()(const DataMapper& res,
326
+ const QInt16* blockA,
327
+ const QInt16* blockB, Index rows,
328
+ Index depth, Index cols, QInt32 alpha,
329
+ Index strideA, Index strideB,
330
+ Index offsetA, Index offsetB) {
331
+ EIGEN_STATIC_ASSERT(!ConjugateLhs, YOU_MADE_A_PROGRAMMING_MISTAKE);
332
+ EIGEN_STATIC_ASSERT(!ConjugateRhs, YOU_MADE_A_PROGRAMMING_MISTAKE);
333
+
334
+ eigen_assert(alpha.value == 1);
335
+ eigen_assert(strideA == -1);
336
+ eigen_assert(strideB == -1);
337
+ eigen_assert(offsetA == 0);
338
+ eigen_assert(offsetB == 0);
339
+
340
+ eigen_assert(rows > 0);
341
+ eigen_assert(cols > 0);
342
+ eigen_assert(depth > 0);
343
+ eigen_assert(blockA);
344
+ eigen_assert(blockB);
345
+
346
+ for (Index j = 0; j < cols; ++j) {
347
+ Index startB = j * depth;
348
+
349
+ for (Index i = 0; i < rows; ++i) {
350
+ Index startA = i * depth;
351
+
352
+ for (Index k = 0; k < depth; ++k) {
353
+ res(i, j) += blockA[startA + k] * blockB[startB + k];
354
+ }
355
+ }
356
+ }
357
+ }
358
+ #endif
359
+
360
+ } // namespace internal
361
+ } // namespace Eigen
362
+
363
+ #endif // TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATMATPRODUCT_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/MatMatProductAVX2.h ADDED
@@ -0,0 +1,2314 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATMATPRODUCTAVX2_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATMATPRODUCTAVX2_H_
18
+
19
+ namespace Eigen {
20
+ namespace internal {
21
+
22
+ // AVX2 optimized implementation of Mat-Mat product.
23
+ // LHS is encoded using signed 16-bit integers.
24
+ // RHS is encoded using signed 16-bit integers.
25
+ #ifdef EIGEN_USE_OPTIMIZED_INT16_INT16_MAT_MAT_PRODUCT
26
+
27
+ // Define quantized traits
28
+ template <bool _ConjLhs, bool _ConjRhs>
29
+ class gebp_traits<QInt16, QInt16, _ConjLhs, _ConjRhs> {
30
+ public:
31
+ typedef QInt16 LhsScalar;
32
+ typedef QInt16 RhsScalar;
33
+ typedef QInt32 ResScalar;
34
+
35
+ typedef typename packet_traits<LhsScalar>::type LhsPacket;
36
+ typedef LhsPacket LhsPacket4Packing;
37
+
38
+ enum {
39
+ // Define register blocking scheme.
40
+ nr = 16,
41
+ mr = 16,
42
+ kr = 4,
43
+ // Ignore progress tracking per loop iteration.
44
+ LhsProgress = -1,
45
+ RhsProgress = -1
46
+ };
47
+ };
48
+
49
+ // Specialized blocking for quantized implementations.
50
+ // Used by TensorContractionThreadPool, inputs must have dimensions that are
51
+ // multiples of 32.
52
+ template <typename Index, int ShardingType>
53
+ class TensorContractionBlocking<QInt16, QInt16, QInt16, Index, ShardingType> {
54
+ public:
55
+ TensorContractionBlocking(Index k, Index m, Index n, Index num_threads = 1)
56
+ : kc_(((k + 15) / 16) * 16),
57
+ mc_(((m + 15) / 16) * 16),
58
+ nc_(((n + 15) / 16) * 16) {
59
+ eigen_assert(mc_ % 16 == 0);
60
+ eigen_assert(kc_ % 16 == 0);
61
+ if (!k || !m || !n) {
62
+ return;
63
+ }
64
+
65
+ if (ShardingType == ShardByCol) {
66
+ eigen_assert(nc_ % 16 == 0);
67
+ nc_ = (((nc_ / num_threads) + 15) / 16) * 16;
68
+ } else {
69
+ eigen_assert(nc_ % 16 == 0);
70
+ mc_ = (((mc_ / num_threads) + 15) / 16) * 16;
71
+ }
72
+ }
73
+
74
+ EIGEN_ALWAYS_INLINE Index kc() const { return kc_; }
75
+ EIGEN_ALWAYS_INLINE Index mc() const { return mc_; }
76
+ EIGEN_ALWAYS_INLINE Index nc() const { return nc_; }
77
+
78
+ private:
79
+ Index kc_;
80
+ Index mc_;
81
+ Index nc_;
82
+ };
83
+
84
// Specialized blocking for quantized implementations.
// Used by TensorContraction and GeneralMatrixMatrix, inputs are padded to
// multiples of 32.
template <int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<ColMajor, QInt16, QInt16, MaxRows, MaxCols, MaxDepth,
                          KcFactor, false>
    : public level3_blocking<QInt16, QInt16> {
  DenseIndex m_sizeA;  // Element count of the packed lhs buffer (m_mc * m_kc).
  DenseIndex m_sizeB;  // Element count of the packed rhs buffer (m_kc * m_nc).

 public:
  // Rounds every blocking dimension up to the next multiple of 16; thread
  // count and L3-blocking hints are ignored by this specialization.
  gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth,
                      DenseIndex /*num_threads*/, bool /*l3_blocking*/) {
    this->m_mc = ((rows + 15) / 16) * 16;
    this->m_nc = ((cols + 15) / 16) * 16;
    this->m_kc = ((depth + 15) / 16) * 16;
    m_sizeA = this->m_mc * this->m_kc;
    m_sizeB = this->m_kc * this->m_nc;
  }
  // Pack buffers are allocated lazily; repeated calls are no-ops.
  void allocateA() {
    if (this->m_blockA == 0) this->m_blockA = aligned_new<QInt16>(m_sizeA);
  }
  void allocateB() {
    if (this->m_blockB == 0) this->m_blockB = aligned_new<QInt16>(m_sizeB);
  }
  void allocateAll() {
    allocateA();
    allocateB();
  }
  // NOTE(review): owns the raw buffers but copy/move are not deleted here —
  // copying would double-free; presumably instances are never copied. Confirm
  // whether the level3_blocking base already prevents this.
  ~gemm_blocking_space() {
    aligned_delete(this->m_blockA, m_sizeA);
    aligned_delete(this->m_blockB, m_sizeB);
  }
};
118
+
119
+ // Below are the fully optimized versions that are correct only for sizes that
120
+ // are multiple of 16. It is about a 10% performance benefit to keep these
121
+ // implementations separate.
122
+
123
+ // Arrange a block of the left input matrix in contiguous memory.
124
+ //
125
+ // Given column major input (A0 beside A1 in memory):
126
+ // A0 B0 C0 D0 E0 F0 G0 H0 ...
127
+ // A1 B1 C1 D1 E1 F1 G1 H1 ...
128
+ // A2 B2 C2 D2 E2 F2 G2 H2 ...
129
+ // A3 B3 C3 D3 E3 F3 G3 H3 ...
130
+ // A4 B4 C4 D4 E4 F4 G4 H4 ...
131
+ // A5 B5 C5 D5 E5 F5 G5 H5 ...
132
+ // A6 B6 C6 D6 E6 F6 G6 H6 ...
133
+ // A7 B7 C7 D7 E7 F7 G7 H7 ...
134
+ // A8 ...
135
+ // ...
136
+ //
137
+ // Packing with m = 8 yields row major output (A0 beside B0 in memory):
138
+ // A0 B0
139
+ // A1 B1
140
+ // A2 B2
141
+ // A3 B3
142
+ // A4 B4
143
+ // A5 B5
144
+ // A6 B6
145
+ // A7 B7
146
+ // ...
147
+ //
148
+ // The purpose is to collect m rows of size k. Two elements of the same
149
+ // row are arranged contiguously because madd performs an adjacent addition
150
+ // in the kernel.
151
+
152
template <typename Index, typename DataMapper, int Pack1, int Pack2,
          bool Conjugate, bool PanelMode>
struct gemm_pack_lhs<QInt16, Index, DataMapper, Pack1, Pack2, QInt16, ColMajor,
                     Conjugate, PanelMode> {
  // Packs a rows x depth block of the column-major lhs into blockA in the
  // layout expected by the AVX2 kernel. `stride`/`offset` must be 0 (the
  // defaults) — see the asserts in the definition below.
  EIGEN_DONT_INLINE void operator()(QInt16* blockA, const DataMapper& lhs,
                                    Index depth, Index rows, Index stride = 0,
                                    Index offset = 0);
};

template <typename Index, typename DataMapper, int Pack1, int Pack2,
          bool Conjugate, bool PanelMode>
EIGEN_DONT_INLINE void
gemm_pack_lhs<QInt16, Index, DataMapper, Pack1, Pack2, QInt16, ColMajor,
              Conjugate, PanelMode>::operator()(QInt16* blockA,
                                                const DataMapper& lhs,
                                                Index depth, Index rows,
                                                Index stride, Index offset) {
  // Panel-mode packing (non-zero stride/offset) is not implemented.
  eigen_assert(stride == 0);
  eigen_assert(offset == 0);

  typedef typename packet_traits<QInt16>::type Packet;

  // Use alternate function for weird sizes
  if (rows % 16 != 0 || depth % 16 != 0) {
    // The generic fallback below is not wired up; unsupported sizes only
    // trip this assert.
    eigen_assert(false &&
                 "only depths and rows that are a multiple of 16 are currently "
                 "supported");
    // gemm_pack_lhs_any<QInt16, Index, DataMapper, Pack1, Pack2, ColMajor,
    // Conjugate, PanelMode> lhs_pack;
    // return lhs_pack(blockA, lhs, depth, rows, stride, offset);
  }

  // Get vector pointer
  __m256i* blockA_256 = reinterpret_cast<__m256i*>(blockA);

  // Pack rows in sets of 16
  for (Index m = 0; m < rows; m += 16) {
    // Pack depth in sets of 4
    for (Index k = 0; k < depth; k += 4) {
      // Load vectors: one 256-bit packet per column slice (columns k..k+3).
      __m256i L_A = lhs.template loadPacket<Packet>(m, k);
      __m256i L_B = lhs.template loadPacket<Packet>(m, k + 1);
      __m256i L_C = lhs.template loadPacket<Packet>(m, k + 2);
      __m256i L_D = lhs.template loadPacket<Packet>(m, k + 3);

      // Rearrange the inputs as required by the kernel: interleave pairs of
      // columns so that two elements of the same row sit adjacently (madd
      // performs an adjacent addition — see the block comment above).
      __m256i L_AB0_AB7 = _mm256_unpacklo_epi16(L_A, L_B);
      __m256i L_AB8_AB15 = _mm256_unpackhi_epi16(L_A, L_B);
      __m256i L_CD0_CD7 = _mm256_unpacklo_epi16(L_C, L_D);
      __m256i L_CD8_CD15 = _mm256_unpackhi_epi16(L_C, L_D);

      // Recombine 128-bit lanes and store the four output vectors in order.
      __m256i L_AD0 = _mm256_permute2x128_si256(L_AB0_AB7, L_AB8_AB15, 0x20);
      _mm256_store_si256(blockA_256++, L_AD0);
      __m256i L_AD8 = _mm256_permute2x128_si256(L_CD0_CD7, L_CD8_CD15, 0x20);
      _mm256_store_si256(blockA_256++, L_AD8);
      __m256i L_AD16 = _mm256_permute2x128_si256(L_AB0_AB7, L_AB8_AB15, 0x31);
      _mm256_store_si256(blockA_256++, L_AD16);
      __m256i L_AD24 = _mm256_permute2x128_si256(L_CD0_CD7, L_CD8_CD15, 0x31);
      _mm256_store_si256(blockA_256++, L_AD24);
    }
  }
}
214
+
215
+ // Arrange a block of the right input matrix in contiguous memory.
216
+ //
217
+ // Given column major input (A0 beside A1 in memory):
218
+ // A0 B0 C0 D0 E0 F0 G0 H0 ...
219
+ // A1 B1 C1 D1 E1 F1 G1 H1 ...
220
+ // A2 B2 C2 D2 E2 F2 G2 H2 ...
221
+ // A3 B3 C3 D3 E3 F3 G3 H3 ...
222
+ // A4 B4 C4 D4 E4 F4 G4 H4 ...
223
+ // A5 B5 C5 D5 E5 F5 G5 H5 ...
224
+ // A6 B6 C6 D6 E6 F6 G6 H6 ...
225
+ // A7 B7 C7 D7 E7 F7 G7 H7 ...
226
+ // A8 ...
227
+ // ...
228
+ // Packing yields row major output (A0 beside A1 in memory):
229
+ // A0 A1 A2 A3 A4 A5 A6 A7
230
+ // B0 B1 B2 B3 B4 B5 B6 B7
231
+ // ...
232
+ //
233
+ // At least two elements of the same col are arranged contiguously because
234
+ // maddubs and madd both perform an adjacent addition in the kernel. We can
235
+ // save work by leaving 4 adjacent elements because kr = 4.
236
+ // The purpose is to collect n cols of size k. Two elements of the same
237
+ // col are arranged contiguously because madd performs an adjacent addition
238
+ // in the kernel.
239
template <typename Index, typename DataMapper, int nr, bool Conjugate,
          bool PanelMode>
struct gemm_pack_rhs<QInt16, Index, DataMapper, nr, ColMajor, Conjugate,
                     PanelMode> {
  // Packs a depth x cols block of the column-major rhs into blockB in the
  // layout expected by the AVX2 kernel. `stride`/`offset` must be 0 (the
  // defaults) — the out-of-line definition asserts this.
  EIGEN_DONT_INLINE void operator()(QInt16* blockB, const DataMapper& rhs,
                                    Index depth, Index cols, Index stride = 0,
                                    Index offset = 0);
};
247
+
248
// Pack a depth x cols block of the QInt16 RHS into `blockB`, row-major with
// groups of 4 adjacent depth elements kept contiguous (kr = 4) so the kernel's
// madd instructions can consume them directly.
template <typename Index, typename DataMapper, int nr, bool Conjugate,
          bool PanelMode>
EIGEN_DONT_INLINE void
gemm_pack_rhs<QInt16, Index, DataMapper, nr, ColMajor, Conjugate,
              PanelMode>::operator()(QInt16* blockB, const DataMapper& rhs,
                                     Index depth, Index cols, Index stride,
                                     Index offset) {
  // PanelMode packing is not implemented for this specialization.
  eigen_assert(stride == 0);
  eigen_assert(offset == 0);

  typedef typename packet_traits<QInt16>::type Packet;

  // Use alternate function for weird sizes
  if (cols % 16 != 0 || depth % 16 != 0) {
    eigen_assert(false &&
                 "only depths and cols that are a multiple of 16 are currently "
                 "supported");
    // gemm_pack_rhs_any<QInt16, Index, DataMapper, nr, ColMajor, Conjugate,
    // PanelMode> rhs_pack;
    // return rhs_pack(blockB, rhs, depth, cols, stride, offset);
  }

  // Get vector pointer
  __m256i* blockB_256 = reinterpret_cast<__m256i*>(blockB);

  // Perform a step of the packing for 4 columns.
  // PACK_STEP interleaves 4 loaded column vectors (R_A..R_D, each holding 16
  // consecutive depth values of one column) so that 4-element depth groups of
  // columns A..D land in lane order, then scatters the four result vectors at
  // strides of 4 within the current 16-column group and advances blockB_256 by
  // one. After four PACK_STEPs the loop adds 12 to skip the already-written
  // interleaved slots.
  __m256i R_AB_L, R_AB_H, R_CD_L, R_CD_H, R_AD_0, R_AD_4, R_AD_8, R_AD_12;
#define PACK_STEP                                            \
  R_AB_L = _mm256_unpacklo_epi64(R_A, R_B);                  \
  R_CD_L = _mm256_unpacklo_epi64(R_C, R_D);                  \
  R_AB_H = _mm256_unpackhi_epi64(R_A, R_B);                  \
  R_CD_H = _mm256_unpackhi_epi64(R_C, R_D);                  \
  R_AD_0 = _mm256_permute2x128_si256(R_AB_L, R_CD_L, 0x20);  \
  R_AD_8 = _mm256_permute2x128_si256(R_AB_L, R_CD_L, 0x31);  \
  R_AD_4 = _mm256_permute2x128_si256(R_AB_H, R_CD_H, 0x20);  \
  R_AD_12 = _mm256_permute2x128_si256(R_AB_H, R_CD_H, 0x31); \
  _mm256_store_si256(blockB_256, R_AD_0);                    \
  _mm256_store_si256(blockB_256 + 4, R_AD_4);                \
  _mm256_store_si256(blockB_256 + 8, R_AD_8);                \
  _mm256_store_si256(blockB_256 + 12, R_AD_12);              \
  blockB_256++;

  // Pack cols in sets of 16
  for (Index n = 0; n < cols; n += 16) {
    // Pack depth in sets of 16
    for (Index k = 0; k < depth; k += 16) {
      __m256i R_A = rhs.template loadPacket<Packet>(k, n);
      __m256i R_B = rhs.template loadPacket<Packet>(k, n + 1);
      __m256i R_C = rhs.template loadPacket<Packet>(k, n + 2);
      __m256i R_D = rhs.template loadPacket<Packet>(k, n + 3);
      PACK_STEP;

      R_A = rhs.template loadPacket<Packet>(k, n + 4);
      R_B = rhs.template loadPacket<Packet>(k, n + 5);
      R_C = rhs.template loadPacket<Packet>(k, n + 6);
      R_D = rhs.template loadPacket<Packet>(k, n + 7);
      PACK_STEP;

      R_A = rhs.template loadPacket<Packet>(k, n + 8);
      R_B = rhs.template loadPacket<Packet>(k, n + 9);
      R_C = rhs.template loadPacket<Packet>(k, n + 10);
      R_D = rhs.template loadPacket<Packet>(k, n + 11);
      PACK_STEP;

      R_A = rhs.template loadPacket<Packet>(k, n + 12);
      R_B = rhs.template loadPacket<Packet>(k, n + 13);
      R_C = rhs.template loadPacket<Packet>(k, n + 14);
      R_D = rhs.template loadPacket<Packet>(k, n + 15);
      PACK_STEP;

      // Skip the 12 slots already filled by the strided stores above.
      blockB_256 += 12;
    }
  }
#undef PACK_STEP
}
323
+
324
// Perform the actual multiplication on packed inputs.
//
// Specialization of Eigen's GEBP micro-kernel for QInt16 x QInt16 -> QInt32.
// Operates on blocks previously laid out by the QInt16 gemm_pack_lhs /
// gemm_pack_rhs specializations; the definition below only supports rows,
// cols and depth that are multiples of 16, alpha == 1, and default
// stride/offset values (asserted there).
template <typename Index, typename DataMapper, int mr, int nr,
          bool ConjugateLhs, bool ConjugateRhs>
struct gebp_kernel<QInt16, QInt16, Index, DataMapper, mr, nr, ConjugateLhs,
                   ConjugateRhs> {
  typedef typename DataMapper::LinearMapper LinearMapper;

  // Accumulates blockA * blockB into `res` (results are added to the
  // existing contents of the result mapper).
  EIGEN_DONT_INLINE
  void operator()(const DataMapper& res, const QInt16* blockA,
                  const QInt16* blockB, Index rows, Index depth, Index cols,
                  QInt32 alpha, Index strideA = -1, Index strideB = -1,
                  Index offsetA = 0, Index offsetB = 0);
};
337
+
338
// QInt16 GEBP micro-kernel: multiplies a packed LHS block by a packed RHS
// block in 16x16x16 tiles, accumulating 32-bit results into a temporary
// 16x16 scratch block which is then added into the result mapper.
template <typename Index, typename DataMapper, int mr, int nr,
          bool ConjugateLhs, bool ConjugateRhs>
EIGEN_DONT_INLINE void
gebp_kernel<QInt16, QInt16, Index, DataMapper, mr, nr, ConjugateLhs,
            ConjugateRhs>::operator()(const DataMapper& res,
                                      const QInt16* blockA,
                                      const QInt16* blockB, Index rows,
                                      Index depth, Index cols, QInt32 alpha,
                                      Index strideA, Index strideB,
                                      Index offsetA, Index offsetB) {
  // Conjugation is meaningless for integers and unsupported here.
  EIGEN_STATIC_ASSERT(!ConjugateLhs, YOU_MADE_A_PROGRAMMING_MISTAKE);
  EIGEN_STATIC_ASSERT(!ConjugateRhs, YOU_MADE_A_PROGRAMMING_MISTAKE);
  // Only alpha == 1 and default stride/offset values are supported.
  eigen_assert(alpha.value == 1);
  eigen_assert(strideA == -1);
  eigen_assert(strideB == -1);
  eigen_assert(offsetA == 0);
  eigen_assert(offsetB == 0);
  eigen_assert(rows > 0);
  eigen_assert(cols > 0);
  eigen_assert(depth > 0);
  eigen_assert(blockA);
  eigen_assert(blockB);

  // Use alternate function for weird sizes
  if (rows % 16 != 0 || cols % 16 != 0 || depth % 16 != 0) {
    eigen_assert(
        false &&
        "only depths, cols and rows that are a multiple of 16 are currently "
        "supported");
    // gebp_kernel_any<QInt16, QInt16, Index, DataMapper, mr, nr, ConjugateLhs,
    // ConjugateRhs> gebp;
    // return gebp(res, blockA, blockB, rows, depth, cols, alpha, strideA,
    // strideB, offsetA, offsetB);
  }

  // Create result block: a zeroed 16x16 QInt32 scratch accumulator, reused
  // for every output tile and freed at the end of the function.
  QInt32* blockO = aligned_new<QInt32>(16 * 16);
  memset(blockO, 0, 16 * 16 * sizeof(QInt32));

  // Get vectorized pointers
  __m256i* blockO_256 = reinterpret_cast<__m256i*>(blockO);
  const __m256i* blockA_256 = reinterpret_cast<const __m256i*>(blockA);
  const __m256i* blockB_256 = reinterpret_cast<const __m256i*>(blockB);

  // Loop over blocks of 16 columns
  for (Index n = 0; n < cols; n += 16) {
    // Reset index into blockA
    Index indexL = 0;
    // Loop over blocks of 16 rows
    for (Index m = 0; m < rows; m += 16) {
      // Reset index into blockB for the current 16-column panel.
      Index indexR = n / 16 * depth;
      // Loop over blocks of 4 on depth
      for (Index k = 0; k < depth; k += 4) {
        // Load inputs: 16 rows x 4 depth of packed LHS, and 16 cols x 4
        // depth of packed RHS.
        __m256i L_AD0 = blockA_256[indexL++];
        __m256i L_AD8 = blockA_256[indexL++];
        __m256i L_EH0 = blockA_256[indexL++];
        __m256i L_EH8 = blockA_256[indexL++];

        __m256i R_AH0 = blockB_256[indexR++];
        __m256i R_AH4 = blockB_256[indexR++];
        __m256i R_AH8 = blockB_256[indexR++];
        __m256i R_AH12 = blockB_256[indexR++];

        // Declare variables used in COMPUTE_STEP
        __m256i P_32_A, P_32_B, P_32;

// COMPUTE_STEP multiplies one broadcast RHS column (split across R_INPUT_A /
// R_INPUT_B) against the 16 LHS rows with madd (adjacent 16-bit pairs are
// multiplied and summed into 32-bit lanes) and accumulates into the
// scratch-block column at OFFSET.
#define COMPUTE_STEP(R_INPUT_A, R_INPUT_B, OFFSET)                         \
  P_32_A = _mm256_madd_epi16(R_INPUT_A, L_AD0);                            \
  P_32_B = _mm256_madd_epi16(R_INPUT_B, L_AD8);                            \
  P_32 = _mm256_add_epi32(P_32_A, P_32_B);                                 \
  _mm256_store_si256(                                                      \
      blockO_256 + 2 * OFFSET,                                             \
      _mm256_add_epi32(_mm256_load_si256(blockO_256 + 2 * OFFSET), P_32)); \
                                                                           \
  P_32_A = _mm256_madd_epi16(R_INPUT_A, L_EH0);                            \
  P_32_B = _mm256_madd_epi16(R_INPUT_B, L_EH8);                            \
  P_32 = _mm256_add_epi32(P_32_A, P_32_B);                                 \
  _mm256_store_si256(                                                      \
      blockO_256 + 2 * OFFSET + 1,                                         \
      _mm256_add_epi32(_mm256_load_si256(blockO_256 + 2 * OFFSET + 1), P_32));

        // Permute and shuffle to copy a single value across the entire vector
        // Then compute the multiplication
        // Replicate lower 128-bits of R_AH0 across both lanes
        __m256i R_AH0_ = _mm256_permute2x128_si256(R_AH0, R_AH0, 0x00);
        // Copy first two elements of R_AH0 across entire vector
        __m256i R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        // Copy second two elements of R_AH0 across entire vector
        __m256i R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55);

        COMPUTE_STEP(R_AD0, R_EH0, 0);
        __m256i R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        __m256i R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD1, R_EH1, 1);

        // Replicate upper 128-bits of R_AH0 across both lanes
        R_AH0_ = _mm256_permute2x128_si256(R_AH0, R_AH0, 0x11);
        __m256i R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        __m256i R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55);
        COMPUTE_STEP(R_AD2, R_EH2, 2);
        __m256i R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        __m256i R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD3, R_EH3, 3);

        // Same broadcast pattern for the remaining three RHS vectors,
        // covering output columns 4..15.
        R_AH0_ = _mm256_permute2x128_si256(R_AH4, R_AH4, 0x00);
        R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55);
        COMPUTE_STEP(R_AD0, R_EH0, 4);
        R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD1, R_EH1, 5);
        R_AH0_ = _mm256_permute2x128_si256(R_AH4, R_AH4, 0x11);
        R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55);
        COMPUTE_STEP(R_AD2, R_EH2, 6);
        R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD3, R_EH3, 7);

        R_AH0_ = _mm256_permute2x128_si256(R_AH8, R_AH8, 0x00);
        R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55);
        COMPUTE_STEP(R_AD0, R_EH0, 8);
        R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD1, R_EH1, 9);
        R_AH0_ = _mm256_permute2x128_si256(R_AH8, R_AH8, 0x11);
        R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55);
        COMPUTE_STEP(R_AD2, R_EH2, 10);
        R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD3, R_EH3, 11);

        R_AH0_ = _mm256_permute2x128_si256(R_AH12, R_AH12, 0x00);
        R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55);
        COMPUTE_STEP(R_AD0, R_EH0, 12);
        R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD1, R_EH1, 13);
        R_AH0_ = _mm256_permute2x128_si256(R_AH12, R_AH12, 0x11);
        R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55);
        COMPUTE_STEP(R_AD2, R_EH2, 14);
        R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD3, R_EH3, 15);

#undef COMPUTE_STEP
      }

      // Transfer the results to the result matrix (added to its existing
      // contents, 8 rows per store).
      Index i = 0;
      for (Index j = n; j < n + 16; j++) {
        LinearMapper r0 = res.getLinearMapper(m, j);
        LinearMapper r1 = res.getLinearMapper(m + 8, j);
        typedef typename packet_traits<QInt32>::type Packet;
        r0.template storePacket<Packet>(
            0, _mm256_add_epi32(blockO_256[i++],
                                r0.template loadPacket<Packet>(0)));
        r1.template storePacket<Packet>(
            0, _mm256_add_epi32(blockO_256[i++],
                                r1.template loadPacket<Packet>(0)));
      }

      // Zero the result block so it can be reused
      memset(blockO, 0, 16 * 16 * sizeof(QInt32));
    }
  }
  aligned_delete(blockO, 16 * 16);
}
512
+
513
+ #endif
514
+
515
+ // AVX2 optimized implementation of Mat-Mat product.
516
+ // LHS is encoded using signed 8-bit integers.
517
+ // RHS is encoded using unsigned 8-bit integers.
518
+ #ifdef EIGEN_USE_OPTIMIZED_INT8_UINT8_MAT_MAT_PRODUCT
519
+
520
// Define quantized traits for the QInt8 (LHS) x QUInt8 (RHS) product,
// accumulating into QInt32. The register blocking constants here must match
// the layouts produced by the corresponding pack/kernel specializations.
template <bool _ConjLhs, bool _ConjRhs>
class gebp_traits<QInt8, QUInt8, _ConjLhs, _ConjRhs> {
 public:
  typedef QInt8 LhsScalar;
  typedef QUInt8 RhsScalar;
  typedef QInt32 ResScalar;

  typedef typename packet_traits<LhsScalar>::type LhsPacket;
  typedef LhsPacket LhsPacket4Packing;

  enum {
    // Define register blocking scheme: 32x32 output tiles, 8-deep steps.
    nr = 32,
    mr = 32,
    kr = 8,
    // Ignore progress tracking per loop iteration.
    LhsProgress = -1,
    RhsProgress = -1
  };
};
541
+
542
+ // Specialized blocking for quantized implementations.
543
+ // Used by TensorContractionThreadPool, inputs must have dimensions that are
544
+ // multiples of 32.
545
+ template <typename ResScalar, typename Index, typename LeftTensor,
546
+ typename left_nocontract_t, typename left_contract_t,
547
+ bool left_inner_dim_contiguous, bool left_inner_dim_reordered,
548
+ int LeftAlignment, typename RightTensor, typename right_nocontract_t,
549
+ typename right_contract_t, bool right_inner_dim_contiguous,
550
+ bool right_inner_dim_reordered, int RightAlignment, int ShardingType>
551
+ class TensorContractionBlocking<
552
+ ResScalar,
553
+ TensorContractionInputMapper<
554
+ QInt8, Index, Lhs, LeftTensor, left_nocontract_t, left_contract_t, 32,
555
+ left_inner_dim_contiguous, left_inner_dim_reordered, LeftAlignment>,
556
+ TensorContractionInputMapper<QUInt8, Index, Rhs, RightTensor,
557
+ right_nocontract_t, right_contract_t, 32,
558
+ right_inner_dim_contiguous,
559
+ right_inner_dim_reordered, RightAlignment>,
560
+ Index, ShardingType> {
561
+ public:
562
+ typedef QInt8 LhsScalar;
563
+ typedef QUInt8 RhsScalar;
564
+
565
+ TensorContractionBlocking(Index k, Index m, Index n, Index num_threads = 1)
566
+ : kc_(k), mc_(m), nc_(n) {
567
+ eigen_assert(m % 32 == 0);
568
+ eigen_assert(k % 32 == 0);
569
+ if (!k || !m || !n) {
570
+ return;
571
+ }
572
+
573
+ if (ShardingType == ShardByCol) {
574
+ eigen_assert(n % 32 == 0);
575
+ nc_ = (((n / num_threads) + 31) / 32) * 32;
576
+ } else {
577
+ eigen_assert(n % 32 == 0 || n == 1);
578
+ // Special case to avoid breaking the unimplemented matrix-vector case
579
+ if (n == 1) {
580
+ nc_ = 32;
581
+ }
582
+ mc_ = (((m / num_threads) + 31) / 32) * 32;
583
+ }
584
+ }
585
+
586
+ EIGEN_ALWAYS_INLINE Index kc() const { return kc_; }
587
+ EIGEN_ALWAYS_INLINE Index mc() const { return mc_; }
588
+ EIGEN_ALWAYS_INLINE Index nc() const { return nc_; }
589
+
590
+ private:
591
+ Index kc_;
592
+ Index mc_;
593
+ Index nc_;
594
+ };
595
+
596
+ // Specialized blocking for quantized implementations.
597
+ // Used by TensorContraction and GeneralMatrixMatrix, inputs are padded to
598
+ // multiples of 32.
599
+ template <int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
600
+ class gemm_blocking_space<ColMajor, QInt8, QInt8, MaxRows, MaxCols, MaxDepth,
601
+ KcFactor, false>
602
+ : public level3_blocking<QInt8, QInt8> {
603
+ DenseIndex m_sizeA;
604
+ DenseIndex m_sizeB;
605
+
606
+ public:
607
+ gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth,
608
+ DenseIndex /*num_threads*/, bool /*l3_blocking*/) {
609
+ this->m_mc = ((rows + 31) / 32) * 32;
610
+ this->m_nc = ((cols + 31) / 32) * 32;
611
+ this->m_kc = ((depth + 31) / 32) * 32;
612
+ m_sizeA = this->m_mc * this->m_kc;
613
+ m_sizeB = this->m_kc * this->m_nc;
614
+ }
615
+ void allocateA() {
616
+ if (this->m_blockA == 0) this->m_blockA = aligned_new<QInt8>(m_sizeA);
617
+ }
618
+ void allocateB() {
619
+ if (this->m_blockB == 0) this->m_blockB = aligned_new<QInt8>(m_sizeB);
620
+ }
621
+ void allocateAll() {
622
+ allocateA();
623
+ allocateB();
624
+ }
625
+ ~gemm_blocking_space() {
626
+ aligned_delete(this->m_blockA, m_sizeA);
627
+ aligned_delete(this->m_blockB, m_sizeB);
628
+ }
629
+ };
630
+
631
+ template <int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
632
+ class gemm_blocking_space<ColMajor, QInt8, QUInt8, MaxRows, MaxCols, MaxDepth,
633
+ KcFactor, false>
634
+ : public level3_blocking<QInt8, QUInt8> {
635
+ DenseIndex m_sizeA;
636
+ DenseIndex m_sizeB;
637
+
638
+ public:
639
+ gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth,
640
+ DenseIndex /*num_threads*/, bool /*l3_blocking*/) {
641
+ this->m_mc = ((rows + 31) / 32) * 32;
642
+ this->m_nc = ((cols + 31) / 32) * 32;
643
+ this->m_kc = ((depth + 31) / 32) * 32;
644
+ m_sizeA = this->m_mc * this->m_kc;
645
+ m_sizeB = this->m_kc * this->m_nc;
646
+ }
647
+ void allocateA() {
648
+ if (this->m_blockA == 0) this->m_blockA = aligned_new<QInt8>(m_sizeA);
649
+ }
650
+ void allocateB() {
651
+ if (this->m_blockB == 0) this->m_blockB = aligned_new<QUInt8>(m_sizeB);
652
+ }
653
+ void allocateAll() {
654
+ allocateA();
655
+ allocateB();
656
+ }
657
+ ~gemm_blocking_space() {
658
+ aligned_delete(this->m_blockA, m_sizeA);
659
+ aligned_delete(this->m_blockB, m_sizeB);
660
+ }
661
+ };
662
+
663
// Alternate templates for any input sizes.
//
// gemm_pack_lhs_any is the fallback LHS packer used when the matrix
// dimensions are not multiples of 32; it pads the packed block with zeros
// instead of requiring aligned sizes.
template <typename Scalar, typename Index, typename DataMapper, int Pack1,
          int Pack2, int StorageOrder, bool Conjugate = false,
          bool PanelMode = false>
struct gemm_pack_lhs_any;
// QInt8 column-major specialization (definition appears later in this file).
template <typename Index, typename DataMapper, int Pack1, int Pack2,
          bool Conjugate, bool PanelMode>
struct gemm_pack_lhs_any<QInt8, Index, DataMapper, Pack1, Pack2, ColMajor,
                         Conjugate, PanelMode> {
  // Packs a depth x rows panel of `lhs` into `blockA`, zero-padding the
  // trailing partial 32x32 blocks. stride/offset must be 0.
  EIGEN_DONT_INLINE void operator()(QInt8* blockA, const DataMapper& lhs,
                                    Index depth, Index rows, Index stride = 0,
                                    Index offset = 0);
};
676
+
677
// Fallback RHS packer for arbitrary (non-multiple-of-32) sizes; pads the
// packed block with zeros.
template <typename Scalar, typename Index, typename DataMapper, int nr,
          int StorageOrder, bool Conjugate = false, bool PanelMode = false>
struct gemm_pack_rhs_any;
// QUInt8 column-major specialization (definition appears later in this file).
template <typename Index, typename DataMapper, int nr, bool Conjugate,
          bool PanelMode>
struct gemm_pack_rhs_any<QUInt8, Index, DataMapper, nr, ColMajor, Conjugate,
                         PanelMode> {
  // Packs a depth x cols panel of `rhs` into `blockB`. stride/offset must
  // be 0.
  EIGEN_DONT_INLINE void operator()(QUInt8* blockB, const DataMapper& rhs,
                                    Index depth, Index cols, Index stride = 0,
                                    Index offset = 0);
};
688
+
689
// Fallback GEBP kernel for arbitrary (non-multiple-of-32) sizes, paired with
// the *_any packers above.
template <typename LhsScalar, typename RhsScalar, typename Index,
          typename DataMapper, int mr, int nr, bool ConjugateLhs = false,
          bool ConjugateRhs = false>
struct gebp_kernel_any;
// QInt8 x QUInt8 specialization (definition appears later in this file).
template <typename Index, typename DataMapper, int mr, int nr,
          bool ConjugateLhs, bool ConjugateRhs>
struct gebp_kernel_any<QInt8, QUInt8, Index, DataMapper, mr, nr, ConjugateLhs,
                       ConjugateRhs> {
  typedef typename DataMapper::LinearMapper LinearMapper;

  // Accumulates blockA * blockB into `res`; alpha must be 1 and
  // stride/offset must keep their defaults (checked in the definition).
  EIGEN_DONT_INLINE
  void operator()(const DataMapper& res, const QInt8* blockA,
                  const QUInt8* blockB, Index rows, Index depth, Index cols,
                  QInt32 alpha, Index strideA = -1, Index strideB = -1,
                  Index offsetA = 0, Index offsetB = 0);
};
705
+
706
+ // Alternate implementations for any input sizes
707
+ template <typename Index, typename DataMapper, int Pack1, int Pack2,
708
+ bool Conjugate, bool PanelMode>
709
+ EIGEN_DONT_INLINE void
710
+ gemm_pack_lhs_any<QInt8, Index, DataMapper, Pack1, Pack2, ColMajor, Conjugate,
711
+ PanelMode>::operator()(QInt8* blockA, const DataMapper& lhs,
712
+ Index depth, Index rows, Index stride,
713
+ Index offset) {
714
+ eigen_assert(stride == 0);
715
+ eigen_assert(offset == 0);
716
+
717
+ typedef typename packet_traits<QInt8>::type Packet;
718
+
719
+ // Get vector pointer
720
+ __m256i* blockA_256 = reinterpret_cast<__m256i*>(blockA);
721
+
722
+ // Get even multiples of the dimensions
723
+ Index rows_32 = (rows / 32) * 32;
724
+ Index depth_8 = (depth / 8) * 8;
725
+
726
+ // Get padding for when depth is not a multiple of 32
727
+ int padding = 0;
728
+ if (depth % 32 != 0) {
729
+ int depth_32 = (depth / 32) * 32;
730
+ int extra_depth = depth - depth_32;
731
+ int extra_depth_8 = ((extra_depth + 7) / 8) * 8;
732
+ padding = 32 - extra_depth_8;
733
+ }
734
+
735
+ // Pack rows in sets of 32
736
+ for (Index m = 0; m < rows_32; m += 32) {
737
+ // Pack depth in sets of 8
738
+ for (Index k = 0; k < depth_8; k += 8) {
739
+ // Load vectors
740
+ __m256i L_A = lhs.template loadPacket<Packet>(m, k);
741
+ __m256i L_B = lhs.template loadPacket<Packet>(m, k + 1);
742
+
743
+ // Interleave 8-bit elements
744
+ __m256i L_AB0_AB16 = _mm256_unpacklo_epi8(L_A, L_B);
745
+ __m256i L_AB8_AB24 = _mm256_unpackhi_epi8(L_A, L_B);
746
+
747
+ __m256i L_C = lhs.template loadPacket<Packet>(m, k + 2);
748
+ __m256i L_D = lhs.template loadPacket<Packet>(m, k + 3);
749
+ __m256i L_CD0_CD16 = _mm256_unpacklo_epi8(L_C, L_D);
750
+ __m256i L_CD8_CD24 = _mm256_unpackhi_epi8(L_C, L_D);
751
+
752
+ // Interleave 16-bit elements
753
+ __m256i L_AD0_AD16 = _mm256_unpacklo_epi16(L_AB0_AB16, L_CD0_CD16);
754
+ __m256i L_AD4_AD20 = _mm256_unpackhi_epi16(L_AB0_AB16, L_CD0_CD16);
755
+
756
+ // Use permute before we store to cross 128-bit lanes
757
+ __m256i L_AD0 = _mm256_permute2x128_si256(L_AD0_AD16, L_AD4_AD20, 0x20);
758
+ _mm256_store_si256(blockA_256++, L_AD0);
759
+
760
+ // Complete packing for 32 x 8 block
761
+ __m256i L_AD16 = _mm256_permute2x128_si256(L_AD0_AD16, L_AD4_AD20, 0x31);
762
+ __m256i L_AD8_AD24 = _mm256_unpacklo_epi16(L_AB8_AB24, L_CD8_CD24);
763
+ __m256i L_AD12_AD28 = _mm256_unpackhi_epi16(L_AB8_AB24, L_CD8_CD24);
764
+ __m256i L_AD8 = _mm256_permute2x128_si256(L_AD8_AD24, L_AD12_AD28, 0x20);
765
+ _mm256_store_si256(blockA_256++, L_AD8);
766
+ _mm256_store_si256(blockA_256++, L_AD16);
767
+ __m256i L_AD24 = _mm256_permute2x128_si256(L_AD8_AD24, L_AD12_AD28, 0x31);
768
+ _mm256_store_si256(blockA_256++, L_AD24);
769
+ __m256i L_E = lhs.template loadPacket<Packet>(m, k + 4);
770
+ __m256i L_F = lhs.template loadPacket<Packet>(m, k + 5);
771
+ __m256i L_EF0_EF16 = _mm256_unpacklo_epi8(L_E, L_F);
772
+ __m256i L_EF8_EF24 = _mm256_unpackhi_epi8(L_E, L_F);
773
+ __m256i L_G = lhs.template loadPacket<Packet>(m, k + 6);
774
+ __m256i L_H = lhs.template loadPacket<Packet>(m, k + 7);
775
+ __m256i L_GH0_GH16 = _mm256_unpacklo_epi8(L_G, L_H);
776
+ __m256i L_GH8_GH24 = _mm256_unpackhi_epi8(L_G, L_H);
777
+ __m256i L_EH0_EH16 = _mm256_unpacklo_epi16(L_EF0_EF16, L_GH0_GH16);
778
+ __m256i L_EH4_EH20 = _mm256_unpackhi_epi16(L_EF0_EF16, L_GH0_GH16);
779
+ __m256i L_EH0 = _mm256_permute2x128_si256(L_EH0_EH16, L_EH4_EH20, 0x20);
780
+ _mm256_store_si256(blockA_256++, L_EH0);
781
+ __m256i L_EH16 = _mm256_permute2x128_si256(L_EH0_EH16, L_EH4_EH20, 0x31);
782
+ __m256i L_EH8_EH24 = _mm256_unpacklo_epi16(L_EF8_EF24, L_GH8_GH24);
783
+ __m256i L_EH12_EH28 = _mm256_unpackhi_epi16(L_EF8_EF24, L_GH8_GH24);
784
+ __m256i L_EH8 = _mm256_permute2x128_si256(L_EH8_EH24, L_EH12_EH28, 0x20);
785
+ _mm256_store_si256(blockA_256++, L_EH8);
786
+ _mm256_store_si256(blockA_256++, L_EH16);
787
+ __m256i L_EH24 = _mm256_permute2x128_si256(L_EH8_EH24, L_EH12_EH28, 0x31);
788
+ _mm256_store_si256(blockA_256++, L_EH24);
789
+ }
790
+
791
+ // Finish the k dimension, padding with zeros
792
+ if (depth_8 < depth) {
793
+ __m256i L_A, L_B, L_C, L_D, L_E, L_F, L_G, L_H;
794
+ switch (depth - depth_8) {
795
+ case 1:
796
+ L_A = lhs.template loadPacket<Packet>(m, depth_8);
797
+ L_B = _mm256_setzero_si256();
798
+ L_C = _mm256_setzero_si256();
799
+ L_D = _mm256_setzero_si256();
800
+ L_E = _mm256_setzero_si256();
801
+ L_F = _mm256_setzero_si256();
802
+ L_G = _mm256_setzero_si256();
803
+ L_H = _mm256_setzero_si256();
804
+ break;
805
+ case 2:
806
+ L_A = lhs.template loadPacket<Packet>(m, depth_8);
807
+ L_B = lhs.template loadPacket<Packet>(m, depth_8 + 1);
808
+ L_C = _mm256_setzero_si256();
809
+ L_D = _mm256_setzero_si256();
810
+ L_E = _mm256_setzero_si256();
811
+ L_F = _mm256_setzero_si256();
812
+ L_G = _mm256_setzero_si256();
813
+ L_H = _mm256_setzero_si256();
814
+ break;
815
+ case 3:
816
+ L_A = lhs.template loadPacket<Packet>(m, depth_8);
817
+ L_B = lhs.template loadPacket<Packet>(m, depth_8 + 1);
818
+ L_C = lhs.template loadPacket<Packet>(m, depth_8 + 2);
819
+ L_D = _mm256_setzero_si256();
820
+ L_E = _mm256_setzero_si256();
821
+ L_F = _mm256_setzero_si256();
822
+ L_G = _mm256_setzero_si256();
823
+ L_H = _mm256_setzero_si256();
824
+ break;
825
+ case 4:
826
+ L_A = lhs.template loadPacket<Packet>(m, depth_8);
827
+ L_B = lhs.template loadPacket<Packet>(m, depth_8 + 1);
828
+ L_C = lhs.template loadPacket<Packet>(m, depth_8 + 2);
829
+ L_D = lhs.template loadPacket<Packet>(m, depth_8 + 3);
830
+ L_E = _mm256_setzero_si256();
831
+ L_F = _mm256_setzero_si256();
832
+ L_G = _mm256_setzero_si256();
833
+ L_H = _mm256_setzero_si256();
834
+ break;
835
+ case 5:
836
+ L_A = lhs.template loadPacket<Packet>(m, depth_8);
837
+ L_B = lhs.template loadPacket<Packet>(m, depth_8 + 1);
838
+ L_C = lhs.template loadPacket<Packet>(m, depth_8 + 2);
839
+ L_D = lhs.template loadPacket<Packet>(m, depth_8 + 3);
840
+ L_E = lhs.template loadPacket<Packet>(m, depth_8 + 4);
841
+ L_F = _mm256_setzero_si256();
842
+ L_G = _mm256_setzero_si256();
843
+ L_H = _mm256_setzero_si256();
844
+ break;
845
+ case 6:
846
+ L_A = lhs.template loadPacket<Packet>(m, depth_8);
847
+ L_B = lhs.template loadPacket<Packet>(m, depth_8 + 1);
848
+ L_C = lhs.template loadPacket<Packet>(m, depth_8 + 2);
849
+ L_D = lhs.template loadPacket<Packet>(m, depth_8 + 3);
850
+ L_E = lhs.template loadPacket<Packet>(m, depth_8 + 4);
851
+ L_F = lhs.template loadPacket<Packet>(m, depth_8 + 5);
852
+ L_G = _mm256_setzero_si256();
853
+ L_H = _mm256_setzero_si256();
854
+ break;
855
+ case 7:
856
+ L_A = lhs.template loadPacket<Packet>(m, depth_8);
857
+ L_B = lhs.template loadPacket<Packet>(m, depth_8 + 1);
858
+ L_C = lhs.template loadPacket<Packet>(m, depth_8 + 2);
859
+ L_D = lhs.template loadPacket<Packet>(m, depth_8 + 3);
860
+ L_E = lhs.template loadPacket<Packet>(m, depth_8 + 4);
861
+ L_F = lhs.template loadPacket<Packet>(m, depth_8 + 5);
862
+ L_G = lhs.template loadPacket<Packet>(m, depth_8 + 6);
863
+ L_H = _mm256_setzero_si256();
864
+ break;
865
+ }
866
+
867
+ // Interleave 8-bit elements
868
+ __m256i L_AB0_AB16 = _mm256_unpacklo_epi8(L_A, L_B);
869
+ __m256i L_AB8_AB24 = _mm256_unpackhi_epi8(L_A, L_B);
870
+
871
+ __m256i L_CD0_CD16 = _mm256_unpacklo_epi8(L_C, L_D);
872
+ __m256i L_CD8_CD24 = _mm256_unpackhi_epi8(L_C, L_D);
873
+
874
+ // Interleave 16-bit elements
875
+ __m256i L_AD0_AD16 = _mm256_unpacklo_epi16(L_AB0_AB16, L_CD0_CD16);
876
+ __m256i L_AD4_AD20 = _mm256_unpackhi_epi16(L_AB0_AB16, L_CD0_CD16);
877
+
878
+ // Use permute before we store to cross 128-bit lanes
879
+ __m256i L_AD0 = _mm256_permute2x128_si256(L_AD0_AD16, L_AD4_AD20, 0x20);
880
+ _mm256_store_si256(blockA_256++, L_AD0);
881
+
882
+ // Complete packing
883
+ __m256i L_AD16 = _mm256_permute2x128_si256(L_AD0_AD16, L_AD4_AD20, 0x31);
884
+ __m256i L_AD8_AD24 = _mm256_unpacklo_epi16(L_AB8_AB24, L_CD8_CD24);
885
+ __m256i L_AD12_AD28 = _mm256_unpackhi_epi16(L_AB8_AB24, L_CD8_CD24);
886
+ __m256i L_AD8 = _mm256_permute2x128_si256(L_AD8_AD24, L_AD12_AD28, 0x20);
887
+ _mm256_store_si256(blockA_256++, L_AD8);
888
+ _mm256_store_si256(blockA_256++, L_AD16);
889
+ __m256i L_AD24 = _mm256_permute2x128_si256(L_AD8_AD24, L_AD12_AD28, 0x31);
890
+ _mm256_store_si256(blockA_256++, L_AD24);
891
+ __m256i L_EF0_EF16 = _mm256_unpacklo_epi8(L_E, L_F);
892
+ __m256i L_EF8_EF24 = _mm256_unpackhi_epi8(L_E, L_F);
893
+ __m256i L_GH0_GH16 = _mm256_unpacklo_epi8(L_G, L_H);
894
+ __m256i L_GH8_GH24 = _mm256_unpackhi_epi8(L_G, L_H);
895
+ __m256i L_EH0_EH16 = _mm256_unpacklo_epi16(L_EF0_EF16, L_GH0_GH16);
896
+ __m256i L_EH4_EH20 = _mm256_unpackhi_epi16(L_EF0_EF16, L_GH0_GH16);
897
+ __m256i L_EH0 = _mm256_permute2x128_si256(L_EH0_EH16, L_EH4_EH20, 0x20);
898
+ _mm256_store_si256(blockA_256++, L_EH0);
899
+ __m256i L_EH16 = _mm256_permute2x128_si256(L_EH0_EH16, L_EH4_EH20, 0x31);
900
+ __m256i L_EH8_EH24 = _mm256_unpacklo_epi16(L_EF8_EF24, L_GH8_GH24);
901
+ __m256i L_EH12_EH28 = _mm256_unpackhi_epi16(L_EF8_EF24, L_GH8_GH24);
902
+ __m256i L_EH8 = _mm256_permute2x128_si256(L_EH8_EH24, L_EH12_EH28, 0x20);
903
+ _mm256_store_si256(blockA_256++, L_EH8);
904
+ _mm256_store_si256(blockA_256++, L_EH16);
905
+ __m256i L_EH24 = _mm256_permute2x128_si256(L_EH8_EH24, L_EH12_EH28, 0x31);
906
+ _mm256_store_si256(blockA_256++, L_EH24);
907
+ }
908
+ blockA_256 += padding;
909
+ }
910
+
911
+ // Finish the m dimension, padding with zeros
912
+ if (rows_32 < rows) {
913
+ // Pack depth in sets of 8
914
+ for (Index k = 0; k < depth_8; k += 8) {
915
+ // Load vectors
916
+ __m256i L_A = _mm256_setzero_si256();
917
+ __m256i L_B = _mm256_setzero_si256();
918
+ __m256i L_C = _mm256_setzero_si256();
919
+ __m256i L_D = _mm256_setzero_si256();
920
+ __m256i L_E = _mm256_setzero_si256();
921
+ __m256i L_F = _mm256_setzero_si256();
922
+ __m256i L_G = _mm256_setzero_si256();
923
+ __m256i L_H = _mm256_setzero_si256();
924
+ for (Index m = 0; m < rows - rows_32; m++) {
925
+ QInt8* ptr = (QInt8*)&L_A;
926
+ ptr[m] = lhs(rows_32 + m, k);
927
+ ptr = (QInt8*)&L_B;
928
+ ptr[m] = lhs(rows_32 + m, k + 1);
929
+ ptr = (QInt8*)&L_C;
930
+ ptr[m] = lhs(rows_32 + m, k + 2);
931
+ ptr = (QInt8*)&L_D;
932
+ ptr[m] = lhs(rows_32 + m, k + 3);
933
+ ptr = (QInt8*)&L_E;
934
+ ptr[m] = lhs(rows_32 + m, k + 4);
935
+ ptr = (QInt8*)&L_F;
936
+ ptr[m] = lhs(rows_32 + m, k + 5);
937
+ ptr = (QInt8*)&L_G;
938
+ ptr[m] = lhs(rows_32 + m, k + 6);
939
+ ptr = (QInt8*)&L_H;
940
+ ptr[m] = lhs(rows_32 + m, k + 7);
941
+ }
942
+
943
+ // Interleave 8-bit elements
944
+ __m256i L_AB0_AB16 = _mm256_unpacklo_epi8(L_A, L_B);
945
+ __m256i L_AB8_AB24 = _mm256_unpackhi_epi8(L_A, L_B);
946
+ __m256i L_CD0_CD16 = _mm256_unpacklo_epi8(L_C, L_D);
947
+ __m256i L_CD8_CD24 = _mm256_unpackhi_epi8(L_C, L_D);
948
+
949
+ // Interleave 16-bit elements
950
+ __m256i L_AD0_AD16 = _mm256_unpacklo_epi16(L_AB0_AB16, L_CD0_CD16);
951
+ __m256i L_AD4_AD20 = _mm256_unpackhi_epi16(L_AB0_AB16, L_CD0_CD16);
952
+
953
+ // Use permute before we store to cross 128-bit lanes
954
+ __m256i L_AD0 = _mm256_permute2x128_si256(L_AD0_AD16, L_AD4_AD20, 0x20);
955
+ _mm256_store_si256(blockA_256++, L_AD0);
956
+
957
+ // Complete packing for 32 x 8 block
958
+ __m256i L_AD16 = _mm256_permute2x128_si256(L_AD0_AD16, L_AD4_AD20, 0x31);
959
+ __m256i L_AD8_AD24 = _mm256_unpacklo_epi16(L_AB8_AB24, L_CD8_CD24);
960
+ __m256i L_AD12_AD28 = _mm256_unpackhi_epi16(L_AB8_AB24, L_CD8_CD24);
961
+ __m256i L_AD8 = _mm256_permute2x128_si256(L_AD8_AD24, L_AD12_AD28, 0x20);
962
+ _mm256_store_si256(blockA_256++, L_AD8);
963
+ _mm256_store_si256(blockA_256++, L_AD16);
964
+ __m256i L_AD24 = _mm256_permute2x128_si256(L_AD8_AD24, L_AD12_AD28, 0x31);
965
+ _mm256_store_si256(blockA_256++, L_AD24);
966
+ __m256i L_EF0_EF16 = _mm256_unpacklo_epi8(L_E, L_F);
967
+ __m256i L_EF8_EF24 = _mm256_unpackhi_epi8(L_E, L_F);
968
+ __m256i L_GH0_GH16 = _mm256_unpacklo_epi8(L_G, L_H);
969
+ __m256i L_GH8_GH24 = _mm256_unpackhi_epi8(L_G, L_H);
970
+ __m256i L_EH0_EH16 = _mm256_unpacklo_epi16(L_EF0_EF16, L_GH0_GH16);
971
+ __m256i L_EH4_EH20 = _mm256_unpackhi_epi16(L_EF0_EF16, L_GH0_GH16);
972
+ __m256i L_EH0 = _mm256_permute2x128_si256(L_EH0_EH16, L_EH4_EH20, 0x20);
973
+ _mm256_store_si256(blockA_256++, L_EH0);
974
+ __m256i L_EH16 = _mm256_permute2x128_si256(L_EH0_EH16, L_EH4_EH20, 0x31);
975
+ __m256i L_EH8_EH24 = _mm256_unpacklo_epi16(L_EF8_EF24, L_GH8_GH24);
976
+ __m256i L_EH12_EH28 = _mm256_unpackhi_epi16(L_EF8_EF24, L_GH8_GH24);
977
+ __m256i L_EH8 = _mm256_permute2x128_si256(L_EH8_EH24, L_EH12_EH28, 0x20);
978
+ _mm256_store_si256(blockA_256++, L_EH8);
979
+ _mm256_store_si256(blockA_256++, L_EH16);
980
+ __m256i L_EH24 = _mm256_permute2x128_si256(L_EH8_EH24, L_EH12_EH28, 0x31);
981
+ _mm256_store_si256(blockA_256++, L_EH24);
982
+ }
983
+
984
+ // Finish the k dimension, padding with zeros
985
+ if (depth_8 < depth) {
986
+ __m256i L_A, L_B, L_C, L_D, L_E, L_F, L_G, L_H;
987
+ QInt8* ptr;
988
+ switch (depth - depth_8) {
989
+ case 1:
990
+ L_A = _mm256_setzero_si256();
991
+ L_B = _mm256_setzero_si256();
992
+ L_C = _mm256_setzero_si256();
993
+ L_D = _mm256_setzero_si256();
994
+ L_E = _mm256_setzero_si256();
995
+ L_F = _mm256_setzero_si256();
996
+ L_G = _mm256_setzero_si256();
997
+ L_H = _mm256_setzero_si256();
998
+ for (Index m = 0; m < rows - rows_32; m++) {
999
+ QInt8* ptr = (QInt8*)&L_A;
1000
+ ptr[m] = lhs(rows_32 + m, depth_8);
1001
+ }
1002
+ break;
1003
+ case 2:
1004
+ L_A = _mm256_setzero_si256();
1005
+ L_B = _mm256_setzero_si256();
1006
+ L_C = _mm256_setzero_si256();
1007
+ L_D = _mm256_setzero_si256();
1008
+ L_E = _mm256_setzero_si256();
1009
+ L_F = _mm256_setzero_si256();
1010
+ L_G = _mm256_setzero_si256();
1011
+ L_H = _mm256_setzero_si256();
1012
+ for (Index m = 0; m < rows - rows_32; m++) {
1013
+ ptr = (QInt8*)&L_A;
1014
+ ptr[m] = lhs(rows_32 + m, depth_8);
1015
+ ptr = (QInt8*)&L_B;
1016
+ ptr[m] = lhs(rows_32 + m, depth_8 + 1);
1017
+ }
1018
+ break;
1019
+ case 3:
1020
+ L_A = _mm256_setzero_si256();
1021
+ L_B = _mm256_setzero_si256();
1022
+ L_C = _mm256_setzero_si256();
1023
+ L_D = _mm256_setzero_si256();
1024
+ L_E = _mm256_setzero_si256();
1025
+ L_F = _mm256_setzero_si256();
1026
+ L_G = _mm256_setzero_si256();
1027
+ L_H = _mm256_setzero_si256();
1028
+ for (Index m = 0; m < rows - rows_32; m++) {
1029
+ ptr = (QInt8*)&L_A;
1030
+ ptr[m] = lhs(rows_32 + m, depth_8);
1031
+ ptr = (QInt8*)&L_B;
1032
+ ptr[m] = lhs(rows_32 + m, depth_8 + 1);
1033
+ ptr = (QInt8*)&L_C;
1034
+ ptr[m] = lhs(rows_32 + m, depth_8 + 2);
1035
+ }
1036
+ break;
1037
+ case 4:
1038
+ L_A = _mm256_setzero_si256();
1039
+ L_B = _mm256_setzero_si256();
1040
+ L_C = _mm256_setzero_si256();
1041
+ L_D = _mm256_setzero_si256();
1042
+ L_E = _mm256_setzero_si256();
1043
+ L_F = _mm256_setzero_si256();
1044
+ L_G = _mm256_setzero_si256();
1045
+ L_H = _mm256_setzero_si256();
1046
+ for (Index m = 0; m < rows - rows_32; m++) {
1047
+ ptr = (QInt8*)&L_A;
1048
+ ptr[m] = lhs(rows_32 + m, depth_8);
1049
+ ptr = (QInt8*)&L_B;
1050
+ ptr[m] = lhs(rows_32 + m, depth_8 + 1);
1051
+ ptr = (QInt8*)&L_C;
1052
+ ptr[m] = lhs(rows_32 + m, depth_8 + 2);
1053
+ ptr = (QInt8*)&L_D;
1054
+ ptr[m] = lhs(rows_32 + m, depth_8 + 3);
1055
+ }
1056
+ break;
1057
+ case 5:
1058
+ L_A = _mm256_setzero_si256();
1059
+ L_B = _mm256_setzero_si256();
1060
+ L_C = _mm256_setzero_si256();
1061
+ L_D = _mm256_setzero_si256();
1062
+ L_E = _mm256_setzero_si256();
1063
+ L_F = _mm256_setzero_si256();
1064
+ L_G = _mm256_setzero_si256();
1065
+ L_H = _mm256_setzero_si256();
1066
+ for (Index m = 0; m < rows - rows_32; m++) {
1067
+ ptr = (QInt8*)&L_A;
1068
+ ptr[m] = lhs(rows_32 + m, depth_8);
1069
+ ptr = (QInt8*)&L_B;
1070
+ ptr[m] = lhs(rows_32 + m, depth_8 + 1);
1071
+ ptr = (QInt8*)&L_C;
1072
+ ptr[m] = lhs(rows_32 + m, depth_8 + 2);
1073
+ ptr = (QInt8*)&L_D;
1074
+ ptr[m] = lhs(rows_32 + m, depth_8 + 3);
1075
+ ptr = (QInt8*)&L_E;
1076
+ ptr[m] = lhs(rows_32 + m, depth_8 + 4);
1077
+ }
1078
+ break;
1079
+ case 6:
1080
+ L_A = _mm256_setzero_si256();
1081
+ L_B = _mm256_setzero_si256();
1082
+ L_C = _mm256_setzero_si256();
1083
+ L_D = _mm256_setzero_si256();
1084
+ L_E = _mm256_setzero_si256();
1085
+ L_F = _mm256_setzero_si256();
1086
+ L_G = _mm256_setzero_si256();
1087
+ L_H = _mm256_setzero_si256();
1088
+ for (Index m = 0; m < rows - rows_32; m++) {
1089
+ ptr = (QInt8*)&L_A;
1090
+ ptr[m] = lhs(rows_32 + m, depth_8);
1091
+ ptr = (QInt8*)&L_B;
1092
+ ptr[m] = lhs(rows_32 + m, depth_8 + 1);
1093
+ ptr = (QInt8*)&L_C;
1094
+ ptr[m] = lhs(rows_32 + m, depth_8 + 2);
1095
+ ptr = (QInt8*)&L_D;
1096
+ ptr[m] = lhs(rows_32 + m, depth_8 + 3);
1097
+ ptr = (QInt8*)&L_E;
1098
+ ptr[m] = lhs(rows_32 + m, depth_8 + 4);
1099
+ ptr = (QInt8*)&L_F;
1100
+ ptr[m] = lhs(rows_32 + m, depth_8 + 5);
1101
+ }
1102
+ break;
1103
+ case 7:
1104
+ L_A = _mm256_setzero_si256();
1105
+ L_B = _mm256_setzero_si256();
1106
+ L_C = _mm256_setzero_si256();
1107
+ L_D = _mm256_setzero_si256();
1108
+ L_E = _mm256_setzero_si256();
1109
+ L_F = _mm256_setzero_si256();
1110
+ L_G = _mm256_setzero_si256();
1111
+ L_H = _mm256_setzero_si256();
1112
+ for (Index m = 0; m < rows - rows_32; m++) {
1113
+ ptr = (QInt8*)&L_A;
1114
+ ptr[m] = lhs(rows_32 + m, depth_8);
1115
+ ptr = (QInt8*)&L_B;
1116
+ ptr[m] = lhs(rows_32 + m, depth_8 + 1);
1117
+ ptr = (QInt8*)&L_C;
1118
+ ptr[m] = lhs(rows_32 + m, depth_8 + 2);
1119
+ ptr = (QInt8*)&L_D;
1120
+ ptr[m] = lhs(rows_32 + m, depth_8 + 3);
1121
+ ptr = (QInt8*)&L_E;
1122
+ ptr[m] = lhs(rows_32 + m, depth_8 + 4);
1123
+ ptr = (QInt8*)&L_F;
1124
+ ptr[m] = lhs(rows_32 + m, depth_8 + 5);
1125
+ ptr = (QInt8*)&L_G;
1126
+ ptr[m] = lhs(rows_32 + m, depth_8 + 6);
1127
+ }
1128
+ break;
1129
+ }
1130
+
1131
+ // Interleave 8-bit elements
1132
+ __m256i L_AB0_AB16 = _mm256_unpacklo_epi8(L_A, L_B);
1133
+ __m256i L_AB8_AB24 = _mm256_unpackhi_epi8(L_A, L_B);
1134
+ __m256i L_CD0_CD16 = _mm256_unpacklo_epi8(L_C, L_D);
1135
+ __m256i L_CD8_CD24 = _mm256_unpackhi_epi8(L_C, L_D);
1136
+
1137
+ // Interleave 16-bit elements
1138
+ __m256i L_AD0_AD16 = _mm256_unpacklo_epi16(L_AB0_AB16, L_CD0_CD16);
1139
+ __m256i L_AD4_AD20 = _mm256_unpackhi_epi16(L_AB0_AB16, L_CD0_CD16);
1140
+
1141
+ // Use permute before we store to cross 128-bit lanes
1142
+ __m256i L_AD0 = _mm256_permute2x128_si256(L_AD0_AD16, L_AD4_AD20, 0x20);
1143
+ _mm256_store_si256(blockA_256++, L_AD0);
1144
+
1145
+ // Complete packing
1146
+ __m256i L_AD16 = _mm256_permute2x128_si256(L_AD0_AD16, L_AD4_AD20, 0x31);
1147
+ __m256i L_AD8_AD24 = _mm256_unpacklo_epi16(L_AB8_AB24, L_CD8_CD24);
1148
+ __m256i L_AD12_AD28 = _mm256_unpackhi_epi16(L_AB8_AB24, L_CD8_CD24);
1149
+ __m256i L_AD8 = _mm256_permute2x128_si256(L_AD8_AD24, L_AD12_AD28, 0x20);
1150
+ _mm256_store_si256(blockA_256++, L_AD8);
1151
+ _mm256_store_si256(blockA_256++, L_AD16);
1152
+ __m256i L_AD24 = _mm256_permute2x128_si256(L_AD8_AD24, L_AD12_AD28, 0x31);
1153
+ _mm256_store_si256(blockA_256++, L_AD24);
1154
+ __m256i L_EF0_EF16 = _mm256_unpacklo_epi8(L_E, L_F);
1155
+ __m256i L_EF8_EF24 = _mm256_unpackhi_epi8(L_E, L_F);
1156
+ __m256i L_GH0_GH16 = _mm256_unpacklo_epi8(L_G, L_H);
1157
+ __m256i L_GH8_GH24 = _mm256_unpackhi_epi8(L_G, L_H);
1158
+ __m256i L_EH0_EH16 = _mm256_unpacklo_epi16(L_EF0_EF16, L_GH0_GH16);
1159
+ __m256i L_EH4_EH20 = _mm256_unpackhi_epi16(L_EF0_EF16, L_GH0_GH16);
1160
+ __m256i L_EH0 = _mm256_permute2x128_si256(L_EH0_EH16, L_EH4_EH20, 0x20);
1161
+ _mm256_store_si256(blockA_256++, L_EH0);
1162
+ __m256i L_EH16 = _mm256_permute2x128_si256(L_EH0_EH16, L_EH4_EH20, 0x31);
1163
+ __m256i L_EH8_EH24 = _mm256_unpacklo_epi16(L_EF8_EF24, L_GH8_GH24);
1164
+ __m256i L_EH12_EH28 = _mm256_unpackhi_epi16(L_EF8_EF24, L_GH8_GH24);
1165
+ __m256i L_EH8 = _mm256_permute2x128_si256(L_EH8_EH24, L_EH12_EH28, 0x20);
1166
+ _mm256_store_si256(blockA_256++, L_EH8);
1167
+ _mm256_store_si256(blockA_256++, L_EH16);
1168
+ __m256i L_EH24 = _mm256_permute2x128_si256(L_EH8_EH24, L_EH12_EH28, 0x31);
1169
+ _mm256_store_si256(blockA_256++, L_EH24);
1170
+ }
1171
+ }
1172
+ }
1173
+
1174
// Packs a ColMajor QUInt8 right-hand-side block into the interleaved layout
// consumed by gebp_kernel_any. Four adjacent column values for a given depth
// index are placed contiguously (matching the adjacent-add pattern of
// maddubs/madd in the kernel). Unlike the fast-path packer, this variant
// handles arbitrary sizes: partial depth and column edges are zero-padded so
// the kernel can always consume full 32x32 panels.
//
// blockB  - destination buffer (32-byte aligned, sized for the padded panel).
// rhs     - data mapper over the RHS matrix; rhs(k, n) reads element at
//           depth index k, column n.
// depth   - contraction dimension (rows of the RHS).
// cols    - number of RHS columns.
// stride, offset - panel-mode parameters; this packer requires both zero.
template <typename Index, typename DataMapper, int nr, bool Conjugate,
          bool PanelMode>
EIGEN_DONT_INLINE void
gemm_pack_rhs_any<QUInt8, Index, DataMapper, nr, ColMajor, Conjugate,
                  PanelMode>::operator()(QUInt8* blockB, const DataMapper& rhs,
                                         Index depth, Index cols, Index stride,
                                         Index offset) {
  eigen_assert(stride == 0);
  eigen_assert(offset == 0);

  typedef typename packet_traits<QUInt8>::type Packet;

  // Get vector pointer
  __m256i* blockB_256 = reinterpret_cast<__m256i*>(blockB);

  // Get even multiples of the dimensions
  Index cols_32 = (cols / 32) * 32;
  Index depth_32 = (depth / 32) * 32;

  // Perform a step of the packing for 4 columns (R_A..R_D hold 32 depth
  // values of four consecutive columns). The 64-bit unpacks interleave the
  // columns, and the cross-lane permutes put the four output vectors in
  // kernel order. Each invocation stores at blockB_256 + {0, 8, 16, 24} and
  // advances blockB_256 by one, so eight invocations fill a 32x32 panel and
  // leave blockB_256 advanced by 8 (the caller then skips the remaining 24).
  __m256i R_AB_L, R_AB_H, R_CD_L, R_CD_H, R_AD_0, R_AD_8, R_AD_16, R_AD_24;
#define PACK_STEP                                            \
  R_AB_L = _mm256_unpacklo_epi64(R_A, R_B);                  \
  R_CD_L = _mm256_unpacklo_epi64(R_C, R_D);                  \
  R_AB_H = _mm256_unpackhi_epi64(R_A, R_B);                  \
  R_CD_H = _mm256_unpackhi_epi64(R_C, R_D);                  \
  R_AD_0 = _mm256_permute2x128_si256(R_AB_L, R_CD_L, 0x20);  \
  R_AD_16 = _mm256_permute2x128_si256(R_AB_L, R_CD_L, 0x31); \
  R_AD_8 = _mm256_permute2x128_si256(R_AB_H, R_CD_H, 0x20);  \
  R_AD_24 = _mm256_permute2x128_si256(R_AB_H, R_CD_H, 0x31); \
  _mm256_store_si256(blockB_256, R_AD_0);                    \
  _mm256_store_si256(blockB_256 + 8, R_AD_8);                \
  _mm256_store_si256(blockB_256 + 16, R_AD_16);              \
  _mm256_store_si256(blockB_256 + 24, R_AD_24);              \
  blockB_256++;

  // Pack cols in sets of 32
  for (Index n = 0; n < cols_32; n += 32) {
    // Pack depth in sets of 32
    for (Index k = 0; k < depth_32; k += 32) {
      __m256i R_A = rhs.template loadPacket<Packet>(k, n);
      __m256i R_B = rhs.template loadPacket<Packet>(k, n + 1);
      __m256i R_C = rhs.template loadPacket<Packet>(k, n + 2);
      __m256i R_D = rhs.template loadPacket<Packet>(k, n + 3);
      PACK_STEP;

      R_A = rhs.template loadPacket<Packet>(k, n + 4);
      R_B = rhs.template loadPacket<Packet>(k, n + 5);
      R_C = rhs.template loadPacket<Packet>(k, n + 6);
      R_D = rhs.template loadPacket<Packet>(k, n + 7);
      PACK_STEP;

      R_A = rhs.template loadPacket<Packet>(k, n + 8);
      R_B = rhs.template loadPacket<Packet>(k, n + 9);
      R_C = rhs.template loadPacket<Packet>(k, n + 10);
      R_D = rhs.template loadPacket<Packet>(k, n + 11);
      PACK_STEP;

      R_A = rhs.template loadPacket<Packet>(k, n + 12);
      R_B = rhs.template loadPacket<Packet>(k, n + 13);
      R_C = rhs.template loadPacket<Packet>(k, n + 14);
      R_D = rhs.template loadPacket<Packet>(k, n + 15);
      PACK_STEP;

      R_A = rhs.template loadPacket<Packet>(k, n + 16);
      R_B = rhs.template loadPacket<Packet>(k, n + 17);
      R_C = rhs.template loadPacket<Packet>(k, n + 18);
      R_D = rhs.template loadPacket<Packet>(k, n + 19);
      PACK_STEP;

      R_A = rhs.template loadPacket<Packet>(k, n + 20);
      R_B = rhs.template loadPacket<Packet>(k, n + 21);
      R_C = rhs.template loadPacket<Packet>(k, n + 22);
      R_D = rhs.template loadPacket<Packet>(k, n + 23);
      PACK_STEP;

      R_A = rhs.template loadPacket<Packet>(k, n + 24);
      R_B = rhs.template loadPacket<Packet>(k, n + 25);
      R_C = rhs.template loadPacket<Packet>(k, n + 26);
      R_D = rhs.template loadPacket<Packet>(k, n + 27);
      PACK_STEP;

      R_A = rhs.template loadPacket<Packet>(k, n + 28);
      R_B = rhs.template loadPacket<Packet>(k, n + 29);
      R_C = rhs.template loadPacket<Packet>(k, n + 30);
      R_D = rhs.template loadPacket<Packet>(k, n + 31);
      PACK_STEP;

      // The eight PACK_STEPs above advanced blockB_256 by 8 and filled the
      // slots at +8/+16/+24; skip over them to the next 32x32 panel.
      blockB_256 += 24;
    }

    // Finish the depth dimension for this column group, padding with zeros.
    // Partial columns are loaded one scalar at a time into zeroed vectors
    // via a byte pointer into the register image.
    if (depth_32 < depth) {
      QUInt8* ptr;
      __m256i R_A = _mm256_setzero_si256();
      __m256i R_B = _mm256_setzero_si256();
      __m256i R_C = _mm256_setzero_si256();
      __m256i R_D = _mm256_setzero_si256();
      for (Index k = depth_32; k < depth; k++) {
        ptr = (QUInt8*)&R_A;
        ptr[k - depth_32] = rhs(k, n);
        ptr = (QUInt8*)&R_B;
        ptr[k - depth_32] = rhs(k, n + 1);
        ptr = (QUInt8*)&R_C;
        ptr[k - depth_32] = rhs(k, n + 2);
        ptr = (QUInt8*)&R_D;
        ptr[k - depth_32] = rhs(k, n + 3);
      }
      PACK_STEP;

      R_A = _mm256_setzero_si256();
      R_B = _mm256_setzero_si256();
      R_C = _mm256_setzero_si256();
      R_D = _mm256_setzero_si256();
      for (Index k = depth_32; k < depth; k++) {
        ptr = (QUInt8*)&R_A;
        ptr[k - depth_32] = rhs(k, n + 4);
        ptr = (QUInt8*)&R_B;
        ptr[k - depth_32] = rhs(k, n + 5);
        ptr = (QUInt8*)&R_C;
        ptr[k - depth_32] = rhs(k, n + 6);
        ptr = (QUInt8*)&R_D;
        ptr[k - depth_32] = rhs(k, n + 7);
      }
      PACK_STEP;

      R_A = _mm256_setzero_si256();
      R_B = _mm256_setzero_si256();
      R_C = _mm256_setzero_si256();
      R_D = _mm256_setzero_si256();
      for (Index k = depth_32; k < depth; k++) {
        ptr = (QUInt8*)&R_A;
        ptr[k - depth_32] = rhs(k, n + 8);
        ptr = (QUInt8*)&R_B;
        ptr[k - depth_32] = rhs(k, n + 9);
        ptr = (QUInt8*)&R_C;
        ptr[k - depth_32] = rhs(k, n + 10);
        ptr = (QUInt8*)&R_D;
        ptr[k - depth_32] = rhs(k, n + 11);
      }
      PACK_STEP;

      R_A = _mm256_setzero_si256();
      R_B = _mm256_setzero_si256();
      R_C = _mm256_setzero_si256();
      R_D = _mm256_setzero_si256();
      for (Index k = depth_32; k < depth; k++) {
        ptr = (QUInt8*)&R_A;
        ptr[k - depth_32] = rhs(k, n + 12);
        ptr = (QUInt8*)&R_B;
        ptr[k - depth_32] = rhs(k, n + 13);
        ptr = (QUInt8*)&R_C;
        ptr[k - depth_32] = rhs(k, n + 14);
        ptr = (QUInt8*)&R_D;
        ptr[k - depth_32] = rhs(k, n + 15);
      }
      PACK_STEP;

      R_A = _mm256_setzero_si256();
      R_B = _mm256_setzero_si256();
      R_C = _mm256_setzero_si256();
      R_D = _mm256_setzero_si256();
      for (Index k = depth_32; k < depth; k++) {
        ptr = (QUInt8*)&R_A;
        ptr[k - depth_32] = rhs(k, n + 16);
        ptr = (QUInt8*)&R_B;
        ptr[k - depth_32] = rhs(k, n + 17);
        ptr = (QUInt8*)&R_C;
        ptr[k - depth_32] = rhs(k, n + 18);
        ptr = (QUInt8*)&R_D;
        ptr[k - depth_32] = rhs(k, n + 19);
      }
      PACK_STEP;

      R_A = _mm256_setzero_si256();
      R_B = _mm256_setzero_si256();
      R_C = _mm256_setzero_si256();
      R_D = _mm256_setzero_si256();
      for (Index k = depth_32; k < depth; k++) {
        ptr = (QUInt8*)&R_A;
        ptr[k - depth_32] = rhs(k, n + 20);
        ptr = (QUInt8*)&R_B;
        ptr[k - depth_32] = rhs(k, n + 21);
        ptr = (QUInt8*)&R_C;
        ptr[k - depth_32] = rhs(k, n + 22);
        ptr = (QUInt8*)&R_D;
        ptr[k - depth_32] = rhs(k, n + 23);
      }
      PACK_STEP;

      R_A = _mm256_setzero_si256();
      R_B = _mm256_setzero_si256();
      R_C = _mm256_setzero_si256();
      R_D = _mm256_setzero_si256();
      for (Index k = depth_32; k < depth; k++) {
        ptr = (QUInt8*)&R_A;
        ptr[k - depth_32] = rhs(k, n + 24);
        ptr = (QUInt8*)&R_B;
        ptr[k - depth_32] = rhs(k, n + 25);
        ptr = (QUInt8*)&R_C;
        ptr[k - depth_32] = rhs(k, n + 26);
        ptr = (QUInt8*)&R_D;
        ptr[k - depth_32] = rhs(k, n + 27);
      }
      PACK_STEP;

      R_A = _mm256_setzero_si256();
      R_B = _mm256_setzero_si256();
      R_C = _mm256_setzero_si256();
      R_D = _mm256_setzero_si256();
      for (Index k = depth_32; k < depth; k++) {
        ptr = (QUInt8*)&R_A;
        ptr[k - depth_32] = rhs(k, n + 28);
        ptr = (QUInt8*)&R_B;
        ptr[k - depth_32] = rhs(k, n + 29);
        ptr = (QUInt8*)&R_C;
        ptr[k - depth_32] = rhs(k, n + 30);
        ptr = (QUInt8*)&R_D;
        ptr[k - depth_32] = rhs(k, n + 31);
      }
      PACK_STEP;
      blockB_256 += 24;
    }
  }

  // Finish packing cols (fewer than 32 columns remain); missing columns in
  // each group of four are substituted with zero vectors.
  if (cols_32 < cols) {
    // Pack depth in sets of 32
    for (Index k = 0; k < depth_32; k += 32) {
      __m256i R_A, R_B, R_C, R_D;
      Index n;
      for (n = cols_32; n < cols; n += 4) {
        switch (cols - n) {
          case 1:
            R_A = rhs.template loadPacket<Packet>(k, n);
            R_B = _mm256_setzero_si256();
            R_C = _mm256_setzero_si256();
            R_D = _mm256_setzero_si256();
            PACK_STEP;
            break;
          case 2:
            R_A = rhs.template loadPacket<Packet>(k, n);
            R_B = rhs.template loadPacket<Packet>(k, n + 1);
            R_C = _mm256_setzero_si256();
            R_D = _mm256_setzero_si256();
            PACK_STEP;
            break;
          case 3:
            R_A = rhs.template loadPacket<Packet>(k, n);
            R_B = rhs.template loadPacket<Packet>(k, n + 1);
            R_C = rhs.template loadPacket<Packet>(k, n + 2);
            R_D = _mm256_setzero_si256();
            PACK_STEP;
            break;
          default:
            // Four or more columns remain in this group: load all four.
            R_A = rhs.template loadPacket<Packet>(k, n);
            R_B = rhs.template loadPacket<Packet>(k, n + 1);
            R_C = rhs.template loadPacket<Packet>(k, n + 2);
            R_D = rhs.template loadPacket<Packet>(k, n + 3);
            PACK_STEP;
            break;
        }
      }

      // Increment the block pointer.
      // We must pad if cols is not a multiple of 32: (n - cols_32) / 4 is the
      // number of PACK_STEPs performed, each of which advanced blockB_256 by
      // one; a full panel needs 32.
      blockB_256 += 32 - (n - cols_32) / 4;
    }

    // Zero-pad the remaining depth for the trailing partial column group.
    if (depth_32 < depth) {
      for (Index n = cols_32; n < cols; n += 4) {
        QUInt8* ptr;
        __m256i R_A = _mm256_setzero_si256();
        __m256i R_B = _mm256_setzero_si256();
        __m256i R_C = _mm256_setzero_si256();
        __m256i R_D = _mm256_setzero_si256();
        switch (cols - n) {
          case 1:
            for (Index k = depth_32; k < depth; k++) {
              ptr = (QUInt8*)&R_A;
              ptr[k - depth_32] = rhs(k, n);
            }
            PACK_STEP;
            break;
          case 2:
            for (Index k = depth_32; k < depth; k++) {
              ptr = (QUInt8*)&R_A;
              ptr[k - depth_32] = rhs(k, n);
              ptr = (QUInt8*)&R_B;
              ptr[k - depth_32] = rhs(k, n + 1);
            }
            PACK_STEP;
            break;
          case 3:
            for (Index k = depth_32; k < depth; k++) {
              ptr = (QUInt8*)&R_A;
              ptr[k - depth_32] = rhs(k, n);
              ptr = (QUInt8*)&R_B;
              ptr[k - depth_32] = rhs(k, n + 1);
              ptr = (QUInt8*)&R_C;
              ptr[k - depth_32] = rhs(k, n + 2);
            }
            PACK_STEP;
            break;
          default:
            for (Index k = depth_32; k < depth; k++) {
              ptr = (QUInt8*)&R_A;
              ptr[k - depth_32] = rhs(k, n);
              ptr = (QUInt8*)&R_B;
              ptr[k - depth_32] = rhs(k, n + 1);
              ptr = (QUInt8*)&R_C;
              ptr[k - depth_32] = rhs(k, n + 2);
              ptr = (QUInt8*)&R_D;
              ptr[k - depth_32] = rhs(k, n + 3);
            }
            PACK_STEP;
            break;
        }
      }
    }
  }
#undef PACK_STEP
}
1496
+
1497
// Quantized GEMM micro-kernel: (QInt8 lhs) x (QUInt8 rhs) accumulated into
// QInt32, for arbitrary (non multiple of 32) sizes. Consumes blocks packed by
// the *_any packers, which zero-pad partial edges, so all inner loops can run
// over rows_32 / cols_32 / depth_32 (dimensions rounded UP to multiples of
// 32) without bounds checks. Each 32x32 output tile is accumulated in a
// stack-allocated QInt32 scratch block (blockO) and then transferred to the
// result mapper.
//
// Preconditions (asserted): no conjugation, alpha == 1, strides/offsets are
// the sentinel values used by the TensorFlow contraction path, and all
// dimensions are positive.
template <typename Index, typename DataMapper, int mr, int nr,
          bool ConjugateLhs, bool ConjugateRhs>
EIGEN_DONT_INLINE void
gebp_kernel_any<QInt8, QUInt8, Index, DataMapper, mr, nr, ConjugateLhs,
                ConjugateRhs>::operator()(const DataMapper& res,
                                          const QInt8* blockA,
                                          const QUInt8* blockB, Index rows,
                                          Index depth, Index cols, QInt32 alpha,
                                          Index strideA, Index strideB,
                                          Index offsetA, Index offsetB) {
  EIGEN_STATIC_ASSERT(!ConjugateLhs, YOU_MADE_A_PROGRAMMING_MISTAKE);
  EIGEN_STATIC_ASSERT(!ConjugateRhs, YOU_MADE_A_PROGRAMMING_MISTAKE);
  eigen_assert(alpha.value == 1);
  eigen_assert(strideA == -1);
  eigen_assert(strideB == -1);
  eigen_assert(offsetA == 0);
  eigen_assert(offsetB == 0);
  eigen_assert(rows > 0);
  eigen_assert(cols > 0);
  eigen_assert(depth > 0);
  eigen_assert(blockA);
  eigen_assert(blockB);

  // Dimensions rounded up to multiples of 32 (the packed blocks are padded
  // out to these sizes with zeros).
  Index rows_32 = ((rows + 31) / 32) * 32;
  Index cols_32 = ((cols + 31) / 32) * 32;
  Index depth_32 = ((depth + 31) / 32) * 32;

  // Create result block: 32x32 QInt32 accumulator, zero-initialized.
  ei_declare_aligned_stack_constructed_variable(QInt32, blockO, 32 * 32, 0);
  memset(blockO, 0, 32 * 32 * sizeof(QInt32));

  // Get vectorized pointers
  __m256i* blockO_256 = reinterpret_cast<__m256i*>(blockO);
  const __m256i* blockA_256 = reinterpret_cast<const __m256i*>(blockA);
  const __m256i* blockB_256 = reinterpret_cast<const __m256i*>(blockB);

  // Loop over blocks of 32 columns
  for (Index n = 0; n < cols_32; n += 32) {
    // Reset index into blockA
    Index indexL = 0;
    // Loop over blocks of 32 rows
    for (Index m = 0; m < rows_32; m += 32) {
      // Reset index into blockB: each 32-column panel of the packed RHS
      // occupies depth_32 __m256i vectors.
      Index indexR = n / 32 * depth_32;
      // Loop over blocks of 8 on depth
      for (Index k = 0; k < depth_32; k += 8) {
        // Load inputs: 8 vectors of packed LHS (rows A-D then E-H, see the
        // packer layout comment) and 8 vectors of packed RHS.
        __m256i L_AD0 = blockA_256[indexL++];
        __m256i L_AD8 = blockA_256[indexL++];
        __m256i L_AD16 = blockA_256[indexL++];
        __m256i L_AD24 = blockA_256[indexL++];
        __m256i L_EH0 = blockA_256[indexL++];
        __m256i L_EH8 = blockA_256[indexL++];
        __m256i L_EH16 = blockA_256[indexL++];
        __m256i L_EH24 = blockA_256[indexL++];
        __m256i R_AH0 = blockB_256[indexR++];
        __m256i R_AH4 = blockB_256[indexR++];
        __m256i R_AH8 = blockB_256[indexR++];
        __m256i R_AH12 = blockB_256[indexR++];
        __m256i R_AH16 = blockB_256[indexR++];
        __m256i R_AH20 = blockB_256[indexR++];
        __m256i R_AH24 = blockB_256[indexR++];
        __m256i R_AH28 = blockB_256[indexR++];

        // This constant is used with madd to convert 16 bit to 32 bit
        const __m256i ONE = _mm256_set1_epi32(0x00010001);

        // Declare variables used in COMPUTE_STEP
        __m256i P_16_A, P_16_B, P_32_A, P_32_B, P_32;

// COMPUTE_STEP accumulates one output column of the 32x32 tile:
// _mm256_maddubs_epi16 multiplies unsigned RHS bytes by signed LHS bytes and
// horizontally adds adjacent pairs into 16-bit lanes; _mm256_madd_epi16
// against ONE then widens and pairwise-adds into 32-bit lanes, which are
// added into the blockO accumulator at 4*OFFSET .. 4*OFFSET+3.
// NOTE(review): the pairwise add inside maddubs saturates at 16 bits;
// presumably the quantized input ranges keep the sums in range — confirm
// against the callers' quantization scheme.
#define COMPUTE_STEP(R_INPUT_A, R_INPUT_B, OFFSET)                             \
  P_16_A = _mm256_maddubs_epi16(R_INPUT_A, L_AD0);                             \
  P_32_A = _mm256_madd_epi16(P_16_A, ONE);                                     \
  P_16_B = _mm256_maddubs_epi16(R_INPUT_B, L_EH0);                             \
  P_32_B = _mm256_madd_epi16(P_16_B, ONE);                                     \
  P_32 = _mm256_add_epi32(P_32_A, P_32_B);                                     \
  _mm256_store_si256(                                                          \
      blockO_256 + 4 * OFFSET,                                                 \
      _mm256_add_epi32(_mm256_load_si256(blockO_256 + 4 * OFFSET), P_32));     \
                                                                               \
  P_16_A = _mm256_maddubs_epi16(R_INPUT_A, L_AD8);                             \
  P_32_A = _mm256_madd_epi16(P_16_A, ONE);                                     \
  P_16_B = _mm256_maddubs_epi16(R_INPUT_B, L_EH8);                             \
  P_32_B = _mm256_madd_epi16(P_16_B, ONE);                                     \
  P_32 = _mm256_add_epi32(P_32_A, P_32_B);                                     \
  _mm256_store_si256(                                                          \
      blockO_256 + 4 * OFFSET + 1,                                             \
      _mm256_add_epi32(_mm256_load_si256(blockO_256 + 4 * OFFSET + 1), P_32)); \
                                                                               \
  P_16_A = _mm256_maddubs_epi16(R_INPUT_A, L_AD16);                            \
  P_32_A = _mm256_madd_epi16(P_16_A, ONE);                                     \
  P_16_B = _mm256_maddubs_epi16(R_INPUT_B, L_EH16);                            \
  P_32_B = _mm256_madd_epi16(P_16_B, ONE);                                     \
  P_32 = _mm256_add_epi32(P_32_A, P_32_B);                                     \
  _mm256_store_si256(                                                          \
      blockO_256 + 4 * OFFSET + 2,                                             \
      _mm256_add_epi32(_mm256_load_si256(blockO_256 + 4 * OFFSET + 2), P_32)); \
                                                                               \
  P_16_A = _mm256_maddubs_epi16(R_INPUT_A, L_AD24);                            \
  P_32_A = _mm256_madd_epi16(P_16_A, ONE);                                     \
  P_16_B = _mm256_maddubs_epi16(R_INPUT_B, L_EH24);                            \
  P_32_B = _mm256_madd_epi16(P_16_B, ONE);                                     \
  P_32 = _mm256_add_epi32(P_32_A, P_32_B);                                     \
  _mm256_store_si256(                                                          \
      blockO_256 + 4 * OFFSET + 3,                                             \
      _mm256_add_epi32(_mm256_load_si256(blockO_256 + 4 * OFFSET + 3), P_32));

        // Permute and shuffle to copy a single value across the entire vector
        // Then compute the multiplication
        __m256i R_AH0_ = _mm256_permute2x128_si256(R_AH0, R_AH0, 0x00);
        __m256i R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        __m256i R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55);
        COMPUTE_STEP(R_AD0, R_EH0, 0);
        __m256i R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        __m256i R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD1, R_EH1, 1);
        R_AH0_ = _mm256_permute2x128_si256(R_AH0, R_AH0, 0x11);
        __m256i R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        __m256i R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55);
        COMPUTE_STEP(R_AD2, R_EH2, 2);
        __m256i R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        __m256i R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD3, R_EH3, 3);

        R_AH0_ = _mm256_permute2x128_si256(R_AH4, R_AH4, 0x00);
        R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55);
        COMPUTE_STEP(R_AD0, R_EH0, 4);
        R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD1, R_EH1, 5);
        R_AH0_ = _mm256_permute2x128_si256(R_AH4, R_AH4, 0x11);
        R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55);
        COMPUTE_STEP(R_AD2, R_EH2, 6);
        R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD3, R_EH3, 7);

        R_AH0_ = _mm256_permute2x128_si256(R_AH8, R_AH8, 0x00);
        R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55);
        COMPUTE_STEP(R_AD0, R_EH0, 8);
        R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD1, R_EH1, 9);
        R_AH0_ = _mm256_permute2x128_si256(R_AH8, R_AH8, 0x11);
        R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55);
        COMPUTE_STEP(R_AD2, R_EH2, 10);
        R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD3, R_EH3, 11);

        R_AH0_ = _mm256_permute2x128_si256(R_AH12, R_AH12, 0x00);
        R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55);
        COMPUTE_STEP(R_AD0, R_EH0, 12);
        R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD1, R_EH1, 13);
        R_AH0_ = _mm256_permute2x128_si256(R_AH12, R_AH12, 0x11);
        R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55);
        COMPUTE_STEP(R_AD2, R_EH2, 14);
        R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD3, R_EH3, 15);

        R_AH0_ = _mm256_permute2x128_si256(R_AH16, R_AH16, 0x00);
        R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55);
        COMPUTE_STEP(R_AD0, R_EH0, 16);
        R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD1, R_EH1, 17);
        R_AH0_ = _mm256_permute2x128_si256(R_AH16, R_AH16, 0x11);
        R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55);
        COMPUTE_STEP(R_AD2, R_EH2, 18);
        R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD3, R_EH3, 19);

        R_AH0_ = _mm256_permute2x128_si256(R_AH20, R_AH20, 0x00);
        R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55);
        COMPUTE_STEP(R_AD0, R_EH0, 20);
        R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD1, R_EH1, 21);
        R_AH0_ = _mm256_permute2x128_si256(R_AH20, R_AH20, 0x11);
        R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55);
        COMPUTE_STEP(R_AD2, R_EH2, 22);
        R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD3, R_EH3, 23);

        R_AH0_ = _mm256_permute2x128_si256(R_AH24, R_AH24, 0x00);
        R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55);
        COMPUTE_STEP(R_AD0, R_EH0, 24);
        R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD1, R_EH1, 25);
        R_AH0_ = _mm256_permute2x128_si256(R_AH24, R_AH24, 0x11);
        R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55);
        COMPUTE_STEP(R_AD2, R_EH2, 26);
        R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD3, R_EH3, 27);

        R_AH0_ = _mm256_permute2x128_si256(R_AH28, R_AH28, 0x00);
        R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55);
        COMPUTE_STEP(R_AD0, R_EH0, 28);
        R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD1, R_EH1, 29);
        R_AH0_ = _mm256_permute2x128_si256(R_AH28, R_AH28, 0x11);
        R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00);
        R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55);
        COMPUTE_STEP(R_AD2, R_EH2, 30);
        R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
        R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
        COMPUTE_STEP(R_AD3, R_EH3, 31);

#undef COMPUTE_STEP
      }

      // Transfer the results to the result matrix.
      if (m + 32 <= rows && n + 32 <= cols) {
        // Fast path: the full 32x32 tile fits; add blockO into the existing
        // result with packet loads/stores (8 QInt32 per packet).
        Index i = 0;
        for (Index j = n; j < n + 32; j++) {
          LinearMapper r0 = res.getLinearMapper(m, j);
          LinearMapper r1 = res.getLinearMapper(m + 8, j);
          LinearMapper r2 = res.getLinearMapper(m + 16, j);
          LinearMapper r3 = res.getLinearMapper(m + 24, j);
          typedef typename packet_traits<QInt32>::type Packet;
          r0.template storePacket<Packet>(
              0, _mm256_add_epi32(blockO_256[i++],
                                  r0.template loadPacket<Packet>(0)));
          r1.template storePacket<Packet>(
              0, _mm256_add_epi32(blockO_256[i++],
                                  r1.template loadPacket<Packet>(0)));
          r2.template storePacket<Packet>(
              0, _mm256_add_epi32(blockO_256[i++],
                                  r2.template loadPacket<Packet>(0)));
          r3.template storePacket<Packet>(
              0, _mm256_add_epi32(blockO_256[i++],
                                  r3.template loadPacket<Packet>(0)));
        }
      } else {
        // Edge tile: copy only the valid (i, j) region, scalar by scalar.
        // NOTE(review): this path assigns res(i, j) while the fast path
        // accumulates into it; the two agree only if res starts zeroed —
        // confirm the caller zero-initializes the output.
        for (Index j = n; j < cols; j++) {
          for (Index i = m; i < rows; i++) {
            res(i, j) = blockO[(j - n) * 32 + (i - m)];
          }
        }
      }

      // Zero the result block so it can be reused
      memset(blockO, 0, 32 * 32 * sizeof(QInt32));
    }
  }
}
1764
+
1765
+ // Below are the fully optimized versions that are correct only for sizes that
1766
+ // are multiple of 32. It is about a 10% performance benefit to keep these
1767
+ // implementations separate.
1768
+
1769
+ // Arrange a block of the left input matrix in contiguous memory.
1770
+ //
1771
+ // Given column major input (A0 beside A1 in memory):
1772
+ // A0 B0 C0 D0 E0 F0 G0 H0 ...
1773
+ // A1 B1 C1 D1 E1 F1 G1 H1 ...
1774
+ // A2 B2 C2 D2 E2 F2 G2 H2 ...
1775
+ // A3 B3 C3 D3 E3 F3 G3 H3 ...
1776
+ // A4 B4 C4 D4 E4 F4 G4 H4 ...
1777
+ // A5 B5 C5 D5 E5 F5 G5 H5 ...
1778
+ // A6 B6 C6 D6 E6 F6 G6 H6 ...
1779
+ // A7 B7 C7 D7 E7 F7 G7 H7 ...
1780
+ // A8 ...
1781
+ // ...
1782
+ //
1783
+ // Packing yields output (A0 beside B0 in memory):
1784
+ // A0 B0 C0 D0
1785
+ // A1 B1 C1 D1
1786
+ // A2 B2 C2 D2
1787
+ // A3 B3 C3 D3
1788
+ // A4 B4 C4 D4
1789
+ // A5 B5 C5 D5
1790
+ // A6 B6 C6 D6
1791
+ // A7 B7 C7 D7
1792
+ // ...
1793
+ // A31 B31 C31 D31
1794
+ // E0 F0 G0 H0
1795
+ // E1 F1 G1 H1
1796
+ // E2 F2 G2 H2
1797
+ // E3 F3 G3 H3
1798
+ // E4 F4 G4 H4
1799
+ // E5 F5 G5 H5
1800
+ // E6 F6 G6 H6
1801
+ // E7 F7 G7 H7
1802
+ // ...
1803
+ //
1804
+ // Four elements of the same row are arranged contiguously because maddubs and
1805
+ // madd both perform an adjacent addition in the kernel.
1806
// Partial specialization of Eigen's LHS packer for QInt8 ColMajor input.
// The definition (below) dispatches to gemm_pack_lhs_any when rows or depth
// is not a multiple of 32, and otherwise uses the fully unrolled fast path.
// stride/offset default to 0 — the only values the implementation supports
// (it asserts both are zero).
template <typename Index, typename DataMapper, int Pack1, int Pack2,
          bool Conjugate, bool PanelMode>
struct gemm_pack_lhs<QInt8, Index, DataMapper, Pack1, Pack2, QInt8, ColMajor,
                     Conjugate, PanelMode> {
  EIGEN_DONT_INLINE void operator()(QInt8* blockA, const DataMapper& lhs,
                                    Index depth, Index rows, Index stride = 0,
                                    Index offset = 0);
};
1814
+
1815
+ template <typename Index, typename DataMapper, int Pack1, int Pack2,
1816
+ bool Conjugate, bool PanelMode>
1817
+ EIGEN_DONT_INLINE void
1818
+ gemm_pack_lhs<QInt8, Index, DataMapper, Pack1, Pack2, QInt8, ColMajor,
1819
+ Conjugate, PanelMode>::operator()(QInt8* blockA,
1820
+ const DataMapper& lhs,
1821
+ Index depth, Index rows,
1822
+ Index stride, Index offset) {
1823
+ eigen_assert(stride == 0);
1824
+ eigen_assert(offset == 0);
1825
+
1826
+ typedef typename packet_traits<QInt8>::type Packet;
1827
+
1828
+ // Use alternate function for weird sizes
1829
+ if (rows % 32 != 0 || depth % 32 != 0) {
1830
+ gemm_pack_lhs_any<QInt8, Index, DataMapper, Pack1, Pack2, ColMajor,
1831
+ Conjugate, PanelMode>
1832
+ lhs_pack;
1833
+ return lhs_pack(blockA, lhs, depth, rows, stride, offset);
1834
+ }
1835
+
1836
+ // Get vector pointer
1837
+ __m256i* blockA_256 = reinterpret_cast<__m256i*>(blockA);
1838
+
1839
+ // Pack rows in sets of 32
1840
+ for (Index m = 0; m < rows; m += 32) {
1841
+ // Pack depth in sets of 8
1842
+ for (Index k = 0; k < depth; k += 8) {
1843
+ // Load vectors
1844
+ __m256i L_A = lhs.template loadPacket<Packet>(m, k);
1845
+ __m256i L_B = lhs.template loadPacket<Packet>(m, k + 1);
1846
+
1847
+ // Interleave 8-bit elements
1848
+ __m256i L_AB0_AB16 = _mm256_unpacklo_epi8(L_A, L_B);
1849
+ __m256i L_AB8_AB24 = _mm256_unpackhi_epi8(L_A, L_B);
1850
+
1851
+ __m256i L_C = lhs.template loadPacket<Packet>(m, k + 2);
1852
+ __m256i L_D = lhs.template loadPacket<Packet>(m, k + 3);
1853
+ __m256i L_CD0_CD16 = _mm256_unpacklo_epi8(L_C, L_D);
1854
+ __m256i L_CD8_CD24 = _mm256_unpackhi_epi8(L_C, L_D);
1855
+
1856
+ // Interleave 16-bit elements
1857
+ __m256i L_AD0_AD16 = _mm256_unpacklo_epi16(L_AB0_AB16, L_CD0_CD16);
1858
+ __m256i L_AD4_AD20 = _mm256_unpackhi_epi16(L_AB0_AB16, L_CD0_CD16);
1859
+
1860
+ // Use permute before we store to cross 128-bit lanes
1861
+ __m256i L_AD0 = _mm256_permute2x128_si256(L_AD0_AD16, L_AD4_AD20, 0x20);
1862
+ _mm256_store_si256(blockA_256++, L_AD0);
1863
+
1864
+ // Complete packing for 32 x 8 block
1865
+ __m256i L_AD16 = _mm256_permute2x128_si256(L_AD0_AD16, L_AD4_AD20, 0x31);
1866
+ __m256i L_AD8_AD24 = _mm256_unpacklo_epi16(L_AB8_AB24, L_CD8_CD24);
1867
+ __m256i L_AD12_AD28 = _mm256_unpackhi_epi16(L_AB8_AB24, L_CD8_CD24);
1868
+ __m256i L_AD8 = _mm256_permute2x128_si256(L_AD8_AD24, L_AD12_AD28, 0x20);
1869
+ _mm256_store_si256(blockA_256++, L_AD8);
1870
+ _mm256_store_si256(blockA_256++, L_AD16);
1871
+ __m256i L_AD24 = _mm256_permute2x128_si256(L_AD8_AD24, L_AD12_AD28, 0x31);
1872
+ _mm256_store_si256(blockA_256++, L_AD24);
1873
+ __m256i L_E = lhs.template loadPacket<Packet>(m, k + 4);
1874
+ __m256i L_F = lhs.template loadPacket<Packet>(m, k + 5);
1875
+ __m256i L_EF0_EF16 = _mm256_unpacklo_epi8(L_E, L_F);
1876
+ __m256i L_EF8_EF24 = _mm256_unpackhi_epi8(L_E, L_F);
1877
+ __m256i L_G = lhs.template loadPacket<Packet>(m, k + 6);
1878
+ __m256i L_H = lhs.template loadPacket<Packet>(m, k + 7);
1879
+ __m256i L_GH0_GH16 = _mm256_unpacklo_epi8(L_G, L_H);
1880
+ __m256i L_GH8_GH24 = _mm256_unpackhi_epi8(L_G, L_H);
1881
+ __m256i L_EH0_EH16 = _mm256_unpacklo_epi16(L_EF0_EF16, L_GH0_GH16);
1882
+ __m256i L_EH4_EH20 = _mm256_unpackhi_epi16(L_EF0_EF16, L_GH0_GH16);
1883
+ __m256i L_EH0 = _mm256_permute2x128_si256(L_EH0_EH16, L_EH4_EH20, 0x20);
1884
+ _mm256_store_si256(blockA_256++, L_EH0);
1885
+ __m256i L_EH16 = _mm256_permute2x128_si256(L_EH0_EH16, L_EH4_EH20, 0x31);
1886
+ __m256i L_EH8_EH24 = _mm256_unpacklo_epi16(L_EF8_EF24, L_GH8_GH24);
1887
+ __m256i L_EH12_EH28 = _mm256_unpackhi_epi16(L_EF8_EF24, L_GH8_GH24);
1888
+ __m256i L_EH8 = _mm256_permute2x128_si256(L_EH8_EH24, L_EH12_EH28, 0x20);
1889
+ _mm256_store_si256(blockA_256++, L_EH8);
1890
+ _mm256_store_si256(blockA_256++, L_EH16);
1891
+ __m256i L_EH24 = _mm256_permute2x128_si256(L_EH8_EH24, L_EH12_EH28, 0x31);
1892
+ _mm256_store_si256(blockA_256++, L_EH24);
1893
+ }
1894
+ }
1895
+ }
1896
+
1897
+ // Arrange a block of the right input matrix in contiguous memory.
1898
+ //
1899
+ // Given column major input (A0 beside A1 in memory):
1900
+ // A0 B0 C0 D0 E0 F0 G0 H0 ...
1901
+ // A1 B1 C1 D1 E1 F1 G1 H1 ...
1902
+ // A2 B2 C2 D2 E2 F2 G2 H2 ...
1903
+ // A3 B3 C3 D3 E3 F3 G3 H3 ...
1904
+ // A4 B4 C4 D4 E4 F4 G4 H4 ...
1905
+ // A5 B5 C5 D5 E5 F5 G5 H5 ...
1906
+ // A6 B6 C6 D6 E6 F6 G6 H6 ...
1907
+ // A7 B7 C7 D7 E7 F7 G7 H7 ...
1908
+ // A8 ...
1909
+ // ...
1910
+ //
1911
+ // Packing yields row major output (A0 beside A1 in memory):
1912
+ // A0 A1 A2 A3 A4 A5 A6 A7
1913
+ // B0 B1 B2 B3 B4 B5 B6 B7
1914
+ // ...
1915
+ //
1916
+ // At least four elements of the same col are arranged contiguously because
1917
+ // maddubs and madd both perform an adjacent addition in the kernel. We can
1918
+ // save work by leaving 8 adjacent elements because kr = 8.
1919
+ template <typename Index, typename DataMapper, int nr, bool Conjugate,
1920
+ bool PanelMode>
1921
+ struct gemm_pack_rhs<QUInt8, Index, DataMapper, nr, ColMajor, Conjugate,
1922
+ PanelMode> {
1923
+ EIGEN_DONT_INLINE void operator()(QUInt8* blockB, const DataMapper& rhs,
1924
+ Index depth, Index cols, Index stride = 0,
1925
+ Index offset = 0);
1926
+ };
1927
+
1928
+ template <typename Index, typename DataMapper, int nr, bool Conjugate,
1929
+ bool PanelMode>
1930
+ EIGEN_DONT_INLINE void
1931
+ gemm_pack_rhs<QUInt8, Index, DataMapper, nr, ColMajor, Conjugate,
1932
+ PanelMode>::operator()(QUInt8* blockB, const DataMapper& rhs,
1933
+ Index depth, Index cols, Index stride,
1934
+ Index offset) {
1935
+ eigen_assert(stride == 0);
1936
+ eigen_assert(offset == 0);
1937
+
1938
+ typedef typename packet_traits<QUInt8>::type Packet;
1939
+
1940
+ // Use alternate function for weird sizes
1941
+ if (cols % 32 != 0 || depth % 32 != 0) {
1942
+ gemm_pack_rhs_any<QUInt8, Index, DataMapper, nr, ColMajor, Conjugate,
1943
+ PanelMode>
1944
+ rhs_pack;
1945
+ return rhs_pack(blockB, rhs, depth, cols, stride, offset);
1946
+ }
1947
+
1948
+ // Get vector pointer
1949
+ __m256i* blockB_256 = reinterpret_cast<__m256i*>(blockB);
1950
+
1951
+ // Perform a step of the packing for 4 columns
1952
+ __m256i R_AB_L, R_AB_H, R_CD_L, R_CD_H, R_AD_0, R_AD_8, R_AD_16, R_AD_24;
1953
+ #define PACK_STEP \
1954
+ R_AB_L = _mm256_unpacklo_epi64(R_A, R_B); \
1955
+ R_CD_L = _mm256_unpacklo_epi64(R_C, R_D); \
1956
+ R_AB_H = _mm256_unpackhi_epi64(R_A, R_B); \
1957
+ R_CD_H = _mm256_unpackhi_epi64(R_C, R_D); \
1958
+ R_AD_0 = _mm256_permute2x128_si256(R_AB_L, R_CD_L, 0x20); \
1959
+ R_AD_16 = _mm256_permute2x128_si256(R_AB_L, R_CD_L, 0x31); \
1960
+ R_AD_8 = _mm256_permute2x128_si256(R_AB_H, R_CD_H, 0x20); \
1961
+ R_AD_24 = _mm256_permute2x128_si256(R_AB_H, R_CD_H, 0x31); \
1962
+ _mm256_store_si256(blockB_256, R_AD_0); \
1963
+ _mm256_store_si256(blockB_256 + 8, R_AD_8); \
1964
+ _mm256_store_si256(blockB_256 + 16, R_AD_16); \
1965
+ _mm256_store_si256(blockB_256 + 24, R_AD_24); \
1966
+ blockB_256++;
1967
+
1968
+ // Pack cols in sets of 32
1969
+ for (Index n = 0; n < cols; n += 32) {
1970
+ // Pack depth in sets of 32
1971
+ for (Index k = 0; k < depth; k += 32) {
1972
+ __m256i R_A = rhs.template loadPacket<Packet>(k, n);
1973
+ __m256i R_B = rhs.template loadPacket<Packet>(k, n + 1);
1974
+ __m256i R_C = rhs.template loadPacket<Packet>(k, n + 2);
1975
+ __m256i R_D = rhs.template loadPacket<Packet>(k, n + 3);
1976
+ PACK_STEP;
1977
+
1978
+ R_A = rhs.template loadPacket<Packet>(k, n + 4);
1979
+ R_B = rhs.template loadPacket<Packet>(k, n + 5);
1980
+ R_C = rhs.template loadPacket<Packet>(k, n + 6);
1981
+ R_D = rhs.template loadPacket<Packet>(k, n + 7);
1982
+ PACK_STEP;
1983
+
1984
+ R_A = rhs.template loadPacket<Packet>(k, n + 8);
1985
+ R_B = rhs.template loadPacket<Packet>(k, n + 9);
1986
+ R_C = rhs.template loadPacket<Packet>(k, n + 10);
1987
+ R_D = rhs.template loadPacket<Packet>(k, n + 11);
1988
+ PACK_STEP;
1989
+
1990
+ R_A = rhs.template loadPacket<Packet>(k, n + 12);
1991
+ R_B = rhs.template loadPacket<Packet>(k, n + 13);
1992
+ R_C = rhs.template loadPacket<Packet>(k, n + 14);
1993
+ R_D = rhs.template loadPacket<Packet>(k, n + 15);
1994
+ PACK_STEP;
1995
+
1996
+ R_A = rhs.template loadPacket<Packet>(k, n + 16);
1997
+ R_B = rhs.template loadPacket<Packet>(k, n + 17);
1998
+ R_C = rhs.template loadPacket<Packet>(k, n + 18);
1999
+ R_D = rhs.template loadPacket<Packet>(k, n + 19);
2000
+ PACK_STEP;
2001
+
2002
+ R_A = rhs.template loadPacket<Packet>(k, n + 20);
2003
+ R_B = rhs.template loadPacket<Packet>(k, n + 21);
2004
+ R_C = rhs.template loadPacket<Packet>(k, n + 22);
2005
+ R_D = rhs.template loadPacket<Packet>(k, n + 23);
2006
+ PACK_STEP;
2007
+
2008
+ R_A = rhs.template loadPacket<Packet>(k, n + 24);
2009
+ R_B = rhs.template loadPacket<Packet>(k, n + 25);
2010
+ R_C = rhs.template loadPacket<Packet>(k, n + 26);
2011
+ R_D = rhs.template loadPacket<Packet>(k, n + 27);
2012
+ PACK_STEP;
2013
+
2014
+ R_A = rhs.template loadPacket<Packet>(k, n + 28);
2015
+ R_B = rhs.template loadPacket<Packet>(k, n + 29);
2016
+ R_C = rhs.template loadPacket<Packet>(k, n + 30);
2017
+ R_D = rhs.template loadPacket<Packet>(k, n + 31);
2018
+ PACK_STEP;
2019
+
2020
+ blockB_256 += 24;
2021
+ }
2022
+ }
2023
+ #undef PACK_STEP
2024
+ }
2025
+
2026
+ // Perform the actual multiplication on packed inputs
2027
+ template <typename Index, typename DataMapper, int mr, int nr,
2028
+ bool ConjugateLhs, bool ConjugateRhs>
2029
+ struct gebp_kernel<QInt8, QUInt8, Index, DataMapper, mr, nr, ConjugateLhs,
2030
+ ConjugateRhs> {
2031
+ typedef typename DataMapper::LinearMapper LinearMapper;
2032
+
2033
+ EIGEN_DONT_INLINE
2034
+ void operator()(const DataMapper& res, const QInt8* blockA,
2035
+ const QUInt8* blockB, Index rows, Index depth, Index cols,
2036
+ QInt32 alpha, Index strideA = -1, Index strideB = -1,
2037
+ Index offsetA = 0, Index offsetB = 0);
2038
+ };
2039
+
2040
+ template <typename Index, typename DataMapper, int mr, int nr,
2041
+ bool ConjugateLhs, bool ConjugateRhs>
2042
+ EIGEN_DONT_INLINE void
2043
+ gebp_kernel<QInt8, QUInt8, Index, DataMapper, mr, nr, ConjugateLhs,
2044
+ ConjugateRhs>::operator()(const DataMapper& res,
2045
+ const QInt8* blockA, const QUInt8* blockB,
2046
+ Index rows, Index depth, Index cols,
2047
+ QInt32 alpha, Index strideA,
2048
+ Index strideB, Index offsetA,
2049
+ Index offsetB) {
2050
+ EIGEN_STATIC_ASSERT(!ConjugateLhs, YOU_MADE_A_PROGRAMMING_MISTAKE);
2051
+ EIGEN_STATIC_ASSERT(!ConjugateRhs, YOU_MADE_A_PROGRAMMING_MISTAKE);
2052
+ eigen_assert(alpha.value == 1);
2053
+ eigen_assert(strideA == -1);
2054
+ eigen_assert(strideB == -1);
2055
+ eigen_assert(offsetA == 0);
2056
+ eigen_assert(offsetB == 0);
2057
+ eigen_assert(rows > 0);
2058
+ eigen_assert(cols > 0);
2059
+ eigen_assert(depth > 0);
2060
+ eigen_assert(blockA);
2061
+ eigen_assert(blockB);
2062
+
2063
+ // Use alternate function for weird sizes
2064
+ if (rows % 32 != 0 || cols % 32 != 0 || depth % 32 != 0) {
2065
+ gebp_kernel_any<QInt8, QUInt8, Index, DataMapper, mr, nr, ConjugateLhs,
2066
+ ConjugateRhs>
2067
+ gebp;
2068
+ return gebp(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB,
2069
+ offsetA, offsetB);
2070
+ }
2071
+
2072
+ // Create result block
2073
+ QInt32* blockO = aligned_new<QInt32>(32 * 32);
2074
+ // Allocating the result block is about 5-10% faster than declaring stack
2075
+ // space. It is unclear why this is the case.
2076
+ // ei_declare_aligned_stack_constructed_variable(QInt32, blockO, 32 * 32, 0);
2077
+ memset(blockO, 0, 32 * 32 * sizeof(QInt32));
2078
+
2079
+ // Get vectorized pointers
2080
+ __m256i* blockO_256 = reinterpret_cast<__m256i*>(blockO);
2081
+ const __m256i* blockA_256 = reinterpret_cast<const __m256i*>(blockA);
2082
+ const __m256i* blockB_256 = reinterpret_cast<const __m256i*>(blockB);
2083
+
2084
+ // Loop over blocks of 32 columns
2085
+ for (Index n = 0; n < cols; n += 32) {
2086
+ // Reset index into blockA
2087
+ Index indexL = 0;
2088
+ // Loop over blocks of 32 rows
2089
+ for (Index m = 0; m < rows; m += 32) {
2090
+ // Reset index into blockB
2091
+ Index indexR = n / 32 * depth;
2092
+ // Loop over blocks of 8 on depth
2093
+ for (Index k = 0; k < depth; k += 8) {
2094
+ // Load inputs
2095
+ __m256i L_AD0 = blockA_256[indexL++];
2096
+ __m256i L_AD8 = blockA_256[indexL++];
2097
+ __m256i L_AD16 = blockA_256[indexL++];
2098
+ __m256i L_AD24 = blockA_256[indexL++];
2099
+ __m256i L_EH0 = blockA_256[indexL++];
2100
+ __m256i L_EH8 = blockA_256[indexL++];
2101
+ __m256i L_EH16 = blockA_256[indexL++];
2102
+ __m256i L_EH24 = blockA_256[indexL++];
2103
+ __m256i R_AH0 = blockB_256[indexR++];
2104
+ __m256i R_AH4 = blockB_256[indexR++];
2105
+ __m256i R_AH8 = blockB_256[indexR++];
2106
+ __m256i R_AH12 = blockB_256[indexR++];
2107
+ __m256i R_AH16 = blockB_256[indexR++];
2108
+ __m256i R_AH20 = blockB_256[indexR++];
2109
+ __m256i R_AH24 = blockB_256[indexR++];
2110
+ __m256i R_AH28 = blockB_256[indexR++];
2111
+
2112
+ // This constant is used with madd to convert 16 bit to 32 bit
2113
+ const __m256i ONE = _mm256_set1_epi32(0x00010001);
2114
+
2115
+ // Declare variables used in COMPUTE_STEP
2116
+ __m256i P_16_A, P_16_B, P_32_A, P_32_B, P_32;
2117
+
2118
+ #define COMPUTE_STEP(R_INPUT_A, R_INPUT_B, OFFSET) \
2119
+ P_16_A = _mm256_maddubs_epi16(R_INPUT_A, L_AD0); \
2120
+ P_32_A = _mm256_madd_epi16(P_16_A, ONE); \
2121
+ P_16_B = _mm256_maddubs_epi16(R_INPUT_B, L_EH0); \
2122
+ P_32_B = _mm256_madd_epi16(P_16_B, ONE); \
2123
+ P_32 = _mm256_add_epi32(P_32_A, P_32_B); \
2124
+ _mm256_store_si256( \
2125
+ blockO_256 + 4 * OFFSET, \
2126
+ _mm256_add_epi32(_mm256_load_si256(blockO_256 + 4 * OFFSET), P_32)); \
2127
+ \
2128
+ P_16_A = _mm256_maddubs_epi16(R_INPUT_A, L_AD8); \
2129
+ P_32_A = _mm256_madd_epi16(P_16_A, ONE); \
2130
+ P_16_B = _mm256_maddubs_epi16(R_INPUT_B, L_EH8); \
2131
+ P_32_B = _mm256_madd_epi16(P_16_B, ONE); \
2132
+ P_32 = _mm256_add_epi32(P_32_A, P_32_B); \
2133
+ _mm256_store_si256( \
2134
+ blockO_256 + 4 * OFFSET + 1, \
2135
+ _mm256_add_epi32(_mm256_load_si256(blockO_256 + 4 * OFFSET + 1), P_32)); \
2136
+ \
2137
+ P_16_A = _mm256_maddubs_epi16(R_INPUT_A, L_AD16); \
2138
+ P_32_A = _mm256_madd_epi16(P_16_A, ONE); \
2139
+ P_16_B = _mm256_maddubs_epi16(R_INPUT_B, L_EH16); \
2140
+ P_32_B = _mm256_madd_epi16(P_16_B, ONE); \
2141
+ P_32 = _mm256_add_epi32(P_32_A, P_32_B); \
2142
+ _mm256_store_si256( \
2143
+ blockO_256 + 4 * OFFSET + 2, \
2144
+ _mm256_add_epi32(_mm256_load_si256(blockO_256 + 4 * OFFSET + 2), P_32)); \
2145
+ \
2146
+ P_16_A = _mm256_maddubs_epi16(R_INPUT_A, L_AD24); \
2147
+ P_32_A = _mm256_madd_epi16(P_16_A, ONE); \
2148
+ P_16_B = _mm256_maddubs_epi16(R_INPUT_B, L_EH24); \
2149
+ P_32_B = _mm256_madd_epi16(P_16_B, ONE); \
2150
+ P_32 = _mm256_add_epi32(P_32_A, P_32_B); \
2151
+ _mm256_store_si256( \
2152
+ blockO_256 + 4 * OFFSET + 3, \
2153
+ _mm256_add_epi32(_mm256_load_si256(blockO_256 + 4 * OFFSET + 3), P_32));
2154
+
2155
+ // Permute and shuffle to copy a single value across the entire vector
2156
+ // Then compute the multiplication
2157
+ __m256i R_AH0_ = _mm256_permute2x128_si256(R_AH0, R_AH0, 0x00);
2158
+ __m256i R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00);
2159
+ __m256i R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55);
2160
+ COMPUTE_STEP(R_AD0, R_EH0, 0);
2161
+ __m256i R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
2162
+ __m256i R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
2163
+ COMPUTE_STEP(R_AD1, R_EH1, 1);
2164
+ R_AH0_ = _mm256_permute2x128_si256(R_AH0, R_AH0, 0x11);
2165
+ __m256i R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00);
2166
+ __m256i R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55);
2167
+ COMPUTE_STEP(R_AD2, R_EH2, 2);
2168
+ __m256i R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
2169
+ __m256i R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
2170
+ COMPUTE_STEP(R_AD3, R_EH3, 3);
2171
+
2172
+ R_AH0_ = _mm256_permute2x128_si256(R_AH4, R_AH4, 0x00);
2173
+ R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00);
2174
+ R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55);
2175
+ COMPUTE_STEP(R_AD0, R_EH0, 4);
2176
+ R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
2177
+ R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
2178
+ COMPUTE_STEP(R_AD1, R_EH1, 5);
2179
+ R_AH0_ = _mm256_permute2x128_si256(R_AH4, R_AH4, 0x11);
2180
+ R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00);
2181
+ R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55);
2182
+ COMPUTE_STEP(R_AD2, R_EH2, 6);
2183
+ R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
2184
+ R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
2185
+ COMPUTE_STEP(R_AD3, R_EH3, 7);
2186
+
2187
+ R_AH0_ = _mm256_permute2x128_si256(R_AH8, R_AH8, 0x00);
2188
+ R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00);
2189
+ R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55);
2190
+ COMPUTE_STEP(R_AD0, R_EH0, 8);
2191
+ R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
2192
+ R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
2193
+ COMPUTE_STEP(R_AD1, R_EH1, 9);
2194
+ R_AH0_ = _mm256_permute2x128_si256(R_AH8, R_AH8, 0x11);
2195
+ R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00);
2196
+ R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55);
2197
+ COMPUTE_STEP(R_AD2, R_EH2, 10);
2198
+ R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
2199
+ R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
2200
+ COMPUTE_STEP(R_AD3, R_EH3, 11);
2201
+
2202
+ R_AH0_ = _mm256_permute2x128_si256(R_AH12, R_AH12, 0x00);
2203
+ R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00);
2204
+ R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55);
2205
+ COMPUTE_STEP(R_AD0, R_EH0, 12);
2206
+ R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
2207
+ R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
2208
+ COMPUTE_STEP(R_AD1, R_EH1, 13);
2209
+ R_AH0_ = _mm256_permute2x128_si256(R_AH12, R_AH12, 0x11);
2210
+ R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00);
2211
+ R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55);
2212
+ COMPUTE_STEP(R_AD2, R_EH2, 14);
2213
+ R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
2214
+ R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
2215
+ COMPUTE_STEP(R_AD3, R_EH3, 15);
2216
+
2217
+ R_AH0_ = _mm256_permute2x128_si256(R_AH16, R_AH16, 0x00);
2218
+ R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00);
2219
+ R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55);
2220
+ COMPUTE_STEP(R_AD0, R_EH0, 16);
2221
+ R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
2222
+ R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
2223
+ COMPUTE_STEP(R_AD1, R_EH1, 17);
2224
+ R_AH0_ = _mm256_permute2x128_si256(R_AH16, R_AH16, 0x11);
2225
+ R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00);
2226
+ R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55);
2227
+ COMPUTE_STEP(R_AD2, R_EH2, 18);
2228
+ R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
2229
+ R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
2230
+ COMPUTE_STEP(R_AD3, R_EH3, 19);
2231
+
2232
+ R_AH0_ = _mm256_permute2x128_si256(R_AH20, R_AH20, 0x00);
2233
+ R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00);
2234
+ R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55);
2235
+ COMPUTE_STEP(R_AD0, R_EH0, 20);
2236
+ R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
2237
+ R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
2238
+ COMPUTE_STEP(R_AD1, R_EH1, 21);
2239
+ R_AH0_ = _mm256_permute2x128_si256(R_AH20, R_AH20, 0x11);
2240
+ R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00);
2241
+ R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55);
2242
+ COMPUTE_STEP(R_AD2, R_EH2, 22);
2243
+ R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
2244
+ R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
2245
+ COMPUTE_STEP(R_AD3, R_EH3, 23);
2246
+
2247
+ R_AH0_ = _mm256_permute2x128_si256(R_AH24, R_AH24, 0x00);
2248
+ R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00);
2249
+ R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55);
2250
+ COMPUTE_STEP(R_AD0, R_EH0, 24);
2251
+ R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
2252
+ R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
2253
+ COMPUTE_STEP(R_AD1, R_EH1, 25);
2254
+ R_AH0_ = _mm256_permute2x128_si256(R_AH24, R_AH24, 0x11);
2255
+ R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00);
2256
+ R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55);
2257
+ COMPUTE_STEP(R_AD2, R_EH2, 26);
2258
+ R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
2259
+ R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
2260
+ COMPUTE_STEP(R_AD3, R_EH3, 27);
2261
+
2262
+ R_AH0_ = _mm256_permute2x128_si256(R_AH28, R_AH28, 0x00);
2263
+ R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00);
2264
+ R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55);
2265
+ COMPUTE_STEP(R_AD0, R_EH0, 28);
2266
+ R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
2267
+ R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
2268
+ COMPUTE_STEP(R_AD1, R_EH1, 29);
2269
+ R_AH0_ = _mm256_permute2x128_si256(R_AH28, R_AH28, 0x11);
2270
+ R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00);
2271
+ R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55);
2272
+ COMPUTE_STEP(R_AD2, R_EH2, 30);
2273
+ R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA);
2274
+ R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF);
2275
+ COMPUTE_STEP(R_AD3, R_EH3, 31);
2276
+
2277
+ #undef COMPUTE_STEP
2278
+ }
2279
+
2280
+ // Transfer the results to the result matrix
2281
+ Index i = 0;
2282
+ for (Index j = n; j < n + 32; j++) {
2283
+ LinearMapper r0 = res.getLinearMapper(m, j);
2284
+ LinearMapper r1 = res.getLinearMapper(m + 8, j);
2285
+ LinearMapper r2 = res.getLinearMapper(m + 16, j);
2286
+ LinearMapper r3 = res.getLinearMapper(m + 24, j);
2287
+ typedef typename packet_traits<QInt32>::type Packet;
2288
+ r0.template storePacket<Packet>(
2289
+ 0, _mm256_add_epi32(blockO_256[i++],
2290
+ r0.template loadPacket<Packet>(0)));
2291
+ r1.template storePacket<Packet>(
2292
+ 0, _mm256_add_epi32(blockO_256[i++],
2293
+ r1.template loadPacket<Packet>(0)));
2294
+ r2.template storePacket<Packet>(
2295
+ 0, _mm256_add_epi32(blockO_256[i++],
2296
+ r2.template loadPacket<Packet>(0)));
2297
+ r3.template storePacket<Packet>(
2298
+ 0, _mm256_add_epi32(blockO_256[i++],
2299
+ r3.template loadPacket<Packet>(0)));
2300
+ }
2301
+
2302
+ // Zero the result block so it can be reused
2303
+ memset(blockO, 0, 32 * 32 * sizeof(QInt32));
2304
+ }
2305
+ }
2306
+ aligned_delete(blockO, 32 * 32);
2307
+ }
2308
+
2309
+ #endif // EIGEN_USE_OPTIMIZED_INT8_UINT8_MAT_MAT_PRODUCT
2310
+
2311
+ } // namespace internal
2312
+ } // namespace Eigen
2313
+
2314
+ #endif // TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATMATPRODUCTAVX2_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/MatMatProductNEON.h ADDED
@@ -0,0 +1,316 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATMATPRODUCTNEON_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATMATPRODUCTNEON_H_
18
+
19
+ namespace Eigen {
20
+ namespace internal {
21
+
22
+ // Neon optimized implementation where both lhs and rhs are encoded using
23
+ // signed 8bit integers
24
+ #ifdef EIGEN_USE_OPTIMIZED_INT8_INT8_MAT_MAT_PRODUCT
25
+
26
+ template <bool _ConjLhs, bool _ConjRhs>
27
+ class gebp_traits<QInt8, QInt8, _ConjLhs, _ConjRhs> {
28
+ public:
29
+ typedef QInt8 LhsScalar;
30
+ typedef QInt8 RhsScalar;
31
+ typedef QInt32 ResScalar;
32
+
33
+ enum {
34
+ // register block size along the M and N directions
35
+ // One for the current implementation
36
+ nr = 4,
37
+ mr = 1,
38
+ // Progress made at each iteration of the product loop
39
+ // also 1 for the current implementation
40
+ LhsProgress = 1,
41
+ RhsProgress = 1
42
+ };
43
+ };
44
+
45
+ // The signed 8bit Mat-Mat product itself.
46
+ template <typename Index, typename DataMapper, int mr, int nr,
47
+ bool ConjugateLhs, bool ConjugateRhs>
48
+ struct gebp_kernel<QInt8, QInt8, Index, DataMapper, mr, nr, ConjugateLhs,
49
+ ConjugateRhs> {
50
+ EIGEN_DONT_INLINE
51
+ void operator()(const DataMapper& res, const QInt8* blockA,
52
+ const QInt8* blockB, Index rows, Index depth, Index cols,
53
+ QInt32 alpha, Index strideA = -1, Index strideB = -1,
54
+ Index offsetA = 0, Index offsetB = 0);
55
+ };
56
+
57
+ template <typename Index, typename DataMapper, int mr, int nr,
58
+ bool ConjugateLhs, bool ConjugateRhs>
59
+ EIGEN_DONT_INLINE void
60
+ gebp_kernel<QInt8, QInt8, Index, DataMapper, mr, nr, ConjugateLhs,
61
+ ConjugateRhs>::operator()(const DataMapper& res,
62
+ const QInt8* blockA, const QInt8* blockB,
63
+ Index rows, Index depth, Index cols,
64
+ QInt32 alpha, Index strideA,
65
+ Index strideB, Index offsetA,
66
+ Index offsetB) {
67
+ EIGEN_STATIC_ASSERT(!ConjugateLhs, YOU_MADE_A_PROGRAMMING_MISTAKE);
68
+ EIGEN_STATIC_ASSERT(!ConjugateRhs, YOU_MADE_A_PROGRAMMING_MISTAKE);
69
+
70
+ eigen_assert(alpha.value == 1);
71
+ eigen_assert(strideA == -1);
72
+ eigen_assert(strideB == -1);
73
+ eigen_assert(offsetA == 0);
74
+ eigen_assert(offsetB == 0);
75
+
76
+ eigen_assert(rows > 0);
77
+ eigen_assert(cols > 0);
78
+ eigen_assert(depth > 0);
79
+ eigen_assert(blockA);
80
+ eigen_assert(blockB);
81
+
82
+ for (Index j = 0; j < cols; ++j) {
83
+ Index startB = j * depth;
84
+
85
+ for (Index i = 0; i < rows; ++i) {
86
+ Index startA = i * depth;
87
+
88
+ for (Index k = 0; k < depth; ++k) {
89
+ res(i, j) += blockA[startA + k] * blockB[startB + k];
90
+ }
91
+ }
92
+ }
93
+ }
94
+ #endif
95
+
96
+ // Neon optimized implementation of the case where the lhs is encoded using
97
+ // signed 8bit integers and the rhs using unsigned 8bit integers.
98
+ #ifdef EIGEN_USE_OPTIMIZED_INT8_UINT8_MAT_MAT_PRODUCT
99
+
100
+ template <bool _ConjLhs, bool _ConjRhs>
101
+ class gebp_traits<QInt8, QUInt8, _ConjLhs, _ConjRhs> {
102
+ public:
103
+ typedef QInt8 LhsScalar;
104
+ typedef QUInt8 RhsScalar;
105
+ typedef QInt32 ResScalar;
106
+
107
+ enum {
108
+ // register block size along the M and N directions
109
+ nr = 4,
110
+ mr = 1,
111
+ // Progress made at each iteration of the product loop
112
+ // 1 for the current implementation
113
+ LhsProgress = 1,
114
+ RhsProgress = 1
115
+ };
116
+ };
117
+
118
+ // Mat-Mat product of a signed 8bit lhs with an unsigned 8bit rhs
119
+ template <typename Index, typename DataMapper, int mr, int nr,
120
+ bool ConjugateLhs, bool ConjugateRhs>
121
+ struct gebp_kernel<QInt8, QUInt8, Index, DataMapper, mr, nr, ConjugateLhs,
122
+ ConjugateRhs> {
123
+ EIGEN_DONT_INLINE
124
+ void operator()(const DataMapper& res, const QInt8* blockA,
125
+ const QUInt8* blockB, Index rows, Index depth, Index cols,
126
+ QInt32 alpha, Index strideA = -1, Index strideB = -1,
127
+ Index offsetA = 0, Index offsetB = 0);
128
+ };
129
+
130
+ template <typename Index, typename DataMapper, int mr, int nr,
131
+ bool ConjugateLhs, bool ConjugateRhs>
132
+ EIGEN_DONT_INLINE void
133
+ gebp_kernel<QInt8, QUInt8, Index, DataMapper, mr, nr, ConjugateLhs,
134
+ ConjugateRhs>::operator()(const DataMapper& res,
135
+ const QInt8* blockA, const QUInt8* blockB,
136
+ Index rows, Index depth, Index cols,
137
+ QInt32 alpha, Index strideA,
138
+ Index strideB, Index offsetA,
139
+ Index offsetB) {
140
+ EIGEN_STATIC_ASSERT(!ConjugateLhs, YOU_MADE_A_PROGRAMMING_MISTAKE);
141
+ EIGEN_STATIC_ASSERT(!ConjugateRhs, YOU_MADE_A_PROGRAMMING_MISTAKE);
142
+
143
+ eigen_assert(alpha.value == 1);
144
+ eigen_assert(strideA == -1);
145
+ eigen_assert(strideB == -1);
146
+ eigen_assert(offsetA == 0);
147
+ eigen_assert(offsetB == 0);
148
+
149
+ eigen_assert(rows > 0);
150
+ eigen_assert(cols > 0);
151
+ eigen_assert(depth > 0);
152
+ eigen_assert(blockA);
153
+ eigen_assert(blockB);
154
+
155
+ for (Index j = 0; j < cols; ++j) {
156
+ Index startB = j * depth;
157
+
158
+ for (Index i = 0; i < rows; ++i) {
159
+ Index startA = i * depth;
160
+
161
+ for (Index k = 0; k < depth; ++k) {
162
+ res(i, j) += blockA[startA + k] * blockB[startB + k];
163
+ }
164
+ }
165
+ }
166
+ }
167
+ #endif
168
+
169
+ // Neon optimized implementation where the lhs is encoded using unsigned 8bit
170
+ // integers and the rhs using signed 8bit integers.
171
+ #ifdef EIGEN_USE_OPTIMIZED_UINT8_INT8_MAT_MAT_PRODUCT
172
+ template <bool _ConjLhs, bool _ConjRhs>
173
+ class gebp_traits<QUInt8, QInt8, _ConjLhs, _ConjRhs> {
174
+ public:
175
+ typedef QUInt8 LhsScalar;
176
+ typedef QInt8 RhsScalar;
177
+ typedef QInt32 ResScalar;
178
+
179
+ enum {
180
+ // register block size along the M and N directions
181
+ nr = 4,
182
+ mr = 1,
183
+ // Progress made at each iteration of the product loop
184
+ // 1 for the current implementation
185
+ LhsProgress = 1,
186
+ RhsProgress = 1
187
+ };
188
+ };
189
+
190
+ // Mat-Mat product of an unsigned 8bit lhs with a signed 8bit rhs
191
+ template <typename Index, typename DataMapper, int mr, int nr,
192
+ bool ConjugateLhs, bool ConjugateRhs>
193
+ struct gebp_kernel<QUInt8, QInt8, Index, DataMapper, mr, nr, ConjugateLhs,
194
+ ConjugateRhs> {
195
+ EIGEN_DONT_INLINE
196
+ void operator()(const DataMapper& res, const QUInt8* blockA,
197
+ const QInt8* blockB, Index rows, Index depth, Index cols,
198
+ QInt32 alpha, Index strideA = -1, Index strideB = -1,
199
+ Index offsetA = 0, Index offsetB = 0);
200
+ };
201
+
202
+ template <typename Index, typename DataMapper, int mr, int nr,
203
+ bool ConjugateLhs, bool ConjugateRhs>
204
+ EIGEN_DONT_INLINE void
205
+ gebp_kernel<QUInt8, QInt8, Index, DataMapper, mr, nr, ConjugateLhs,
206
+ ConjugateRhs>::operator()(const DataMapper& res,
207
+ const QUInt8* blockA, const QInt8* blockB,
208
+ Index rows, Index depth, Index cols,
209
+ QInt32 alpha, Index strideA,
210
+ Index strideB, Index offsetA,
211
+ Index offsetB) {
212
+ EIGEN_STATIC_ASSERT(!ConjugateLhs, YOU_MADE_A_PROGRAMMING_MISTAKE);
213
+ EIGEN_STATIC_ASSERT(!ConjugateRhs, YOU_MADE_A_PROGRAMMING_MISTAKE);
214
+
215
+ eigen_assert(alpha.value == 1);
216
+ eigen_assert(strideA == -1);
217
+ eigen_assert(strideB == -1);
218
+ eigen_assert(offsetA == 0);
219
+ eigen_assert(offsetB == 0);
220
+
221
+ eigen_assert(rows > 0);
222
+ eigen_assert(cols > 0);
223
+ eigen_assert(depth > 0);
224
+ eigen_assert(blockA);
225
+ eigen_assert(blockB);
226
+
227
+ for (Index j = 0; j < cols; ++j) {
228
+ Index startB = j * depth;
229
+
230
+ for (Index i = 0; i < rows; ++i) {
231
+ Index startA = i * depth;
232
+
233
+ for (Index k = 0; k < depth; ++k) {
234
+ res(i, j) += blockA[startA + k] * blockB[startB + k];
235
+ }
236
+ }
237
+ }
238
+ }
239
+ #endif
240
+
241
+ #ifdef EIGEN_USE_OPTIMIZED_INT16_INT16_MAT_MAT_PRODUCT
242
+
243
+ template <bool _ConjLhs, bool _ConjRhs>
244
+ class gebp_traits<QInt16, QInt16, _ConjLhs, _ConjRhs> {
245
+ public:
246
+ typedef QInt16 LhsScalar;
247
+ typedef QInt16 RhsScalar;
248
+ typedef QInt32 ResScalar;
249
+
250
+ enum {
251
+ // register block size along the M and N directions
252
+ // One for the current implementation
253
+ nr = 4,
254
+ mr = 1,
255
+ // Progress made at each iteration of the product loop
256
+ // also 1 for the current implementation
257
+ LhsProgress = 1,
258
+ RhsProgress = 1
259
+ };
260
+ };
261
+
262
+ // The signed 16bit Mat-Mat product itself.
263
+ template <typename Index, typename DataMapper, int mr, int nr,
264
+ bool ConjugateLhs, bool ConjugateRhs>
265
+ struct gebp_kernel<QInt16, QInt16, Index, DataMapper, mr, nr, ConjugateLhs,
266
+ ConjugateRhs> {
267
+ EIGEN_DONT_INLINE
268
+ void operator()(const DataMapper& res, const QInt16* blockA,
269
+ const QInt16* blockB, Index rows, Index depth, Index cols,
270
+ QInt32 alpha, Index strideA = -1, Index strideB = -1,
271
+ Index offsetA = 0, Index offsetB = 0);
272
+ };
273
+
274
+ template <typename Index, typename DataMapper, int mr, int nr,
275
+ bool ConjugateLhs, bool ConjugateRhs>
276
+ EIGEN_DONT_INLINE void
277
+ gebp_kernel<QInt16, QInt16, Index, DataMapper, mr, nr, ConjugateLhs,
278
+ ConjugateRhs>::operator()(const DataMapper& res,
279
+ const QInt16* blockA,
280
+ const QInt16* blockB, Index rows,
281
+ Index depth, Index cols, QInt32 alpha,
282
+ Index strideA, Index strideB,
283
+ Index offsetA, Index offsetB) {
284
+ EIGEN_STATIC_ASSERT(!ConjugateLhs, YOU_MADE_A_PROGRAMMING_MISTAKE);
285
+ EIGEN_STATIC_ASSERT(!ConjugateRhs, YOU_MADE_A_PROGRAMMING_MISTAKE);
286
+
287
+ eigen_assert(alpha.value == 1);
288
+ eigen_assert(strideA == -1);
289
+ eigen_assert(strideB == -1);
290
+ eigen_assert(offsetA == 0);
291
+ eigen_assert(offsetB == 0);
292
+
293
+ eigen_assert(rows > 0);
294
+ eigen_assert(cols > 0);
295
+ eigen_assert(depth > 0);
296
+ eigen_assert(blockA);
297
+ eigen_assert(blockB);
298
+
299
+ for (Index j = 0; j < cols; ++j) {
300
+ Index startB = j * depth;
301
+
302
+ for (Index i = 0; i < rows; ++i) {
303
+ Index startA = i * depth;
304
+
305
+ for (Index k = 0; k < depth; ++k) {
306
+ res(i, j) += blockA[startA + k] * blockB[startB + k];
307
+ }
308
+ }
309
+ }
310
+ }
311
+ #endif
312
+
313
+ } // namespace internal
314
+ } // namespace Eigen
315
+
316
+ #endif // TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATMATPRODUCTNEON_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/MatVecProduct.h ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATVECPRODUCT_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATVECPRODUCT_H_
18
+
19
+ namespace Eigen {
20
+ namespace internal {
21
+
22
+ // Mat-Vec product
23
+ // Both lhs and rhs are encoded as 8bit signed integers
24
+ template <typename Index, typename LhsMapper, bool ConjugateLhs,
25
+ typename RhsMapper, bool ConjugateRhs, int Version>
26
+ struct general_matrix_vector_product<Index, QInt8, LhsMapper, ColMajor,
27
+ ConjugateLhs, QInt8, RhsMapper,
28
+ ConjugateRhs, Version> {
29
+ EIGEN_DONT_INLINE static void run(Index rows, Index cols,
30
+ const LhsMapper& lhs, const RhsMapper& rhs,
31
+ QInt32* res, Index resIncr, QInt8 alpha);
32
+ };
33
+
34
+ template <typename Index, typename LhsMapper, bool ConjugateLhs,
35
+ typename RhsMapper, bool ConjugateRhs, int Version>
36
+ EIGEN_DONT_INLINE void general_matrix_vector_product<
37
+ Index, QInt8, LhsMapper, ColMajor, ConjugateLhs, QInt8, RhsMapper,
38
+ ConjugateRhs, Version>::run(Index rows, Index cols, const LhsMapper& lhs,
39
+ const RhsMapper& rhs, QInt32* res,
40
+ Index resIncr, QInt8 alpha) {
41
+ eigen_assert(alpha.value == 1);
42
+ eigen_assert(resIncr == 1);
43
+ eigen_assert(rows > 0);
44
+ eigen_assert(cols > 0);
45
+
46
+ for (Index i = 0; i < rows; ++i) {
47
+ for (Index j = 0; j < cols; ++j) {
48
+ res[i] += lhs(i, j) * rhs(j, 0);
49
+ }
50
+ }
51
+ }
52
+
53
+ // Mat-Vec product
54
+ // Both lhs and rhs are encoded as 16bit signed integers
55
+ template <typename Index, typename LhsMapper, bool ConjugateLhs,
56
+ typename RhsMapper, bool ConjugateRhs, int Version>
57
+ struct general_matrix_vector_product<Index, QInt16, LhsMapper, ColMajor,
58
+ ConjugateLhs, QInt16, RhsMapper,
59
+ ConjugateRhs, Version> {
60
+ EIGEN_DONT_INLINE static void run(Index rows, Index cols,
61
+ const LhsMapper& lhs, const RhsMapper& rhs,
62
+ QInt32* res, Index resIncr, QInt16 alpha);
63
+ };
64
+
65
+ template <typename Index, typename LhsMapper, bool ConjugateLhs,
66
+ typename RhsMapper, bool ConjugateRhs, int Version>
67
+ EIGEN_DONT_INLINE void general_matrix_vector_product<
68
+ Index, QInt16, LhsMapper, ColMajor, ConjugateLhs, QInt16, RhsMapper,
69
+ ConjugateRhs, Version>::run(Index rows, Index cols, const LhsMapper& lhs,
70
+ const RhsMapper& rhs, QInt32* res,
71
+ Index resIncr, QInt16 alpha) {
72
+ eigen_assert(alpha.value == 1);
73
+ eigen_assert(resIncr == 1);
74
+ eigen_assert(rows > 0);
75
+ eigen_assert(cols > 0);
76
+
77
+ for (Index i = 0; i < rows; ++i) {
78
+ for (Index j = 0; j < cols; ++j) {
79
+ res[i] += lhs(i, j) * rhs(j, 0);
80
+ }
81
+ }
82
+ }
83
+
84
+ // Mat-Vec product
85
+ // The lhs is encoded using 8bit signed integers, the rhs using 8bit unsigned
86
+ // integers
87
+ template <typename Index, typename LhsMapper, bool ConjugateLhs,
88
+ typename RhsMapper, bool ConjugateRhs, int Version>
89
+ struct general_matrix_vector_product<Index, QInt8, LhsMapper, ColMajor,
90
+ ConjugateLhs, QUInt8, RhsMapper,
91
+ ConjugateRhs, Version> {
92
+ EIGEN_DONT_INLINE static void run(Index rows, Index cols,
93
+ const LhsMapper& lhs, const RhsMapper& rhs,
94
+ QInt32* res, Index resIncr, QUInt8 alpha);
95
+ };
96
+
97
+ template <typename Index, typename LhsMapper, bool ConjugateLhs,
98
+ typename RhsMapper, bool ConjugateRhs, int Version>
99
+ EIGEN_DONT_INLINE void general_matrix_vector_product<
100
+ Index, QInt8, LhsMapper, ColMajor, ConjugateLhs, QUInt8, RhsMapper,
101
+ ConjugateRhs, Version>::run(Index rows, Index cols, const LhsMapper& lhs,
102
+ const RhsMapper& rhs, QInt32* res,
103
+ Index resIncr, QUInt8 alpha) {
104
+ eigen_assert(alpha.value == 1);
105
+ eigen_assert(resIncr == 1);
106
+ eigen_assert(rows > 0);
107
+ eigen_assert(cols > 0);
108
+
109
+ for (Index i = 0; i < rows; ++i) {
110
+ for (Index j = 0; j < cols; ++j) {
111
+ res[i] += lhs(i, j) * rhs(j, 0);
112
+ }
113
+ }
114
+ }
115
+
116
+ // Mat-Vec product
117
+ // The lhs is encoded using bit unsigned integers, the rhs using 8bit signed
118
+ // integers
119
+ template <typename Index, typename LhsMapper, bool ConjugateLhs,
120
+ typename RhsMapper, bool ConjugateRhs, int Version>
121
+ struct general_matrix_vector_product<Index, QUInt8, LhsMapper, ColMajor,
122
+ ConjugateLhs, QInt8, RhsMapper,
123
+ ConjugateRhs, Version> {
124
+ EIGEN_DONT_INLINE static void run(Index rows, Index cols,
125
+ const LhsMapper& lhs, const RhsMapper& rhs,
126
+ QInt32* res, Index resIncr, QInt8 alpha);
127
+ };
128
+
129
+ template <typename Index, typename LhsMapper, bool ConjugateLhs,
130
+ typename RhsMapper, bool ConjugateRhs, int Version>
131
+ EIGEN_DONT_INLINE void general_matrix_vector_product<
132
+ Index, QUInt8, LhsMapper, ColMajor, ConjugateLhs, QInt8, RhsMapper,
133
+ ConjugateRhs, Version>::run(Index rows, Index cols, const LhsMapper& lhs,
134
+ const RhsMapper& rhs, QInt32* res,
135
+ Index resIncr, QInt8 alpha) {
136
+ eigen_assert(alpha.value == 1);
137
+ eigen_assert(resIncr == 1);
138
+ eigen_assert(rows > 0);
139
+ eigen_assert(cols > 0);
140
+
141
+ for (Index i = 0; i < rows; ++i) {
142
+ for (Index j = 0; j < cols; ++j) {
143
+ res[i] += lhs(i, j) * rhs(j, 0);
144
+ }
145
+ }
146
+ }
147
+
148
+ } // namespace internal
149
+ } // namespace Eigen
150
+
151
+ #endif // TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATVECPRODUCT_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/PacketMathAVX.h ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_PACKETMATHAVX_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_PACKETMATHAVX_H_
18
+ #ifdef _MSC_VER
19
+
20
+ #include <emmintrin.h>
21
+ #include <immintrin.h>
22
+ #include <smmintrin.h>
23
+
24
+ #endif
25
+
26
+ namespace Eigen {
27
+ namespace internal {
28
+
29
// 256-bit packet of 32 QInt8 values and its 128-bit (16-value) half packet.
// The integer template argument is a unique tag that keeps these wrapper
// types distinct from other eigen_packet_wrapper instantiations.
typedef eigen_packet_wrapper<__m256i, 10> Packet32q8i;
typedef eigen_packet_wrapper<__m128i, 11> Packet16q8i;

// Packet traits for QInt8 on AVX: vectorization is enabled with 32 lanes per
// packet, but no arithmetic/min/max packet ops are advertised here — only the
// load/store and cast paths defined in this header are used.
template <>
struct packet_traits<QInt8> : default_packet_traits {
  typedef Packet32q8i type;
  typedef Packet16q8i half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 32,
  };
  enum {
    HasAdd = 0,
    HasSub = 0,
    HasMul = 0,
    HasNegate = 0,
    HasAbs = 0,
    HasAbs2 = 0,
    HasMin = 0,
    HasMax = 0,
    HasConj = 0,
    HasSetLinear = 0
  };
};

// Reverse mapping: packet type -> scalar type, half packet, lane count and
// required alignment.
template <>
struct unpacket_traits<Packet32q8i> {
  typedef QInt8 type;
  typedef Packet16q8i half;
  enum {
    size = 32,
    alignment = Aligned32,
    vectorizable = true,
    masked_load_available = false,
    masked_store_available = false
  };
};

template <>
struct unpacket_traits<Packet16q8i> {
  typedef QInt8 type;
  typedef Packet16q8i half;
  enum {
    size = 16,
    // NOTE(review): Aligned32 for a 16-byte packet mirrors the original
    // source; the corresponding pload only needs 16-byte alignment — confirm
    // whether this is intentional over-alignment.
    alignment = Aligned32,
    vectorizable = true,
    masked_load_available = false,
    masked_store_available = false
  };
};
80
// Broadcast a single QInt8 value to all 32 lanes of the packet.
template <>
EIGEN_STRONG_INLINE Packet32q8i pset1<Packet32q8i>(const QInt8& from) {
  return _mm256_set1_epi8(from.value);
}
// Unaligned loads of 32 / 16 QInt8 values.
template <>
EIGEN_STRONG_INLINE Packet32q8i ploadu<Packet32q8i>(const QInt8* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(
      reinterpret_cast<const __m256i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet16q8i ploadu<Packet16q8i>(const QInt8* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_si128(
      reinterpret_cast<const __m128i*>(from));
}

// Aligned loads: `from` must be 32- / 16-byte aligned respectively.
template <>
EIGEN_STRONG_INLINE Packet32q8i pload<Packet32q8i>(const QInt8* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(
      reinterpret_cast<const __m256i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet16q8i pload<Packet16q8i>(const QInt8* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(
      reinterpret_cast<const __m128i*>(from));
}
105
+
106
// Unaligned stores of 32 / 16 QInt8 values.
template <>
EIGEN_STRONG_INLINE void pstoreu<QInt8>(QInt8* to, const Packet32q8i& from) {
  EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(
      reinterpret_cast<__m256i*>(to), from.m_val);
}
template <>
EIGEN_STRONG_INLINE void pstoreu<QInt8>(QInt8* to, const Packet16q8i& from) {
  EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to),
                                               from.m_val);
}

// Aligned stores: `to` must be 32- / 16-byte aligned respectively.
template <>
EIGEN_STRONG_INLINE void pstore<QInt8>(QInt8* to, const Packet32q8i& from) {
  EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(reinterpret_cast<__m256i*>(to),
                                               from.m_val);
}
template <>
EIGEN_STRONG_INLINE void pstore<QInt8>(QInt8* to, const Packet16q8i& from) {
  EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to),
                                            from.m_val);
}
127
+
128
typedef __m256 Packet8f;

// Four 8-float packets are consumed to produce one 32-lane QInt8 packet.
template <>
struct type_casting_traits<float, QInt8> {
  enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
};

// float -> QInt8 cast: each float vector is round-converted to int32
// (_mm256_cvtps_epi32), then narrowed int32 -> int16 -> int8 with saturation
// via _mm_packs_epi32/_mm_packs_epi16 on the 128-bit halves, and the two
// resulting 128-bit groups are reassembled into one 256-bit packet.
template <>
EIGEN_STRONG_INLINE Packet32q8i
pcast<Packet8f, Packet32q8i>(const Packet8f& a, const Packet8f& b,
                             const Packet8f& c, const Packet8f& d) {
  const __m256i a_conv = _mm256_cvtps_epi32(a);
  const __m256i b_conv = _mm256_cvtps_epi32(b);
  const __m256i c_conv = _mm256_cvtps_epi32(c);
  const __m256i d_conv = _mm256_cvtps_epi32(d);
  // Narrow a/b into the low 16 bytes.
  __m128i low = _mm256_castsi256_si128(a_conv);
  __m128i high = _mm256_extractf128_si256(a_conv, 1);
  __m128i tmp = _mm_packs_epi32(low, high);
  __m128i low2 = _mm256_castsi256_si128(b_conv);
  __m128i high2 = _mm256_extractf128_si256(b_conv, 1);
  __m128i tmp2 = _mm_packs_epi32(low2, high2);
  __m128i converted_low = _mm_packs_epi16(tmp, tmp2);
  // Narrow c/d into the high 16 bytes.
  low = _mm256_castsi256_si128(c_conv);
  high = _mm256_extractf128_si256(c_conv, 1);
  tmp = _mm_packs_epi32(low, high);
  low2 = _mm256_castsi256_si128(d_conv);
  high2 = _mm256_extractf128_si256(d_conv, 1);
  tmp2 = _mm_packs_epi16(tmp, tmp2) /* placeholder removed */;
  __m128i converted_high = _mm_packs_epi16(tmp, tmp2);
  return _mm256_insertf128_si256(_mm256_castsi128_si256(converted_low),
                                 converted_high, 1);
}
160
+
161
+ } // end namespace internal
162
+ } // end namespace Eigen
163
+
164
+ #endif // TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_PACKETMATHAVX_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/PacketMathAVX2.h ADDED
@@ -0,0 +1,560 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_PACKETMATHAVX2_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_PACKETMATHAVX2_H_
18
+ #ifdef _MSC_VER
19
+
20
+ #include <emmintrin.h>
21
+ #include <immintrin.h>
22
+ #include <smmintrin.h>
23
+
24
+ #endif
25
+
26
// Helpers that emulate _mm256_extract_epi16 / _mm256_extract_epi8 with fixed
// indices 0 and 1. The constant expressions are kept literal (`N >> 3`,
// `N % 8`, `N >> 4`, `N % 16`) so the lane-select and element-select
// arguments of the underlying SSE intrinsics remain compile-time constants:
// the shift picks the 128-bit lane, the modulus the element within it.
inline int _mm256_extract_epi16_N0(const __m256i X) {
  return _mm_extract_epi16(_mm256_extractf128_si256(X, 0 >> 3), 0 % 8);
}

inline int _mm256_extract_epi16_N1(const __m256i X) {
  return _mm_extract_epi16(_mm256_extractf128_si256(X, 1 >> 3), 1 % 8);
}

inline int _mm256_extract_epi8_N0(const __m256i X) {
  return _mm_extract_epi8(_mm256_extractf128_si256((X), 0 >> 4), 0 % 16);
}

inline int _mm256_extract_epi8_N1(const __m256i X) {
  return _mm_extract_epi8(_mm256_extractf128_si256((X), 1 >> 4), 1 % 16);
}
41
+
42
+ namespace Eigen {
43
+ namespace internal {
44
+
45
// Fixed-point packet types for AVX2. Naming: Packet<lanes>q<bits><i|u>,
// e.g. Packet32q8i = 32 signed 8-bit lanes in a 256-bit register. The
// integer template argument is a unique tag distinguishing each wrapper.
typedef eigen_packet_wrapper<__m256i, 20> Packet32q8i;
typedef eigen_packet_wrapper<__m256i, 21> Packet16q16i;
typedef eigen_packet_wrapper<__m256i, 22> Packet32q8u;
typedef eigen_packet_wrapper<__m128i, 23> Packet16q8i;
typedef eigen_packet_wrapper<__m128i, 25> Packet16q8u;
typedef eigen_packet_wrapper<__m128i, 26> Packet8q16i;
typedef eigen_packet_wrapper<__m256i, 27> Packet8q32i;
typedef eigen_packet_wrapper<__m128i, 28> Packet4q32i;
53
+
54
// These traits are superseded by the AVX512 versions when AVX512 is enabled.
#ifndef EIGEN_VECTORIZE_AVX512
// QInt8: 32 lanes; only min/max packet ops are advertised.
template <>
struct packet_traits<QInt8> : default_packet_traits {
  typedef Packet32q8i type;
  typedef Packet16q8i half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 32,
  };
  enum {
    HasAdd = 0,
    HasSub = 0,
    HasMul = 0,
    HasNegate = 0,
    HasAbs = 0,
    HasAbs2 = 0,
    HasMin = 1,
    HasMax = 1,
    HasConj = 0,
    HasSetLinear = 0
  };
};
// QUInt8: 32 lanes; only min/max packet ops are advertised.
template <>
struct packet_traits<QUInt8> : default_packet_traits {
  typedef Packet32q8u type;
  typedef Packet16q8u half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 32,
  };
  enum {
    HasAdd = 0,
    HasSub = 0,
    HasMul = 0,
    HasNegate = 0,
    HasAbs = 0,
    HasAbs2 = 0,
    HasMin = 1,
    HasMax = 1,
    HasConj = 0,
    HasSetLinear = 0
  };
};
// QInt16: 16 lanes; only min/max packet ops are advertised.
template <>
struct packet_traits<QInt16> : default_packet_traits {
  typedef Packet16q16i type;
  typedef Packet8q16i half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 16,
  };
  enum {
    HasAdd = 0,
    HasSub = 0,
    HasMul = 0,
    HasNegate = 0,
    HasAbs = 0,
    HasAbs2 = 0,
    HasMin = 1,
    HasMax = 1,
    HasConj = 0,
    HasSetLinear = 0
  };
};
// QInt32: 8 lanes; full add/sub/mul/negate arithmetic plus min/max.
template <>
struct packet_traits<QInt32> : default_packet_traits {
  typedef Packet8q32i type;
  typedef Packet4q32i half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 8,
  };
  enum {
    HasAdd = 1,
    HasSub = 1,
    HasMul = 1,
    HasNegate = 1,
    HasAbs = 0,
    HasAbs2 = 0,
    HasMin = 1,
    HasMax = 1,
    HasConj = 0,
    HasSetLinear = 0
  };
};
#endif
144
+
145
// Reverse mapping for each packet type: scalar type, half packet, lane count
// and required alignment. All packets declare Aligned32 alignment, including
// the 128-bit ones (mirrors the original source).
template <>
struct unpacket_traits<Packet32q8i> {
  typedef QInt8 type;
  typedef Packet16q8i half;
  enum {
    size = 32,
    alignment = Aligned32,
    vectorizable = true,
    masked_load_available = false,
    masked_store_available = false
  };
};
template <>
struct unpacket_traits<Packet16q8i> {
  typedef QInt8 type;
  typedef Packet16q8i half;
  enum {
    size = 16,
    alignment = Aligned32,
    vectorizable = true,
    masked_load_available = false,
    masked_store_available = false
  };
};
template <>
struct unpacket_traits<Packet16q16i> {
  typedef QInt16 type;
  typedef Packet8q16i half;
  enum {
    size = 16,
    alignment = Aligned32,
    vectorizable = true,
    masked_load_available = false,
    masked_store_available = false
  };
};
template <>
struct unpacket_traits<Packet8q16i> {
  typedef QInt16 type;
  typedef Packet8q16i half;
  enum {
    size = 8,
    alignment = Aligned32,
    vectorizable = true,
    masked_load_available = false,
    masked_store_available = false
  };
};
template <>
struct unpacket_traits<Packet32q8u> {
  typedef QUInt8 type;
  typedef Packet16q8u half;
  enum {
    size = 32,
    alignment = Aligned32,
    vectorizable = true,
    masked_load_available = false,
    masked_store_available = false
  };
};
template <>
struct unpacket_traits<Packet8q32i> {
  typedef QInt32 type;
  typedef Packet4q32i half;
  enum {
    size = 8,
    alignment = Aligned32,
    vectorizable = true,
    masked_load_available = false,
    masked_store_available = false
  };
};
217
+
218
// Unaligned load
// One specialization per packet type; 256-bit packets use
// _mm256_loadu_si256, 128-bit packets use _mm_loadu_si128.
template <>
EIGEN_STRONG_INLINE Packet32q8i ploadu<Packet32q8i>(const QInt8* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(
      reinterpret_cast<const __m256i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet16q8i ploadu<Packet16q8i>(const QInt8* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_si128(
      reinterpret_cast<const __m128i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet32q8u ploadu<Packet32q8u>(const QUInt8* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(
      reinterpret_cast<const __m256i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet16q16i ploadu<Packet16q16i>(const QInt16* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(
      reinterpret_cast<const __m256i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet8q16i ploadu<Packet8q16i>(const QInt16* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_si128(
      reinterpret_cast<const __m128i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet8q32i ploadu<Packet8q32i>(const QInt32* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(
      reinterpret_cast<const __m256i*>(from));
}
249
+
250
// Aligned load
// `from` must be suitably aligned (32-byte for 256-bit packets, 16-byte for
// 128-bit packets).
template <>
EIGEN_STRONG_INLINE Packet32q8i pload<Packet32q8i>(const QInt8* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(
      reinterpret_cast<const __m256i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet16q8i pload<Packet16q8i>(const QInt8* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(
      reinterpret_cast<const __m128i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet32q8u pload<Packet32q8u>(const QUInt8* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(
      reinterpret_cast<const __m256i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet16q16i pload<Packet16q16i>(const QInt16* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(
      reinterpret_cast<const __m256i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet8q16i pload<Packet8q16i>(const QInt16* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(
      reinterpret_cast<const __m128i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet8q32i pload<Packet8q32i>(const QInt32* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(
      reinterpret_cast<const __m256i*>(from));
}
281
+
282
// Unaligned store
// One specialization per packet type; 256-bit packets use
// _mm256_storeu_si256, 128-bit packets use _mm_storeu_si128.
template <>
EIGEN_STRONG_INLINE void pstoreu<QInt8>(QInt8* to, const Packet32q8i& from) {
  EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(
      reinterpret_cast<__m256i*>(to), from.m_val);
}
template <>
EIGEN_STRONG_INLINE void pstoreu<QInt8>(QInt8* to, const Packet16q8i& from) {
  EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to),
                                               from.m_val);
}
template <>
EIGEN_STRONG_INLINE void pstoreu<QUInt8>(QUInt8* to, const Packet32q8u& from) {
  EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(
      reinterpret_cast<__m256i*>(to), from.m_val);
}
template <>
EIGEN_STRONG_INLINE void pstoreu<QInt16>(QInt16* to, const Packet16q16i& from) {
  EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(
      reinterpret_cast<__m256i*>(to), from.m_val);
}
template <>
EIGEN_STRONG_INLINE void pstoreu<QInt16>(QInt16* to, const Packet8q16i& from) {
  EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to),
                                               from.m_val);
}
template <>
EIGEN_STRONG_INLINE void pstoreu<QInt32>(QInt32* to, const Packet8q32i& from) {
  EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(
      reinterpret_cast<__m256i*>(to), from.m_val);
}
313
+
314
// Aligned store
// `to` must be suitably aligned (32-byte for 256-bit packets, 16-byte for
// 128-bit packets).
template <>
EIGEN_STRONG_INLINE void pstore<QInt32>(QInt32* to, const Packet8q32i& from) {
  EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(reinterpret_cast<__m256i*>(to),
                                               from.m_val);
}
template <>
EIGEN_STRONG_INLINE void pstore<QInt16>(QInt16* to, const Packet16q16i& from) {
  EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(reinterpret_cast<__m256i*>(to),
                                               from.m_val);
}
template <>
EIGEN_STRONG_INLINE void pstore<QInt16>(QInt16* to, const Packet8q16i& from) {
  EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to),
                                            from.m_val);
}
template <>
EIGEN_STRONG_INLINE void pstore<QUInt8>(QUInt8* to, const Packet32q8u& from) {
  EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(reinterpret_cast<__m256i*>(to),
                                               from.m_val);
}
template <>
EIGEN_STRONG_INLINE void pstore<QInt8>(QInt8* to, const Packet32q8i& from) {
  EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(reinterpret_cast<__m256i*>(to),
                                               from.m_val);
}
template <>
EIGEN_STRONG_INLINE void pstore<QInt8>(QInt8* to, const Packet16q8i& from) {
  EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to),
                                            from.m_val);
}
345
+
346
// Extract first element.
// Lane 0 of each packet, via the extract helpers defined at the top of this
// file (or a plain low-lane int32 move for the QInt32 packet).
template <>
EIGEN_STRONG_INLINE QInt32 pfirst<Packet8q32i>(const Packet8q32i& a) {
  return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
}
template <>
EIGEN_STRONG_INLINE QInt16 pfirst<Packet16q16i>(const Packet16q16i& a) {
  return _mm256_extract_epi16_N0(a.m_val);
}
template <>
EIGEN_STRONG_INLINE QUInt8 pfirst<Packet32q8u>(const Packet32q8u& a) {
  // The extract helper returns int; narrow back to the unsigned byte value.
  return static_cast<uint8_t>(_mm256_extract_epi8_N0(a.m_val));
}
template <>
EIGEN_STRONG_INLINE QInt8 pfirst<Packet32q8i>(const Packet32q8i& a) {
  return _mm256_extract_epi8_N0(a.m_val);
}
363
+
364
// Initialize to constant value.
template <>
EIGEN_STRONG_INLINE Packet32q8i pset1<Packet32q8i>(const QInt8& from) {
  return _mm256_set1_epi8(from.value);
}
template <>
EIGEN_STRONG_INLINE Packet32q8u pset1<Packet32q8u>(const QUInt8& from) {
  return _mm256_set1_epi8(static_cast<uint8_t>(from.value));
}
template <>
EIGEN_STRONG_INLINE Packet8q32i pset1<Packet8q32i>(const QInt32& from) {
  return _mm256_set1_epi32(from.value);
}

// Basic arithmetic packet ops for QInt32.
template <>
EIGEN_STRONG_INLINE Packet8q32i padd<Packet8q32i>(const Packet8q32i& a,
                                                  const Packet8q32i& b) {
  return _mm256_add_epi32(a.m_val, b.m_val);
}
// NOTE(review): this pset1 sits between the arithmetic ops in the original
// source; kept in place to preserve the file's declaration order.
template <>
EIGEN_STRONG_INLINE Packet16q16i pset1<Packet16q16i>(const QInt16& from) {
  return _mm256_set1_epi16(from.value);
}
template <>
EIGEN_STRONG_INLINE Packet8q32i psub<Packet8q32i>(const Packet8q32i& a,
                                                  const Packet8q32i& b) {
  return _mm256_sub_epi32(a.m_val, b.m_val);
}
// Note: mullo truncates the result to 32 bits.
template <>
EIGEN_STRONG_INLINE Packet8q32i pmul<Packet8q32i>(const Packet8q32i& a,
                                                  const Packet8q32i& b) {
  return _mm256_mullo_epi32(a.m_val, b.m_val);
}
// Negation as 0 - a.
template <>
EIGEN_STRONG_INLINE Packet8q32i pnegate<Packet8q32i>(const Packet8q32i& a) {
  return _mm256_sub_epi32(_mm256_setzero_si256(), a.m_val);
}
403
+
404
// Min and max.
// Lane-wise min/max using the element-width-specific AVX2 intrinsics
// (epi32 / epi16 / epu8 / epi8 match the packet's lane type and signedness).
template <>
EIGEN_STRONG_INLINE Packet8q32i pmin<Packet8q32i>(const Packet8q32i& a,
                                                  const Packet8q32i& b) {
  return _mm256_min_epi32(a.m_val, b.m_val);
}
template <>
EIGEN_STRONG_INLINE Packet8q32i pmax<Packet8q32i>(const Packet8q32i& a,
                                                  const Packet8q32i& b) {
  return _mm256_max_epi32(a.m_val, b.m_val);
}

template <>
EIGEN_STRONG_INLINE Packet16q16i pmin<Packet16q16i>(const Packet16q16i& a,
                                                    const Packet16q16i& b) {
  return _mm256_min_epi16(a.m_val, b.m_val);
}
template <>
EIGEN_STRONG_INLINE Packet16q16i pmax<Packet16q16i>(const Packet16q16i& a,
                                                    const Packet16q16i& b) {
  return _mm256_max_epi16(a.m_val, b.m_val);
}

template <>
EIGEN_STRONG_INLINE Packet32q8u pmin<Packet32q8u>(const Packet32q8u& a,
                                                  const Packet32q8u& b) {
  return _mm256_min_epu8(a.m_val, b.m_val);
}
template <>
EIGEN_STRONG_INLINE Packet32q8u pmax<Packet32q8u>(const Packet32q8u& a,
                                                  const Packet32q8u& b) {
  return _mm256_max_epu8(a.m_val, b.m_val);
}

template <>
EIGEN_STRONG_INLINE Packet32q8i pmin<Packet32q8i>(const Packet32q8i& a,
                                                  const Packet32q8i& b) {
  return _mm256_min_epi8(a.m_val, b.m_val);
}
template <>
EIGEN_STRONG_INLINE Packet32q8i pmax<Packet32q8i>(const Packet32q8i& a,
                                                  const Packet32q8i& b) {
  return _mm256_max_epi8(a.m_val, b.m_val);
}
448
+
449
// Reductions.
// Horizontal min/max: repeatedly fold the packet onto itself with
// progressively finer shuffles (swap 128-bit lanes, then 64-bit, 32-bit,
// and for 8-bit lanes 16-bit groups), then resolve the last pair of
// candidates with a scalar std::min/std::max on the extracted lanes.
template <>
EIGEN_STRONG_INLINE QInt32 predux_min<Packet8q32i>(const Packet8q32i& a) {
  __m256i tmp = _mm256_min_epi32(a, _mm256_permute2f128_si256(a, a, 1));
  tmp =
      _mm256_min_epi32(tmp, _mm256_shuffle_epi32(tmp, _MM_SHUFFLE(1, 0, 3, 2)));
  return pfirst<Packet8q32i>(
      _mm256_min_epi32(tmp, _mm256_shuffle_epi32(tmp, 1)));
}
template <>
EIGEN_STRONG_INLINE QInt32 predux_max<Packet8q32i>(const Packet8q32i& a) {
  __m256i tmp = _mm256_max_epi32(a, _mm256_permute2f128_si256(a, a, 1));
  tmp =
      _mm256_max_epi32(tmp, _mm256_shuffle_epi32(tmp, _MM_SHUFFLE(1, 0, 3, 2)));
  return pfirst<Packet8q32i>(
      _mm256_max_epi32(tmp, _mm256_shuffle_epi32(tmp, 1)));
}

template <>
EIGEN_STRONG_INLINE QInt16 predux_min<Packet16q16i>(const Packet16q16i& a) {
  __m256i tmp = _mm256_min_epi16(a, _mm256_permute2f128_si256(a, a, 1));
  tmp =
      _mm256_min_epi16(tmp, _mm256_shuffle_epi32(tmp, _MM_SHUFFLE(1, 0, 3, 2)));
  tmp = _mm256_min_epi16(tmp, _mm256_shuffle_epi32(tmp, 1));
  return std::min(_mm256_extract_epi16_N0(tmp), _mm256_extract_epi16_N1(tmp));
}
template <>
EIGEN_STRONG_INLINE QInt16 predux_max<Packet16q16i>(const Packet16q16i& a) {
  __m256i tmp = _mm256_max_epi16(a, _mm256_permute2f128_si256(a, a, 1));
  tmp =
      _mm256_max_epi16(tmp, _mm256_shuffle_epi32(tmp, _MM_SHUFFLE(1, 0, 3, 2)));
  tmp = _mm256_max_epi16(tmp, _mm256_shuffle_epi32(tmp, 1));
  return std::max(_mm256_extract_epi16_N0(tmp), _mm256_extract_epi16_N1(tmp));
}

template <>
EIGEN_STRONG_INLINE QUInt8 predux_min<Packet32q8u>(const Packet32q8u& a) {
  __m256i tmp = _mm256_min_epu8(a, _mm256_permute2f128_si256(a, a, 1));
  tmp =
      _mm256_min_epu8(tmp, _mm256_shuffle_epi32(tmp, _MM_SHUFFLE(1, 0, 3, 2)));
  tmp = _mm256_min_epu8(tmp, _mm256_shuffle_epi32(tmp, 1));
  tmp = _mm256_min_epu8(tmp,
                        _mm256_shufflelo_epi16(tmp, _MM_SHUFFLE(1, 0, 3, 2)));
  return std::min(static_cast<uint8_t>(_mm256_extract_epi8_N0(tmp)),
                  static_cast<uint8_t>(_mm256_extract_epi8_N1(tmp)));
}
template <>
EIGEN_STRONG_INLINE QUInt8 predux_max<Packet32q8u>(const Packet32q8u& a) {
  __m256i tmp = _mm256_max_epu8(a, _mm256_permute2f128_si256(a, a, 1));
  tmp =
      _mm256_max_epu8(tmp, _mm256_shuffle_epi32(tmp, _MM_SHUFFLE(1, 0, 3, 2)));
  tmp = _mm256_max_epu8(tmp, _mm256_shuffle_epi32(tmp, 1));
  tmp = _mm256_max_epu8(tmp,
                        _mm256_shufflelo_epi16(tmp, _MM_SHUFFLE(1, 0, 3, 2)));
  return std::max(static_cast<uint8_t>(_mm256_extract_epi8_N0(tmp)),
                  static_cast<uint8_t>(_mm256_extract_epi8_N1(tmp)));
}

template <>
EIGEN_STRONG_INLINE QInt8 predux_min<Packet32q8i>(const Packet32q8i& a) {
  __m256i tmp = _mm256_min_epi8(a, _mm256_permute2f128_si256(a, a, 1));
  tmp =
      _mm256_min_epi8(tmp, _mm256_shuffle_epi32(tmp, _MM_SHUFFLE(1, 0, 3, 2)));
  tmp = _mm256_min_epi8(tmp, _mm256_shuffle_epi32(tmp, 1));
  tmp = _mm256_min_epi8(tmp,
                        _mm256_shufflelo_epi16(tmp, _MM_SHUFFLE(1, 0, 3, 2)));
  return std::min(_mm256_extract_epi8_N0(tmp), _mm256_extract_epi8_N1(tmp));
}
template <>
EIGEN_STRONG_INLINE QInt8 predux_max<Packet32q8i>(const Packet32q8i& a) {
  __m256i tmp = _mm256_max_epi8(a, _mm256_permute2f128_si256(a, a, 1));
  tmp =
      _mm256_max_epi8(tmp, _mm256_shuffle_epi32(tmp, _MM_SHUFFLE(1, 0, 3, 2)));
  tmp = _mm256_max_epi8(tmp, _mm256_shuffle_epi32(tmp, 1));
  tmp = _mm256_max_epi8(tmp,
                        _mm256_shufflelo_epi16(tmp, _MM_SHUFFLE(1, 0, 3, 2)));
  return std::max(_mm256_extract_epi8_N0(tmp), _mm256_extract_epi8_N1(tmp));
}
527
+
528
// Vectorized scaling of a QInt32 packet (Packet8q32i) by a double scalar.
// (The original comment said "Packet32q8i by float"; the functor actually
// multiplies QInt32 values by a double.)
template <>
struct scalar_product_op<QInt32, double> : binary_op_base<QInt32, double> {
  typedef typename ScalarBinaryOpTraits<QInt32, double>::ReturnType result_type;
#ifdef EIGEN_SCALAR_BINARY_OP_PLUGIN
  scalar_product_op(){EIGEN_SCALAR_BINARY_OP_PLUGIN}
#endif
  // Scalar path.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type
  operator()(const QInt32& a, const double& b) const {
    return a * b;
  }

  // Packet path: widen each int32 half to double, multiply by the broadcast
  // scale, round-convert back to int32, and reassemble the 256-bit result.
  EIGEN_STRONG_INLINE const Packet8q32i packetOp(const Packet8q32i& a,
                                                 const double& b) const {
    __m256d scale = _mm256_set1_pd(b);
    __m256d a_lo = _mm256_cvtepi32_pd(_mm256_castsi256_si128(a));
    __m128i result_lo = _mm256_cvtpd_epi32(_mm256_mul_pd(scale, a_lo));
    __m256d a_hi = _mm256_cvtepi32_pd(_mm256_extracti128_si256(a, 1));
    __m128i result_hi = _mm256_cvtpd_epi32(_mm256_mul_pd(scale, a_hi));
    return _mm256_insertf128_si256(_mm256_castsi128_si256(result_lo), result_hi,
                                   1);
  }
};

template <>
struct functor_traits<scalar_product_op<QInt32, double>> {
  enum { Cost = 4 * NumTraits<float>::MulCost, PacketAccess = true };
};
556
+
557
+ } // end namespace internal
558
+ } // end namespace Eigen
559
+
560
+ #endif // TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_PACKETMATHAVX2_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/PacketMathAVX512.h ADDED
@@ -0,0 +1,531 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_PACKETMATHAVX512_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_PACKETMATHAVX512_H_
18
+
19
+ #include "PacketMathAVX2.h"
20
+
21
+ namespace Eigen {
22
+ namespace internal {
23
+
24
// 512-bit packet types for the fixed-point scalars.  All of them wrap
// __m512i; the second template argument is a unique tag that keeps the
// wrapper types distinct for overload resolution.
typedef eigen_packet_wrapper<__m512i, 30> Packet64q8i;
typedef eigen_packet_wrapper<__m512i, 31> Packet32q16i;
typedef eigen_packet_wrapper<__m512i, 32> Packet64q8u;
typedef eigen_packet_wrapper<__m512i, 33> Packet16q32i;

// Packet traits.  For the 8- and 16-bit types only min/max are vectorized;
// QInt32 additionally vectorizes add/sub/mul/negate (see the ops below).
template <>
struct packet_traits<QInt8> : default_packet_traits {
  typedef Packet64q8i type;
  typedef Packet32q8i half;  // 256-bit half packet from PacketMathAVX2.h
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 64,
  };
  enum {
    HasAdd = 0,
    HasSub = 0,
    HasMul = 0,
    HasNegate = 0,
    HasAbs = 0,
    HasAbs2 = 0,
    HasMin = 1,
    HasMax = 1,
    HasConj = 0,
    HasSetLinear = 0
  };
};
template <>
struct packet_traits<QUInt8> : default_packet_traits {
  typedef Packet64q8u type;
  typedef Packet32q8u half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 64,
  };
  enum {
    HasAdd = 0,
    HasSub = 0,
    HasMul = 0,
    HasNegate = 0,
    HasAbs = 0,
    HasAbs2 = 0,
    HasMin = 1,
    HasMax = 1,
    HasConj = 0,
    HasSetLinear = 0
  };
};
template <>
struct packet_traits<QInt16> : default_packet_traits {
  typedef Packet32q16i type;
  typedef Packet16q16i half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 32,
  };
  enum {
    HasAdd = 0,
    HasSub = 0,
    HasMul = 0,
    HasNegate = 0,
    HasAbs = 0,
    HasAbs2 = 0,
    HasMin = 1,
    HasMax = 1,
    HasConj = 0,
    HasSetLinear = 0
  };
};
template <>
struct packet_traits<QInt32> : default_packet_traits {
  typedef Packet16q32i type;
  typedef Packet8q32i half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 16,
  };
  enum {
    HasAdd = 1,
    HasSub = 1,
    HasMul = 1,
    HasNegate = 1,
    HasAbs = 0,
    HasAbs2 = 0,
    HasMin = 1,
    HasMax = 1,
    HasConj = 0,
    HasSetLinear = 0
  };
};

// Unpacket traits: scalar type, half-packet type, element count and the
// 64-byte alignment required by aligned 512-bit loads/stores.
template <>
struct unpacket_traits<Packet64q8i> {
  typedef QInt8 type;
  typedef Packet32q8i half;
  enum {
    size = 64,
    alignment = Aligned64,
    masked_load_available = false,
    masked_store_available = false
  };
};
template <>
struct unpacket_traits<Packet32q16i> {
  typedef QInt16 type;
  typedef Packet16q16i half;
  enum {
    size = 32,
    alignment = Aligned64,
    masked_load_available = false,
    masked_store_available = false
  };
};
template <>
struct unpacket_traits<Packet64q8u> {
  typedef QUInt8 type;
  typedef Packet32q8u half;
  enum {
    size = 64,
    alignment = Aligned64,
    masked_load_available = false,
    masked_store_available = false
  };
};
template <>
struct unpacket_traits<Packet16q32i> {
  typedef QInt32 type;
  typedef Packet8q32i half;
  enum {
    size = 16,
    alignment = Aligned64,
    masked_load_available = false,
    masked_store_available = false
  };
};
162
+
163
// Unaligned load: all fixed-point packet types are raw __m512i bit patterns,
// so every overload is a plain 512-bit unaligned load.
template <>
EIGEN_STRONG_INLINE Packet64q8i ploadu<Packet64q8i>(const QInt8* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_si512(
      reinterpret_cast<const __m512i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet32q16i ploadu<Packet32q16i>(const QInt16* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_si512(
      reinterpret_cast<const __m512i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet64q8u ploadu<Packet64q8u>(const QUInt8* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_si512(
      reinterpret_cast<const __m512i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet16q32i ploadu<Packet16q32i>(const QInt32* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_si512(
      reinterpret_cast<const __m512i*>(from));
}

// Aligned load: `from` must be 64-byte aligned (see unpacket_traits).
template <>
EIGEN_STRONG_INLINE Packet64q8i pload<Packet64q8i>(const QInt8* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_si512(
      reinterpret_cast<const __m512i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet32q16i pload<Packet32q16i>(const QInt16* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_si512(
      reinterpret_cast<const __m512i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet64q8u pload<Packet64q8u>(const QUInt8* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_si512(
      reinterpret_cast<const __m512i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet16q32i pload<Packet16q32i>(const QInt32* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_si512(
      reinterpret_cast<const __m512i*>(from));
}

// Unaligned store.
template <>
EIGEN_STRONG_INLINE void pstoreu<QInt8>(QInt8* to, const Packet64q8i& from) {
  EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_si512(
      reinterpret_cast<__m512i*>(to), from.m_val);
}
template <>
EIGEN_STRONG_INLINE void pstoreu<QInt16>(QInt16* to, const Packet32q16i& from) {
  EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_si512(
      reinterpret_cast<__m512i*>(to), from.m_val);
}
template <>
EIGEN_STRONG_INLINE void pstoreu<QUInt8>(QUInt8* to, const Packet64q8u& from) {
  EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_si512(
      reinterpret_cast<__m512i*>(to), from.m_val);
}
template <>
EIGEN_STRONG_INLINE void pstoreu<QInt32>(QInt32* to, const Packet16q32i& from) {
  EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_si512(
      reinterpret_cast<__m512i*>(to), from.m_val);
}

// Aligned store: `to` must be 64-byte aligned.
template <>
EIGEN_STRONG_INLINE void pstore<QInt32>(QInt32* to, const Packet16q32i& from) {
  EIGEN_DEBUG_ALIGNED_STORE _mm512_store_si512(reinterpret_cast<__m512i*>(to),
                                               from.m_val);
}
template <>
EIGEN_STRONG_INLINE void pstore<QUInt8>(QUInt8* to, const Packet64q8u& from) {
  EIGEN_DEBUG_ALIGNED_STORE _mm512_store_si512(reinterpret_cast<__m512i*>(to),
                                               from.m_val);
}
template <>
EIGEN_STRONG_INLINE void pstore<QInt8>(QInt8* to, const Packet64q8i& from) {
  EIGEN_DEBUG_ALIGNED_STORE _mm512_store_si512(reinterpret_cast<__m512i*>(to),
                                               from.m_val);
}
template <>
EIGEN_STRONG_INLINE void pstore<QInt16>(QInt16* to, const Packet32q16i& from) {
  EIGEN_DEBUG_ALIGNED_STORE _mm512_store_si512(reinterpret_cast<__m512i*>(to),
                                               from.m_val);
}
250
+
251
// Extract first element: pull out lane 0 of the low 128-bit quarter.
template <>
EIGEN_STRONG_INLINE QInt32 pfirst<Packet16q32i>(const Packet16q32i& a) {
  // `a` converts implicitly to __m512i via the wrapper.
  return _mm_cvtsi128_si32(_mm512_extracti32x4_epi32(a, 0));
}
template <>
EIGEN_STRONG_INLINE QUInt8 pfirst<Packet64q8u>(const Packet64q8u& a) {
  // _mm_extract_epi8 returns an int; narrow to the unsigned byte value.
  return static_cast<uint8_t>(
      _mm_extract_epi8(_mm512_extracti32x4_epi32(a.m_val, 0), 0));
}
template <>
EIGEN_STRONG_INLINE QInt8 pfirst<Packet64q8i>(const Packet64q8i& a) {
  return _mm_extract_epi8(_mm512_extracti32x4_epi32(a.m_val, 0), 0);
}
template <>
EIGEN_STRONG_INLINE QInt16 pfirst<Packet32q16i>(const Packet32q16i& a) {
  return _mm_extract_epi16(_mm512_extracti32x4_epi32(a.m_val, 0), 0);
}

// Initialize to constant value: broadcast the raw fixed-point mantissa to
// every lane.
template <>
EIGEN_STRONG_INLINE Packet64q8i pset1<Packet64q8i>(const QInt8& from) {
  return _mm512_set1_epi8(from.value);
}
template <>
EIGEN_STRONG_INLINE Packet32q16i pset1<Packet32q16i>(const QInt16& from) {
  return _mm512_set1_epi16(from.value);
}
template <>
EIGEN_STRONG_INLINE Packet64q8u pset1<Packet64q8u>(const QUInt8& from) {
  return _mm512_set1_epi8(static_cast<uint8_t>(from.value));
}
template <>
EIGEN_STRONG_INLINE Packet16q32i pset1<Packet16q32i>(const QInt32& from) {
  return _mm512_set1_epi32(from.value);
}
287
+
288
// Basic arithmetic packet ops for QInt32.  These wrap the corresponding
// AVX-512F 32-bit integer instructions; overflow wraps modulo 2^32.
template <>
EIGEN_STRONG_INLINE Packet16q32i padd<Packet16q32i>(const Packet16q32i& a,
                                                    const Packet16q32i& b) {
  return _mm512_add_epi32(a.m_val, b.m_val);
}
template <>
EIGEN_STRONG_INLINE Packet16q32i psub<Packet16q32i>(const Packet16q32i& a,
                                                    const Packet16q32i& b) {
  return _mm512_sub_epi32(a.m_val, b.m_val);
}
// Note: mullo truncates the result to 32 bits.
template <>
EIGEN_STRONG_INLINE Packet16q32i pmul<Packet16q32i>(const Packet16q32i& a,
                                                    const Packet16q32i& b) {
  return _mm512_mullo_epi32(a.m_val, b.m_val);
}
template <>
EIGEN_STRONG_INLINE Packet16q32i pnegate<Packet16q32i>(const Packet16q32i& a) {
  // 0 - a; there is no dedicated negate instruction.
  return _mm512_sub_epi32(_mm512_setzero_si512(), a.m_val);
}

// Min and max.
template <>
EIGEN_STRONG_INLINE Packet16q32i pmin<Packet16q32i>(const Packet16q32i& a,
                                                    const Packet16q32i& b) {
  return _mm512_min_epi32(a.m_val, b.m_val);
}
template <>
EIGEN_STRONG_INLINE Packet16q32i pmax<Packet16q32i>(const Packet16q32i& a,
                                                    const Packet16q32i& b) {
  return _mm512_max_epi32(a.m_val, b.m_val);
}
321
+
322
// Min/max for the 8- and 16-bit packets.  AVX-512F alone has no 512-bit
// byte/word min/max; when AVX512BW is available a single instruction is
// used, otherwise the packet is split into two 256-bit halves, processed
// with AVX2, and reassembled.
template <>
EIGEN_STRONG_INLINE Packet64q8u pmin<Packet64q8u>(const Packet64q8u& a,
                                                  const Packet64q8u& b) {
#ifdef EIGEN_VECTORIZE_AVX512BW
  return _mm512_min_epu8(a.m_val, b.m_val);
#else
  __m256i ap0 = _mm512_extracti32x8_epi32(a.m_val, 0);
  __m256i ap1 = _mm512_extracti32x8_epi32(a.m_val, 1);
  __m256i bp0 = _mm512_extracti32x8_epi32(b.m_val, 0);
  __m256i bp1 = _mm512_extracti32x8_epi32(b.m_val, 1);
  __m256i r0 = _mm256_min_epu8(ap0, bp0);
  __m256i r1 = _mm256_min_epu8(ap1, bp1);
  return _mm512_inserti32x8(_mm512_castsi256_si512(r0), r1, 1);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet64q8u pmax<Packet64q8u>(const Packet64q8u& a,
                                                  const Packet64q8u& b) {
#ifdef EIGEN_VECTORIZE_AVX512BW
  return _mm512_max_epu8(a.m_val, b.m_val);
#else
  __m256i ap0 = _mm512_extracti32x8_epi32(a.m_val, 0);
  __m256i ap1 = _mm512_extracti32x8_epi32(a.m_val, 1);
  __m256i bp0 = _mm512_extracti32x8_epi32(b.m_val, 0);
  __m256i bp1 = _mm512_extracti32x8_epi32(b.m_val, 1);
  __m256i r0 = _mm256_max_epu8(ap0, bp0);
  __m256i r1 = _mm256_max_epu8(ap1, bp1);
  return _mm512_inserti32x8(_mm512_castsi256_si512(r0), r1, 1);
#endif
}

template <>
EIGEN_STRONG_INLINE Packet64q8i pmin<Packet64q8i>(const Packet64q8i& a,
                                                  const Packet64q8i& b) {
#ifdef EIGEN_VECTORIZE_AVX512BW
  return _mm512_min_epi8(a.m_val, b.m_val);
#else
  __m256i ap0 = _mm512_extracti32x8_epi32(a.m_val, 0);
  __m256i ap1 = _mm512_extracti32x8_epi32(a.m_val, 1);
  __m256i bp0 = _mm512_extracti32x8_epi32(b.m_val, 0);
  __m256i bp1 = _mm512_extracti32x8_epi32(b.m_val, 1);
  __m256i r0 = _mm256_min_epi8(ap0, bp0);
  __m256i r1 = _mm256_min_epi8(ap1, bp1);
  return _mm512_inserti32x8(_mm512_castsi256_si512(r0), r1, 1);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet32q16i pmin<Packet32q16i>(const Packet32q16i& a,
                                                    const Packet32q16i& b) {
#ifdef EIGEN_VECTORIZE_AVX512BW
  return _mm512_min_epi16(a.m_val, b.m_val);
#else
  __m256i ap0 = _mm512_extracti32x8_epi32(a.m_val, 0);
  __m256i ap1 = _mm512_extracti32x8_epi32(a.m_val, 1);
  __m256i bp0 = _mm512_extracti32x8_epi32(b.m_val, 0);
  __m256i bp1 = _mm512_extracti32x8_epi32(b.m_val, 1);
  __m256i r0 = _mm256_min_epi16(ap0, bp0);
  __m256i r1 = _mm256_min_epi16(ap1, bp1);
  return _mm512_inserti32x8(_mm512_castsi256_si512(r0), r1, 1);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet64q8i pmax<Packet64q8i>(const Packet64q8i& a,
                                                  const Packet64q8i& b) {
#ifdef EIGEN_VECTORIZE_AVX512BW
  return _mm512_max_epi8(a.m_val, b.m_val);
#else
  __m256i ap0 = _mm512_extracti32x8_epi32(a.m_val, 0);
  __m256i ap1 = _mm512_extracti32x8_epi32(a.m_val, 1);
  __m256i bp0 = _mm512_extracti32x8_epi32(b.m_val, 0);
  __m256i bp1 = _mm512_extracti32x8_epi32(b.m_val, 1);
  __m256i r0 = _mm256_max_epi8(ap0, bp0);
  __m256i r1 = _mm256_max_epi8(ap1, bp1);
  return _mm512_inserti32x8(_mm512_castsi256_si512(r0), r1, 1);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet32q16i pmax<Packet32q16i>(const Packet32q16i& a,
                                                    const Packet32q16i& b) {
#ifdef EIGEN_VECTORIZE_AVX512BW
  return _mm512_max_epi16(a.m_val, b.m_val);
#else
  __m256i ap0 = _mm512_extracti32x8_epi32(a.m_val, 0);
  __m256i ap1 = _mm512_extracti32x8_epi32(a.m_val, 1);
  __m256i bp0 = _mm512_extracti32x8_epi32(b.m_val, 0);
  __m256i bp1 = _mm512_extracti32x8_epi32(b.m_val, 1);
  __m256i r0 = _mm256_max_epi16(ap0, bp0);
  __m256i r1 = _mm256_max_epi16(ap1, bp1);
  return _mm512_inserti32x8(_mm512_castsi256_si512(r0), r1, 1);
#endif
}
413
+
414
// Reductions.  Each horizontal min/max splits the 512-bit packet into four
// 128-bit lanes, folds them pairwise, then folds within the surviving 128
// bits; any remaining sub-32-bit candidates are resolved in scalar code.
template <>
EIGEN_STRONG_INLINE QInt32 predux_min<Packet16q32i>(const Packet16q32i& a) {
  Packet4i lane0 = _mm512_extracti32x4_epi32(a.m_val, 0);
  Packet4i lane1 = _mm512_extracti32x4_epi32(a.m_val, 1);
  Packet4i lane2 = _mm512_extracti32x4_epi32(a.m_val, 2);
  Packet4i lane3 = _mm512_extracti32x4_epi32(a.m_val, 3);
  Packet4i res =
      _mm_min_epi32(_mm_min_epi32(lane0, lane1), _mm_min_epi32(lane2, lane3));
  res = _mm_min_epi32(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 3, 2)));
  res = _mm_min_epi32(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 0, 1)));
  return pfirst(res);
}
template <>
EIGEN_STRONG_INLINE QInt32 predux_max<Packet16q32i>(const Packet16q32i& a) {
  Packet4i lane0 = _mm512_extracti32x4_epi32(a.m_val, 0);
  Packet4i lane1 = _mm512_extracti32x4_epi32(a.m_val, 1);
  Packet4i lane2 = _mm512_extracti32x4_epi32(a.m_val, 2);
  Packet4i lane3 = _mm512_extracti32x4_epi32(a.m_val, 3);
  Packet4i res =
      _mm_max_epi32(_mm_max_epi32(lane0, lane1), _mm_max_epi32(lane2, lane3));
  res = _mm_max_epi32(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 3, 2)));
  res = _mm_max_epi32(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 0, 1)));
  return pfirst(res);
}
template <>
EIGEN_STRONG_INLINE QInt16 predux_min<Packet32q16i>(const Packet32q16i& a) {
  Packet4i lane0 = _mm512_extracti32x4_epi32(a.m_val, 0);
  Packet4i lane1 = _mm512_extracti32x4_epi32(a.m_val, 1);
  Packet4i lane2 = _mm512_extracti32x4_epi32(a.m_val, 2);
  Packet4i lane3 = _mm512_extracti32x4_epi32(a.m_val, 3);
  Packet4i res =
      _mm_min_epi16(_mm_min_epi16(lane0, lane1), _mm_min_epi16(lane2, lane3));
  res = _mm_min_epi16(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 3, 2)));
  res = _mm_min_epi16(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 0, 1)));
  // The low 32 bits now hold the two candidate int16s; compare them in
  // scalar code.
  std::uint32_t w = pfirst(res);
  return std::min(
      {static_cast<std::int16_t>(w >> 16), static_cast<std::int16_t>(w)});
}
template <>
EIGEN_STRONG_INLINE QInt16 predux_max<Packet32q16i>(const Packet32q16i& a) {
  Packet4i lane0 = _mm512_extracti32x4_epi32(a.m_val, 0);
  Packet4i lane1 = _mm512_extracti32x4_epi32(a.m_val, 1);
  Packet4i lane2 = _mm512_extracti32x4_epi32(a.m_val, 2);
  Packet4i lane3 = _mm512_extracti32x4_epi32(a.m_val, 3);
  Packet4i res =
      _mm_max_epi16(_mm_max_epi16(lane0, lane1), _mm_max_epi16(lane2, lane3));
  res = _mm_max_epi16(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 3, 2)));
  res = _mm_max_epi16(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 0, 1)));
  std::uint32_t w = pfirst(res);
  return std::max(
      {static_cast<std::int16_t>(w >> 16), static_cast<std::int16_t>(w)});
}
467
// Byte-wise horizontal reductions: fold the four 128-bit lanes pairwise,
// fold within 128 bits, then resolve the four candidate bytes left in the
// low 32 bits with scalar min/max.
template <>
EIGEN_STRONG_INLINE QUInt8 predux_min<Packet64q8u>(const Packet64q8u& a) {
  Packet4i lane0 = _mm512_extracti32x4_epi32(a.m_val, 0);
  Packet4i lane1 = _mm512_extracti32x4_epi32(a.m_val, 1);
  Packet4i lane2 = _mm512_extracti32x4_epi32(a.m_val, 2);
  Packet4i lane3 = _mm512_extracti32x4_epi32(a.m_val, 3);
  Packet4i res =
      _mm_min_epu8(_mm_min_epu8(lane0, lane1), _mm_min_epu8(lane2, lane3));
  res = _mm_min_epu8(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 3, 2)));
  res = _mm_min_epu8(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 0, 1)));
  std::uint32_t w = pfirst(res);
  return std::min(
      {static_cast<std::uint8_t>(w >> 24), static_cast<std::uint8_t>(w >> 16),
       static_cast<std::uint8_t>(w >> 8), static_cast<std::uint8_t>(w)});
}
template <>
EIGEN_STRONG_INLINE QUInt8 predux_max<Packet64q8u>(const Packet64q8u& a) {
  Packet4i lane0 = _mm512_extracti32x4_epi32(a.m_val, 0);
  Packet4i lane1 = _mm512_extracti32x4_epi32(a.m_val, 1);
  Packet4i lane2 = _mm512_extracti32x4_epi32(a.m_val, 2);
  Packet4i lane3 = _mm512_extracti32x4_epi32(a.m_val, 3);
  Packet4i res =
      _mm_max_epu8(_mm_max_epu8(lane0, lane1), _mm_max_epu8(lane2, lane3));
  res = _mm_max_epu8(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 3, 2)));
  res = _mm_max_epu8(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 0, 1)));
  std::uint32_t w = pfirst(res);
  return std::max(
      {static_cast<std::uint8_t>(w >> 24), static_cast<std::uint8_t>(w >> 16),
       static_cast<std::uint8_t>(w >> 8), static_cast<std::uint8_t>(w)});
}
template <>
EIGEN_STRONG_INLINE QInt8 predux_min<Packet64q8i>(const Packet64q8i& a) {
  Packet4i lane0 = _mm512_extracti32x4_epi32(a.m_val, 0);
  Packet4i lane1 = _mm512_extracti32x4_epi32(a.m_val, 1);
  Packet4i lane2 = _mm512_extracti32x4_epi32(a.m_val, 2);
  Packet4i lane3 = _mm512_extracti32x4_epi32(a.m_val, 3);
  Packet4i res =
      _mm_min_epi8(_mm_min_epi8(lane0, lane1), _mm_min_epi8(lane2, lane3));
  res = _mm_min_epi8(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 3, 2)));
  res = _mm_min_epi8(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 0, 1)));
  std::uint32_t w = pfirst(res);
  return std::min(
      {static_cast<std::int8_t>(w >> 24), static_cast<std::int8_t>(w >> 16),
       static_cast<std::int8_t>(w >> 8), static_cast<std::int8_t>(w)});
}
512
+ template <>
513
+ EIGEN_STRONG_INLINE QInt8 predux_max<Packet64q8i>(const Packet64q8i& a) {
514
+ Packet4i lane0 = _mm512_extracti32x4_epi32(a.m_val, 0);
515
+ Packet4i lane1 = _mm512_extracti32x4_epi32(a.m_val, 1);
516
+ Packet4i lane2 = _mm512_extracti32x4_epi32(a.m_val, 2);
517
+ Packet4i lane3 = _mm512_extracti32x4_epi32(a.m_val, 3);
518
+ Packet4i res =
519
+ _mm_max_epi8(_mm_max_epi8(lane0, lane1), _mm_max_epi8(lane2, lane3));
520
+ res = _mm_max_epi8(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 3, 2)));
521
+ res = _mm_max_epi8(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 0, 1)));
522
+ std::uint32_t w = pfirst(res);
523
+ return std::min(
524
+ {static_cast<std::int8_t>(w >> 24), static_cast<std::int8_t>(w >> 16),
525
+ static_cast<std::int8_t>(w >> 8), static_cast<std::int8_t>(w)});
526
+ }
527
+
528
+ } // end namespace internal
529
+ } // end namespace Eigen
530
+
531
+ #endif // TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_PACKETMATHAVX512_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/TypeCastingAVX2.h ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_TYPECASTINGAVX2_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_TYPECASTINGAVX2_H_
18
+
19
+ namespace Eigen {
20
+ namespace internal {
21
+
22
// Local alias for the 8 x float AVX packet.
typedef __m256 Packet8f;

// QInt32 -> float: one-to-one lane conversion.
template <>
struct type_casting_traits<QInt32, float> {
  enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
};

template <>
EIGEN_STRONG_INLINE Packet8f pcast<Packet8q32i>(const Packet8q32i& a) {
  return _mm256_cvtepi32_ps(a.m_val);
}

// float -> QInt32: one-to-one lane conversion (cvtps rounds per the current
// rounding mode).
template <>
struct type_casting_traits<float, QInt32> {
  enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
};

template <>
EIGEN_STRONG_INLINE Packet8q32i pcast<Packet8f>(const Packet8f& a) {
  return _mm256_cvtps_epi32(a);
}

// QInt32 -> QInt8: four source packets narrow (with signed saturation) into
// one destination packet.
template <>
struct type_casting_traits<QInt32, QInt8> {
  enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
};

template <>
EIGEN_STRONG_INLINE Packet32q8i
pcast<Packet8q32i, Packet32q8i>(const Packet8q32i& a, const Packet8q32i& b,
                                const Packet8q32i& c, const Packet8q32i& d) {
  __m256i converted = _mm256_packs_epi16(_mm256_packs_epi32(a.m_val, b.m_val),
                                         _mm256_packs_epi32(c.m_val, d.m_val));
  // Since packs does not cross 128 bit lane boundaries,
  // we have to permute to properly order the final result.
  const __m256i permute_mask = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0);
  return _mm256_permutevar8x32_epi32(converted, permute_mask);
}
60
+
61
// float -> QInt8: convert to int32 first, then narrow 4:1 with signed
// saturation, re-ordering across the 128-bit lanes afterwards.
template <>
struct type_casting_traits<float, QInt8> {
  enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
};

template <>
EIGEN_STRONG_INLINE Packet32q8i
pcast<Packet8f, Packet32q8i>(const Packet8f& a, const Packet8f& b,
                             const Packet8f& c, const Packet8f& d) {
  const __m256i a_conv = _mm256_cvtps_epi32(a);
  const __m256i b_conv = _mm256_cvtps_epi32(b);
  const __m256i c_conv = _mm256_cvtps_epi32(c);
  const __m256i d_conv = _mm256_cvtps_epi32(d);
  __m256i converted = _mm256_packs_epi16(_mm256_packs_epi32(a_conv, b_conv),
                                         _mm256_packs_epi32(c_conv, d_conv));
  // packs stays within each 128-bit lane; permute to restore element order.
  const __m256i permute_mask = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0);
  return _mm256_permutevar8x32_epi32(converted, permute_mask);
}

// QInt32 -> QUInt8: four source packets narrow 4:1 with unsigned saturation.
template <>
struct type_casting_traits<QInt32, QUInt8> {
  enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
};

template <>
EIGEN_STRONG_INLINE Packet32q8u
pcast<Packet8q32i, Packet32q8u>(const Packet8q32i& a, const Packet8q32i& b,
                                const Packet8q32i& c, const Packet8q32i& d) {
  // _mm256_packus_epi32 trims negative numbers to 0 but we can't allow numbers
  // that are too large because _mm256_packus_epi16 expects signed input
  // (example of problem input: 0x11111111, which saturates to 0xffff = -1,
  // which saturates to 0).  Pre-clip every lane to <= 255.
  const __m256i a_clip = _mm256_min_epi32(a, _mm256_set1_epi32(255));
  const __m256i b_clip = _mm256_min_epi32(b, _mm256_set1_epi32(255));
  const __m256i c_clip = _mm256_min_epi32(c, _mm256_set1_epi32(255));
  const __m256i d_clip = _mm256_min_epi32(d, _mm256_set1_epi32(255));
  const __m256i converted = _mm256_packus_epi16(
      _mm256_packus_epi32(a_clip, b_clip), _mm256_packus_epi32(c_clip, d_clip));
  // Since packus does not cross 128 bit lane boundaries,
  // we have to permute to properly order the final result.
  const __m256i permute_mask = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0);
  return _mm256_permutevar8x32_epi32(converted, permute_mask);
}
104
+
105
+ } // end namespace internal
106
+ } // end namespace Eigen
107
+
108
+ #endif // TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_TYPECASTINGAVX2_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/TypeCastingAVX512.h ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_TYPECASTINGAVX512_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_TYPECASTINGAVX512_H_
18
+
19
+ namespace Eigen {
20
+ namespace internal {
21
+
22
// Local aliases for the 16 x float / 16 x int32 AVX-512 packets.
typedef __m512 Packet16f;
typedef __m512i Packet16i;

// QInt32 -> float: one-to-one lane conversion.
template <>
struct type_casting_traits<QInt32, float> {
  enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
};

template <>
EIGEN_STRONG_INLINE Packet16f pcast<Packet16q32i>(const Packet16q32i& a) {
  return _mm512_cvtepi32_ps(a.m_val);
}

// float -> QInt32: one-to-one lane conversion (cvtps rounds per the current
// rounding mode).
template <>
struct type_casting_traits<float, QInt32> {
  enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
};

template <>
EIGEN_STRONG_INLINE Packet16q32i pcast<Packet16f>(const Packet16f& a) {
  return _mm512_cvtps_epi32(a);
}
44
+
45
// float -> QInt16: two float packets narrow 2:1 with signed saturation.
template <>
struct type_casting_traits<float, QInt16> {
  enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
};

template <>
EIGEN_STRONG_INLINE Packet32q16i pcast<Packet16f>(const Packet16f& a,
                                                  const Packet16f& b) {
  Packet16i a_int = _mm512_cvtps_epi32(a);
  Packet16i b_int = _mm512_cvtps_epi32(b);
#ifdef EIGEN_VECTORIZE_AVX512BW
  return _mm512_packs_epi32(a_int, b_int);
#else
  // Without AVX512BW there is no 512-bit pack: pack each 256-bit half with
  // AVX2, fix the lane-local pack order with permute4x64, and reassemble.
  Packet8i ab_int16_low = _mm256_permute4x64_epi64(
      _mm256_packs_epi32(_mm512_castsi512_si256(a_int),
                         _mm512_castsi512_si256(b_int)),
      _MM_SHUFFLE(0, 2, 1, 3));
  Packet8i ab_int16_high = _mm256_permute4x64_epi64(
      _mm256_packs_epi32(_mm512_extracti32x8_epi32(a_int, 1),
                         _mm512_extracti32x8_epi32(b_int, 1)),
      _MM_SHUFFLE(0, 2, 1, 3));
  return _mm512_inserti32x8(_mm512_castsi256_si512(ab_int16_low), ab_int16_high,
                            1);
#endif
}
70
+
71
// float -> QInt8: four float packets narrow 4:1 with signed saturation.
template <>
struct type_casting_traits<float, QInt8> {
  enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
};

template <>
EIGEN_STRONG_INLINE Packet64q8i pcast<Packet16f>(const Packet16f& a,
                                                 const Packet16f& b,
                                                 const Packet16f& c,
                                                 const Packet16f& d) {
  Packet16i a_int = _mm512_cvtps_epi32(a);
  Packet16i b_int = _mm512_cvtps_epi32(b);
  Packet16i c_int = _mm512_cvtps_epi32(c);
  Packet16i d_int = _mm512_cvtps_epi32(d);
#ifdef EIGEN_VECTORIZE_AVX512BW
  return _mm512_packs_epi16(_mm512_packs_epi32(a_int, b_int),
                            _mm512_packs_epi32(c_int, d_int));
#else
  // Without AVX512BW, pack each 256-bit half with AVX2 in two stages
  // (32->16 then 16->8 bits), re-ordering after each lane-local pack.
  Packet8i ab_int16_low = _mm256_permute4x64_epi64(
      _mm256_packs_epi32(_mm512_castsi512_si256(a_int),
                         _mm512_castsi512_si256(b_int)),
      _MM_SHUFFLE(0, 2, 1, 3));
  Packet8i cd_int16_low = _mm256_permute4x64_epi64(
      _mm256_packs_epi32(_mm512_castsi512_si256(c_int),
                         _mm512_castsi512_si256(d_int)),
      _MM_SHUFFLE(0, 2, 1, 3));
  Packet8i ab_int16_high = _mm256_permute4x64_epi64(
      _mm256_packs_epi32(_mm512_extracti32x8_epi32(a_int, 1),
                         _mm512_extracti32x8_epi32(b_int, 1)),
      _MM_SHUFFLE(0, 2, 1, 3));
  Packet8i cd_int16_high = _mm256_permute4x64_epi64(
      _mm256_packs_epi32(_mm512_extracti32x8_epi32(c_int, 1),
                         _mm512_extracti32x8_epi32(d_int, 1)),
      _MM_SHUFFLE(0, 2, 1, 3));
  Packet8i abcd_int8_low = _mm256_permute4x64_epi64(
      _mm256_packs_epi16(ab_int16_low, cd_int16_low), _MM_SHUFFLE(0, 2, 1, 3));
  Packet8i abcd_int8_high =
      _mm256_permute4x64_epi64(_mm256_packs_epi16(ab_int16_high, cd_int16_high),
                               _MM_SHUFFLE(0, 2, 1, 3));
  return _mm512_inserti32x8(_mm512_castsi256_si512(abcd_int8_low),
                            abcd_int8_high, 1);
#endif
}
114
+
115
// QInt32 -> QInt8: four source packets narrow 4:1.
template <>
struct type_casting_traits<QInt32, QInt8> {
  enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
};

// QInt32 -> QInt16: two source packets narrow 2:1.
template <>
struct type_casting_traits<QInt32, QInt16> {
  enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
};

template <>
EIGEN_STRONG_INLINE Packet64q8i
pcast<Packet16q32i, Packet64q8i>(const Packet16q32i& a, const Packet16q32i& b,
                                 const Packet16q32i& c, const Packet16q32i& d) {
  // cvtsepi32_epi8 narrows each packet to 16 saturated int8s, preserving
  // element order; the four 128-bit pieces are then concatenated.
  __m128i a_part = _mm512_cvtsepi32_epi8(a);
  __m128i b_part = _mm512_cvtsepi32_epi8(b);
  __m128i c_part = _mm512_cvtsepi32_epi8(c);
  __m128i d_part = _mm512_cvtsepi32_epi8(d);
  __m256i ab =
      _mm256_inserti128_si256(_mm256_castsi128_si256(a_part), b_part, 1);
  __m256i cd =
      _mm256_inserti128_si256(_mm256_castsi128_si256(c_part), d_part, 1);
  __m512i converted = _mm512_inserti64x4(_mm512_castsi256_si512(ab), cd, 1);
  return converted;
}

template <>
EIGEN_STRONG_INLINE Packet32q16i pcast<Packet16q32i, Packet32q16i>(
    const Packet16q32i& a, const Packet16q32i& b) {
  // Same scheme with 16-bit saturating narrowing and 256-bit pieces.
  __m256i a_part = _mm512_cvtsepi32_epi16(a);
  __m256i b_part = _mm512_cvtsepi32_epi16(b);
  __m512i converted =
      _mm512_inserti64x4(_mm512_castsi256_si512(a_part), b_part, 1);
  return converted;
}
150
+
151
// QInt32 -> QUInt8: four source packets narrow 4:1 with clamping to [0, 255].
template <>
struct type_casting_traits<QInt32, QUInt8> {
  enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
};

template <>
EIGEN_STRONG_INLINE Packet64q8u
pcast<Packet16q32i, Packet64q8u>(const Packet16q32i& a, const Packet16q32i& b,
                                 const Packet16q32i& c, const Packet16q32i& d) {
  // Brute-force saturation since there isn't a pack operation for unsigned
  // numbers that keeps the elements in order: clamp each lane to [0, 255]
  // before the (truncating) cvtepi32_epi8 narrowing.
  __m128i a_part = _mm512_cvtepi32_epi8(_mm512_max_epi32(
      _mm512_min_epi32(a, _mm512_set1_epi32(255)), _mm512_setzero_si512()));
  __m128i b_part = _mm512_cvtepi32_epi8(_mm512_max_epi32(
      _mm512_min_epi32(b, _mm512_set1_epi32(255)), _mm512_setzero_si512()));
  __m128i c_part = _mm512_cvtepi32_epi8(_mm512_max_epi32(
      _mm512_min_epi32(c, _mm512_set1_epi32(255)), _mm512_setzero_si512()));
  __m128i d_part = _mm512_cvtepi32_epi8(_mm512_max_epi32(
      _mm512_min_epi32(d, _mm512_set1_epi32(255)), _mm512_setzero_si512()));
  __m256i ab =
      _mm256_inserti128_si256(_mm256_castsi128_si256(a_part), b_part, 1);
  __m256i cd =
      _mm256_inserti128_si256(_mm256_castsi128_si256(c_part), d_part, 1);
  __m512i converted = _mm512_inserti64x4(_mm512_castsi256_si512(ab), cd, 1);
  return converted;
}
177
+
178
+ #if 0
179
+ // The type Packet32q16u does not exist for AVX-512 yet
180
+ template <>
181
+ struct type_casting_traits<QInt32, QUInt16> {
182
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
183
+ };
184
+
185
+ template <>
186
+ EIGEN_STRONG_INLINE Packet32q16u
187
+ pcast<Packet16q32i, Packet32q16u>(const Packet16q32i& a,
188
+ const Packet16q32i& b) {
189
+ // Brute-force saturation since there isn't a pack operation for unsigned
190
+ // numbers that keeps the elements in order.
191
+ __m256i a_part =
192
+ _mm512_cvtepi32_epi16(_mm512_max_epi32(
193
+ _mm512_min_epi32(a, _mm512_set1_epi32(65535)), _mm512_setzero_si512()));
194
+ __m256i b_part = _mm512_cvtepi32_epi16(
195
+ _mm512_max_epi32(_mm512_min_epi32(b, _mm512_set1_epi32(65535)),
196
+ _mm512_setzero_si512()));
197
+ __m512i converted =
198
+ _mm512_inserti64x4(_mm512_castsi256_si512(a_part), b_part, 1);
199
+ return converted;
200
+ }
201
+ #endif
202
+
203
+ } // end namespace internal
204
+ } // end namespace Eigen
205
+
206
+ #endif // TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_TYPECASTINGAVX512_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint_types.h ADDED
@@ -0,0 +1,354 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_TYPES_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_TYPES_H_
18
+
19
+ #include <stdint.h>
20
+
21
+ #include <Eigen/Core>
22
+ #include <cmath>
23
+ #include <iostream>
24
+
25
+ namespace Eigen {
26
+
27
// The mantissa part of the fixed point representation. See
// go/tensorfixedpoint for details
struct QInt8;
struct QUInt8;
struct QInt16;
struct QUInt16;
struct QInt32;

// Expose the traits of the wrapped integer type to Eigen so that
// NumTraits<QIntN> (highest/lowest, read/add/mul costs, ...) behave exactly
// like the underlying intN_t / uintN_t storage type.
template <>
struct NumTraits<QInt8> : GenericNumTraits<int8_t> {};
template <>
struct NumTraits<QUInt8> : GenericNumTraits<uint8_t> {};
template <>
struct NumTraits<QInt16> : GenericNumTraits<int16_t> {};
template <>
struct NumTraits<QUInt16> : GenericNumTraits<uint16_t> {};
template <>
struct NumTraits<QInt32> : GenericNumTraits<int32_t> {};
45
+
46
namespace internal {
// Allow Eigen to form the mixed-type product QInt32 * double (used when
// rescaling 32-bit accumulators); the result type stays QInt32.
template <>
struct scalar_product_traits<QInt32, double> {
  enum {
    // Cost = NumTraits<T>::MulCost,
    Defined = 1
  };
  typedef QInt32 ReturnType;
};
}  // namespace internal
56
+
57
// Wrap the 8bit int into a QInt8 struct instead of using a typedef to prevent
// the compiler from silently type cast the mantissa into a bigger or a smaller
// representation.
struct QInt8 {
  // Zero-initialized by default.
  QInt8() : value(0) {}
  QInt8(const int8_t v) : value(v) {}
  // Saturating narrowing conversion from the 32-bit accumulator type;
  // defined out of line below, after QInt32 is complete.
  QInt8(const QInt32 v);

  operator int() const { return static_cast<int>(value); }

  int8_t value;
};
69
+
70
// Unsigned 8-bit mantissa wrapper; same rationale as QInt8 above.
struct QUInt8 {
  QUInt8() : value(0) {}
  QUInt8(const uint8_t v) : value(v) {}
  // Saturating narrowing conversion from QInt32 (defined below).
  QUInt8(const QInt32 v);

  operator int() const { return static_cast<int>(value); }

  uint8_t value;
};
79
+
80
// Signed 16-bit mantissa wrapper; same rationale as QInt8 above.
struct QInt16 {
  QInt16() : value(0) {}
  QInt16(const int16_t v) : value(v) {}
  // Saturating narrowing conversion from QInt32 (defined below).
  QInt16(const QInt32 v);
  operator int() const { return static_cast<int>(value); }

  int16_t value;
};
88
+
89
// Unsigned 16-bit mantissa wrapper; same rationale as QInt8 above.
struct QUInt16 {
  QUInt16() : value(0) {}
  QUInt16(const uint16_t v) : value(v) {}
  // Saturating narrowing conversion from QInt32 (defined below).
  QUInt16(const QInt32 v);
  operator int() const { return static_cast<int>(value); }

  uint16_t value;
};
97
+
98
// 32-bit accumulator type: products/sums of the 8/16-bit wrappers widen into
// QInt32 (see the operator overloads below).
struct QInt32 {
  QInt32() : value(0) {}
  QInt32(const int8_t v) : value(v) {}
  QInt32(const int32_t v) : value(v) {}
  QInt32(const uint32_t v) : value(static_cast<int32_t>(v)) {}
  QInt32(const QInt8 v) : value(v.value) {}
  // Rounds to nearest (lrint) rather than truncating toward zero.
  QInt32(const float v) : value(static_cast<int32_t>(lrint(v))) {}
#ifdef EIGEN_MAKING_DOCS
  // Workaround to fix build on PPC.
  QInt32(unsigned long v) : value(v) {}
#endif

  operator float() const { return static_cast<float>(value); }

  int32_t value;
};
114
+
115
// Out-of-line converting constructors: narrowing a QInt32 accumulator into an
// 8/16-bit mantissa saturates (clamps to the target type's range) instead of
// wrapping around like a plain integer cast would.
EIGEN_STRONG_INLINE QInt8::QInt8(const QInt32 v)
    : value(static_cast<int8_t>(
          v.value > 127 ? 127 : (v.value < -128 ? -128 : v.value))) {}
EIGEN_STRONG_INLINE QUInt8::QUInt8(const QInt32 v)
    : value(static_cast<uint8_t>(v.value > 255 ? 255
                                               : (v.value < 0 ? 0 : v.value))) {
}
EIGEN_STRONG_INLINE QInt16::QInt16(const QInt32 v)
    : value(static_cast<int16_t>(
          v.value > 32767 ? 32767 : (v.value < -32768 ? -32768 : v.value))) {}
EIGEN_STRONG_INLINE QUInt16::QUInt16(const QInt32 v)
    : value(static_cast<uint16_t>(
          v.value > 65535 ? 65535 : (v.value < 0 ? 0 : v.value))) {}
128
+
129
+ // Basic widening 8-bit operations: This will be vectorized in future CLs.
130
+ EIGEN_STRONG_INLINE QInt32 operator*(const QInt8 a, const QInt8 b) {
131
+ return QInt32(static_cast<int32_t>(a.value) * static_cast<int32_t>(b.value));
132
+ }
133
+ EIGEN_STRONG_INLINE QInt32 operator*(const QInt8 a, const QUInt8 b) {
134
+ return QInt32(static_cast<int32_t>(a.value) * static_cast<int32_t>(b.value));
135
+ }
136
+ EIGEN_STRONG_INLINE QInt32 operator+(const QInt8 a, const QInt8 b) {
137
+ return QInt32(static_cast<int32_t>(a.value) + static_cast<int32_t>(b.value));
138
+ }
139
+ EIGEN_STRONG_INLINE QInt32 operator-(const QInt8 a, const QInt8 b) {
140
+ return QInt32(static_cast<int32_t>(a.value) - static_cast<int32_t>(b.value));
141
+ }
142
+
143
+ // Basic widening 16-bit operations: This will be vectorized in future CLs.
144
+ EIGEN_STRONG_INLINE QInt32 operator*(const QInt16 a, const QInt16 b) {
145
+ return QInt32(static_cast<int32_t>(a.value) * static_cast<int32_t>(b.value));
146
+ }
147
+ EIGEN_STRONG_INLINE QInt32 operator*(const QInt16 a, const QUInt16 b) {
148
+ return QInt32(static_cast<int32_t>(a.value) * static_cast<int32_t>(b.value));
149
+ }
150
+ EIGEN_STRONG_INLINE QInt32 operator+(const QInt16 a, const QInt16 b) {
151
+ return QInt32(static_cast<int32_t>(a.value) + static_cast<int32_t>(b.value));
152
+ }
153
+ EIGEN_STRONG_INLINE QInt32 operator-(const QInt16 a, const QInt16 b) {
154
+ return QInt32(static_cast<int32_t>(a.value) - static_cast<int32_t>(b.value));
155
+ }
156
+
157
// Mixed-width operations: the narrow operand is widened to int32 first and
// both argument orders are provided for each operator. Results are plain
// (unsaturated) QInt32 values.

// Mixed QInt32 op QInt8 operations. This will be vectorized in future CLs.
EIGEN_STRONG_INLINE QInt32 operator+(const QInt32 a, const QInt8 b) {
  return QInt32(a.value + static_cast<int32_t>(b.value));
}
EIGEN_STRONG_INLINE QInt32 operator+(const QInt8 a, const QInt32 b) {
  return QInt32(static_cast<int32_t>(a.value) + b.value);
}
EIGEN_STRONG_INLINE QInt32 operator-(const QInt32 a, const QInt8 b) {
  return QInt32(a.value - static_cast<int32_t>(b.value));
}
EIGEN_STRONG_INLINE QInt32 operator-(const QInt8 a, const QInt32 b) {
  return QInt32(static_cast<int32_t>(a.value) - b.value);
}
EIGEN_STRONG_INLINE QInt32 operator*(const QInt32 a, const QInt8 b) {
  return QInt32(a.value * static_cast<int32_t>(b.value));
}
EIGEN_STRONG_INLINE QInt32 operator*(const QInt8 a, const QInt32 b) {
  return QInt32(static_cast<int32_t>(a.value) * b.value);
}

// Mixed QInt32 op QInt16 operations. This will be vectorized in future CLs.
EIGEN_STRONG_INLINE QInt32 operator+(const QInt32 a, const QInt16 b) {
  return QInt32(a.value + static_cast<int32_t>(b.value));
}
EIGEN_STRONG_INLINE QInt32 operator+(const QInt16 a, const QInt32 b) {
  return QInt32(static_cast<int32_t>(a.value) + b.value);
}
EIGEN_STRONG_INLINE QInt32 operator-(const QInt32 a, const QInt16 b) {
  return QInt32(a.value - static_cast<int32_t>(b.value));
}
EIGEN_STRONG_INLINE QInt32 operator-(const QInt16 a, const QInt32 b) {
  return QInt32(static_cast<int32_t>(a.value) - b.value);
}
EIGEN_STRONG_INLINE QInt32 operator*(const QInt32 a, const QInt16 b) {
  return QInt32(a.value * static_cast<int32_t>(b.value));
}
EIGEN_STRONG_INLINE QInt32 operator*(const QInt16 a, const QInt32 b) {
  return QInt32(static_cast<int32_t>(a.value) * b.value);
}

// Mixed QInt32 op QUInt8 operations. This will be vectorized in future CLs.
EIGEN_STRONG_INLINE QInt32 operator+(const QInt32 a, const QUInt8 b) {
  return QInt32(a.value + static_cast<int32_t>(b.value));
}
EIGEN_STRONG_INLINE QInt32 operator+(const QUInt8 a, const QInt32 b) {
  return QInt32(static_cast<int32_t>(a.value) + b.value);
}
EIGEN_STRONG_INLINE QInt32 operator-(const QInt32 a, const QUInt8 b) {
  return QInt32(a.value - static_cast<int32_t>(b.value));
}
EIGEN_STRONG_INLINE QInt32 operator-(const QUInt8 a, const QInt32 b) {
  return QInt32(static_cast<int32_t>(a.value) - b.value);
}
EIGEN_STRONG_INLINE QInt32 operator*(const QInt32 a, const QUInt8 b) {
  return QInt32(a.value * static_cast<int32_t>(b.value));
}
EIGEN_STRONG_INLINE QInt32 operator*(const QUInt8 a, const QInt32 b) {
  return QInt32(static_cast<int32_t>(a.value) * b.value);
}

// Mixed QInt32 op QUInt16 operations. This will be vectorized in future CLs.
EIGEN_STRONG_INLINE QInt32 operator+(const QInt32 a, const QUInt16 b) {
  return QInt32(a.value + static_cast<int32_t>(b.value));
}
EIGEN_STRONG_INLINE QInt32 operator+(const QUInt16 a, const QInt32 b) {
  return QInt32(static_cast<int32_t>(a.value) + b.value);
}
EIGEN_STRONG_INLINE QInt32 operator-(const QInt32 a, const QUInt16 b) {
  return QInt32(a.value - static_cast<int32_t>(b.value));
}
EIGEN_STRONG_INLINE QInt32 operator-(const QUInt16 a, const QInt32 b) {
  return QInt32(static_cast<int32_t>(a.value) - b.value);
}
EIGEN_STRONG_INLINE QInt32 operator*(const QInt32 a, const QUInt16 b) {
  return QInt32(a.value * static_cast<int32_t>(b.value));
}
EIGEN_STRONG_INLINE QInt32 operator*(const QUInt16 a, const QInt32 b) {
  return QInt32(static_cast<int32_t>(a.value) * b.value);
}
236
+
237
// Basic arithmetic operations on QInt32, which behaves like a int32_t.
// There is no saturation: overflow and division by zero behave exactly as
// they would for a raw int32_t.
EIGEN_STRONG_INLINE QInt32 operator+(const QInt32 a, const QInt32 b) {
  return a.value + b.value;
}
EIGEN_STRONG_INLINE QInt32 operator-(const QInt32 a, const QInt32 b) {
  return a.value - b.value;
}
EIGEN_STRONG_INLINE QInt32 operator*(const QInt32 a, const QInt32 b) {
  return a.value * b.value;
}
EIGEN_STRONG_INLINE QInt32 operator/(const QInt32 a, const QInt32 b) {
  return a.value / b.value;
}
EIGEN_STRONG_INLINE QInt32& operator+=(QInt32& a, const QInt32 b) {
  a.value += b.value;
  return a;
}
EIGEN_STRONG_INLINE QInt32& operator-=(QInt32& a, const QInt32 b) {
  a.value -= b.value;
  return a;
}
EIGEN_STRONG_INLINE QInt32& operator*=(QInt32& a, const QInt32 b) {
  a.value *= b.value;
  return a;
}
EIGEN_STRONG_INLINE QInt32& operator/=(QInt32& a, const QInt32 b) {
  a.value /= b.value;
  return a;
}
EIGEN_STRONG_INLINE QInt32 operator-(const QInt32 a) { return -a.value; }

// Scaling QInt32 by double. We do the arithmetic in double because
// float only has 23 bits of mantissa, so casting QInt32 to float might reduce
// accuracy by discarding up to 7 (least significant) bits.
// The scaled result is rounded to nearest via lrint before narrowing.
EIGEN_STRONG_INLINE QInt32 operator*(const QInt32 a, const double b) {
  return static_cast<int32_t>(lrint(static_cast<double>(a.value) * b));
}
EIGEN_STRONG_INLINE QInt32 operator*(const double a, const QInt32 b) {
  return static_cast<int32_t>(lrint(a * static_cast<double>(b.value)));
}
EIGEN_STRONG_INLINE QInt32& operator*=(QInt32& a, const double b) {
  a.value = static_cast<int32_t>(lrint(static_cast<double>(a.value) * b));
  return a;
}
281
+
282
// Comparisons
// Only ==, < and > are defined for the wrapper types; !=, <= and >= are
// intentionally not provided here.
EIGEN_STRONG_INLINE bool operator==(const QInt8 a, const QInt8 b) {
  return a.value == b.value;
}
EIGEN_STRONG_INLINE bool operator==(const QUInt8 a, const QUInt8 b) {
  return a.value == b.value;
}
EIGEN_STRONG_INLINE bool operator==(const QInt16 a, const QInt16 b) {
  return a.value == b.value;
}
EIGEN_STRONG_INLINE bool operator==(const QUInt16 a, const QUInt16 b) {
  return a.value == b.value;
}
EIGEN_STRONG_INLINE bool operator==(const QInt32 a, const QInt32 b) {
  return a.value == b.value;
}

EIGEN_STRONG_INLINE bool operator<(const QInt8 a, const QInt8 b) {
  return a.value < b.value;
}
EIGEN_STRONG_INLINE bool operator<(const QUInt8 a, const QUInt8 b) {
  return a.value < b.value;
}
EIGEN_STRONG_INLINE bool operator<(const QInt16 a, const QInt16 b) {
  return a.value < b.value;
}
EIGEN_STRONG_INLINE bool operator<(const QUInt16 a, const QUInt16 b) {
  return a.value < b.value;
}
EIGEN_STRONG_INLINE bool operator<(const QInt32 a, const QInt32 b) {
  return a.value < b.value;
}

EIGEN_STRONG_INLINE bool operator>(const QInt8 a, const QInt8 b) {
  return a.value > b.value;
}
EIGEN_STRONG_INLINE bool operator>(const QUInt8 a, const QUInt8 b) {
  return a.value > b.value;
}
EIGEN_STRONG_INLINE bool operator>(const QInt16 a, const QInt16 b) {
  return a.value > b.value;
}
EIGEN_STRONG_INLINE bool operator>(const QUInt16 a, const QUInt16 b) {
  return a.value > b.value;
}
EIGEN_STRONG_INLINE bool operator>(const QInt32 a, const QInt32 b) {
  return a.value > b.value;
}

// Stream output: the 8/16-bit mantissas are cast to int so they print as
// numbers rather than as characters.
EIGEN_STRONG_INLINE std::ostream& operator<<(std::ostream& os, QInt8 a) {
  os << static_cast<int>(a.value);
  return os;
}
EIGEN_STRONG_INLINE std::ostream& operator<<(std::ostream& os, QUInt8 a) {
  os << static_cast<int>(a.value);
  return os;
}
EIGEN_STRONG_INLINE std::ostream& operator<<(std::ostream& os, QInt16 a) {
  os << static_cast<int>(a.value);
  return os;
}
EIGEN_STRONG_INLINE std::ostream& operator<<(std::ostream& os, QUInt16 a) {
  os << static_cast<int>(a.value);
  return os;
}
EIGEN_STRONG_INLINE std::ostream& operator<<(std::ostream& os, QInt32 a) {
  os << a.value;
  return os;
}
351
+
352
+ } // namespace Eigen
353
+
354
+ #endif // TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_TYPES_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/metrics.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_METRICS_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_METRICS_H_
18
+
19
+ #include <cstdint>
20
+
21
namespace tsl {
namespace metrics {

// Records `delay_usecs` in the metric tracking the time the BFC allocator
// spends delayed (waiting for memory to become available).
void UpdateBfcAllocatorDelayTime(const uint64_t delay_usecs);

}  // namespace metrics
}  // namespace tsl
29
+
30
+ #endif // TENSORFLOW_TSL_FRAMEWORK_METRICS_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/numeric_types.h ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_NUMERIC_TYPES_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_NUMERIC_TYPES_H_
18
+
19
#include <complex>
#include <cstring>

#include "tsl/framework/fixedpoint_types.h"
#include "tsl/platform/types.h"
23
+
24
namespace tsl {

// Single precision complex.
typedef std::complex<float> complex64;
// Double precision complex.
typedef std::complex<double> complex128;

// We use Eigen's QInt implementations for our quantized int types.
typedef Eigen::QInt8 qint8;
typedef Eigen::QUInt8 quint8;
typedef Eigen::QInt32 qint32;
typedef Eigen::QInt16 qint16;
typedef Eigen::QUInt16 quint16;

}  // namespace tsl
39
+
40
+ static inline tsl::bfloat16 FloatToBFloat16(float float_val) {
41
+ #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
42
+ return *reinterpret_cast<tsl::bfloat16*>(
43
+ reinterpret_cast<uint16_t*>(&float_val));
44
+ #else
45
+ return *reinterpret_cast<tsl::bfloat16*>(
46
+ &(reinterpret_cast<uint16_t*>(&float_val)[1]));
47
+ #endif
48
+ }
49
+
50
namespace Eigen {
// Minimal NumTraits so tsl::tstring can be used as an Eigen scalar (e.g.
// stored in tensors). Arithmetic on strings is meaningless, so costs are
// HugeCost and the numeric queries below are declared private and left
// undefined: any attempted use fails at compile/link time.
template <>
struct NumTraits<tsl::tstring> : GenericNumTraits<tsl::tstring> {
  enum {
    RequireInitialization = 1,  // tstring needs its constructor run.
    ReadCost = HugeCost,
    AddCost = HugeCost,
    MulCost = HugeCost
  };

  static constexpr inline int digits10() { return 0; }
  static constexpr inline int max_digits10() { return 0; }

 private:
  // Deliberately undefined: strings have no epsilon/lowest/highest/etc.
  static inline tsl::tstring epsilon();
  static inline tsl::tstring dummy_precision();
  static inline tsl::tstring lowest();
  static inline tsl::tstring highest();
  static inline tsl::tstring infinity();
  static inline tsl::tstring quiet_NaN();
};

}  // namespace Eigen
73
+
74
+ #endif // TENSORFLOW_TSL_FRAMEWORK_NUMERIC_TYPES_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/shared_counter.h ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_SHARED_COUNTER_H_
16
+ #define TENSORFLOW_TSL_FRAMEWORK_SHARED_COUNTER_H_
17
+
18
+ #include <atomic>
19
+
20
+ #include "tsl/platform/types.h"
21
+
22
+ namespace tsl {
23
// A lightweight thread-safe monotone counter for establishing
// temporal ordering.
class SharedCounter {
 public:
  // Returns the most recently issued value (0 before any next() call).
  int64_t get() { return value_.load(); }
  // Atomically advances the counter and returns the new value; each caller
  // therefore observes a distinct, strictly increasing number.
  int64_t next() { return value_.fetch_add(1) + 1; }

 private:
  std::atomic<int64_t> value_{0};
};
33
+
34
+ } // namespace tsl
35
+ #endif // TENSORFLOW_TSL_FRAMEWORK_SHARED_COUNTER_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/tracking_allocator.h ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_TRACKING_ALLOCATOR_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_TRACKING_ALLOCATOR_H_
18
+
19
+ #include <unordered_map>
20
+
21
+ #include "tsl/framework/allocator.h"
22
+ #include "tsl/lib/gtl/inlined_vector.h"
23
+ #include "tsl/platform/mutex.h"
24
+ #include "tsl/platform/thread_annotations.h"
25
+ #include "tsl/platform/types.h"
26
+
27
+ namespace tsl {
28
+
29
+ // TrackingAllocator is a wrapper for an Allocator. It keeps a running
30
+ // count of the number of bytes allocated through the wrapper. It is
31
+ // used by the Executor to "charge" allocations to particular Op
32
+ // executions. Each Op gets a separate TrackingAllocator wrapper
33
+ // around the underlying allocator.
34
+ //
35
+ // The implementation assumes the invariant that all calls to
36
+ // AllocateRaw by an Op (or work items spawned by the Op) will occur
37
+ // before the Op's Compute method returns. Thus the high watermark is
38
+ // established once Compute returns.
39
+ //
40
+ // DeallocateRaw can be called long after the Op has finished,
41
+ // e.g. when an output tensor is deallocated, and the wrapper cannot
42
+ // be deleted until the last of these calls has occurred. The
43
+ // TrackingAllocator keeps track of outstanding calls using a
44
+ // reference count, and deletes itself once the last call has been
45
+ // received and the high watermark has been retrieved.
46
// A single allocation event: number of bytes allocated and the timestamp
// (microseconds) at which the allocation happened.
struct AllocRecord {
  // Fixed typo in the original parameter name ("a_btyes").
  AllocRecord(int64_t a_bytes, int64_t a_micros)
      : alloc_bytes(a_bytes), alloc_micros(a_micros) {}
  // Default record: zero bytes at time zero.
  AllocRecord() : AllocRecord(0, 0) {}

  int64_t alloc_bytes;
  int64_t alloc_micros;
};
54
+
55
class TrackingAllocator : public Allocator {
 public:
  // Wraps `allocator` (not owned). Per the member comment below, sizes and
  // ids are tracked locally only when `track_ids` is requested and the
  // underlying allocator does not already track allocation sizes.
  explicit TrackingAllocator(Allocator* allocator, bool track_ids);
  // Delegates naming to the wrapped allocator.
  std::string Name() override { return allocator_->Name(); }
  // Convenience overload: forwards with default AllocationAttributes.
  void* AllocateRaw(size_t alignment, size_t num_bytes) override {
    return AllocateRaw(alignment, num_bytes, AllocationAttributes());
  }
  void* AllocateRaw(size_t alignment, size_t num_bytes,
                    const AllocationAttributes& allocation_attr) override;
  void DeallocateRaw(void* ptr) override;
  bool TracksAllocationSizes() const override;
  size_t RequestedSize(const void* ptr) const override;
  size_t AllocatedSize(const void* ptr) const override;
  int64_t AllocationId(const void* ptr) const override;
  absl::optional<AllocatorStats> GetStats() override;
  bool ClearStats() override;

  // Delegates memory-type queries to the wrapped allocator.
  AllocatorMemoryType GetMemoryType() const override {
    return allocator_->GetMemoryType();
  }

  // If the underlying allocator tracks allocation sizes, this returns
  // a tuple where the first value is the total number of bytes
  // allocated through this wrapper, the second value is the high
  // watermark of bytes allocated through this wrapper and the third value is
  // the allocated bytes through this wrapper that are still alive. If the
  // underlying allocator does not track allocation sizes the first
  // value is the total number of bytes requested through this wrapper
  // and the second and the third are 0.
  //
  std::tuple<size_t, size_t, size_t> GetSizes();
  // After GetRecordsAndUnRef is called, the only further calls allowed
  // on this wrapper are calls to DeallocateRaw with pointers that
  // were allocated by this wrapper and have not yet been
  // deallocated. After this call completes and all allocated pointers
  // have been deallocated the wrapper will delete itself.
  gtl::InlinedVector<AllocRecord, 4> GetRecordsAndUnRef();
  // Returns a copy of allocation records collected so far.
  gtl::InlinedVector<AllocRecord, 4> GetCurrentRecords();

 protected:
  // Destruction happens via UnRef/self-delete; see class comment above.
  ~TrackingAllocator() override {}

 private:
  // Drops one reference; returns true when the object should delete itself.
  bool UnRef() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);

  Allocator* allocator_;  // not owned.
  mutable mutex mu_;
  // the number of calls to AllocateRaw that have not yet been matched
  // by a corresponding call to DeAllocateRaw, plus 1 if the Executor
  // has not yet read out the high watermark.
  int ref_ TF_GUARDED_BY(mu_);
  // the current number of outstanding bytes that have been allocated
  // by this wrapper, or 0 if the underlying allocator does not track
  // allocation sizes.
  size_t allocated_ TF_GUARDED_BY(mu_);
  // the maximum number of outstanding bytes that have been allocated
  // by this wrapper, or 0 if the underlying allocator does not track
  // allocation sizes.
  size_t high_watermark_ TF_GUARDED_BY(mu_);
  // the total number of bytes that have been allocated by this
  // wrapper if the underlying allocator tracks allocation sizes,
  // otherwise the total number of bytes that have been requested by
  // this allocator.
  size_t total_bytes_ TF_GUARDED_BY(mu_);

  gtl::InlinedVector<AllocRecord, 4> allocations_ TF_GUARDED_BY(mu_);

  // Track allocations locally if requested in the constructor and the
  // underlying allocator doesn't already do it for us.
  const bool track_sizes_locally_;
  // Bookkeeping for a live (not yet deallocated) allocation.
  struct Chunk {
    size_t requested_size;
    size_t allocated_size;
    int64_t allocation_id;
  };
  std::unordered_map<const void*, Chunk> in_use_ TF_GUARDED_BY(mu_);
  int64_t next_allocation_id_ TF_GUARDED_BY(mu_);
};
134
+
135
+ } // end namespace tsl
136
+
137
+ #endif // TENSORFLOW_TSL_FRAMEWORK_TRACKING_ALLOCATOR_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/type_traits.h ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_FRAMEWORK_TYPE_TRAITS_H_
17
+ #define TENSORFLOW_TSL_FRAMEWORK_TYPE_TRAITS_H_
18
+
19
+ #include <limits>
20
+ #include <type_traits>
21
+ #include <utility>
22
+
23
+ #include "tsl/framework/numeric_types.h"
24
+ #include "tsl/platform/types.h"
25
+
26
+ namespace tsl {
27
+
28
// Functions to define quantization attribute of types.
// NOTE: these intentionally mirror std::true_type/std::false_type as plain
// holders of a boolean `value`; within namespace tsl they shadow the std
// versions for the traits defined below.
struct true_type {
  static constexpr bool value = true;
};
struct false_type {
  static constexpr bool value = false;
};
35
+
36
// is_quantized<T>::value is true exactly for the qintN/quintN wrapper types.
// Default is_quantized is false.
template <typename T>
struct is_quantized : false_type {};

// Specialize the quantized types.
template <>
struct is_quantized<qint8> : true_type {};
template <>
struct is_quantized<quint8> : true_type {};
template <>
struct is_quantized<qint32> : true_type {};
template <>
struct is_quantized<qint16> : true_type {};
template <>
struct is_quantized<quint16> : true_type {};
51
+
52
// is_complex<T>::value is true only for the two std::complex instantiations
// used by TensorFlow (complex64/complex128).
// Default is_complex is false.
template <typename T>
struct is_complex : false_type {};

// Specialize std::complex<float> and std::complex<double> types.
template <>
struct is_complex<std::complex<float>> : true_type {};
template <>
struct is_complex<std::complex<double>> : true_type {};
61
+
62
// is_simple_type<T>::value if T[] can be safely constructed and destructed
// without running T() and ~T(). We do not use std::is_trivial<T>
// directly because std::complex<float> and std::complex<double> are
// not trivial, but their arrays can be constructed and destructed
// without running their default ctors and dtors.
// The explicit list also covers the half/bfloat16, float8 and int4 wrapper
// types used by TensorFlow.
template <typename T>
struct is_simple_type {
  static constexpr bool value =
      std::is_trivial<T>::value || std::is_same<T, Eigen::half>::value ||
      std::is_same<T, complex64>::value || std::is_same<T, complex128>::value ||
      is_quantized<T>::value || std::is_same<T, bfloat16>::value ||
      std::is_same<T, float8_e4m3fn>::value ||
      std::is_same<T, float8_e4m3b11>::value ||
      std::is_same<T, float8_e5m2>::value || std::is_same<T, int4>::value ||
      std::is_same<T, uint4>::value;
};
78
+
79
+ } // namespace tsl
80
+
81
// Define numeric limits for our quantized as subclasses of the
// standard types.
namespace std {
template <>
class numeric_limits<tsl::qint8> : public numeric_limits<tsl::int8> {};
template <>
class numeric_limits<tsl::quint8> : public numeric_limits<tsl::uint8> {};
template <>
class numeric_limits<tsl::qint16> : public numeric_limits<tsl::int16> {};
template <>
class numeric_limits<tsl::quint16> : public numeric_limits<tsl::uint16> {};
template <>
class numeric_limits<tsl::qint32> : public numeric_limits<tsl::int32> {};

// Specialize is_signed for quantized types.
// NOTE(review): specializing std::is_signed for program-defined types is
// formally not permitted by the standard; kept unchanged here to stay
// byte-compatible with the shipped TensorFlow headers.
template <>
struct is_signed<tsl::qint8> : public is_signed<tsl::int8> {};
template <>
struct is_signed<tsl::quint8> : public is_signed<tsl::uint8> {};
template <>
struct is_signed<tsl::qint16> : public is_signed<tsl::int16> {};
template <>
struct is_signed<tsl::quint16> : public is_signed<tsl::uint16> {};
template <>
struct is_signed<tsl::qint32> : public is_signed<tsl::int32> {};

}  // namespace std
108
+
109
+ #endif // TENSORFLOW_TSL_FRAMEWORK_TYPE_TRAITS_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/connected_traceme.h ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+ #ifndef TENSORFLOW_TSL_PROFILER_LIB_CONNECTED_TRACEME_H_
16
+ #define TENSORFLOW_TSL_PROFILER_LIB_CONNECTED_TRACEME_H_
17
+
18
+ #include <string>
19
+ #include <utility>
20
+
21
+ #include "absl/strings/string_view.h"
22
+ #include "absl/types/optional.h"
23
+ #include "tsl/profiler/lib/context_types.h"
24
+ #include "tsl/profiler/lib/traceme.h"
25
+ #include "tsl/profiler/lib/traceme_encode.h"
26
+
27
+ namespace tsl {
28
+ namespace profiler {
29
+
30
+ /*
31
+ * TraceMeProducer and TraceMeConsumer are used to correlate TraceMe events on
32
+ * different threads. TraceMeProducer generates the context information to be
33
+ * passed to TraceMeConsumer, which consists of the context id and optionally
34
+ * the context type. They may be provided by the user. Then, the events of the
35
+ * same context information can be correlated during the analysis.
36
+ *
37
+ * Example Usages:
38
+ * (1) Using the user-provided context type and id. The user is responsible for
39
+ * providing the same context type and id to TraceMeProducer and
40
+ * TraceMeConsumer.
41
+ * [Producer Thread]
42
+ * // user_context_id is provided by the user.
43
+ * TraceMeProducer producer(
44
+ * [&] { return TraceMeEncode("op_dispatch", {{"op_type", "matmul"}}); },
45
+ * ContextType::kTfExecutor, user_context_id);
46
+ * [Consumer Thread]
47
+ * // user_context_id is provided by the user.
48
+ * TraceMeConsumer consumer(
49
+ * [&] { return "op_execute"; }, ContextType::kTfExecutor, user_context_id);
50
+ *
51
+ * (2) Using the user-provided context type and generic id. The user is
52
+ * responsible for passing the TraceMeProducer's context id to
53
+ * TraceMeConsumer as well as providing the same context type to
54
+ * TraceMeProducer and TraceMeConsumer.
55
+ * [Producer Thread]
56
+ * TraceMeProducer producer(
57
+ * [&] { return TraceMeEncode("op_dispatch", {{"op_type", "matmul"}}); },
58
+ * ContextType::kTfExecutor);
59
+ * context_id = producer.GetContextId();
60
+ * // Pass context_id to the consumer thread.
61
+ * [Consumer Thread]
62
+ * // context_id is passed from the producer thread.
63
+ * TraceMeConsumer consumer(
64
+ * [&] { return "op_execute"; }, ContextType::kTfExecutor, context_id);
65
+ *
66
+ * (3) Using the generic context information. The user is responsible for
67
+ * passing the TraceMeProducer's context id to TraceMeConsumer.
68
+ * [Producer Thread]
69
+ * TraceMeProducer producer(
70
+ * [&] { return TraceMeEncode("op_dispatch", {{"op_type", "matmul"}}); });
71
+ * context_id = producer.GetContextId();
72
+ * // Pass context_id to the consumer thread.
73
+ * [Consumer Thread]
74
+ * // context_id is passed from the producer thread.
75
+ * TraceMeConsumer consumer([&] { return "op_execute"; }, context_id);
76
+ */
77
+ class TraceMeProducer : public TraceMe {
78
+ public:
79
+ template <typename NameT>
80
+ explicit TraceMeProducer(NameT&& name,
81
+ ContextType context_type = ContextType::kGeneric,
82
+ absl::optional<uint64> context_id = absl::nullopt,
83
+ int level = 2)
84
+ : TraceMe(std::forward<NameT>(name), level),
85
+ context_id_(context_id.has_value() ? context_id.value()
86
+ : TraceMe::NewActivityId()) {
87
+ AppendMetadata([&] {
88
+ return TraceMeEncode({{"_pt", context_type}, {"_p", context_id_}});
89
+ });
90
+ }
91
+
92
+ uint64 GetContextId() const { return context_id_; }
93
+
94
+ private:
95
+ uint64 context_id_;
96
+ };
97
+
98
+ class TraceMeConsumer : public TraceMe {
99
+ public:
100
+ template <typename NameT>
101
+ TraceMeConsumer(NameT&& name, ContextType context_type, uint64 context_id,
102
+ int level = 2)
103
+ : TraceMe(std::forward<NameT>(name), level) {
104
+ AppendMetadata([&] {
105
+ return TraceMeEncode({{"_ct", context_type}, {"_c", context_id}});
106
+ });
107
+ }
108
+
109
+ template <typename NameT>
110
+ TraceMeConsumer(NameT&& name, uint64 context_id, int level = 2)
111
+ : TraceMeConsumer(std::forward<NameT>(name), ContextType::kGeneric,
112
+ context_id, level) {}
113
+ };
114
+
115
+ } // namespace profiler
116
+ } // namespace tsl
117
+
118
+ #endif // TENSORFLOW_TSL_PROFILER_LIB_CONNECTED_TRACEME_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/context_types.h ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+ #ifndef TENSORFLOW_TSL_PROFILER_LIB_CONTEXT_TYPES_H_
16
+ #define TENSORFLOW_TSL_PROFILER_LIB_CONTEXT_TYPES_H_
17
+
18
+ #include <cstdint>
19
+
20
+ namespace tsl {
21
+ namespace profiler {
22
+
23
// Note: Please add new context type after all existing ones, and update
// kLastContextType to the new enumerator.
enum class ContextType : int {
  kGeneric = 0,
  kLegacy,
  kTfExecutor,
  kTfrtExecutor,
  kSharedBatchScheduler,
  kPjRt,
  kAdaptiveSharedBatchScheduler,
  kTfrtTpuRuntime,
  kTpuEmbeddingEngine,
  kGpuLaunch,
  kBatcher,
  kTpuStream,
  kTpuLaunch,
  kPathwaysExecutor,
  kPjrtLibraryCall,
  // Alias for the highest real enumerator. Previously pointed at kTpuLaunch,
  // which made GetSafeContextType() reject the valid values kPathwaysExecutor
  // and kPjrtLibraryCall (mapping them to kGeneric).
  kLastContextType = ContextType::kPjrtLibraryCall,
};

// In XFlow we encode context type as flow category as 6 bits.
static_assert(static_cast<int>(ContextType::kLastContextType) < 64,
              "Should have less than 64 categories.");
46
+
47
+ const char* GetContextTypeString(ContextType context_type);
48
+
49
+ inline ContextType GetSafeContextType(uint32_t context_type) {
50
+ if (context_type > static_cast<uint32_t>(ContextType::kLastContextType)) {
51
+ return ContextType::kGeneric;
52
+ }
53
+ return static_cast<ContextType>(context_type);
54
+ }
55
+
56
+ } // namespace profiler
57
+ } // namespace tsl
58
+
59
+ #endif // TENSORFLOW_TSL_PROFILER_LIB_CONTEXT_TYPES_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/nvtx_utils.h ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ #ifndef TENSORFLOW_TSL_PROFILER_LIB_NVTX_UTILS_H_
17
+ #define TENSORFLOW_TSL_PROFILER_LIB_NVTX_UTILS_H_
18
+
19
+ #include <optional>
20
+
21
+ #include "absl/strings/string_view.h"
22
+ #include "tsl/platform/logging.h"
23
+ #include "tsl/platform/macros.h"
24
+
25
+ #if GOOGLE_CUDA
26
+ #include "nvtx3/nvToolsExt.h"
27
+ #else
28
+ // Some typedef to help build without NVTX.
29
+ typedef void* nvtxEventAttributes_t;
30
+ typedef void* nvtxDomainHandle_t;
31
+ typedef void* nvtxStringHandle_t;
32
+ #endif
33
+
34
+ namespace tsl {
35
+ namespace profiler {
36
+ namespace nvtx {
37
+
38
+ // A helper function that return the domains to use if NVTX profiling
39
+ // is enabled.
40
+ inline std::optional<nvtxDomainHandle_t> GetNVTXDomain() {
41
+ #if GOOGLE_CUDA
42
+ static nvtxDomainHandle_t domain;
43
+ static bool is_enabled = [] {
44
+ bool _is_enabled = false;
45
+ // Force NVTX marker if a tool triggered the profiler.
46
+ domain = nvtxDomainCreateA("TSL");
47
+ if (domain) {
48
+ _is_enabled = true;
49
+ }
50
+ VLOG(1) << "Is NVTX marker enabled? " << _is_enabled;
51
+ return _is_enabled;
52
+ }();
53
+ if (is_enabled) return domain;
54
+ #endif
55
+ return {};
56
+ }
57
+
58
// Returns true when CUDA NVTX profiling ranges should be emitted; always
// false in builds without CUDA support.
inline bool RangesEnabled() {
#if GOOGLE_CUDA
  return GetNVTXDomain().has_value();
#else
  return false;
#endif
}
66
+
67
+ // Two types of NVTX range annotation are supported, the older/simpler option
68
+ // is to use std::string and have the NVTX implementation copy a C-style
69
+ // string every time. The other option is to pass a struct implementing two
70
+ // methods:
71
+ //
72
+ // std::string_view Title() const;
73
+ // nvtxStringHandle_t NvtxRegisteredTitle() const;
74
+ //
75
+ // in which case NvtxRegisteredTitle() will be used when starting NVTX ranges,
76
+ // avoiding this string copy.
77
+ // The Title() method is needed because AnnotationStack::PushAnnotation(...) is
78
+ // the backend for some annotations when NVTX is not enabled, and it does not
79
+ // recognise registered strings. has_annotation_api_v<AnnotationType>
80
+ // distinguishes between the two types of annotation.
81
// Trait selecting the richer annotation API: any annotation type other than
// plain std::string is expected to expose Title() and NvtxRegisteredTitle()
// (see the comment above for the rationale).
template <typename AnnotationType>
inline constexpr bool has_annotation_api_v =
    !std::is_same_v<std::string, AnnotationType>;
84
+
85
+ template <typename AnnotationType>
86
+ void RangePush(nvtxDomainHandle_t domain, const AnnotationType& annotation) {
87
+ #if GOOGLE_CUDA
88
+ nvtxEventAttributes_t attrs{};
89
+ attrs.version = NVTX_VERSION;
90
+ attrs.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
91
+ if constexpr (has_annotation_api_v<std::decay_t<AnnotationType>>) {
92
+ attrs.messageType = NVTX_MESSAGE_TYPE_REGISTERED;
93
+ attrs.message.registered = annotation.NvtxRegisteredTitle();
94
+ } else {
95
+ attrs.messageType = NVTX_MESSAGE_TYPE_ASCII;
96
+ attrs.message.ascii = annotation.c_str();
97
+ }
98
+ ::nvtxDomainRangePushEx(domain, &attrs);
99
+ #endif
100
+ }
101
+
102
+ } // namespace nvtx
103
+ } // namespace profiler
104
+ } // namespace tsl
105
+ #endif // TENSORFLOW_TSL_PROFILER_LIB_NVTX_UTILS_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/profiler_factory.h ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2019 The TensorFlow Authors All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+ #ifndef TENSORFLOW_TSL_PROFILER_LIB_PROFILER_FACTORY_H_
16
+ #define TENSORFLOW_TSL_PROFILER_LIB_PROFILER_FACTORY_H_
17
+
18
+ #include <functional>
19
+ #include <memory>
20
+ #include <vector>
21
+
22
+ #include "tsl/profiler/lib/profiler_interface.h"
23
+ #include "tsl/profiler/protobuf/profiler_options.pb.h"
24
+
25
+ namespace tsl {
26
+ namespace profiler {
27
+
28
+ // A ProfilerFactory returns an instance of ProfilerInterface if ProfileOptions
29
+ // require it. Otherwise, it might return nullptr.
30
+ using ProfilerFactory = std::function<std::unique_ptr<ProfilerInterface>(
31
+ const tensorflow::ProfileOptions&)>;
32
+
33
+ // Registers a profiler factory. Should be invoked at most once per factory.
34
+ void RegisterProfilerFactory(ProfilerFactory factory);
35
+
36
+ // Invokes all registered profiler factories with the given options, and
37
+ // returns the instantiated (non-null) profiler interfaces.
38
+ std::vector<std::unique_ptr<ProfilerInterface>> CreateProfilers(
39
+ const tensorflow::ProfileOptions& options);
40
+
41
+ // For testing only.
42
+ void ClearRegisteredProfilersForTest();
43
+
44
+ } // namespace profiler
45
+ } // namespace tsl
46
+
47
+ #endif // TENSORFLOW_TSL_PROFILER_LIB_PROFILER_FACTORY_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/profiler_interface.h ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2016 The TensorFlow Authors All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+ #ifndef TENSORFLOW_TSL_PROFILER_LIB_PROFILER_INTERFACE_H_
16
+ #define TENSORFLOW_TSL_PROFILER_LIB_PROFILER_INTERFACE_H_
17
+
18
+ #include "tsl/platform/status.h"
19
+ #include "tsl/profiler/protobuf/xplane.pb.h"
20
+
21
+ namespace tsl {
22
+ namespace profiler {
23
+
24
+ // Interface for tensorflow profiler plugins.
25
+ //
26
+ // ProfileSession calls each of these methods at most once per instance, and
27
+ // implementations can rely on that guarantee for simplicity.
28
+ //
29
+ // Thread-safety: Implementations are only required to be go/thread-compatible.
30
+ // ProfileSession is go/thread-safe and synchronizes access to ProfilerInterface
31
+ // instances.
32
+ class ProfilerInterface {
33
+ public:
34
+ virtual ~ProfilerInterface() = default;
35
+
36
+ // Starts profiling.
37
+ virtual Status Start() = 0;
38
+
39
+ // Stops profiling.
40
+ virtual Status Stop() = 0;
41
+
42
+ // Saves collected profile data into XSpace.
43
+ virtual Status CollectData(tensorflow::profiler::XSpace* space) = 0;
44
+ };
45
+
46
+ } // namespace profiler
47
+ } // namespace tsl
48
+
49
+ #endif // TENSORFLOW_TSL_PROFILER_LIB_PROFILER_INTERFACE_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/profiler_lock.h ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+ #ifndef TENSORFLOW_TSL_PROFILER_LIB_PROFILER_LOCK_H_
16
+ #define TENSORFLOW_TSL_PROFILER_LIB_PROFILER_LOCK_H_
17
+
18
+ #include <utility>
19
+
20
+ #include "tsl/platform/statusor.h"
21
+
22
+ namespace tsl {
23
+ namespace profiler {
24
+
25
+ constexpr absl::string_view kProfilerLockContention =
26
+ "Another profiling session active.";
27
+
28
+ // Handle for the profiler lock. At most one instance of this class, the
29
+ // "active" instance, owns the profiler lock.
30
+ class ProfilerLock {
31
+ public:
32
+ // Returns true if the process has active profiling session.
33
+ static bool HasActiveSession();
34
+
35
+ // Acquires the profiler lock if no other profiler session is currently
36
+ // active.
37
+ static StatusOr<ProfilerLock> Acquire();
38
+
39
+ // Default constructor creates an inactive instance.
40
+ ProfilerLock() = default;
41
+
42
+ // Non-copyable.
43
+ ProfilerLock(const ProfilerLock&) = delete;
44
+ ProfilerLock& operator=(const ProfilerLock&) = delete;
45
+
46
+ // Movable.
47
+ ProfilerLock(ProfilerLock&& other)
48
+ : active_(std::exchange(other.active_, false)) {}
49
+ ProfilerLock& operator=(ProfilerLock&& other) {
50
+ active_ = std::exchange(other.active_, false);
51
+ return *this;
52
+ }
53
+
54
+ ~ProfilerLock() { ReleaseIfActive(); }
55
+
56
+ // Allow creating another active instance.
57
+ void ReleaseIfActive();
58
+
59
+ // Returns true if this is the active instance.
60
+ bool Active() const { return active_; }
61
+
62
+ private:
63
+ // Explicit constructor allows creating an active instance, private so it can
64
+ // only be called by Acquire.
65
+ explicit ProfilerLock(bool active) : active_(active) {}
66
+
67
+ bool active_ = false;
68
+ };
69
+
70
+ } // namespace profiler
71
+ } // namespace tsl
72
+
73
+ #endif // TENSORFLOW_TSL_PROFILER_LIB_PROFILER_LOCK_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/profiler_session.h ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+ #ifndef TENSORFLOW_TSL_PROFILER_LIB_PROFILER_SESSION_H_
16
+ #define TENSORFLOW_TSL_PROFILER_LIB_PROFILER_SESSION_H_
17
+
18
+ #include <functional>
19
+ #include <memory>
20
+ #include <vector>
21
+
22
+ #include "tsl/platform/mutex.h"
23
+ #include "tsl/platform/platform.h"
24
+ #include "tsl/platform/status.h"
25
+ #include "tsl/platform/thread_annotations.h"
26
+ #include "tsl/platform/types.h"
27
+ #include "tsl/profiler/protobuf/profiler_options.pb.h"
28
+ #include "tsl/profiler/protobuf/xplane.pb.h"
29
+
30
+ #if !defined(IS_MOBILE_PLATFORM)
31
+ #include "tsl/profiler/lib/profiler_interface.h"
32
+ #include "tsl/profiler/lib/profiler_lock.h"
33
+ #endif
34
+
35
+ namespace tsl {
36
+
37
+ // A profiler which will start profiling when creating the object and will stop
38
+ // when either the object is destroyed or CollectData is called.
39
+ // Multiple instances can be created, but at most one of them will profile.
40
+ // Status() will return OK only for the instance that is profiling.
41
+ // Thread-safety: ProfilerSession is thread-safe.
42
+ class ProfilerSession {
43
+ public:
44
+ // Creates a ProfilerSession and starts profiling.
45
+ static std::unique_ptr<ProfilerSession> Create(
46
+ const tensorflow::ProfileOptions& options);
47
+
48
+ static tensorflow::ProfileOptions DefaultOptions() {
49
+ tensorflow::ProfileOptions options;
50
+ options.set_version(1);
51
+ options.set_device_tracer_level(1);
52
+ options.set_host_tracer_level(2);
53
+ options.set_device_type(tensorflow::ProfileOptions::UNSPECIFIED);
54
+ options.set_python_tracer_level(0);
55
+ options.set_enable_hlo_proto(true);
56
+ options.set_include_dataset_ops(true);
57
+ return options;
58
+ }
59
+
60
+ // Deletes an existing Profiler and enables starting a new one.
61
+ ~ProfilerSession();
62
+
63
+ tsl::Status Status() TF_LOCKS_EXCLUDED(mutex_);
64
+
65
+ // Collects profile data into XSpace.
66
+ tsl::Status CollectData(tensorflow::profiler::XSpace* space)
67
+ TF_LOCKS_EXCLUDED(mutex_);
68
+
69
+ private:
70
+ // Constructs an instance of the class and starts profiling
71
+ explicit ProfilerSession(const tensorflow::ProfileOptions& options);
72
+
73
+ // ProfilerSession is neither copyable or movable.
74
+ ProfilerSession(const ProfilerSession&) = delete;
75
+ ProfilerSession& operator=(const ProfilerSession&) = delete;
76
+
77
+ #if !defined(IS_MOBILE_PLATFORM)
78
+ // Collects profile data into XSpace without post-processsing.
79
+ tsl::Status CollectDataInternal(tensorflow::profiler::XSpace* space);
80
+
81
+ profiler::ProfilerLock profiler_lock_ TF_GUARDED_BY(mutex_);
82
+
83
+ std::unique_ptr<profiler::ProfilerInterface> profilers_ TF_GUARDED_BY(mutex_);
84
+
85
+ uint64 start_time_ns_;
86
+ tensorflow::ProfileOptions options_;
87
+ #endif
88
+ tsl::Status status_ TF_GUARDED_BY(mutex_);
89
+ mutex mutex_;
90
+ };
91
+
92
+ } // namespace tsl
93
+ #endif // TENSORFLOW_TSL_PROFILER_LIB_PROFILER_SESSION_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/scoped_annotation.h ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+ #ifndef TENSORFLOW_TSL_PROFILER_LIB_SCOPED_ANNOTATION_H_
16
+ #define TENSORFLOW_TSL_PROFILER_LIB_SCOPED_ANNOTATION_H_
17
+
18
+ #include <stddef.h>
19
+
20
+ #include <atomic>
21
+ #include <optional>
22
+ #include <string>
23
+ #include <string_view>
24
+ #include <utility>
25
+
26
+ #include "absl/strings/string_view.h"
27
+ #include "tsl/platform/macros.h"
28
+ #include "tsl/platform/types.h"
29
+
30
+ #if !defined(IS_MOBILE_PLATFORM)
31
+ #include "tsl/profiler/backends/cpu/annotation_stack.h"
32
+ #include "tsl/profiler/lib/nvtx_utils.h"
33
+ #endif
34
+
35
+ namespace tsl {
36
+ namespace profiler {
37
+
38
+ // Adds an annotation to all activities for the duration of the instance
39
+ // lifetime through the currently registered TraceCollector.
40
+ //
41
+ // Usage: {
42
+ // ScopedAnnotation annotation("my kernels");
43
+ // Kernel1<<<x,y>>>;
44
+ // LaunchKernel2(); // Launches a CUDA kernel.
45
+ // }
46
+ // This will add 'my kernels' to both kernels in the profiler UI
47
+ class ScopedAnnotation {
48
+ public:
49
+ explicit ScopedAnnotation(absl::string_view name) {
50
+ #if !defined(IS_MOBILE_PLATFORM)
51
+ #if GOOGLE_CUDA
52
+ std::optional<nvtxDomainHandle_t> domain =
53
+ tsl::profiler::nvtx::GetNVTXDomain();
54
+ if (TF_PREDICT_FALSE(domain.has_value())) {
55
+ tsl::profiler::nvtx::RangePush(domain.value(), std::string{name});
56
+ } else // NOLINT
57
+ #endif
58
+ if (TF_PREDICT_FALSE(AnnotationStack::IsEnabled())) {
59
+ old_length_ = AnnotationStack::PushAnnotation(name);
60
+ }
61
+ #endif
62
+ }
63
+
64
+ explicit ScopedAnnotation(const char* name)
65
+ : ScopedAnnotation(absl::string_view(name)) {}
66
+
67
+ explicit ScopedAnnotation(const string& name) {
68
+ #if !defined(IS_MOBILE_PLATFORM)
69
+ #if GOOGLE_CUDA
70
+ std::optional<nvtxDomainHandle_t> domain =
71
+ tsl::profiler::nvtx::GetNVTXDomain();
72
+ if (TF_PREDICT_FALSE(domain.has_value())) {
73
+ tsl::profiler::nvtx::RangePush(domain.value(), name);
74
+ } else // NOLINT
75
+ #endif
76
+ if (TF_PREDICT_FALSE(AnnotationStack::IsEnabled())) {
77
+ old_length_ = AnnotationStack::PushAnnotation(name);
78
+ }
79
+ #endif
80
+ }
81
+
82
+ explicit ScopedAnnotation(string&& name) {
83
+ #if !defined(IS_MOBILE_PLATFORM)
84
+ #if GOOGLE_CUDA
85
+ std::optional<nvtxDomainHandle_t> domain =
86
+ tsl::profiler::nvtx::GetNVTXDomain();
87
+ if (TF_PREDICT_FALSE(domain.has_value())) {
88
+ tsl::profiler::nvtx::RangePush(domain.value(), name);
89
+ } else // NOLINT
90
+ #endif
91
+ if (TF_PREDICT_FALSE(AnnotationStack::IsEnabled())) {
92
+ old_length_ = AnnotationStack::PushAnnotation(std::move(name));
93
+ }
94
+ #endif
95
+ }
96
+
97
+ template <typename NameGeneratorT>
98
+ explicit ScopedAnnotation(NameGeneratorT name_generator) {
99
+ #if !defined(IS_MOBILE_PLATFORM)
100
+ #if GOOGLE_CUDA
101
+ std::optional<nvtxDomainHandle_t> domain =
102
+ tsl::profiler::nvtx::GetNVTXDomain();
103
+ if (TF_PREDICT_FALSE(domain.has_value())) {
104
+ tsl::profiler::nvtx::RangePush(domain.value(), name_generator());
105
+ } else // NOLINT
106
+ #endif
107
+ if (TF_PREDICT_FALSE(AnnotationStack::IsEnabled())) {
108
+ auto annotation = name_generator();
109
+ if constexpr (tsl::profiler::nvtx::has_annotation_api_v<
110
+ std::decay_t<decltype(annotation)>>) {
111
+ old_length_ = AnnotationStack::PushAnnotation(annotation.Title());
112
+ } else {
113
+ old_length_ = AnnotationStack::PushAnnotation(std::move(annotation));
114
+ }
115
+ }
116
+ #endif
117
+ }
118
+
119
+ // Pops the name passed in the constructor from the current annotation.
120
+ ~ScopedAnnotation() {
121
+ // TODO(b/137971921): without this memory fence, two presubmit tests will
122
+ // fail probably due to compiler in that presubmit config.
123
+ std::atomic_thread_fence(std::memory_order_acquire);
124
+ #if !defined(IS_MOBILE_PLATFORM)
125
+ #if GOOGLE_CUDA
126
+ std::optional<nvtxDomainHandle_t> domain =
127
+ tsl::profiler::nvtx::GetNVTXDomain();
128
+ if (TF_PREDICT_FALSE(domain.has_value())) {
129
+ ::nvtxDomainRangePop(domain.value());
130
+ } else // NOLINT
131
+ #endif
132
+ if (TF_PREDICT_FALSE(old_length_ != kInvalidLength)) {
133
+ AnnotationStack::PopAnnotation(old_length_);
134
+ }
135
+ #endif
136
+ }
137
+
138
+ static bool IsEnabled() {
139
+ #if !defined(IS_MOBILE_PLATFORM)
140
+ return AnnotationStack::IsEnabled();
141
+ #else
142
+ return false;
143
+ #endif
144
+ }
145
+
146
+ private:
147
+ // signals that annotation is disabled at the constructor.
148
+ static constexpr size_t kInvalidLength = static_cast<size_t>(-1);
149
+
150
+ ScopedAnnotation(const ScopedAnnotation&) = delete;
151
+ void operator=(const ScopedAnnotation&) = delete;
152
+
153
+ size_t old_length_ = kInvalidLength;
154
+ };
155
+
156
+ } // namespace profiler
157
+ } // namespace tsl
158
+
159
+ #endif // TENSORFLOW_TSL_PROFILER_LIB_SCOPED_ANNOTATION_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/scoped_annotation_stack.h ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+ #ifndef TENSORFLOW_TSL_PROFILER_LIB_SCOPED_ANNOTATION_STACK_H_
16
+ #define TENSORFLOW_TSL_PROFILER_LIB_SCOPED_ANNOTATION_STACK_H_
17
+
18
+ #include <stddef.h>
19
+
20
+ #include <atomic>
21
+ #include <string>
22
+ #include <string_view>
23
+ #include <utility>
24
+
25
+ #include "absl/strings/string_view.h"
26
+ #if !defined(IS_MOBILE_PLATFORM)
27
+ #include "tsl/profiler/backends/cpu/annotation_stack.h"
28
+ #include "tsl/profiler/lib/nvtx_utils.h"
29
+ #endif
30
+
31
+ namespace tsl {
32
+ namespace profiler {
33
+
34
+ // ScopedAnnotation for clients that can't use RAII for managing the lifetime
35
+ // of annotations. It provides an API similar to the `TraceMe::ActivityStart`
36
+ // and `TraceMe::ActivityEnd`.
37
+ //
38
+ // Usage:
39
+ // int64_t id = ScopedAnnotationStack::ActivityStart("foo");
40
+ // foo();
41
+ // ScopedAnnotationStack::ActivityEnd(id);
42
+ //
43
+ // Prefer a regular `ScopedAnnotation`. The name of this class is a misnomer,
44
+ // because it doesn't do any automatic destruction at the scope end, it's just
45
+ // for the sake of consistency.
46
+ class ScopedAnnotationStack {
47
+ static constexpr size_t kInvalidActivity = static_cast<size_t>(-1);
48
+
49
+ public:
50
+ static bool IsEnabled() { return AnnotationStack::IsEnabled(); }
51
+
52
+ static int64_t ActivityStart(std::string name) {
53
+ #if !defined(IS_MOBILE_PLATFORM)
54
+ #if GOOGLE_CUDA
55
+ std::optional<nvtxDomainHandle_t> domain =
56
+ tsl::profiler::nvtx::GetNVTXDomain();
57
+ if (TF_PREDICT_FALSE(domain.has_value())) {
58
+ tsl::profiler::nvtx::RangePush(domain.value(), name);
59
+ } else // NOLINT
60
+ #endif
61
+ if (TF_PREDICT_FALSE(AnnotationStack::IsEnabled())) {
62
+ return AnnotationStack::PushAnnotation(std::move(name));
63
+ }
64
+ #endif
65
+ return kInvalidActivity;
66
+ }
67
+
68
+ static int64_t ActivityStart(std::string_view name) {
69
+ return ActivityStart(std::string(name));
70
+ }
71
+
72
+ static int64_t ActivityStart(const char* name) {
73
+ return ActivityStart(std::string_view(name));
74
+ }
75
+
76
+ template <typename NameGeneratorT>
77
+ static int64_t ActivityStart(NameGeneratorT name_generator) {
78
+ #if !defined(IS_MOBILE_PLATFORM)
79
+ #if GOOGLE_CUDA
80
+ std::optional<nvtxDomainHandle_t> domain =
81
+ tsl::profiler::nvtx::GetNVTXDomain();
82
+ if (TF_PREDICT_FALSE(domain.has_value())) {
83
+ tsl::profiler::nvtx::RangePush(domain.value(), name_generator());
84
+ } else // NOLINT
85
+ #endif
86
+ if (TF_PREDICT_FALSE(AnnotationStack::IsEnabled())) {
87
+ auto annotation = name_generator();
88
+ if constexpr (tsl::profiler::nvtx::has_annotation_api_v<
89
+ std::decay_t<decltype(annotation)>>) {
90
+ return AnnotationStack::PushAnnotation(annotation.Title());
91
+ } else {
92
+ return AnnotationStack::PushAnnotation(std::move(annotation));
93
+ }
94
+ }
95
+ #endif
96
+ return kInvalidActivity;
97
+ }
98
+
99
+ static void ActivityEnd(int64_t activity_id) {
100
+ #if !defined(IS_MOBILE_PLATFORM)
101
+ #if GOOGLE_CUDA
102
+ std::optional<nvtxDomainHandle_t> domain =
103
+ tsl::profiler::nvtx::GetNVTXDomain();
104
+ if (TF_PREDICT_FALSE(domain.has_value())) {
105
+ ::nvtxDomainRangePop(domain.value());
106
+ } else // NOLINT
107
+ #endif
108
+ if (TF_PREDICT_FALSE(activity_id != kInvalidActivity)) {
109
+ AnnotationStack::PopAnnotation(activity_id);
110
+ }
111
+ #endif
112
+ }
113
+ };
114
+
115
+ } // namespace profiler
116
+ } // namespace tsl
117
+
118
+ #endif // TENSORFLOW_TSL_PROFILER_LIB_SCOPED_ANNOTATION_STACK_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/scoped_memory_debug_annotation.h ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+ #ifndef TENSORFLOW_TSL_PROFILER_LIB_SCOPED_MEMORY_DEBUG_ANNOTATION_H_
16
+ #define TENSORFLOW_TSL_PROFILER_LIB_SCOPED_MEMORY_DEBUG_ANNOTATION_H_
17
+
18
+ #include <cstdint>
19
+ #include <functional>
20
+ #include <string>
21
+ #include <utility>
22
+
23
+ namespace tsl {
24
+ namespace profiler {
25
+
26
// Annotations for memory profiling and debugging purpose.
// ScopedMemoryDebugAnnotation will cache the annotations in thread-local
// memory, and some allocators will try to tag allocations with the
// annotations.
struct MemoryDebugAnnotation {
  // Name of the op issuing the pending allocation. Not owned: the pointed-to
  // string must outlive the annotation (restored by the RAII wrapper below).
  // nullptr when unset.
  const char* pending_op_name = nullptr;
  // Step id associated with the pending allocation; 0 when unset.
  int64_t pending_step_id = 0;
  // Region type of the pending allocation. Not owned; nullptr when unset.
  const char* pending_region_type = nullptr;
  // Data type of the pending tensor — presumably a DataType enum value set
  // by callers; 0 when unset (TODO confirm against call sites).
  int32_t pending_data_type = 0;
  // A lambda function, when invoked, it will generate the string that describe
  // the shape of the pending tensor. By default, the TensorShape string is an
  // empty string.
  std::function<std::string()> pending_shape_func = []() { return ""; };
};
39
+
40
+ // Wrapper class of MemoryDebugAnnotation for RAII.
41
+ class ScopedMemoryDebugAnnotation {
42
+ public:
43
+ static const MemoryDebugAnnotation& CurrentAnnotation() {
44
+ return *ThreadMemoryDebugAnnotation();
45
+ }
46
+
47
+ explicit ScopedMemoryDebugAnnotation(const char* op_name) {
48
+ MemoryDebugAnnotation* thread_local_annotation =
49
+ ThreadMemoryDebugAnnotation();
50
+ last_annotation_ = *thread_local_annotation;
51
+ *thread_local_annotation = MemoryDebugAnnotation();
52
+ thread_local_annotation->pending_op_name = op_name;
53
+ }
54
+
55
+ explicit ScopedMemoryDebugAnnotation(const char* op_name, int64_t step_id) {
56
+ MemoryDebugAnnotation* thread_local_annotation =
57
+ ThreadMemoryDebugAnnotation();
58
+ last_annotation_ = *thread_local_annotation;
59
+ *thread_local_annotation = MemoryDebugAnnotation();
60
+ thread_local_annotation->pending_op_name = op_name;
61
+ thread_local_annotation->pending_step_id = step_id;
62
+ }
63
+
64
+ // This constructor keeps the pending_op_name and pending_step_id from parent
65
+ // (if any). Otherwise it overwrites with op_name.
66
+ explicit ScopedMemoryDebugAnnotation(
67
+ const char* op_name, const char* region_type, int32_t data_type,
68
+ std::function<std::string()>&& pending_shape_func) {
69
+ MemoryDebugAnnotation* thread_local_annotation =
70
+ ThreadMemoryDebugAnnotation();
71
+ last_annotation_ = *thread_local_annotation;
72
+ if (!thread_local_annotation->pending_op_name) {
73
+ thread_local_annotation->pending_op_name = op_name;
74
+ }
75
+ thread_local_annotation->pending_region_type = region_type;
76
+ thread_local_annotation->pending_data_type = data_type;
77
+ thread_local_annotation->pending_shape_func = std::move(pending_shape_func);
78
+ }
79
+
80
+ explicit ScopedMemoryDebugAnnotation(
81
+ const char* op_name, int64_t step_id, const char* region_type,
82
+ int32_t data_type, std::function<std::string()>&& pending_shape_func) {
83
+ MemoryDebugAnnotation* thread_local_annotation =
84
+ ThreadMemoryDebugAnnotation();
85
+ last_annotation_ = *thread_local_annotation;
86
+ thread_local_annotation->pending_op_name = op_name;
87
+ thread_local_annotation->pending_step_id = step_id;
88
+ thread_local_annotation->pending_region_type = region_type;
89
+ thread_local_annotation->pending_data_type = data_type;
90
+ thread_local_annotation->pending_shape_func = std::move(pending_shape_func);
91
+ }
92
+
93
+ ~ScopedMemoryDebugAnnotation() {
94
+ *ThreadMemoryDebugAnnotation() = last_annotation_;
95
+ }
96
+
97
+ private:
98
+ // Returns a pointer to the MemoryDebugAnnotation for the current thread.
99
+ static MemoryDebugAnnotation* ThreadMemoryDebugAnnotation();
100
+
101
+ // Stores the previous values in case the annotations are nested.
102
+ MemoryDebugAnnotation last_annotation_;
103
+
104
+ ScopedMemoryDebugAnnotation(const ScopedMemoryDebugAnnotation&) = delete;
105
+ ScopedMemoryDebugAnnotation& operator=(const ScopedMemoryDebugAnnotation&) =
106
+ delete;
107
+ };
108
+
109
+ } // namespace profiler
110
+ } // namespace tsl
111
+
112
+ #endif // TENSORFLOW_TSL_PROFILER_LIB_SCOPED_MEMORY_DEBUG_ANNOTATION_H_
videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/traceme.h ADDED
@@ -0,0 +1,333 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+ #ifndef TENSORFLOW_TSL_PROFILER_LIB_TRACEME_H_
16
+ #define TENSORFLOW_TSL_PROFILER_LIB_TRACEME_H_
17
+
18
+ #include <new>
19
+ #include <string>
20
+ #include <utility>
21
+
22
+ #include "absl/strings/string_view.h"
23
+ #include "tsl/platform/logging.h"
24
+ #include "tsl/platform/macros.h"
25
+ #include "tsl/platform/platform.h"
26
+ #include "tsl/platform/types.h"
27
+ #include "tsl/profiler/lib/traceme_encode.h" // IWYU pragma: export
28
+
29
+ #if !defined(IS_MOBILE_PLATFORM)
30
+ #include "tsl/profiler/backends/cpu/traceme_recorder.h"
31
+ #include "tsl/profiler/utils/time_utils.h"
32
+ #endif
33
+
34
+ namespace tsl {
35
+ namespace profiler {
36
+
37
// NOTE: Borrowed from boost C++ libraries. Emulates std::is_invocable
// (C++17): true when F can be called with Args..., tested via whether a
// std::function<void(Args...)> is constructible from a reference to F.
template <typename F, typename... Args>
struct is_invocable
    : std::is_constructible<std::function<void(Args...)>,
                            std::reference_wrapper<std::remove_reference_t<F>>> {
};
44
+
45
// Predefined levels:
// - Level 1 (kCritical) is the default and used only for user
//   instrumentation.
// - Level 2 (kInfo) is used by profiler for instrumenting high level program
//   execution details (expensive TF ops, XLA ops, etc).
// - Level 3 (kVerbose) is also used by profiler to instrument more verbose
//   (low-level) program execution details (cheap TF ops, etc).
enum TraceMeLevel {
  kCritical = 1,
  kInfo = 2,
  kVerbose = 3,
};

// Maps a TF op's cost classification to the TraceMe level used when tracing
// it: expensive ops get level 2 (kInfo, shown by default in the profiler
// UI), cheap ops get level 3 (kVerbose, hidden by default).
inline int GetTFTraceMeLevel(bool is_expensive) {
  if (is_expensive) return kInfo;
  return kVerbose;
}
65
+
66
+ // This class permits user-specified (CPU) tracing activities. A trace activity
67
+ // is started when an object of this class is created and stopped when the
68
+ // object is destroyed.
69
+ //
70
+ // CPU tracing can be useful when trying to understand what parts of GPU
71
+ // computation (e.g., kernels and memcpy) correspond to higher level activities
72
+ // in the overall program. For instance, a collection of kernels maybe
73
+ // performing one "step" of a program that is better visualized together than
74
+ // interspersed with kernels from other "steps". Therefore, a TraceMe object
75
+ // can be created at each "step".
76
+ //
77
+ // Two APIs are provided:
78
+ // (1) Scoped object: a TraceMe object starts tracing on construction, and
79
+ // stops tracing when it goes out of scope.
80
+ // {
81
+ // TraceMe trace("step");
82
+ // ... do some work ...
83
+ // }
84
+ // TraceMe objects can be members of a class, or allocated on the heap.
85
+ // (2) Static methods: ActivityStart and ActivityEnd may be called in pairs.
86
+ // auto id = ActivityStart("step");
87
+ // ... do some work ...
88
+ // ActivityEnd(id);
89
+ // The two static methods should be called within the same thread.
90
class TraceMe {
 public:
  // Constructor that traces a user-defined activity labeled with name
  // in the UI. Level defines the trace priority, used for filtering TraceMe
  // events. By default, traces with TraceMe level <= 2 are recorded. Levels:
  // - Must be a positive integer.
  // - Can be a value in enum TraceMeLevel.
  // Users are welcome to use level > 3 in their code, if they wish to filter
  // out their host traces based on verbosity.
  explicit TraceMe(absl::string_view name, int level = 1) {
    DCHECK_GE(level, 1);
#if !defined(IS_MOBILE_PLATFORM)
    if (TF_PREDICT_FALSE(TraceMeRecorder::Active(level))) {
      // Construct the name in-place inside the NoInit union; a non-zero
      // start_time_ is what marks it as constructed (see Stop()).
      new (&no_init_.name) std::string(name);
      start_time_ = GetCurrentTimeNanos();
    }
#endif
  }

  // Do not allow passing a temporary string as the overhead of generating that
  // string should only be incurred when tracing is enabled. Wrap the temporary
  // string generation (e.g., StrCat) in a lambda and use the name_generator
  // template instead.
  explicit TraceMe(std::string&& name, int level = 1) = delete;

  // Do not allow passing strings by reference or value since the caller
  // may unintentionally maintain ownership of the name.
  // Explicitly wrap the name in a string_view if you really wish to maintain
  // ownership of a string already generated for other purposes. For temporary
  // strings (e.g., result of StrCat) use the name_generator template.
  explicit TraceMe(const std::string& name, int level = 1) = delete;

  // This overload is necessary to make TraceMe's with string literals work.
  // Otherwise, the name_generator template would be used.
  explicit TraceMe(const char* raw, int level = 1)
      : TraceMe(absl::string_view(raw), level) {}

  // This overload only generates the name (and possibly metadata) if tracing
  // is enabled. Useful for avoiding expensive operations (e.g., string
  // concatenation) when tracing is disabled.
  // name_generator may be a lambda or functor that returns a type that the
  // string() constructor can take, e.g., the result of TraceMeEncode.
  // name_generator is templated, rather than a std::function to avoid
  // allocations std::function might make even if never called.
  // Example Usage:
  //   TraceMe trace_me([&]() {
  //     return StrCat("my_trace", id);
  //   }
  //   TraceMe op_trace_me([&]() {
  //     return TraceMeOp(op_name, op_type);
  //   }
  //   TraceMe trace_me_with_metadata([&value1]() {
  //     return TraceMeEncode("my_trace", {{"key1", value1}, {"key2", 42}});
  //   });
  template <typename NameGeneratorT,
            std::enable_if_t<is_invocable<NameGeneratorT>::value, bool> = true>
  explicit TraceMe(NameGeneratorT&& name_generator, int level = 1) {
    DCHECK_GE(level, 1);
#if !defined(IS_MOBILE_PLATFORM)
    if (TF_PREDICT_FALSE(TraceMeRecorder::Active(level))) {
      // name_generator is only invoked when tracing is active, so its cost
      // is not paid otherwise.
      new (&no_init_.name)
          std::string(std::forward<NameGeneratorT>(name_generator)());
      start_time_ = GetCurrentTimeNanos();
    }
#endif
  }

  // Movable.
  TraceMe(TraceMe&& other) { *this = std::move(other); }
  // NOTE(review): this assumes *this is not currently tracing an activity;
  // move-assigning onto an active TraceMe would placement-new over a live
  // string without stopping/recording it. Confirm callers only move-assign
  // into a default-constructed or stopped TraceMe.
  TraceMe& operator=(TraceMe&& other) {
#if !defined(IS_MOBILE_PLATFORM)
    if (TF_PREDICT_FALSE(other.start_time_ != kUntracedActivity)) {
      // Steal the name and start time; leave `other` untraced so its
      // destructor becomes a no-op.
      new (&no_init_.name) std::string(std::move(other.no_init_.name));
      other.no_init_.name.~string();
      start_time_ = std::exchange(other.start_time_, kUntracedActivity);
    }
#endif
    return *this;
  }

  ~TraceMe() { Stop(); }

  // Stop tracing the activity. Called by the destructor, but exposed to allow
  // stopping tracing before the object goes out of scope. Only has an effect
  // the first time it is called.
  void Stop() {
    // We do not need to check the trace level again here.
    // - If tracing wasn't active to start with, we have kUntracedActivity.
    // - If tracing was active and was stopped, we have
    //   TraceMeRecorder::Active().
    // - If tracing was active and was restarted at a lower level, we may
    //   spuriously record the event. This is extremely rare, and acceptable as
    //   event will be discarded when its start timestamp fall outside of the
    //   start/stop session timestamp.
#if !defined(IS_MOBILE_PLATFORM)
    if (TF_PREDICT_FALSE(start_time_ != kUntracedActivity)) {
      if (TF_PREDICT_TRUE(TraceMeRecorder::Active())) {
        TraceMeRecorder::Record(
            {std::move(no_init_.name), start_time_, GetCurrentTimeNanos()});
      }
      // The name was placement-new'd in a constructor; destroy it manually
      // and reset start_time_ so Stop() is idempotent.
      no_init_.name.~string();
      start_time_ = kUntracedActivity;
    }
#endif
  }

  // Appends new_metadata to the TraceMe name passed to the constructor.
  // metadata_generator may be a lambda or functor that returns a type that the
  // string() constructor can take, e.g., the result of TraceMeEncode.
  // metadata_generator is only evaluated when tracing is enabled.
  // metadata_generator is templated, rather than a std::function to avoid
  // allocations std::function might make even if never called.
  // Example Usage:
  //   trace_me.AppendMetadata([&value1]() {
  //     return TraceMeEncode({{"key1", value1}, {"key2", 42}});
  //   });
  template <
      typename MetadataGeneratorT,
      std::enable_if_t<is_invocable<MetadataGeneratorT>::value, bool> = true>
  void AppendMetadata(MetadataGeneratorT&& metadata_generator) {
#if !defined(IS_MOBILE_PLATFORM)
    if (TF_PREDICT_FALSE(start_time_ != kUntracedActivity)) {
      if (TF_PREDICT_TRUE(TraceMeRecorder::Active())) {
        traceme_internal::AppendMetadata(
            &no_init_.name,
            std::forward<MetadataGeneratorT>(metadata_generator)());
      }
    }
#endif
  }

  // Static API, for use when scoped objects are inconvenient.

  // Record the start time of an activity.
  // Returns the activity ID, which is used to stop the activity.
  // Calls `name_generator` to get the name for activity.
  template <typename NameGeneratorT,
            std::enable_if_t<is_invocable<NameGeneratorT>::value, bool> = true>
  static int64_t ActivityStart(NameGeneratorT&& name_generator, int level = 1) {
#if !defined(IS_MOBILE_PLATFORM)
    if (TF_PREDICT_FALSE(TraceMeRecorder::Active(level))) {
      int64_t activity_id = TraceMeRecorder::NewActivityId();
      // A negative end_time encodes the activity id of a start event; the
      // matching ActivityEnd() records the same id as a negative start_time.
      TraceMeRecorder::Record({std::forward<NameGeneratorT>(name_generator)(),
                               GetCurrentTimeNanos(), -activity_id});
      return activity_id;
    }
#endif
    return kUntracedActivity;
  }

  // Record the start time of an activity.
  // Returns the activity ID, which is used to stop the activity.
  static int64_t ActivityStart(absl::string_view name, int level = 1) {
#if !defined(IS_MOBILE_PLATFORM)
    if (TF_PREDICT_FALSE(TraceMeRecorder::Active(level))) {
      int64_t activity_id = TraceMeRecorder::NewActivityId();
      TraceMeRecorder::Record(
          {std::string(name), GetCurrentTimeNanos(), -activity_id});
      return activity_id;
    }
#endif
    return kUntracedActivity;
  }

  // Same as ActivityStart above, an overload for "const std::string&"
  static int64_t ActivityStart(const std::string& name, int level = 1) {
    return ActivityStart(absl::string_view(name), level);
  }

  // Same as ActivityStart above, an overload for "const char*"
  static int64_t ActivityStart(const char* name, int level = 1) {
    return ActivityStart(absl::string_view(name), level);
  }

  // Record the end time of an activity started by ActivityStart().
  static void ActivityEnd(int64_t activity_id) {
#if !defined(IS_MOBILE_PLATFORM)
    // We don't check the level again (see TraceMe::Stop()).
    if (TF_PREDICT_FALSE(activity_id != kUntracedActivity)) {
      if (TF_PREDICT_TRUE(TraceMeRecorder::Active())) {
        TraceMeRecorder::Record(
            {std::string(), -activity_id, GetCurrentTimeNanos()});
      }
    }
#endif
  }

  // Records the time of an instant activity.
  template <typename NameGeneratorT,
            std::enable_if_t<is_invocable<NameGeneratorT>::value, bool> = true>
  static void InstantActivity(NameGeneratorT&& name_generator, int level = 1) {
#if !defined(IS_MOBILE_PLATFORM)
    if (TF_PREDICT_FALSE(TraceMeRecorder::Active(level))) {
      // Equal start and end times mark the event as instantaneous.
      int64_t now = GetCurrentTimeNanos();
      TraceMeRecorder::Record({std::forward<NameGeneratorT>(name_generator)(),
                               /*start_time=*/now, /*end_time=*/now});
    }
#endif
  }

  // Returns whether tracing is active at the given level.
  static bool Active(int level = 1) {
#if !defined(IS_MOBILE_PLATFORM)
    return TraceMeRecorder::Active(level);
#else
    return false;
#endif
  }

  // Returns a fresh activity id (0 on mobile where tracing is compiled out).
  static int64_t NewActivityId() {
#if !defined(IS_MOBILE_PLATFORM)
    return TraceMeRecorder::NewActivityId();
#else
    return 0;
#endif
  }

 private:
  // Start time used when tracing is disabled.
  constexpr static int64_t kUntracedActivity = 0;

  TraceMe(const TraceMe&) = delete;
  void operator=(const TraceMe&) = delete;

  // Wrap the name into a union so that we can avoid the cost of string
  // initialization when tracing is disabled. The string is constructed with
  // placement new only when tracing is active and destroyed manually in
  // Stop() / move-assignment.
  union NoInit {
    NoInit() {}
    ~NoInit() {}
    std::string name;
  } no_init_;

  // kUntracedActivity (0) means no string is live in no_init_.
  int64_t start_time_ = kUntracedActivity;
};
323
+
324
+ // Whether OpKernel::TraceString will populate additional information for
325
+ // profiler, such as tensor shapes.
326
+ inline bool TfOpDetailsEnabled() {
327
+ return TraceMe::Active(TraceMeLevel::kVerbose);
328
+ }
329
+
330
+ } // namespace profiler
331
+ } // namespace tsl
332
+
333
+ #endif // TENSORFLOW_TSL_PROFILER_LIB_TRACEME_H_