diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/call_options.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/call_options.h new file mode 100644 index 0000000000000000000000000000000000000000..6021ba8f1d24eb82ccf5f6958b9ca01d8fd4934f --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/call_options.h @@ -0,0 +1,82 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_CALL_OPTIONS_H_ +#define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_CALL_OPTIONS_H_ + +#include + +#include "tsl/platform/macros.h" +#include "tsl/platform/mutex.h" +#include "tsl/platform/thread_annotations.h" +#include "tsl/platform/types.h" + +namespace tsl { + +// Options passed to interface calls. This class provides portable +// functionality across different RPC systems on top of +// platform-specific mechanisms (for client and server contexts, +// cancellation, etc.). +// +// TODO(zhifengc): Maybe change all RPC methods to take CallOptions. +class CallOptions { + public: + CallOptions(); + + // Cancellation. + // + // The caller may call StartCancel() anytime as long as this + // CallOptions object is alive. 
The callee may or may not receive + // the cancellation notification depending on the rpc layer + // implementation. + void StartCancel(); + + // The callee (the rpc layer implementation) must set a cancellation + // notifier before its blocking operation and clear the notifier + // before the call returns. + // + // "cancel_func" may be called zero, once or more time. Therefore, it + // should _not_ be responsible for memory management of any objects. + // + // "cancel_func" must be very light-weight. It should not block on + // IO or locking. Typically, it just calls the rpc implementation + // layer's specific cancellation mechanism and does nothing else. + // + // NOTE: "cancel_func" itself is pass-by-value. Therefore, we do not + // worry about its ownership here. + typedef std::function CancelFunction; + void SetCancelCallback(CancelFunction cancel_func); + void ClearCancelCallback(); + + // Get and set operation timeout. Timeout value is in milliseconds. + // + // Default: 0. indicating there is no timeout for this call. + int64_t GetTimeout(); + void SetTimeout(int64_t ms); + + private: + mutex mu_; + CancelFunction cancel_func_ TF_GUARDED_BY(mu_); + + // RPC operation timeout in milliseconds. 
+ int64_t timeout_in_ms_ TF_GUARDED_BY(mu_) = 0; + + CallOptions(const CallOptions&) = delete; + void operator=(const CallOptions&) = delete; +}; + +} // namespace tsl + +#endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_CALL_OPTIONS_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/coordination/coordination_client.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/coordination/coordination_client.h new file mode 100644 index 0000000000000000000000000000000000000000..1de6a45fe70e191fa1251b35c91e8f1e879f6135 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/coordination/coordination_client.h @@ -0,0 +1,146 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_CLIENT_H_ +#define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_CLIENT_H_ + +#include +#include + +#include "tsl/distributed_runtime/call_options.h" +#include "tsl/platform/status.h" +#include "tsl/protobuf/coordination_service.pb.h" + +namespace tsl { +using tensorflow::BarrierRequest; +using tensorflow::BarrierResponse; +using tensorflow::CancelBarrierRequest; +using tensorflow::CancelBarrierResponse; +using tensorflow::DeleteKeyValueRequest; +using tensorflow::DeleteKeyValueResponse; +using tensorflow::GetKeyValueDirRequest; +using tensorflow::GetKeyValueDirResponse; +using tensorflow::GetKeyValueRequest; +using tensorflow::GetKeyValueResponse; +using tensorflow::GetTaskStateRequest; +using tensorflow::GetTaskStateResponse; +using tensorflow::HeartbeatRequest; +using tensorflow::HeartbeatResponse; +using tensorflow::InsertKeyValueRequest; +using tensorflow::InsertKeyValueResponse; +using tensorflow::RegisterTaskRequest; +using tensorflow::RegisterTaskResponse; +using tensorflow::ReportErrorToServiceRequest; +using tensorflow::ReportErrorToServiceResponse; +using tensorflow::ReportErrorToTaskRequest; +using tensorflow::ReportErrorToTaskResponse; +using tensorflow::ResetTaskRequest; +using tensorflow::ResetTaskResponse; +using tensorflow::ShutdownTaskRequest; +using tensorflow::ShutdownTaskResponse; +using tensorflow::TryGetKeyValueRequest; +using tensorflow::TryGetKeyValueResponse; +using tensorflow::WaitForAllTasksRequest; +using tensorflow::WaitForAllTasksResponse; + +// Base class of client interface for communicating with coordination service. +// Can be implemented by a variety of transports such as gRPC. 
+class CoordinationClient { + public: + virtual ~CoordinationClient() = default; + + virtual void RegisterTaskAsync(CallOptions* call_opts, + const RegisterTaskRequest* request, + RegisterTaskResponse* response, + StatusCallback done) = 0; + + virtual void HeartbeatAsync(CallOptions* call_opts, + const HeartbeatRequest* request, + HeartbeatResponse* response, + StatusCallback done) = 0; + + virtual void WaitForAllTasksAsync(const WaitForAllTasksRequest* request, + WaitForAllTasksResponse* response, + StatusCallback done) = 0; + + virtual void ShutdownTaskAsync(CallOptions* call_opts, + const ShutdownTaskRequest* request, + ShutdownTaskResponse* response, + StatusCallback done) = 0; + + virtual void ResetTaskAsync(const ResetTaskRequest* request, + ResetTaskResponse* response, + StatusCallback done) = 0; + + virtual void ReportErrorToTaskAsync(CallOptions* call_opts, + const ReportErrorToTaskRequest* request, + ReportErrorToTaskResponse* response, + StatusCallback done) = 0; + + virtual void ReportErrorToServiceAsync( + const ReportErrorToServiceRequest* request, + ReportErrorToServiceResponse* response, StatusCallback done) = 0; + + virtual void GetTaskStateAsync(const GetTaskStateRequest* request, + GetTaskStateResponse* response, + StatusCallback done) = 0; + + virtual void InsertKeyValueAsync(const InsertKeyValueRequest* request, + InsertKeyValueResponse* response, + StatusCallback done) = 0; + + virtual void GetKeyValueAsync(CallOptions* call_opts, + const GetKeyValueRequest* request, + GetKeyValueResponse* response, + StatusCallback done) = 0; + + virtual void TryGetKeyValueAsync(const TryGetKeyValueRequest* request, + TryGetKeyValueResponse* response, + StatusCallback done) = 0; + + virtual void GetKeyValueDirAsync(const GetKeyValueDirRequest* request, + GetKeyValueDirResponse* response, + StatusCallback done) = 0; + + virtual void DeleteKeyValueAsync(const DeleteKeyValueRequest* request, + DeleteKeyValueResponse* response, + StatusCallback done) = 0; + + 
virtual void BarrierAsync(const BarrierRequest* request, + BarrierResponse* response, StatusCallback done) = 0; + + virtual void CancelBarrierAsync(const CancelBarrierRequest* request, + CancelBarrierResponse* response, + StatusCallback done) = 0; +}; + +// Simple wrapper class that can be used to retrieve CoordinationClients. +class CoordinationClientCache { + public: + virtual ~CoordinationClientCache() = default; + + // If the `target` names a remote task, returns a pointer of the + // CoordinationClient object wrapping that channel to the remote task. + virtual CoordinationClient* GetClient(const std::string& target) = 0; + + // If the `target` names a remote task, returns an owned pointer of the + // CoordinationClient object wrapping that channel to the remote task. + virtual std::unique_ptr GetOwnedClient( + const std::string& target) = 0; +}; + +} // namespace tsl + +#endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_CLIENT_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/coordination/coordination_service.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/coordination/coordination_service.h new file mode 100644 index 0000000000000000000000000000000000000000..d95d4f7f54d0df976b27b4f41fdc9c7ab807b064 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/coordination/coordination_service.h @@ -0,0 +1,253 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_SERVICE_H_ +#define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_SERVICE_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "absl/time/time.h" +#include "tsl/distributed_runtime/coordination/coordination_client.h" +#include "tsl/platform/status.h" +#include "tsl/platform/statusor.h" +#include "tsl/protobuf/coordination_config.pb.h" + +namespace tsl { +class Env; + +// Static registration for coordination service implementations. +#define REGISTER_COORDINATION_SERVICE(service_type_name, factory_fn) \ + REGISTER_COORDINATION_SERVICE_UNIQ_HELPER(__COUNTER__, service_type_name, \ + factory_fn) +#define REGISTER_COORDINATION_SERVICE_UNIQ_HELPER(counter, service_type_name, \ + factory_fn) \ + static bool static_coordination_service_##counter TF_ATTRIBUTE_UNUSED = \ + []() { \ + ::tsl::CoordinationServiceInterface::RegisterCoordinationService( \ + service_type_name, std::move(factory_fn)); \ + return true; \ + }() + +// Coordination service is used for controlling and coordinating distributed +// execution in a cluster of multiple tasks. +// +// When enabled, the service keeps track of cluster configurations and the state +// of cluster members. TF runtime and libraries can use it to orchastrate +// cluster initialization, check the healthiness of tasks, and propagate error +// messages to the cluster. +// +// Normally, the service should first Start(), then perform the supported +// coordination operations, and finally Stop(). When service runs into error or +// SetError() is called, all subsequent operations will be in error state. +// +// CoordinationServiceInterface defines the service interface for distributed +// coordination. 
One instance of the service should be deployed in a cluster, +// handling various requests and stores configuration key-value data for the +// tasks. Each task interacts with the service through CoordinationServiceAgent. +class CoordinationServiceInterface { + public: + using CoordinationServiceFactory = + std::function( + Env* env, const tensorflow::CoordinationServiceConfig& config, + std::unique_ptr cache)>; + + using StatusOrValueCallback = + std::function&)>; + + virtual ~CoordinationServiceInterface() = default; + + static void RegisterCoordinationService( + const std::string& service_type_name, + CoordinationServiceFactory factory_fn) { + auto factories = GetCoordinationServiceFactories(); + factories->emplace(service_type_name, factory_fn); + } + + static std::unique_ptr + EnableCoordinationService(Env* env, + const tensorflow::CoordinationServiceConfig& config, + std::unique_ptr cache) { + const auto* factories = GetCoordinationServiceFactories(); + auto factories_iter = factories->find(config.service_type()); + if (factories_iter == factories->end()) { + LOG(ERROR) << "No coordination service factory found for service type " + << config.service_type(); + return nullptr; + } + auto service = factories_iter->second(env, config, std::move(cache)); + if (service != nullptr) { + *GetCoordinationServiceInstancePtr() = service.get(); + } + return service; + } + + static CoordinationServiceInterface* GetCoordinationServiceInstance() { + return *GetCoordinationServiceInstancePtr(); + } + + // This function is invoked after each task's local devices are appended in a + // deterministic order during WaitForAllTasks(). This is useful to convert the + // result into another message, or set global device ids. + virtual void SetDeviceAggregationFunction( + std::function< + tensorflow::DeviceInfo(const tensorflow::DeviceInfo& devices)> + post_aggregate_device_fn) = 0; + + // Register a task to the service. 
+ // Possible service errors: + // - InvalidArgument: Unexpected task request. + // - Aborted: (1) task is in error state, or (2) task is in connected state + // with a different incarnation, indicating that it restarted. + virtual Status RegisterTask(const tensorflow::CoordinatedTask& task, + uint64_t incarnation) = 0; + + // Wait for all tasks to be up and running, and register local device + // info. The callback is invoked when all tasks are up and registered, or some + // error occurs. + // Each task's local devices will be appended in a deterministic order, and + // post-processed by the callback in SetDeviceAggregationFunction() (if set). + virtual void WaitForAllTasks(const tensorflow::CoordinatedTask& task, + const tensorflow::DeviceInfo& devices, + StatusCallback done) = 0; + + // Disconnects task from the service. If `shutdown_barrier_timeout_in_ms` is + // specified in the config, blocks until all tasks reach the barrier before + // disconnecting together. + // Possible service errors: + // - InvalidArgument: Unexpected task request. + // - FailedPrecondition: task has already disconnected. + virtual void ShutdownTaskAsync(const tensorflow::CoordinatedTask& task, + StatusCallback done) = 0; + + // Disconnects task from the service and cleans up its internal error state. + // Possible service errors: + // - InvalidArgument: Unexpected task request. + // - FailedPrecondition: task has already disconnected. + virtual Status ResetTask(const tensorflow::CoordinatedTask& task) = 0; + + // Update the heartbeat timestamp of a task. This should only be invoked on + // the leader of the cluster. + virtual Status RecordHeartbeat(const tensorflow::CoordinatedTask& task, + uint64_t incarnation) = 0; + + // Set a task in error state permanently. + virtual Status ReportTaskError(const tensorflow::CoordinatedTask& task, + Status error) = 0; + + // Get the state and the error status of the tasks. 
+ virtual std::vector GetTaskState( + const std::vector& task) = 0; + + // Insert a configuration key-value in the coordination service. + // For now, a key-value can only be inserted once and cannot be updated. + // The key-values are not persisted and will be lost if the leader fails. + virtual Status InsertKeyValue(const std::string& key, + const std::string& value) = 0; + + // Get a configuration key-value from the coordination service. The `done` + // callback is invoked when the key-value becomes available. + virtual void GetKeyValueAsync(const std::string& key, + StatusOrValueCallback done) = 0; + + // Get a configuration key-value from the coordination service. If the key + // does not exist, return NotFound error. + virtual StatusOr TryGetKeyValue(const std::string& key) = 0; + + // Gets all values under a directory (key). + // A value is considered to be in the directory if its key is prefixed with + // the directory. This is not a blocking call. Agent does not need to be + // connected to utilize the distributed key-value store. + virtual std::vector GetKeyValueDir( + absl::string_view directory_key) = 0; + + // Delete configuration key-value. If key is a directory, recursively clean + // up all key-values under the directory. + virtual Status DeleteKeyValue(const std::string& key) = 0; + + // Blocks until all (or a subset of) tasks are at the barrier or the barrier + // fails. + // + // `barrier_id` should be unique across barriers. Once the barrier has passed + // or failed, subsequent calls will not block, and immediately respond with + // the previous response. + // + // The first WaitAtBarrier() call received by the service for a particular + // barrier id is special in that it determines the barrier deadline based on + // timeout duration. + // However, if subsequent calls by different agents specify a different set of + // `participating_tasks` for the same `barrier_id`, the barrier will fail + // instantly. 
+ // + // If no tasks are specified (default), the barrier will block for all the + // connected tasks. + // + // Possible service errors: + // - DeadlineExceeded: Timed out waiting for specified tasks at the barrier. + // Deadline is determined by the server timestamp when it receives the + // first WaitAtBarrier() + timeout duration. + // - Cancelled: One of the tasks called CancelBarrier(). + // - Aborted: Service is shutting down. + // - Internal: Any participating task is in ERROR state. + // - InvalidArgument: (1) Conflicting tasks specified by different agents + // for the same barrier, (2) one of the participating tasks is not in + // the cluster, or (3) task making the request is not included in the + // list of participating tasks. + // - FailedPrecondition: Agent is in UNINITIALIZED or ERROR state. + virtual void BarrierAsync( + const std::string& barrier_id, absl::Duration timeout, + const tensorflow::CoordinatedTask& task, + const std::vector& participating_tasks, + StatusCallback done) = 0; + + // Aborts the barrier if it is ongoing. + // Current and future WaitAtBarrier() calls with the same id will return a + // CANCELLED error status. + // Possible service errors: + // - FailedPrecondition: Barrier has already been passed. 
+ virtual Status CancelBarrier(const std::string& barrier_id, + const tensorflow::CoordinatedTask& task) = 0; + + private: + friend class CoordinationServiceRpcHandler; + friend class CoordinationServiceTest_ListClusterDevices_TfDevice_Test; + friend class CoordinationServiceTest_ListClusterDevices_XlaDevice_Test; + friend class + CoordinationServiceTest_ListClusterDevices_DevicesAreNotAddedTwice_Test; + + virtual const tensorflow::DeviceInfo& ListClusterDevices() = 0; + virtual uint64_t GetServiceIncarnation() = 0; + + static std::unordered_map* + GetCoordinationServiceFactories() { + static auto* coordination_service_factories = + new std::unordered_map(); + return coordination_service_factories; + } + + static CoordinationServiceInterface** GetCoordinationServiceInstancePtr() { + static CoordinationServiceInterface* instance = nullptr; + return &instance; + } +}; + +} // namespace tsl + +#endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_SERVICE_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/coordination/coordination_service_rpc_handler.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/coordination/coordination_service_rpc_handler.h new file mode 100644 index 0000000000000000000000000000000000000000..cb92ea68bb173d2634b6e1cab7749f7b440d25ce --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/coordination/coordination_service_rpc_handler.h @@ -0,0 +1,102 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_SERVICE_RPC_HANDLER_H_ +#define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_SERVICE_RPC_HANDLER_H_ + +#include "tsl/distributed_runtime/coordination/coordination_service.h" +#include "tsl/distributed_runtime/coordination/coordination_service_agent.h" +#include "tsl/platform/mutex.h" +#include "tsl/platform/status.h" +#include "tsl/platform/thread_annotations.h" +#include "tsl/protobuf/coordination_service.pb.h" + +namespace tsl { +class CoordinationServiceRpcHandler { + public: + explicit CoordinationServiceRpcHandler() {} + + void SetAgentInstance(CoordinationServiceAgent* agent); + + void SetServiceInstance(CoordinationServiceInterface* service); + + void RegisterTaskAsync(const tensorflow::RegisterTaskRequest* request, + tensorflow::RegisterTaskResponse* response, + StatusCallback done); + + void HeartbeatAsync(const tensorflow::HeartbeatRequest* request, + tensorflow::HeartbeatResponse* response, + StatusCallback done); + + void WaitForAllTasksAsync(const tensorflow::WaitForAllTasksRequest* request, + tensorflow::WaitForAllTasksResponse* response, + StatusCallback done); + + void ShutdownTaskAsync(const tensorflow::ShutdownTaskRequest* request, + tensorflow::ShutdownTaskResponse* response, + StatusCallback done); + + void ResetTaskAsync(const tensorflow::ResetTaskRequest* request, + tensorflow::ResetTaskResponse* response, + StatusCallback done); + + void ReportErrorToTaskAsync( + const 
tensorflow::ReportErrorToTaskRequest* request, + tensorflow::ReportErrorToTaskResponse* response, StatusCallback done); + + void ReportErrorToServiceAsync( + const tensorflow::ReportErrorToServiceRequest* request, + tensorflow::ReportErrorToServiceResponse* response, StatusCallback done); + + void GetTaskStateAsync(const tensorflow::GetTaskStateRequest* request, + tensorflow::GetTaskStateResponse* response, + StatusCallback done); + + void InsertKeyValueAsync(const tensorflow::InsertKeyValueRequest* request, + tensorflow::InsertKeyValueResponse* response, + StatusCallback done); + + void GetKeyValueAsync(const tensorflow::GetKeyValueRequest* request, + tensorflow::GetKeyValueResponse* response, + StatusCallback done); + + void TryGetKeyValueAsync(const tensorflow::TryGetKeyValueRequest* request, + tensorflow::TryGetKeyValueResponse* response, + StatusCallback done); + + void GetKeyValueDirAsync(const tensorflow::GetKeyValueDirRequest* request, + tensorflow::GetKeyValueDirResponse* response, + StatusCallback done); + + void DeleteKeyValueAsync(const tensorflow::DeleteKeyValueRequest* request, + tensorflow::DeleteKeyValueResponse* response, + StatusCallback done); + + void BarrierAsync(const tensorflow::BarrierRequest* request, + tensorflow::BarrierResponse* response, StatusCallback done); + + void CancelBarrierAsync(const tensorflow::CancelBarrierRequest* request, + tensorflow::CancelBarrierResponse* response, + StatusCallback done); + + private: + mutex mu_; + CoordinationServiceAgent* agent_ TF_GUARDED_BY(mu_) = nullptr; + CoordinationServiceInterface* service_ TF_GUARDED_BY(mu_) = nullptr; +}; + +} // namespace tsl + +#endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_SERVICE_RPC_HANDLER_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/preemption/preemption_notifier.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/preemption/preemption_notifier.h new file 
mode 100644 index 0000000000000000000000000000000000000000..53941ceea6493e412ccf148338dca0aa46787c3c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/preemption/preemption_notifier.h @@ -0,0 +1,147 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_PREEMPTION_PREEMPTION_NOTIFIER_H_ +#define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_PREEMPTION_PREEMPTION_NOTIFIER_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/strings/str_join.h" +#include "absl/time/time.h" +#include "tsl/platform/env.h" +#include "tsl/platform/mutex.h" +#include "tsl/platform/statusor.h" + +namespace tsl { + +// Static registration for preemption notifiers. +#define REGISTER_PREEMPTION_NOTIFIER(notifier_type_name, factory_fn) \ + REGISTER_PREEMPTION_NOTIFIER_UNIQ_HELPER(__COUNTER__, notifier_type_name, \ + factory_fn) +#define REGISTER_PREEMPTION_NOTIFIER_UNIQ_HELPER(counter, notifier_type_name, \ + factory_fn) \ + static bool static_preemption_notifier_##counter TF_ATTRIBUTE_UNUSED = \ + []() { \ + ::tsl::PreemptionNotifier::RegisterPreemptionNotifier( \ + notifier_type_name, factory_fn); \ + return true; \ + }() + +// Base class for listening and propagating task preemption notices. 
+// +// This class provides common mechanism to block on waiting for preemption +// signals, or register callbacks that will be triggered upon preemption. +// +// Example: +// +// // Monitors the SIGTERM preemption signal +// notifier = PreemptionNotifier::CreatePreemptionNotifier("sigterm", env); +// +// // Register callback that will be invoked once preempted +// notifier->WillBePreemptedAtAsync( +// [](StatusOr status_or_time) { +// if (status_or_time.ok()) { +// LOG(INFO) << "Preempted at time: " << status_or_time.value(); +// } else { +// LOG(ERROR) << "Received error: " << status_or_time.status(); +// } +// }); +// +// // Block current thread until preemption +// absl::Time preempt_time = notifier->WillBePreemptedAt().value(); +// +// Users can extend this class to support custom preemption signals, by subclass +// `PreemptionNotifier` with a custom constructor, register its creator (factory +// function) with `REGISTER_PREEMPTION_NOTIFIER`. The custom constructor should +// set up the communication with the cluster scheduler, and invoke the +// `NotifyRegisteredListeners` method once a preemption signal is received. +// See `SigtermNotifier` as an example. 
+ +class PreemptionNotifier { + public: + typedef std::function)> PreemptTimeCallback; + using PreemptionNotifierFactory = + std::function(Env* env)>; + + explicit PreemptionNotifier(Env* env) : env_(env) {} + virtual ~PreemptionNotifier() = default; + + static void RegisterPreemptionNotifier(const std::string& notifier_type_name, + PreemptionNotifierFactory factory_fn) { + GetPreemptionNotifierFactories()->emplace(notifier_type_name, + std::move(factory_fn)); + } + + static std::unique_ptr CreatePreemptionNotifier( + const std::string& notifier_type, Env* env) { + const auto* factories = GetPreemptionNotifierFactories(); + auto it = factories->find(notifier_type); + if (it == factories->end()) { + std::vector registered_types; + registered_types.reserve(factories->size()); + for (auto& kv : *factories) { + registered_types.push_back(kv.first); + } + LOG(ERROR) << "No preemption notifier factory found for notifier type " + << notifier_type + << ". All registered preemption notifier types are: " + << absl::StrJoin(registered_types, ", ") + << ". Make sure the library is loaded to the program."; + return nullptr; + } + return it->second(env); + } + + // This is a blocking call that returns a death time when preemption / + // termination will occur once the listener receives the preemption + // notification. If no death time is specified, absl::Now() is returned. + // Returns error::Cancelled if UnregisterListeners() is called. + StatusOr WillBePreemptedAt(); + + // Registers a callback that takes the death time as input once the listener + // receives the preemption notification. + // If no death time is specified, absl::Now() is specified as input. + // Note: callback should be kept as simple and fast as possible (e.g. simply + // retrieve result). It should not wait for work done by another callback, and + // invoke ahy PreemptionNotifier method (e.g. Reset(), destructor). 
+ void WillBePreemptedAtAsync(PreemptTimeCallback callback); + + protected: + Env* GetEnv() { return env_; } + // Invokes all pending callbacks upon receipt of preemption notice with death + // time or errors (e.g. cancellation during shutdown). + void NotifyRegisteredListeners(StatusOr death_time); + + private: + static std::unordered_map* + GetPreemptionNotifierFactories() { + static auto* preemption_notifier_factories = + new std::unordered_map(); + return preemption_notifier_factories; + } + + Env* env_; // Not owned. + mutex mu_; + absl::Time death_time_ TF_GUARDED_BY(mu_) = absl::InfinitePast(); + std::vector callbacks_ TF_GUARDED_BY(mu_); +}; + +} // namespace tsl + +#endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_PREEMPTION_PREEMPTION_NOTIFIER_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_call.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_call.h new file mode 100644 index 0000000000000000000000000000000000000000..73d285e3ef8b8c71ecc7db07c28cb1c353586b82 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_call.h @@ -0,0 +1,521 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CALL_H_ +#define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CALL_H_ + +#include "grpcpp/completion_queue.h" +#include "grpcpp/impl/service_type.h" +#include "grpcpp/server_builder.h" +#include "grpcpp/server_context.h" +#include "grpcpp/support/async_stream.h" +#include "grpcpp/support/async_unary_call.h" +#include "tsl/platform/mutex.h" +#include "tsl/platform/refcount.h" + +namespace tsl { + +// CALL STRUCTURES +// =============== +// +// Each pending (incoming) request corresponds to a call object that +// encapsulates the state of the call. Templates and +// pointers-to-member functions are used to avoid boilerplate and +// redundant closure creation. The class hierarchy is as follows: +// +// * `UntypedCall`: The base class represents a call that +// could be associated with any of the methods on a service of type +// `Service`. Also defines a `Tag` nested class that can be used as +// the tag in a `grpc::CompletionQueue`. Each class that +// instantiates `Service` should have a completion queue polling +// loop that knows about `UntypedCall::Tag` objects, and +// invokes their `OnCompleted()` method to continue processing. +// +// * `Call`: This class extends +// `UntypedCall` and is additionally parameterized by the +// gRPC-generated asynchronous service class, and the request and +// response message types. It defines the state associated with a +// call (whose type depends on the message types), and stores a +// pointer to a `Service::HandleFoo()` handler method. Each +// `Service::HandleFoo()` method knows about the corresponding +// `Call` type, in order to access its state, and invoke its +// `SendResponse()` method. +// +// The lifecycle of a call object is as follows. +// +// 1. A `Service` creates a `Call` for a particular method and +// enqueues it in its completion queue (via an +// `UntypedCall::Tag`). 
+// +// 2. When the tag is returned from `cq_->Next()`, the +// `UntypedCall::RequestReceived()` method is invoked and takes +// ownership of the call object. This indirectly invokes the +// appropriate handler method on `Service`. +// +// 3. After the response has been written (perhaps in another thread), +// the `Call::SendResponse()` method is invoked. It transfers +// ownership of the call object back to the completion queue (via +// an `UntypedCall::Tag`). +// +// 4. When the response has been sent, the tag is returned from +// `cq_->Next()`, and the call object is deleted. +// + +template +class GrpcCallTag { + public: + virtual ~GrpcCallTag() {} + + // Calls the callback associated with this tag. + virtual void OnCompleted(Service* service, bool ok) = 0; +}; + +// Represents a pending request with unknown message types. +template +class UntypedCall : public core::RefCounted { + public: + virtual ~UntypedCall() {} + + // The implementation of this method should use `service` to handle + // an incoming request, and (perhaps asynchronously) send the + // response. + // + // One reference on `this` is transferred to the callee, and the + // callee is responsible for releasing it (typically via + // `Call::SendResponse()`). + // + // `ok` is true if the request was received in a "regular event", + // otherwise false. + virtual void RequestReceived(Service* service, bool ok) = 0; + + // This method will be called either (i) when the server is notified + // that the request has been canceled, or (ii) when the request completes + // normally. The implementation should distinguish these cases by querying + // the `grpc::ServerContext` associated with the request. + virtual void RequestCancelled(Service* service, bool ok) = 0; + + // Associates a tag in a `::grpc::CompletionQueue` with a callback + // for an incoming RPC. An active Tag owns a reference on the corresponding + // Call object. 
+ class Tag : public GrpcCallTag { + public: + // One enum value per supported callback. + enum Callback { kRequestReceived, kResponseSent, kCancelled }; + + Tag(UntypedCall* call, Callback cb) : call_(call), callback_(cb) {} + + // Calls the callback associated with this tag. + // + // The callback takes ownership of `this->call_`. + void OnCompleted(Service* service, bool ok) override { + switch (callback_) { + case kRequestReceived: + call_->RequestReceived(service, ok); + break; + case kResponseSent: + // No special handling needed apart from the Unref below. + break; + case kCancelled: + call_->RequestCancelled(service, ok); + break; + } + call_->Unref(); // Ref acquired when tag handed to grpc. + } + + private: + UntypedCall* const call_; // `this` owns one reference. + Callback callback_; + }; +}; + +// Represents a pending call with known request and response message +// types, and a known request-handling method. +template +class Call : public UntypedCall { + public: + // Represents the generic signature of a generated + // `GrpcService::RequestFoo()` method, where `Foo` is the name of an + // RPC method. + using EnqueueFunction = void (GrpcService::*)( + ::grpc::ServerContext*, RequestMessage*, + ::grpc::ServerAsyncResponseWriter*, + ::grpc::CompletionQueue*, ::grpc::ServerCompletionQueue*, void*); + + // Represents the generic signature of a `Service::HandleFoo()` + // method, where `Foo` is the name of an RPC method. + using HandleRequestFunction = void (Service::*)( + Call*); + + Call(HandleRequestFunction handle_request_function) + : handle_request_function_(handle_request_function), responder_(&ctx_) {} + + virtual ~Call() {} + + void RequestReceived(Service* service, bool ok) override { + if (ok) { + this->Ref(); + (service->*handle_request_function_)(this); + } + } + + void SendResponse(::grpc::Status status) { + this->Ref(); // Ref for grpc; released in Tag callback. 
+ responder_.Finish(response, status, &response_sent_tag_); + this->Unref(); + } + + void RequestCancelled(Service* service, bool ok) override { + if (ctx_.IsCancelled()) { + mutex_lock l(mu_); + if (cancel_callback_) { + cancel_callback_(); + } + } + } + + // Registers `callback` as the function that should be called if and when this + // call is canceled by the client. + void SetCancelCallback(std::function callback) { + mutex_lock l(mu_); + cancel_callback_ = std::move(callback); + } + + // Clears any cancellation callback that has been registered for this call. + void ClearCancelCallback() { + mutex_lock l(mu_); + cancel_callback_ = nullptr; + } + + // Enqueues a new request for the given service on the given + // completion queue, using the given `enqueue_function`. + // + // The request will be handled with the given + // `handle_request_function`. + static void EnqueueRequest(GrpcService* grpc_service, + ::grpc::ServerCompletionQueue* cq, + EnqueueFunction enqueue_function, + HandleRequestFunction handle_request_function, + bool supports_cancel) { + auto call = new Call( + handle_request_function); + if (supports_cancel) { + call->RegisterCancellationHandler(); + } + + // Initial ref for call handed to grpc; released in Tag callback. + (grpc_service->*enqueue_function)(&call->ctx_, &call->request, + &call->responder_, cq, cq, + &call->request_received_tag_); + } + + // Enqueues a new request for the given service on the given + // completion queue, using the given `method_id`. + // + // The request will be handled with the given + // `handle_request_function`. + static void EnqueueRequestForMethod( + GrpcService* grpc_service, ::grpc::ServerCompletionQueue* cq, + int method_id, HandleRequestFunction handle_request_function, + bool supports_cancel) { + auto call = new Call( + handle_request_function); + if (supports_cancel) { + call->RegisterCancellationHandler(); + } + + // Initial ref for call handed to grpc; released in Tag callback. 
+ grpc_service->RequestAsyncUnary(method_id, &call->ctx_, &call->request, + &call->responder_, cq, cq, + &call->request_received_tag_); + } + + RequestMessage request; + ResponseMessage response; + + const std::multimap<::grpc::string_ref, ::grpc::string_ref>& client_metadata() + const { + return ctx_.client_metadata(); + } + + private: + // Creates a completion queue tag for handling cancellation by the client. + // NOTE: This method must be called before this call is enqueued on a + // completion queue. + void RegisterCancellationHandler() { + this->Ref(); // Ref for grpc; released in Tag callback. + ctx_.AsyncNotifyWhenDone(&cancelled_tag_); + } + + HandleRequestFunction handle_request_function_; + ::grpc::ServerContext ctx_; + ::grpc::ServerAsyncResponseWriter responder_; + + // Used as void* completion markers from grpc to indicate different + // events of interest for a Call. + typedef typename UntypedCall::Tag Tag; + Tag request_received_tag_{this, Tag::kRequestReceived}; + Tag response_sent_tag_{this, Tag::kResponseSent}; + Tag cancelled_tag_{this, Tag::kCancelled}; + + mutex mu_; + std::function cancel_callback_ TF_GUARDED_BY(mu_); +}; + +// Lifetime of a server-side bidirectional streaming call: +// - The call is created in the static EnqueueRequest method. It transfers +// ownership to the kCallOpen tag pushed onto the completion queue. +// - If kCallOpen completes successfully, a read is requested and the +// kRequestReceived tag takes ownership of the call. If kCallOpen fails, +// e.g. server is shutdown, no further requests are pushed and the call is +// destroyed (at the end of Tag::OnCompleted). +// - When the first request is received, we Ref() the call and invoke the +// handler method thereby transferring ownership to the handler method. +// The handler is responsible for calling SendResponse() or Finish() on this +// call. +// - If the handler calls Finish(), e.g. 
the request was invalid, Finish() +// transfers ownership from the handler to the kServerFinished tag that +// it pushes on the completion queue. The ownership is transferred because +// the ref count is not incremented before putting the tag on the queue. +// - If the handler calls SendResponse(), SendResponse() transfers ownership +// to the kResponseSent tag. +// - When kResponseSent completes, we request a new read, which owns the call +// now. +// - When the next request is received, it is handled the same way as the first +// request. +// +// Because we request a read only after the write is sent, we can safely reuse +// the same request and response messages for the whole call. +template +class ServerUntypedBidirectionalStreamingCall : public core::RefCounted { + public: + virtual void RequestReceived(Service* service) = 0; + + // Enqueues a request on the completion queue to read the next request. + virtual void CallOpen() = 0; + + virtual void RequestRead() = 0; + + // Associates a tag in a `::grpc::CompletionQueue` with a callback. + // An active Tag owns a reference on the corresponding Call object. + class Tag : public GrpcCallTag { + public: + // One enum value per supported callback. + enum class TagType { + kCallOpen, + kRequestReceived, + kResponseSent, + kServerFinished, + }; + + Tag(ServerUntypedBidirectionalStreamingCall* call, TagType cb) + : call_(call), callback_(cb) {} + + // Calls the callback associated with this tag and Unrefs this->call_. + void OnCompleted(Service* service, bool ok) override { + switch (callback_) { + case TagType::kCallOpen: + // Non-ok value indicates that the server has been shutdown before we + // received a message for this call type. We do nothing to let this + // call object be destroyed and avoid enqueuing request for another + // call. 
+ if (ok) { + call_->CallOpen(); + } + break; + case TagType::kRequestReceived: + // Non-ok value from completion queue here means that we will not + // receive any more messages from the client, e.g. the client called + // WritesDone. There is nothing we need to do in this case. The call + // will be Unref'ed and deleted. If the client wants to open a new + // call, we have already enqueued a request for a new call in CallOpen + // above. + if (ok) { + call_->RequestReceived(service); + } + break; + case TagType::kResponseSent: + if (ok) { + // The obvious place to request a read would be at the end of + // RequestReceived(). Unfortunately, this can result in multiple + // outstanding write requests in the completion queue. This is + // currently not supported by gRPC, which requires at most one + // outstanding write request in the completion queue. + // Requesting a read here, in ResponseSent, works because at + // this point, the completion queue has no write requests + // (kResponseSent happens when a write completes). + // This might be synchronizing the processing more than strictly + // necessary, but is probably fine because, AFAICT from gRPC docs, + // the write request completes as soon as it can be written to + // outgoing buffer. + call_->RequestRead(); + } + // ok == false means that the response is not going on the wire + // because the call is already dead (i.e., canceled, deadline + // expired, other side dropped the channel, etc). Since the call is + // dead, there is nothing for us to do, we just let the call be + // deleted. + break; + case TagType::kServerFinished: + // Whether our finish request is successful or not (whether it went + // on the wire towards the client), there is nothing for us to do. + // In the current implementation, there can be no read or write + // requests in the completion queue (see the comment in kResponseSent) + // above. 
Even if there were pending requests, they would complete + // with a non-ok status, we would not do anything, and let the call be + // deleted. + break; + } + call_->Unref(); // Ref acquired when tag was handed to grpc. + } + + private: + ServerUntypedBidirectionalStreamingCall* const + call_; // `this` owns one reference. + TagType callback_; + }; +}; + +// Represents a pending call with known request and response message +// types, and a known request-handling method. +// Common usage pattern is to have a single thread waiting on events from +// completion queue and calling Tag::OnCompleted(), which invokes methods +// on this. +// This implementation assumes that the server will generate a single response +// message for each request message. More precisely, this class expects that +// each time it invokes handle_request_function_, the service implementation +// will either call SendResponse or Finish exactly once. +// Not thread-safe. +template +class ServerBidirectionalStreamingCall + : public ServerUntypedBidirectionalStreamingCall { + public: + // Represents the generic signature of a generated + // `GrpcService::RequestFoo()` method, where `Foo` is the name of an + // RPC method. + using EnqueueFunction = void (GrpcService::*)( + ::grpc::ServerContext*, + ::grpc::ServerAsyncReaderWriter*, + ::grpc::CompletionQueue*, ::grpc::ServerCompletionQueue*, void*); + + // Represents the generic signature of a `Service::HandleFoo()` + // method, where `Foo` is the name of an RPC method. 
+ using HandleRequestFunction = void (Service::*)( + ServerBidirectionalStreamingCall*); + + ServerBidirectionalStreamingCall( + HandleRequestFunction handle_request_function, GrpcService* grpc_service, + ::grpc::ServerCompletionQueue* cq, EnqueueFunction enqueue_function) + : handle_request_function_(handle_request_function), + stream_(&ctx_), + grpc_service_(grpc_service), + cq_(cq), + enqueue_function_(enqueue_function) { + VLOG(3) << "Creating ServerBidirectionalStreamingCall " << this; + } + + ~ServerBidirectionalStreamingCall() override { + VLOG(3) << "Destroying ServerBidirectionalStreamingCall " << this; + } + + void CallOpen() override { + // Let gRPC know that we can accept another call. + ServerBidirectionalStreamingCall< + Service, GrpcService, RequestMessage, + ResponseMessage>::EnqueueRequest(grpc_service_, cq_, enqueue_function_, + handle_request_function_); + RequestRead(); + } + + void RequestRead() override { + this->Ref(); + request_.Clear(); + stream_.Read(&request_, &request_received_tag_); + } + + void RequestReceived(Service* service) override { + this->Ref(); + // Request handling should result in a call to SendResponse or Finish. + (service->*handle_request_function_)(this); + } + + void SendResponse() { + // Transferring ownership of this to the response_sent_tag_. + stream_.Write(response_, &response_sent_tag_); + // stream_.Write does not save references to response_. We are free to muck + // around with it as soon as Write returns. + // We clear the response_ to prepare it for the next response. + response_.Clear(); + } + + void Finish(::grpc::Status status) { + // Transferring ownership of this to the server_finished_tag_. + stream_.Finish(status, &server_finished_tag_); + } + + // Enqueues a new request for the given service on the given + // completion queue, using the given `enqueue_function`. + // + // The request will be handled by the given `handle_request_function`. 
+ static void EnqueueRequest(GrpcService* grpc_service, + ::grpc::ServerCompletionQueue* cq, + EnqueueFunction enqueue_function, + HandleRequestFunction handle_request_function) { + auto call = + new ServerBidirectionalStreamingCall( + handle_request_function, grpc_service, cq, enqueue_function); + + // Initial ref for call handed to grpc; released in Tag callback. + (grpc_service->*enqueue_function)(&call->ctx_, &call->stream_, cq, cq, + &call->call_open_tag_); + } + + const RequestMessage& request() const { return request_; } + ResponseMessage* mutable_response() { return &response_; } + + private: + // Request and response messages are reused for each request/response exchange + // between the client and the server. + RequestMessage request_; + ResponseMessage response_; + ::grpc::ServerContext ctx_; + + HandleRequestFunction handle_request_function_; + ::grpc::ServerAsyncReaderWriter stream_; + + // Used as void* completion markers from grpc to indicate different + // events of interest for a ServerBidirectionalStreamingCall. + typedef typename ServerUntypedBidirectionalStreamingCall::Tag Tag; + // At most one tag of each kind may be given to gRPC at any one time. + // Beyond semantic sanity, this is needed to ensure proper ref counting + // of this call object. + Tag call_open_tag_{this, Tag::TagType::kCallOpen}; + Tag request_received_tag_{this, Tag::TagType::kRequestReceived}; + Tag response_sent_tag_{this, Tag::TagType::kResponseSent}; + Tag server_finished_tag_{this, Tag::TagType::kServerFinished}; + + // These fields are used only to spawn another instance of this to accept + // more streaming calls. 
+ GrpcService* grpc_service_; + ::grpc::ServerCompletionQueue* cq_; + EnqueueFunction enqueue_function_; +}; + +} // namespace tsl + +#endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CALL_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_channel.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_channel.h new file mode 100644 index 0000000000000000000000000000000000000000..b019377f9986dd312b5742749a9d687b0fb83c1c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_channel.h @@ -0,0 +1,100 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CHANNEL_H_ +#define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CHANNEL_H_ + +#include +#include +#include +#include +#include + +#include "grpcpp/grpcpp.h" +#include "tsl/distributed_runtime/rpc/grpc_util.h" +#include "tsl/protobuf/rpc_options.pb.h" + +namespace tsl { +using tensorflow::RPCOptions; + +// Consolidated parameter structure to ease use of generic interfaces. 
+// +// Each job_id requires: +// - a list of host:port (or sparse list of index:host:port) +// - the number of tasks per replica +class GrpcChannelSpec { + public: + struct HostPortsJob { + HostPortsJob(const string& job_id, const std::map& host_ports) + : job_id(job_id), host_ports(host_ports) {} + const string job_id; + const std::map host_ports; + }; + + Status AddHostPortsJob(const string& job_id, + const std::map& host_ports); + + const std::vector& host_ports_jobs() const { + return host_ports_jobs_; + } + + private: + std::vector host_ports_jobs_; + std::set job_ids_; +}; + +class GrpcChannelCache { + public: + virtual ~GrpcChannelCache() {} + + // Populates *workers with names of all workers which this object + // was created to handle. Worker names are in the format + // /job:/task: + // e.g. /job:mnist/task:2 + virtual void ListWorkers(std::vector* workers) = 0; + virtual void ListWorkersInJob(const string& job_name, + std::vector* workers) = 0; + + // If found, returns a gRPC channel that is connected to the remote + // worker named by 'target'. 'target' is of the following + // format: /job:/task: + // E.g., /job:mnist/task:2 + virtual SharedGrpcChannelPtr FindWorkerChannel(const string& target) = 0; + + // Translates a string in the form `/job:X/task:Z` into a host_port. + virtual string TranslateTask(const string& task) = 0; +}; + +typedef std::function ChannelCreationFunction; + +GrpcChannelCache* NewGrpcChannelCache( + const GrpcChannelSpec& channel_spec, ChannelCreationFunction channel_func, + const RPCOptions& rpc_options = RPCOptions()); + +// Below here are internal-only functions. 
+ +::grpc::ChannelArguments GetChannelArguments(const RPCOptions* rpc_options); + +ChannelCreationFunction ConvertToChannelCreationFunction( + const std::function& new_channel_func_ptr); + +Status NewHostPortGrpcChannel(const string& target, + const RPCOptions* rpc_options, + SharedGrpcChannelPtr* channel_pointer); + +} // namespace tsl + +#endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CHANNEL_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_channel_common.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_channel_common.h new file mode 100644 index 0000000000000000000000000000000000000000..7e1ef32151f1480c888e3ca22d46bfe4ad0b6608 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_channel_common.h @@ -0,0 +1,103 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CHANNEL_COMMON_H_ +#define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CHANNEL_COMMON_H_ + +#include +#include + +#include "absl/container/flat_hash_map.h" +#include "tsl/distributed_runtime/rpc/grpc_util.h" +#include "tsl/platform/logging.h" +#include "tsl/platform/mutex.h" + +namespace tsl { + +// GenericCachingChannelCache that caches results to FindWorkerChannel() calls. +// To use instantiate with the type of channel cache needed. +// GenericCachingChannelCache allows using multiple channels to communiate with +// same target to provide throughput gains. When multiple channels exist for +// the same target they are chosen in a simple round robin fashion on each call +// to FindWorkerChannel. +template +class GenericCachingChannelCache : public ChannelCacheT { + public: + explicit GenericCachingChannelCache(int num_channels_per_target) + : num_channels_per_target_( + num_channels_per_target > 0 ? 
num_channels_per_target : 1) {} + + ~GenericCachingChannelCache() override {} + + SharedGrpcChannelPtr FindWorkerChannel(const string& target) override { + { + mutex_lock l(mu_); + auto iter = channels_.find(target); + if (iter != channels_.end()) { + return GetNextChannelPtrAndUpdateState(iter->second); + } + } + ChannelState new_chan_state; + for (int indx = 0; indx < num_channels_per_target_; indx++) { + auto ch = FindChannelOnce(target); + if (!ch) return nullptr; + new_chan_state.channels.push_back(ch); + } + new_chan_state.last_used = num_channels_per_target_ - 1; + + { + mutex_lock l(mu_); + typename absl::flat_hash_map::iterator iter; + bool was_inserted; + std::tie(iter, was_inserted) = channels_.insert({target, new_chan_state}); + VLOG(2) << "Channel cache for target: " << target + << " Size: " << new_chan_state.channels.size() + << " insertion: " << was_inserted; + return GetNextChannelPtrAndUpdateState(iter->second); + } + } + + protected: + // Find the ClientChannel for "target". Only called when no channel was + // found in the channels_ cache for "target". A non nullptr result will be + // cached in channels_. + virtual SharedGrpcChannelPtr FindChannelOnce(const string& target) = 0; + + private: + struct ChannelState { + std::vector channels; + int last_used; + }; + + // Should be called with mu_ held. + SharedGrpcChannelPtr GetNextChannelPtrAndUpdateState( + ChannelState& chan_state) { + // Following statement is marked as Crash OK as this is an invariant of + // code flow in this class. + CHECK_EQ(chan_state.channels.size(), num_channels_per_target_); // Crash OK + chan_state.last_used = + (chan_state.last_used + 1) % num_channels_per_target_; + return chan_state.channels[chan_state.last_used]; + } + + const int num_channels_per_target_; + // TODO(zhifengc): Eviction when the map becomes too big. 
+ mutex mu_; + absl::flat_hash_map channels_ TF_GUARDED_BY(mu_); +}; + +} // namespace tsl + +#endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CHANNEL_COMMON_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_client_cq_tag.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_client_cq_tag.h new file mode 100644 index 0000000000000000000000000000000000000000..183ca52e8d2d0a4c772e0fe4fbbdefb3236011be --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_client_cq_tag.h @@ -0,0 +1,41 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CLIENT_CQ_TAG_H_ +#define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CLIENT_CQ_TAG_H_ + +#include "tsl/platform/macros.h" + +namespace tsl { + +// Represents a pending asynchronous client call as a tag that can be +// stored in a `grpc::CompletionQueue`. +class GrpcClientCQTag { + public: + GrpcClientCQTag() = default; + virtual ~GrpcClientCQTag() = default; + + // OnCompleted is invoked when the RPC has finished. + // Implementations of OnCompleted can delete *this. 
+ virtual void OnCompleted(bool ok) = 0; + + private: + GrpcClientCQTag(const GrpcClientCQTag&) = delete; + void operator=(const GrpcClientCQTag&) = delete; +}; + +} // namespace tsl + +#endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_CLIENT_CQ_TAG_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_util.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_util.h new file mode 100644 index 0000000000000000000000000000000000000000..c1cce692b2a19768003e12951984db338324dcdc --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_util.h @@ -0,0 +1,130 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_UTIL_H_ +#define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_UTIL_H_ + +#include +#include + +#include "grpcpp/grpcpp.h" +#include "grpcpp/support/byte_buffer.h" +#include "absl/status/status.h" +#include "absl/strings/cord.h" +#include "tsl/platform/protobuf.h" +#include "tsl/platform/status.h" +#include "tsl/platform/stringpiece.h" +#include "tsl/platform/stringprintf.h" +#include "tsl/protobuf/distributed_runtime_payloads.pb.h" + +namespace tsl { + +// Proto: tensorflow::distributed_runtime::GrpcPayloadsLost +// Location: tsl/protobuf/distributed_runtime_payloads.proto +// Usage: Flags the Status to have lost payloads during GRPC conversion. +constexpr char kGrpcPayloadsLost[] = + "type.googleapis.com/tensorflow.distributed_runtime.GrpcPayloadsLost"; + +constexpr char kStreamRemovedMessage[] = "Stream removed"; + +// Identify if the given grpc::Status corresponds to an HTTP stream removed +// error (see chttp2_transport.cc). +// +// When auto-reconnecting to a remote worker after it restarts, gRPC can return +// an UNKNOWN error code with a "Stream removed" error message. This should not +// be treated as an unrecoverable error. +// +// N.B. This is dependent on the error message from grpc remaining consistent. 
+inline bool IsStreamRemovedError(const ::grpc::Status& s) { + return !s.ok() && s.error_code() == ::grpc::StatusCode::UNKNOWN && + s.error_message() == kStreamRemovedMessage; +} + +inline std::string SerializePayloads(const Status& s) { + tensorflow::distributed_runtime::GrpcPayloadContainer container; + s.ForEachPayload([&container](StringPiece key, const absl::Cord& value) { + (*container.mutable_payloads())[std::string(key)] = std::string(value); + }); + return container.SerializeAsString(); +} + +inline void InsertSerializedPayloads(Status& s, std::string payloads) { + tensorflow::distributed_runtime::GrpcPayloadContainer container; + if (container.ParseFromString(payloads)) { + for (const auto& key_val : container.payloads()) { + s.SetPayload(key_val.first, absl::Cord(key_val.second)); + } + } else { + s.SetPayload(kGrpcPayloadsLost, + absl::Cord(tensorflow::distributed_runtime::GrpcPayloadsLost() + .SerializeAsString())); + } +} + +inline Status FromGrpcStatus(const ::grpc::Status& s) { + if (s.ok()) { + return OkStatus(); + } else { + Status converted; + // Convert "UNKNOWN" stream removed errors into unavailable, to allow + // for retry upstream. + if (IsStreamRemovedError(s)) { + converted = Status(absl::StatusCode::kUnavailable, s.error_message()); + } + converted = Status(static_cast(s.error_code()), + s.error_message()); + InsertSerializedPayloads(converted, s.error_details()); + return converted; + } +} + +inline ::grpc::Status ToGrpcStatus(const Status& s) { + if (s.ok()) { + return ::grpc::Status::OK; + } else { + if (s.message().size() > 3072 /* 3k bytes */) { + // TODO(b/62947679): Remove truncation once the gRPC issue is resolved. + string scratch = strings::Printf("%.3072s ... 
[truncated]", + tsl::NullTerminatedMessage(s)); + LOG(ERROR) << "Truncated error message: " << s; + return ::grpc::Status(static_cast<::grpc::StatusCode>(s.code()), scratch, + SerializePayloads(s)); + } + return ::grpc::Status(static_cast<::grpc::StatusCode>(s.code()), + std::string(s.message()), SerializePayloads(s)); + } +} + +typedef std::shared_ptr<::grpc::Channel> SharedGrpcChannelPtr; + +// Serialize src and store in *dst. +::grpc::Status GrpcMaybeUnparseProto(const protobuf::Message& src, + ::grpc::ByteBuffer* dst); + +// Parse contents of src and initialize *dst with them. +bool GrpcMaybeParseProto(::grpc::ByteBuffer* src, protobuf::Message* dst); + +// Copy string src to grpc buffer *dst. +::grpc::Status GrpcMaybeUnparseProto(const string& src, + ::grpc::ByteBuffer* dst); + +// Copy grpc buffer src to string *dst. +bool GrpcMaybeParseProto(::grpc::ByteBuffer* src, string* dst); + +// Copy grpc buffer src to tstring *dst. +bool GrpcMaybeParseProto(::grpc::ByteBuffer* src, tstring* dst); +} // namespace tsl + +#endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_UTIL_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/allocator.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..84aef0b1f99c1fdbf9f35ba8f11caebf3d783fb6 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/allocator.h @@ -0,0 +1,430 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_ALLOCATOR_H_ +#define TENSORFLOW_TSL_FRAMEWORK_ALLOCATOR_H_ + +#include + +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "tsl/framework/numeric_types.h" +#include "tsl/framework/type_traits.h" +#include "tsl/platform/logging.h" +#include "tsl/platform/macros.h" +#include "tsl/platform/numa.h" +#include "tsl/platform/types.h" + +namespace tsl { + +// Attributes for a single allocation call. Different calls to the same +// allocator could potentially have different allocation attributes. +struct AllocationAttributes { + AllocationAttributes() = default; + + AllocationAttributes(bool retry_on_failure, bool allocation_will_be_logged, + std::function* freed_by_func) + : retry_on_failure(retry_on_failure), + allocation_will_be_logged(allocation_will_be_logged), + freed_by_func(freed_by_func) {} + + // If the first attempt to allocate the memory fails, the allocation should + // wait and retry (with a timeout). + // + // This is usually set to true, but we may set it to false in cases where a + // failure has only performance impact (e.g. optional scratch space + // allocation). + bool retry_on_failure = true; + // If a Tensor is allocated without the following set to true, then + // it is logged as an unknown allocation. During execution Tensors + // should be allocated through the OpKernelContext which records + // which Op is performing the allocation, and sets this flag to + // true. 
+ bool allocation_will_be_logged = false; + // EXPERIMENTAL: If provided, then evaluates to a timing count such that only + // a memory chunk whose freed_at_count is at this value or earlier may be + // returned. + std::function* freed_by_func = nullptr; // Not owned. + + AllocationAttributes(const AllocationAttributes&) = delete; + void operator=(const AllocationAttributes&) = delete; +}; + +// Runtime statistics collected by an allocator. Exactly the same as +// stream_executor::AllocatorStats, but independently defined to preserve the +// mutual independence of StreamExecutor and TensorFlow. +struct AllocatorStats { + int64_t num_allocs; // Number of allocations. + int64_t bytes_in_use; // Number of bytes in use. + int64_t peak_bytes_in_use; // The peak bytes in use. + int64_t largest_alloc_size; // The largest single allocation seen. + + // The upper limit of bytes of user allocatable device memory, if such a limit + // is known. + std::optional bytes_limit; + + // Stats for reserved memory usage. + int64_t bytes_reserved; // Number of bytes reserved. + int64_t peak_bytes_reserved; // The peak number of bytes reserved. + // The upper limit on the number bytes of reservable memory, + // if such a limit is known. + std::optional bytes_reservable_limit; + + int64_t largest_free_block_bytes; // Largest free block's size in heap. + + // Number of bytes of memory held by the allocator. This may be higher than + // bytes_in_use if the allocator holds a pool of memory (e.g. BFCAllocator). + std::optional pool_bytes; + std::optional peak_pool_bytes; + + AllocatorStats() + : num_allocs(0), + bytes_in_use(0), + peak_bytes_in_use(0), + largest_alloc_size(0), + bytes_reserved(0), + peak_bytes_reserved(0), + largest_free_block_bytes(0) {} + + std::string DebugString() const; +}; + +// The type of the allocated memory. +enum class AllocatorMemoryType { + kUnknown = 0, // Memory type unknown. + kDevice = 1, // Memory on device. 
+ kHostPageable = 2, // Memory on host and it is pagable. + kHostPinned = 3, // Memory on host and it is pinned. +}; + +// Allocator is an abstract interface for allocating and deallocating +// device memory. +class Allocator { + public: + // Align to 64 byte boundary. + static constexpr size_t kAllocatorAlignment = 64; + + virtual ~Allocator(); + + // Return a string identifying this allocator + virtual std::string Name() = 0; + + // Return an uninitialized block of memory that is "num_bytes" bytes + // in size. The returned pointer is guaranteed to be aligned to a + // multiple of "alignment" bytes. + // REQUIRES: "alignment" is a power of 2. + virtual void* AllocateRaw(size_t alignment, size_t num_bytes) = 0; + + // Return an uninitialized block of memory that is "num_bytes" bytes + // in size with specified allocation attributes. The returned pointer is + // guaranteed to be aligned to a multiple of "alignment" bytes. + // REQUIRES: "alignment" is a power of 2. + virtual void* AllocateRaw(size_t alignment, size_t num_bytes, + const AllocationAttributes& allocation_attr) { + // The default behavior is to use the implementation without any allocation + // attributes. + return AllocateRaw(alignment, num_bytes); + } + + // Deallocate a block of memory pointer to by "ptr" + // REQUIRES: "ptr" was previously returned by a call to AllocateRaw + virtual void DeallocateRaw(void* ptr) = 0; + + // Returns true if this allocator tracks the sizes of allocations. + // RequestedSize and AllocatedSize must be overridden if + // TracksAllocationSizes is overridden to return true. + virtual bool TracksAllocationSizes() const { return false; } + + // Returns true if this allocator allocates an opaque handle rather than the + // requested number of bytes. + // + // This method returns false for most allocators, but may be used by + // special-case allocators that track tensor usage. 
If this method returns + // true, AllocateRaw() should be invoked for all values of `num_bytes`, + // including 0. + // + // NOTE: It is the caller's responsibility to track whether an allocated + // object is a buffer or an opaque handle. In particular, when this method + // returns `true`, users of this allocator must not run any constructors or + // destructors for complex objects, since there is no backing store for the + // tensor in which to place their outputs. + virtual bool AllocatesOpaqueHandle() const { return false; } + + // Returns the user-requested size of the data allocated at + // 'ptr'. Note that the actual buffer allocated might be larger + // than requested, but this function returns the size requested by + // the user. + // + // REQUIRES: TracksAllocationSizes() is true. + // + // REQUIRES: 'ptr!=nullptr' and points to a buffer previously + // allocated by this allocator. + virtual size_t RequestedSize(const void* ptr) const { + CHECK(false) << "allocator doesn't track sizes"; + return size_t(0); + } + + // Returns the allocated size of the buffer at 'ptr' if known, + // otherwise returns RequestedSize(ptr). AllocatedSize(ptr) is + // guaranteed to be >= RequestedSize(ptr). + // + // REQUIRES: TracksAllocationSizes() is true. + // + // REQUIRES: 'ptr!=nullptr' and points to a buffer previously + // allocated by this allocator. + virtual size_t AllocatedSize(const void* ptr) const { + return RequestedSize(ptr); + } + + // Returns either 0 or an identifier assigned to the buffer at 'ptr' + // when the buffer was returned by AllocateRaw. If non-zero, the + // identifier differs from every other ID assigned by this + // allocator. + // + // REQUIRES: TracksAllocationSizes() is true. + // + // REQUIRES: 'ptr!=nullptr' and points to a buffer previously + // allocated by this allocator. + virtual int64_t AllocationId(const void* ptr) const { return 0; } + + // Returns the allocated size of the buffer at 'ptr' if known, + // otherwise returns 0. 
This method can be called when + // TracksAllocationSizes() is false, but can be extremely slow. + // + // REQUIRES: 'ptr!=nullptr' and points to a buffer previously + // allocated by this allocator. + virtual size_t AllocatedSizeSlow(const void* ptr) const { + if (TracksAllocationSizes()) { + return AllocatedSize(ptr); + } + return 0; + } + + // Fills in 'stats' with statistics collected by this allocator. + virtual absl::optional GetStats() { return absl::nullopt; } + + // If implemented, clears the internal stats except for the `in_use` fields + // and sets the `peak_bytes_in_use` to be equal to the `bytes_in_use`. Returns + // true if implemented. + // + // REQUIRES: GetStats is overridden. + virtual bool ClearStats() TF_MUST_USE_RESULT { return false; } + + virtual void SetSafeFrontier(uint64 count) {} + + // For allocator that are stream aware, allow to specify the compute + // stream this allocator is used for. This can also trigger memory + // preallocation. + virtual void SetStreamAndPreallocateMemory(void* stream) {} + + // Returns the type of the memory allocated by this allocator. + virtual AllocatorMemoryType GetMemoryType() const { + return AllocatorMemoryType::kUnknown; + } +}; + +// An implementation of Allocator that delegates all calls to another Allocator. +// +// Useful to clients who want to override part of the functionality of another +// allocator. +class AllocatorWrapper : public Allocator { + public: + explicit AllocatorWrapper(Allocator* wrapped) : wrapped_(wrapped) {} + + ~AllocatorWrapper() override {} + + // Returns the wrapped allocator to which all calls are delegated. 
+ Allocator* wrapped() const { return wrapped_; } + + std::string Name() override { return wrapped_->Name(); } + + void* AllocateRaw(size_t alignment, size_t num_bytes) override { + return wrapped_->AllocateRaw(alignment, num_bytes); + } + + void* AllocateRaw(size_t alignment, size_t num_bytes, + const AllocationAttributes& allocation_attr) override { + return wrapped_->AllocateRaw(alignment, num_bytes, allocation_attr); + } + + void DeallocateRaw(void* ptr) override { wrapped_->DeallocateRaw(ptr); } + + bool TracksAllocationSizes() const override { + return wrapped_->TracksAllocationSizes(); + } + + bool AllocatesOpaqueHandle() const override { + return wrapped_->AllocatesOpaqueHandle(); + } + + size_t RequestedSize(const void* ptr) const override { + return wrapped_->RequestedSize(ptr); + } + + size_t AllocatedSize(const void* ptr) const override { + return wrapped_->AllocatedSize(ptr); + } + + int64_t AllocationId(const void* ptr) const override { + return wrapped_->AllocationId(ptr); + } + + size_t AllocatedSizeSlow(const void* ptr) const override { + return wrapped_->AllocatedSizeSlow(ptr); + } + + AllocatorMemoryType GetMemoryType() const override { + return wrapped_->GetMemoryType(); + } + + private: + Allocator* const wrapped_; +}; + +// A tensorflow Op may need access to different kinds of memory that +// are not simply a function of the device to which the Op has been +// assigned. For example, an Op executing on a GPU may still need +// to allocate CPU RAM for some purpose. Internal to the tensorflow +// runtime we may choose to allocate CPU ram from special regions +// that have been prepared for higher performance in some use +// contexts, e.g. doing DMA with particular devices. For these +// reasons, the Device interface does not expose just one memory +// Allocator, but instead provides an accessor that takes a +// specification of the desired memory attributes in order to select +// an Allocator. 
+// +// Example use: +// // Allocator for ordinary device memory: +// Allocator* a = allocator(AllocatorAttributes()); +// ... +// // Allocator for CPU RAM, regardless of where Op is executing: +// AllocatorAttributes attr; +// attr.set_on_host(true); +// Allocator* a = allocator(attr); +struct AllocatorAttributes { + void set_on_host(bool v) { value |= (static_cast(v)); } + bool on_host() const { return value & 0x1; } + void set_nic_compatible(bool v) { value |= (static_cast(v) << 1); } + bool nic_compatible() const { return value & (0x1 << 1); } + void set_gpu_compatible(bool v) { value |= (static_cast(v) << 2); } + bool gpu_compatible() const { return value & (0x1 << 2); } + void set_use_pjrt_allocator(bool v) { value |= (static_cast(v) << 3); } + bool use_pjrt_allocator() const { return value & (0x1 << 3); } + void Merge(AllocatorAttributes other) { + value |= other.value; + if (scope_id != other.scope_id) { + CHECK(scope_id == 0 || other.scope_id == 0) + << "At least one scope_id should be zero to merge " + "AllocatorAttributes but found this.scope_id=" + << scope_id << " and other.scope_id=" << other.scope_id; + scope_id = scope_id == 0 ? other.scope_id : scope_id; + } + } + // Returns true if the fields set in *this is a subset of or equal to + // those set in other. + bool IsEqualOrLessRestrictiveThan(const AllocatorAttributes& other) const { + return (value | other.value) == other.value; + } + + // NOTE: The upper 8 bits of the value are reserved for + // device-specific uses. Implementors of a device can interpret these + // upper 8 bits in device-specific ways, and ops implemented for those + // devices are responsible for setting those 8 bits appropriately. + uint32 value = 0; + // EXPERIMENTAL: If this is greater than zero, then allocation is delegated to + // a named special-purpose allocator on the same device. + int32 scope_id = 0; + + // Returns a human readable representation of this. 
+ std::string DebugString() const; +}; + +// Returns a trivial implementation of Allocator, which is a process singleton. +// Access through this function is only intended for use by restricted parts +// of the infrastructure. +Allocator* cpu_allocator_base(); + +// If available, calls ProcessState::GetCPUAllocator(numa_node). +// If not, falls back to cpu_allocator_base(). +// Intended for use in contexts where ProcessState is not visible at +// compile time. Where ProcessState is visible, it's preferable to +// call it directly. +Allocator* cpu_allocator(int numa_node = port::kNUMANoAffinity); + +// Enables AllocatorStats in the default CPU allocator implementation. By +// default, it's disabled. +void EnableCPUAllocatorStats(); +// Disables AllocatorStats in the default CPU allocator implementation. By +// default, it's disabled. +void DisableCPUAllocatorStats(); +bool CPUAllocatorStatsEnabled(); + +// Enables full statistics collection in the default CPU allocator +// implementation. By default, it's disabled. +void EnableCPUAllocatorFullStats(); +bool CPUAllocatorFullStatsEnabled(); + +// An object that does the underlying suballoc/free of memory for a higher-level +// allocator. The expectation is that the higher-level allocator is doing some +// kind of cache or pool management so that it will call SubAllocator::Alloc and +// Free relatively infrequently, compared to the number of times its own +// AllocateRaw and Free methods are called. +class SubAllocator { + public: + // Visitor gets called with a pointer to a memory area and its + // size in bytes. The index value will be numa_node for a CPU + // allocator and GPU id for a GPU allocator. + typedef std::function Visitor; + + SubAllocator(const std::vector& alloc_visitors, + const std::vector& free_visitors); + + virtual ~SubAllocator() {} + // Allocates at least num_bytes. Returns actual number of bytes allocated in + // bytes_received. 
The caller can safely use the full bytes_received sized + // buffer following the returend pointer. + virtual void* Alloc(size_t alignment, size_t num_bytes, + size_t* bytes_received) = 0; + virtual void Free(void* ptr, size_t num_bytes) = 0; + + // Returns true if the BFC allocator can safely coalesce adjacent regions + // returned by this allocator. + virtual bool SupportsCoalescing() const = 0; + + // Returns the type of the memory allocated by this SubAllocator. + virtual AllocatorMemoryType GetMemoryType() const { + return AllocatorMemoryType::kUnknown; + } + + protected: + // Implementation of Alloc() method must call this on newly allocated + // value. + void VisitAlloc(void* ptr, int index, size_t num_bytes); + + // Implementation of Free() method must call this on value to be + // freed immediately before deallocation. + void VisitFree(void* ptr, int index, size_t num_bytes); + + const std::vector alloc_visitors_; + const std::vector free_visitors_; +}; + +} // namespace tsl + +#endif // TENSORFLOW_TSL_FRAMEWORK_ALLOCATOR_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/allocator_registry.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/allocator_registry.h new file mode 100644 index 0000000000000000000000000000000000000000..b827ce42dc7ab42f2f863c0bc3696c0c5cfa2f48 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/allocator_registry.h @@ -0,0 +1,154 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// Classes to maintain a static registry of memory allocator factories. +#ifndef TENSORFLOW_TSL_FRAMEWORK_ALLOCATOR_REGISTRY_H_ +#define TENSORFLOW_TSL_FRAMEWORK_ALLOCATOR_REGISTRY_H_ + +#include +#include +#include + +#include "absl/base/thread_annotations.h" +#include "tsl/framework/allocator.h" +#include "tsl/platform/macros.h" +#include "tsl/platform/mutex.h" +#include "tsl/platform/numa.h" + +namespace tensorflow { + +class ProcessState; + +} + +namespace tsl { + +class AllocatorFactory { + public: + virtual ~AllocatorFactory() {} + + // Returns true if the factory will create a functionally different + // SubAllocator for different (legal) values of numa_node. + virtual bool NumaEnabled() { return false; } + + // Create an Allocator. + virtual Allocator* CreateAllocator() = 0; + + // Create a SubAllocator. If NumaEnabled() is true, then returned SubAllocator + // will allocate memory local to numa_node. If numa_node == kNUMANoAffinity + // then allocated memory is not specific to any NUMA node. + virtual SubAllocator* CreateSubAllocator(int numa_node) = 0; +}; + +// ProcessState is defined in a package that cannot be a dependency of +// framework. This definition allows us to access the one method we need. +class ProcessStateInterface { + public: + virtual ~ProcessStateInterface() {} + virtual Allocator* GetCPUAllocator(int numa_node) = 0; +}; + +// A singleton registry of AllocatorFactories. +// +// Allocators should be obtained through ProcessState or cpu_allocator() +// (deprecated), not directly through this interface. The purpose of this +// registry is to allow link-time discovery of multiple AllocatorFactories among +// which ProcessState will obtain the best fit at startup. 
+class AllocatorFactoryRegistry { + public: + AllocatorFactoryRegistry() {} + ~AllocatorFactoryRegistry() {} + + void Register(const char* source_file, int source_line, const string& name, + int priority, AllocatorFactory* factory); + + // Returns 'best fit' Allocator. Find the factory with the highest priority + // and return an allocator constructed by it. If multiple factories have + // been registered with the same priority, picks one by unspecified criteria. + Allocator* GetAllocator(); + + // Returns 'best fit' SubAllocator. First look for the highest priority + // factory that is NUMA-enabled. If none is registered, fall back to the + // highest priority non-NUMA-enabled factory. If NUMA-enabled, return a + // SubAllocator specific to numa_node, otherwise return a NUMA-insensitive + // SubAllocator. + SubAllocator* GetSubAllocator(int numa_node); + + // Returns the singleton value. + static AllocatorFactoryRegistry* singleton(); + + ProcessStateInterface* process_state() const { + mutex_lock ml(mu_); + return process_state_; + } + + protected: + friend class tensorflow::ProcessState; + + void SetProcessState(ProcessStateInterface* interface) { + mutex_lock ml(mu_); + process_state_ = interface; + } + + private: + mutable mutex mu_; + ProcessStateInterface* process_state_ ABSL_GUARDED_BY(mu_) = nullptr; + bool first_alloc_made_ = false; + struct FactoryEntry { + const char* source_file; + int source_line; + string name; + int priority; + std::unique_ptr factory; + std::unique_ptr allocator; + // Index 0 corresponds to kNUMANoAffinity, other indices are (numa_node + + // 1). + std::vector> sub_allocators; + }; + std::vector factories_ ABSL_GUARDED_BY(mu_); + + // Returns any FactoryEntry registered under 'name' and 'priority', + // or 'nullptr' if none found. 
+ const FactoryEntry* FindEntry(const string& name, int priority) const + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_); + + AllocatorFactoryRegistry(const AllocatorFactoryRegistry&) = delete; + void operator=(const AllocatorFactoryRegistry&) = delete; +}; + +class AllocatorFactoryRegistration { + public: + AllocatorFactoryRegistration(const char* file, int line, const string& name, + int priority, AllocatorFactory* factory) { + AllocatorFactoryRegistry::singleton()->Register(file, line, name, priority, + factory); + } +}; + +#define REGISTER_MEM_ALLOCATOR(name, priority, factory) \ + REGISTER_MEM_ALLOCATOR_UNIQ_HELPER(__COUNTER__, __FILE__, __LINE__, name, \ + priority, factory) + +#define REGISTER_MEM_ALLOCATOR_UNIQ_HELPER(ctr, file, line, name, priority, \ + factory) \ + REGISTER_MEM_ALLOCATOR_UNIQ(ctr, file, line, name, priority, factory) + +#define REGISTER_MEM_ALLOCATOR_UNIQ(ctr, file, line, name, priority, factory) \ + static AllocatorFactoryRegistration allocator_factory_reg_##ctr( \ + file, line, name, priority, new factory) + +} // namespace tsl + +#endif // TENSORFLOW_TSL_FRAMEWORK_ALLOCATOR_REGISTRY_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/allocator_retry.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/allocator_retry.h new file mode 100644 index 0000000000000000000000000000000000000000..6ca14c12d040826ec784bedc5ee26e6c786b47e8 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/allocator_retry.h @@ -0,0 +1,60 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_ALLOCATOR_RETRY_H_ +#define TENSORFLOW_TSL_FRAMEWORK_ALLOCATOR_RETRY_H_ + +#include "tsl/platform/env.h" +#include "tsl/platform/mutex.h" +#include "tsl/platform/types.h" + +namespace tsl { + +// A retrying wrapper for a memory allocator. +class AllocatorRetry { + public: + AllocatorRetry(); + + // Call 'alloc_func' to obtain memory. On first call, + // 'verbose_failure' will be false. If return value is nullptr, + // then wait up to 'max_millis_to_wait' milliseconds, retrying each + // time a call to DeallocateRaw() is detected, until either a good + // pointer is returned or the deadline is exhausted. If the + // deadline is exhausted, try one more time with 'verbose_failure' + // set to true. The value returned is either the first good pointer + // obtained from 'alloc_func' or nullptr. + void* AllocateRaw(std::function + alloc_func, + int max_millis_to_wait, size_t alignment, size_t bytes); + + // Called to notify clients that some memory was returned. 
+ void NotifyDealloc(); + + private: + Env* env_; + mutex mu_; + condition_variable memory_returned_; +}; + +// Implementation details below +inline void AllocatorRetry::NotifyDealloc() { + mutex_lock l(mu_); + memory_returned_.notify_all(); +} + +} // namespace tsl + +#endif // TENSORFLOW_TSL_FRAMEWORK_ALLOCATOR_RETRY_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/bfc_allocator.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/bfc_allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..47619856abe8dd92f059278395233ae6aaae53d1 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/bfc_allocator.h @@ -0,0 +1,629 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_BFC_ALLOCATOR_H_ +#define TENSORFLOW_TSL_FRAMEWORK_BFC_ALLOCATOR_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/container/flat_hash_set.h" +#include "tsl/framework/allocator.h" +#include "tsl/framework/allocator_retry.h" +#include "tsl/framework/shared_counter.h" +#include "tsl/platform/macros.h" +#include "tsl/platform/mutex.h" +#include "tsl/platform/numbers.h" +#include "tsl/platform/strcat.h" +#include "tsl/platform/thread_annotations.h" +#include "tsl/platform/types.h" + +namespace tensorflow { +class MemoryDump; +} +namespace tsl { +using tensorflow::MemoryDump; + +// A memory allocator that implements a 'best-fit with coalescing' +// algorithm. This is essentially a very simple version of Doug Lea's +// malloc (dlmalloc). +// +// The goal of this allocator is to support defragmentation via +// coalescing. One assumption we make is that the process using this +// allocator owns pretty much all of the memory, and that nearly +// all requests to allocate memory go through this interface. +class BFCAllocator : public Allocator { + public: + struct Options { + bool allow_growth = true; + + // If true, the allocator may sleep for a period of time when it can't + // fulfill an allocation request, in the hopes that another thread will free + // up memory in the meantime. + // + // If false, the allocator will never sleep, even if + // AllocationAttributes::attr_retry_on_failure is true. + bool allow_retry_on_failure = true; + + // Whether the allocator will deallocate free regions to avoid OOM due to + // memory fragmentation. + bool garbage_collection = false; + + // Controls when a chunk should be split, if its size exceeds the requested + // allocation size. 
+ double fragmentation_fraction = 0; + }; + BFCAllocator(std::unique_ptr sub_allocator, size_t total_memory, + const string& name, const Options& opts); + + ~BFCAllocator() override; + + string Name() override { return name_; } + + void* AllocateRaw(size_t alignment, size_t num_bytes) override { + return AllocateRaw(alignment, num_bytes, AllocationAttributes()); + } + + void* AllocateRaw(size_t alignment, size_t num_bytes, + const AllocationAttributes& allocation_attr) override; + + void DeallocateRaw(void* ptr) override; + + bool TracksAllocationSizes() const override; + + size_t RequestedSize(const void* ptr) const override; + + size_t AllocatedSize(const void* ptr) const override; + + int64_t AllocationId(const void* ptr) const override; + + absl::optional GetStats() override; + + bool ClearStats() override; + + void SetTimingCounter(SharedCounter* sc) { timing_counter_ = sc; } + + void SetSafeFrontier(uint64 count) override; + + AllocatorMemoryType GetMemoryType() const override; + + bool ShouldRecordOpName() const { return true; } + + MemoryDump RecordMemoryMap(); + + private: + struct Bin; + + void* AllocateRawInternal(size_t alignment, size_t num_bytes, + bool dump_log_on_failure, + uint64 freed_before_count); + + void* AllocateRawInternalWithRetry( + size_t alignment, size_t num_bytes, + const AllocationAttributes& allocation_attr); + + void DeallocateRawInternal(void* ptr); + + // Chunks whose freed_at_count is later than the safe frontier value are kept + // on a special list and not subject to merging immediately upon being freed. + // + // This function sweeps that list looking for Chunks whose timestamp is now + // safe. When found their freed_at_count is set to 0 and we attempt to merge + // them with their neighbors. 
+ // + // If required_bytes > 0 then this function is being called in the context of + // a need for this many bytes that could not be satisfied without merging + // unsafe chunks, so we go ahead and merge the unsafe chunks too, just up to + // the point that a free chunk of required_bytes is produced. Note that + // unsafe merged chunks adopt the most conservative timestamp from their + // constituents so they're only useful for allocations not requiring a + // particular timestamp. + bool MergeTimestampedChunks(size_t required_bytes) + TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + + // Return the largest free chunk bytes from the largest bin in constant time. + // The free chunks are sorted by size (and then address) in a bin. + int64_t LargestFreeChunk() TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + + // Add TraceMe (in memory allocation and deallocation) for memory stats + // profiling. The chunk_ptr is passed to get information such as address, + // chunk size and requested_size. + void AddTraceMe(absl::string_view traceme_name, const void* ptr) + TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + + // Overloaded AddTraceMe function with chunk information. + void AddTraceMe(absl::string_view traceme_name, const void* chunk_ptr, + int64_t req_bytes, int64_t alloc_bytes) + TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + + // A ChunkHandle is an index into the chunks_ vector in BFCAllocator + // kInvalidChunkHandle means an invalid chunk + typedef size_t ChunkHandle; + static constexpr ChunkHandle kInvalidChunkHandle = SIZE_MAX; + + typedef int BinNum; + static constexpr int kInvalidBinNum = -1; + // The following means that the largest bin'd chunk size is 256 << 21 = 512MB. + static constexpr int kNumBins = 21; + + // A Chunk points to a piece of memory that's either entirely free or entirely + // in use by one user memory allocation. + // + // An AllocationRegion's memory is split up into one or more disjoint Chunks, + // which together cover the whole region without gaps. 
Chunks participate in + // a doubly-linked list, and the prev/next pointers point to the physically + // adjacent chunks. + // + // Since a chunk cannot be partially in use, we may need to split a free chunk + // in order to service a user allocation. We always merge adjacent free + // chunks. + // + // Chunks contain information about whether they are in use or whether they + // are free, and contain a pointer to the bin they are in. + struct Chunk { + size_t size = 0; // Full size of buffer. + + // We sometimes give chunks that are larger than needed to reduce + // fragmentation. requested_size keeps track of what the client + // actually wanted so we can understand whether our splitting + // strategy is efficient. + size_t requested_size = 0; + + // allocation_id is set to -1 when the chunk is not in use. It is assigned a + // value greater than zero before the chunk is returned from + // AllocateRaw, and this value is unique among values assigned by + // the parent allocator. + int64_t allocation_id = -1; + void* ptr = nullptr; // pointer to granted subbuffer. + + // If not kInvalidChunkHandle, the memory referred to by 'prev' is directly + // preceding the memory used by this chunk. E.g., It should start + // at 'ptr - prev->size' + ChunkHandle prev = kInvalidChunkHandle; + + // If not kInvalidChunkHandle, the memory referred to by 'next' is directly + // following the memory used by this chunk. E.g., It should be at + // 'ptr + size' + ChunkHandle next = kInvalidChunkHandle; + + // What bin are we in? + BinNum bin_num = kInvalidBinNum; + + // Optional count when this chunk was most recently made free. 
+ uint64 freed_at_count = 0; + + bool in_use() const { return allocation_id != -1; } + +#ifdef TENSORFLOW_MEM_DEBUG + // optional debugging info + const char* op_name = nullptr; + uint64 step_id = 0; + int64 action_count = 0; +#endif + + string DebugString(BFCAllocator* a, + bool recurse) TF_NO_THREAD_SAFETY_ANALYSIS { + string dbg; + strings::StrAppend( + &dbg, " Size: ", strings::HumanReadableNumBytes(size), + " | Requested Size: ", strings::HumanReadableNumBytes(requested_size), + " | in_use: ", in_use(), " | bin_num: ", bin_num); + if (recurse && prev != BFCAllocator::kInvalidChunkHandle) { + Chunk* p = a->ChunkFromHandle(prev); + strings::StrAppend(&dbg, ", prev: ", p->DebugString(a, false)); + } + if (recurse && next != BFCAllocator::kInvalidChunkHandle) { + Chunk* n = a->ChunkFromHandle(next); + strings::StrAppend(&dbg, ", next: ", n->DebugString(a, false)); + } +#ifdef TENSORFLOW_MEM_DEBUG + strings::StrAppend(&dbg, ", for: ", op_name ? op_name : "UNKNOWN", + ", stepid: ", step_id, + ", last_action: ", action_count); +#endif + return dbg; + } + }; + + // A Bin is a collection of similar-sized free chunks. + // Allocated chunks are never in a Bin. + struct Bin { + // All chunks in this bin have >= bin_size memory. + size_t bin_size = 0; + + class ChunkComparator { + public: + explicit ChunkComparator(BFCAllocator* allocator) + : allocator_(allocator) {} + // Sort first by size and then use pointer address as a tie breaker. + bool operator()(const ChunkHandle ha, + const ChunkHandle hb) const TF_NO_THREAD_SAFETY_ANALYSIS { + const Chunk* a = allocator_->ChunkFromHandle(ha); + const Chunk* b = allocator_->ChunkFromHandle(hb); + if (a->size != b->size) { + return a->size < b->size; + } + return a->ptr < b->ptr; + } + + private: + BFCAllocator* allocator_; // The parent allocator + }; + + typedef std::set FreeChunkSet; + // List of free chunks within the bin, sorted by chunk size. + // Chunk * not owned. 
+ FreeChunkSet free_chunks; + Bin(BFCAllocator* allocator, size_t bs) + : bin_size(bs), free_chunks(ChunkComparator(allocator)) {} + }; + + static constexpr size_t kMinAllocationBits = 8; + static constexpr size_t kMinAllocationSize = 1 << kMinAllocationBits; + + // BFCAllocator allocates memory into a collection of disjoint + // AllocationRegions. Each AllocationRegion corresponds to one call to + // SubAllocator::Alloc(). (Actually, if a subsequent call to + // SubAllocator::Alloc() returns another region immediately adjacent to the + // last, it will be used to extend the first AllocationRegion, not create a + // separate one.) + // + // An AllocationRegion contains one or more Chunks, covering all of its + // memory. Its primary job is to map pointers to ChunkHandles. + // + // This class is thread-compatible. + class AllocationRegion { + public: + AllocationRegion(void* ptr, size_t memory_size) + : ptr_(ptr), + memory_size_(memory_size), + end_ptr_( + static_cast(static_cast(ptr_) + memory_size_)) { + DCHECK_EQ(0, memory_size % kMinAllocationSize); + const size_t n_handles = + (memory_size + kMinAllocationSize - 1) / kMinAllocationSize; + handles_.resize(n_handles, kInvalidChunkHandle); + } + + AllocationRegion() = default; + AllocationRegion(AllocationRegion&& other) { Swap(&other); } + AllocationRegion& operator=(AllocationRegion&& other) { + Swap(&other); + return *this; + } + + void* ptr() const { return ptr_; } + void* end_ptr() const { return end_ptr_; } + size_t memory_size() const { return memory_size_; } + void extend(size_t size) { + memory_size_ += size; + DCHECK_EQ(0, memory_size_ % kMinAllocationSize); + + end_ptr_ = static_cast(static_cast(end_ptr_) + size); + const size_t n_handles = + (memory_size_ + kMinAllocationSize - 1) / kMinAllocationSize; + handles_.resize(n_handles, kInvalidChunkHandle); + } + ChunkHandle get_handle(const void* p) const { + return handles_[IndexFor(p)]; + } + void set_handle(const void* p, ChunkHandle h) { 
handles_[IndexFor(p)] = h; } + void erase(const void* p) { set_handle(p, kInvalidChunkHandle); } + + private: + void Swap(AllocationRegion* other) { + std::swap(ptr_, other->ptr_); + std::swap(memory_size_, other->memory_size_); + std::swap(end_ptr_, other->end_ptr_); + std::swap(handles_, other->handles_); + } + + size_t IndexFor(const void* p) const { + std::uintptr_t p_int = reinterpret_cast(p); + std::uintptr_t base_int = reinterpret_cast(ptr_); + DCHECK_GE(p_int, base_int); + DCHECK_LT(p_int, base_int + memory_size_); + return static_cast(((p_int - base_int) >> kMinAllocationBits)); + } + + // Metadata about the allocation region. + void* ptr_ = nullptr; + size_t memory_size_ = 0; + void* end_ptr_ = nullptr; + + // Array of size "memory_size / kMinAllocationSize". It is + // indexed by (p-base) / kMinAllocationSize, contains ChunkHandle + // for the memory allocation represented by "p" + std::vector handles_; + + AllocationRegion(const AllocationRegion&) = delete; + void operator=(const AllocationRegion&) = delete; + }; + + // RegionManager aggregates one or more "AllocationRegions" and provides + // a layer of indirection from pointers to the underlying ChunkHandle, + // allowing allocation across multiple discontiguous memory regions. + // + // This class is thread-compatible. + class RegionManager { + public: + RegionManager() {} + ~RegionManager() {} + + void AddAllocationRegion(void* ptr, size_t memory_size) { + // Insert sorted by end_ptr. + auto entry = + std::upper_bound(regions_.begin(), regions_.end(), ptr, &Comparator); + regions_.insert(entry, AllocationRegion(ptr, memory_size)); + } + + // Adds an alloation region for the given ptr and size, potentially + // extending a region if ptr matches the end_ptr of an existing region. + // If a region is extended, returns a pointer to the extended region so that + // the BFC allocator can reason about chunkification. 
+ AllocationRegion* AddOrExtendAllocationRegion(void* ptr, + size_t memory_size) { + // Insert sorted by end_ptr. + auto entry = + std::upper_bound(regions_.begin(), regions_.end(), ptr, &Comparator); + // Check if can be coalesced with preceding region. + if (entry != regions_.begin()) { + auto preceding_region = entry - 1; + if (preceding_region->end_ptr() == ptr) { + if (VLOG_IS_ON(1)) { + LOG(INFO) << "Extending region " << preceding_region->ptr() + << " of " + << strings::HumanReadableNumBytes( + preceding_region->memory_size()) + << " by " << strings::HumanReadableNumBytes(memory_size) + << " bytes"; + } + preceding_region->extend(memory_size); + return &*preceding_region; + } + } + VLOG(1) << "Inserting new region " << ptr << " of " + << strings::HumanReadableNumBytes(memory_size); + regions_.insert(entry, AllocationRegion(ptr, memory_size)); + return nullptr; + } + + std::vector::iterator RemoveAllocationRegion( + std::vector::iterator it) { + return regions_.erase(it); + } + + ChunkHandle get_handle(const void* p) const { + return RegionFor(p)->get_handle(p); + } + + void set_handle(const void* p, ChunkHandle h) { + return MutableRegionFor(p)->set_handle(p, h); + } + void erase(const void* p) { return MutableRegionFor(p)->erase(p); } + + const std::vector& regions() const { return regions_; } + + private: + static bool Comparator(const void* ptr, const AllocationRegion& other) { + return ptr < other.end_ptr(); + } + + AllocationRegion* MutableRegionFor(const void* p) { + return const_cast(RegionFor(p)); + } + + const AllocationRegion* RegionFor(const void* p) const { + auto entry = + std::upper_bound(regions_.begin(), regions_.end(), p, &Comparator); + + if (entry != regions_.end()) { + return &(*entry); + } + + LOG(FATAL) << "Could not find Region for " << p; + return nullptr; + } + + private: + std::vector regions_; + }; + + // Returns 'bytes' rounded up to the next highest kMinAllocationSize. 
+ static size_t RoundedBytes(size_t bytes); + + // Try to add a new memory region that can satisfy an allocation of + // 'rounded_bytes' bytes. Returns true on success and false on + // failure. + bool Extend(size_t alignment, size_t rounded_bytes) + TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + + // Deallocate free regions to give back the memory to suballocator, so that + // we can re-allocate a larger region. The main use scenario of this function + // is when OOM happens but we have free regions and the sum of sizes of free + // regions and unallocated bytes is larger than the requested size, implying + // (external) memory fragmentation. Returns true if any free regions are + // found and freed; false otherwise. + bool DeallocateFreeRegions(size_t rounded_bytes); + + // Helper function to deallocate regions. + void DeallocateRegions(const absl::flat_hash_set& region_ptrs) + TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + + // Returns a pointer to an underlying allocated chunk of size + // 'rounded_bytes'. + void* FindChunkPtr(BinNum bin_num, size_t rounded_bytes, size_t num_bytes, + uint64 freed_before) TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + + // Splits the chunk specified by 'h' into two chunks, one at least + // of size 'num_bytes'. + void SplitChunk(ChunkHandle h, size_t num_bytes) + TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + + // Merges the two chunk handles. Requires that the chunks are + // contiguous in their allocation. + void Merge(ChunkHandle h, ChunkHandle h2) TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + + // Adds the chunk 'h' to the proper free bin. + void InsertFreeChunkIntoBin(ChunkHandle h) TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + + // Removes the free chunk pointed to by 'c' from the set free_chunks. + void RemoveFreeChunkIterFromBin(Bin::FreeChunkSet* free_chunks, + const Bin::FreeChunkSet::iterator& c) + TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + + // Removes a free chunk from the bin. 
+ void RemoveFreeChunkFromBin(ChunkHandle h) TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + void MaybeRemoveFreeChunkFromBin(ChunkHandle h) + TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + + // Removes the chunk metadata represented by 'h'. + void DeleteChunk(ChunkHandle h) TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + + string RenderOccupancy() TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + void DumpMemoryLog(size_t num_bytes) TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + tensorflow::MemoryDump RecordMemoryMapInternal() + TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + void MaybeWriteMemoryMap() TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + + ChunkHandle AllocateChunk() TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + void DeallocateChunk(ChunkHandle h) TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + + Chunk* ChunkFromHandle(ChunkHandle h) TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + const Chunk* ChunkFromHandle(ChunkHandle h) const + TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + + void MarkFree(ChunkHandle h) TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + + ChunkHandle TryToCoalesce(ChunkHandle h, bool ignore_freed_at) + TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + + // Fragmentation is calculated as the reverse ratio of the largest free chunk + // size over total free memory, and returns a value within [0, 1]. + double GetFragmentation() TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + + // Information about a Bin that is useful for debugging. + struct BinDebugInfo { + size_t total_bytes_in_use = 0; + size_t total_bytes_in_bin = 0; + size_t total_requested_bytes_in_use = 0; + size_t total_chunks_in_use = 0; + size_t total_chunks_in_bin = 0; + }; + + // Computes and returns a BinDebugInfo for each Bin. + std::array get_bin_debug_info() + TF_EXCLUSIVE_LOCKS_REQUIRED(lock_); + + AllocatorRetry retry_helper_; + + // Structures immutable after construction + size_t memory_limit_ = 0; + + inline int Log2FloorNonZeroSlow(uint64 n) { + int r = 0; + while (n > 0) { + r++; + n >>= 1; + } + return r - 1; + } + + // Returns floor(log2(n)). 
+ inline int Log2FloorNonZero(uint64 n) { +#if defined(__GNUC__) + return 63 ^ __builtin_clzll(n); +#elif defined(PLATFORM_WINDOWS) && (_WIN64) + unsigned long index; + _BitScanReverse64(&index, n); + return index; +#else + return Log2FloorNonZeroSlow(n); +#endif + } + + // Map from bin size to Bin + Bin* BinFromIndex(BinNum index) { + return reinterpret_cast(&(bins_space_[index * sizeof(Bin)])); + } + size_t BinNumToSize(BinNum index) { + return static_cast(256) << index; + } + BinNum BinNumForSize(size_t bytes) { + uint64 v = std::max(bytes, 256) >> kMinAllocationBits; + int b = std::min(kNumBins - 1, Log2FloorNonZero(v)); + return b; + } + Bin* BinForSize(size_t bytes) { return BinFromIndex(BinNumForSize(bytes)); } + + char bins_space_[sizeof(Bin) * kNumBins]; + + const Options opts_; + + // The size of the current region allocation. + size_t curr_region_allocation_bytes_; + + // An indicator that expansion of a region has hit the limits + // of the available memory. + bool started_backpedal_ = false; + + // Whether the allocator will coalesce adjacent sub allocator provided + // AllocationRegions. This may be disabled if discrete sub allocator + // regions can't be treated as contiguous (e.g. if the allocation refers to + // device visible memory which is not adjacent to the other region in the + // device's address space). + const bool coalesce_regions_; + + std::unique_ptr sub_allocator_; + string name_; + SharedCounter* timing_counter_ = nullptr; + std::deque timestamped_chunks_; + + std::atomic safe_frontier_ = {0}; + + // Structures mutable after construction + mutable mutex lock_; + RegionManager region_manager_ TF_GUARDED_BY(lock_); + + std::vector chunks_ TF_GUARDED_BY(lock_); + + // Pointer to head of linked list of free Chunks + ChunkHandle free_chunks_list_ TF_GUARDED_BY(lock_); + + // Counter containing the next unique identifier to assign to a + // newly-created chunk. + int64_t next_allocation_id_ TF_GUARDED_BY(lock_); + + // Stats. 
+ AllocatorStats stats_ TF_GUARDED_BY(lock_); +#ifdef TENSORFLOW_MEM_DEBUG + int64 action_counter_ TF_GUARDED_BY(lock_) = 0; +#define MEM_DEBUG_SIZE_HISTORY_SIZE 4096 + int64 size_history_[MEM_DEBUG_SIZE_HISTORY_SIZE]; +#endif + + friend class GPUBFCAllocatorPrivateMethodsTest; + friend class GPUBFCAllocatorPrivateMethodsTest_SubAllocatorSpecific; + BFCAllocator(const BFCAllocator&) = delete; + void operator=(const BFCAllocator&) = delete; +}; + +} // namespace tsl + +#endif // TENSORFLOW_TSL_FRAMEWORK_BFC_ALLOCATOR_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/cancellation.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/cancellation.h new file mode 100644 index 0000000000000000000000000000000000000000..90792a742a528add3331ab22f72fca11f2652540 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/cancellation.h @@ -0,0 +1,217 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_CANCELLATION_H_ +#define TENSORFLOW_TSL_FRAMEWORK_CANCELLATION_H_ + +#include +#include + +#include "tsl/lib/gtl/flatmap.h" +#include "tsl/platform/hash.h" +#include "tsl/platform/mutex.h" +#include "tsl/platform/notification.h" +#include "tsl/platform/status.h" +#include "tsl/platform/stringpiece.h" +#include "tsl/platform/thread_annotations.h" +#include "tsl/platform/types.h" + +namespace tsl { + +// A token that can be used to register and deregister a +// CancelCallback with a CancellationManager. +// +// CancellationToken values must be created by a call to +// CancellationManager::get_cancellation_token. +typedef int64_t CancellationToken; + +// A callback that is invoked when a step is canceled. +// +// NOTE(mrry): See caveats about CancelCallback implementations in the +// comment for CancellationManager::RegisterCallback. +typedef std::function CancelCallback; + +// This class should never simultaneously be used as the cancellation manager +// for two separate sets of executions (i.e two separate steps, or two separate +// function executions). +class CancellationManager { + public: + // A value that won't be returned by get_cancellation_token(). + static const CancellationToken kInvalidToken; + + CancellationManager(); + + // Constructs a new CancellationManager that is a "child" of `*parent`. + // + // If `*parent` is cancelled, `*this` will be cancelled. `*parent` must + // outlive the created CancellationManager. + explicit CancellationManager(CancellationManager* parent); + + ~CancellationManager(); + + // Run all callbacks associated with this manager. + void StartCancel(); + + // Run all callbacks associated with this manager with a status. + // Currently the status is for logging purpose only. See also + // CancellationManager::RegisterCallbackWithErrorLogging. 
+ void StartCancelWithStatus(const Status& status); + + // Returns true iff StartCancel() has been called. + bool IsCancelled() { return is_cancelled_.load(std::memory_order_acquire); } + + // Returns a token that must be used in calls to RegisterCallback + // and DeregisterCallback. + CancellationToken get_cancellation_token() { + return next_cancellation_token_.fetch_add(1); + } + + // Attempts to register the given callback to be invoked when this + // manager is cancelled. Returns true if the callback was + // registered; returns false if this manager was already cancelled, + // and the callback was not registered. + // + // If this method returns false, it is the caller's responsibility + // to perform any cancellation cleanup. + // + // This method is tricky to use correctly. The following usage pattern + // is recommended: + // + // class ObjectWithCancellableOperation { + // mutex mu_; + // void CancellableOperation(CancellationManager* cm, + // std::function callback) { + // bool already_cancelled; + // CancellationToken token = cm->get_cancellation_token(); + // { + // mutex_lock(mu_); + // already_cancelled = !cm->RegisterCallback( + // [this, token]() { Cancel(token); }); + // if (!already_cancelled) { + // // Issue asynchronous operation. Associate the pending operation + // // with `token` in some object state, or provide another way for + // // the Cancel method to look up the operation for cancellation. + // // Ensure that `cm->DeregisterCallback(token)` is called without + // // holding `mu_`, before `callback` is invoked. + // // ... + // } + // } + // if (already_cancelled) { + // callback(errors::Cancelled("Operation was cancelled")); + // } + // } + // + // void Cancel(CancellationToken token) { + // mutex_lock(mu_); + // // Take action to cancel the operation with the given cancellation + // // token. 
+ // } + // + // NOTE(mrry): The caller should take care that (i) the calling code + // is robust to `callback` being invoked asynchronously (e.g. from + // another thread), (ii) `callback` is deregistered by a call to + // this->DeregisterCallback(token) when the operation completes + // successfully, and (iii) `callback` does not invoke any method + // on this cancellation manager. Furthermore, it is important that + // the eventual caller of the complementary DeregisterCallback does not + // hold any mutexes that are required by `callback`. + bool RegisterCallback(CancellationToken token, CancelCallback callback); + + // Similar to RegisterCallback, but if the cancellation manager starts a + // cancellation with an error status, it will log the error status before + // invoking the callback. `callback_name` is a human-readable name of the + // callback, which will be displayed on the log. + bool RegisterCallbackWithErrorLogging(CancellationToken token, + CancelCallback callback, + tsl::StringPiece callback_name); + + // Deregister the callback that, when registered, was associated + // with the given cancellation token. Returns true iff the callback + // was deregistered and will not be invoked; otherwise returns false + // after the callback has been invoked, blocking if necessary. + // + // NOTE(mrry): This method may block if cancellation is in progress. + // The caller of this method must not hold any mutexes that are required + // to invoke any cancellation callback that has been registered with this + // cancellation manager. + bool DeregisterCallback(CancellationToken token); + + // Deregister the callback that, when registered, was associated + // with the given cancellation token. Returns true iff the callback + // was deregistered and will not be invoked; otherwise returns false + // immediately, with no guarantee that the callback has completed. + // + // This method is guaranteed to return true if StartCancel has not been + // called. 
+ bool TryDeregisterCallback(CancellationToken token); + + // Returns true iff cancellation is in progress. + bool IsCancelling(); + + private: + struct CallbackConfiguration { + CancelCallback callback; + std::string name; + bool log_error = false; + }; + + struct State { + Notification cancelled_notification; + gtl::FlatMap callbacks; + + // If this CancellationManager has any children, this member points to the + // head of a doubly-linked list of its children. + CancellationManager* first_child = nullptr; // Not owned. + }; + + bool RegisterCallbackConfig(CancellationToken token, + CallbackConfiguration config); + + bool RegisterChild(CancellationManager* child); + void DeregisterChild(CancellationManager* child); + + bool is_cancelling_; + std::atomic_bool is_cancelled_; + std::atomic next_cancellation_token_; + + CancellationManager* const parent_ = nullptr; // Not owned. + + // If this CancellationManager is associated with a parent, this member will + // be set to `true` after this is removed from the parent's list of children. + bool is_removed_from_parent_ TF_GUARDED_BY(parent_->mu_) = false; + + // If this CancellationManager is associated with a parent, these members form + // a doubly-linked list of that parent's children. + // + // These fields are valid only when `this->is_removed_from_parent_` is false. + CancellationManager* prev_sibling_ TF_GUARDED_BY(parent_->mu_) = + nullptr; // Not owned. + CancellationManager* next_sibling_ TF_GUARDED_BY(parent_->mu_) = + nullptr; // Not owned. + + mutex mu_; + std::unique_ptr state_ TF_GUARDED_BY(mu_); +}; + +// Registers the given cancellation callback, returning a function that can be +// used to deregister the callback. If `cancellation_manager` is NULL, no +// registration occurs and `deregister_fn` will be a no-op. 
+Status RegisterCancellationCallback(CancellationManager* cancellation_manager, + std::function callback, + std::function* deregister_fn); + +} // namespace tsl + +#endif // TENSORFLOW_TSL_FRAMEWORK_CANCELLATION_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/contraction/eigen_contraction_kernel.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/contraction/eigen_contraction_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..efb55e93de7559f9e4c340da322e693d93f3b891 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/contraction/eigen_contraction_kernel.h @@ -0,0 +1,905 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_CONTRACTION_EIGEN_CONTRACTION_KERNEL_H_ +#define TENSORFLOW_TSL_FRAMEWORK_CONTRACTION_EIGEN_CONTRACTION_KERNEL_H_ + +// Depending on a build configuration this header provides custom kernel for +// Eigen tensor contractions (small matrix multiplication kernel used to +// multiple together blocks of the original tensors). +// +// 1) --define tensorflow_mkldnn_contraction_kernel=1 +// Use Mkldnn single threaded sgemm. The mkldnn kernels are generated at +// runtime and use avx/avx2/fma/avx512 based on cpu status registers +// (https://en.wikipedia.org/wiki/CPUID). 
+// +// If you use `tensor.contract(other_tensor)` in your code, you must include +// this header to get the benefit of custom contraction kernel: +// +// #if defined(TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL) +// #include +// "third_party/tensorflow/tsl/framework/contraction/eigen_contraction_kernel.h" +// #endif + +#include "unsupported/Eigen/CXX11/Tensor" // from @eigen_archive +#include "tsl/framework/fixedpoint/FixedPoint.h" + +#if defined(TENSORFLOW_USE_MKLDNN_CONTRACTION_KERNEL) +#include "dnnl.h" +#endif + +#include "tsl/platform/dynamic_annotations.h" + +namespace Eigen { +namespace internal { + +#if defined(TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL) +// Returns `true` iff we can use custom contraction kernels. This is a runtime +// check, that uses environment variables. +EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE bool UseCustomContractionKernels(); + +// Pack a 2D block of a Tensor expression into contiguous block of memory with +// col-major storage order. We do not have access to the underlying Tensor +// expression, we only have a DataMapper (TensorContractionInputMapper for +// tensor contractions, or blas_data_mapper for plain tensors), that provides a +// two-dimensional view into the Tensor expression. +// +// Default Eigen gemm_pack_rhs and gemm_pack_lhs pack blocks of tensor +// expressions into the packed format described in "Anatomy of High-Performance +// Matrix Multiplication" paper (1). Eigen::internal::gebp_kernel relies on this +// packing format for efficient micro-panel multiplication. +// +// This simple packing can be used with any '?gemm' function from BLAS +// libraries, that work with col-major matrices. +// +// (1) http://www.cs.utexas.edu/~flame/pubs/GotoTOMS_revision.pdf +// +// IMPORTANT: `gemm_pack_colmajor_block` always packs the block in column major +// order, DataMapperStorageOrder specifies the storage order of the underlying +// Tensor expression. 
+template +struct gemm_pack_colmajor_block; + +// gemm_pack_colmajor_block for ColMajor storage order. +template +struct gemm_pack_colmajor_block { + typedef typename internal::packet_traits::type Packet; + typedef typename DataMapper::LinearMapper LinearMapper; + + enum { PacketSize = internal::packet_traits::size }; + + EIGEN_DONT_INLINE + void operator()(Scalar* block, const DataMapper& data_mapper, IndexType rows, + IndexType cols) { + const IndexType unrolled_rows = rows - 4 * PacketSize; + const IndexType vectorized_rows = rows - PacketSize; + + for (IndexType col = 0; col < cols; ++col) { + LinearMapper lm = data_mapper.getLinearMapper(0, col); + + IndexType row = 0; + // Give compiler a strong possibility to unroll the loop. + for (; row <= unrolled_rows; row += 4 * PacketSize) { + for (IndexType j = 0; j < 4; ++j) { + const Packet p = lm.template loadPacket(row + j * PacketSize); + internal::pstoreu(block + j * PacketSize, p); + } + block += 4 * PacketSize; + } + // Process remaining rows with packets. + for (; row <= vectorized_rows; row += PacketSize) { + const Packet p = lm.template loadPacket(row); + internal::pstoreu(block, p); + block += PacketSize; + } + // Finalize with coefficients. + for (; row < rows; ++row) { + *block = lm(row); + ++block; + } + } + } +}; + +#endif // TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL + +// Enabled by build option: "--define tensorflow_mkldnn_contraction_kernel=1" +#if defined(TENSORFLOW_USE_MKLDNN_CONTRACTION_KERNEL) + +template +struct dnnl_gemm_kernel; + +// dnnl_gemm_kernel for floats defined as a thin layer on top of mkldnn_sgemm. 
+template +struct dnnl_gemm_kernel { + static_assert(!ConjugateLhs, "DNNL kernel doesn't support ConjugateLhs"); + static_assert(!ConjugateRhs, "DNNL kernel doesn't support ConjugateRhs"); + + static constexpr int kComputeStrideFromBlockDimensions = -1; + + using LhsScalar = float; + using RhsScalar = float; + using ResScalar = float; + + EIGEN_DONT_INLINE + void operator()(const OutputMapper& output, const LhsScalar* blockA, + const RhsScalar* blockB, const IndexType rows, + const IndexType depth, const IndexType cols, float alpha, + float beta, int ldA = kComputeStrideFromBlockDimensions, + int ldB = kComputeStrideFromBlockDimensions, + char transposeA = 'N', char transposeB = 'N') { + static const int max_index = (std::numeric_limits::max)(); + + eigen_assert(max_index >= rows); + eigen_assert(max_index >= cols); + eigen_assert(max_index >= depth); + eigen_assert(max_index >= output.stride()); + + const int m = static_cast(rows); + const int n = static_cast(cols); + const int k = static_cast(depth); + + ldA = ldA == kComputeStrideFromBlockDimensions ? m : ldA; + ldB = ldB == kComputeStrideFromBlockDimensions ? k : ldB; + const int ldC = static_cast(output.stride()); + + // DNNL takes row-major matrices. Our packed column-major matrices can be + // viewed as a transposed row-major matrix, i.e., + // C_colmajor = C_rowmajor^T = (A_rowmajor * B_rowmajor)^T + // = B_rowmajor^T * A_rowmajor^T + // = B_colmajor * A_colmajor + // So we can just swap the input matrices A and B for DNNL. + // TODO(penporn): Switch to row-major packing instead. + dnnl_status_t st = + dnnl_sgemm(transposeB, transposeA, n, m, k, alpha, blockB, ldB, blockA, + ldA, beta, const_cast(output.data()), ldC); + eigen_assert(st == 0); + +#if DYNAMIC_ANNOTATIONS_ENABLED == 1 || defined(MEMORY_SANITIZER) + for (IndexType col = 0; col < cols; ++col) { + ResScalar* row_base = &output(0, col); + EIGEN_UNUSED_VARIABLE(row_base); // Suppress unused variable error. 
+ TF_ANNOTATE_MEMORY_IS_INITIALIZED(row_base, sizeof(ResScalar) * rows); + } +#endif + + // eigen_assert is a no-op in optimized mode so we add these to avoid + // compiler's unused-variable errors. + EIGEN_UNUSED_VARIABLE(max_index); + EIGEN_UNUSED_VARIABLE(st); + } +}; + +template +struct mkldnn_gemm_s8u8s32_kernel { + static_assert(!ConjugateLhs, "DNNL kernel doesn't support ConjugateLhs"); + static_assert(!ConjugateRhs, "DNNL kernel doesn't support ConjugateRhs"); + + static constexpr int kComputeStrideFromBlockDimensions = -1; + + using LhsScalar = Eigen::QInt8; + using RhsScalar = Eigen::QUInt8; + using ResScalar = Eigen::QInt32; + + EIGEN_DONT_INLINE + void operator()(const OutputMapper& output, const LhsScalar* blockA, + const RhsScalar* blockB, const IndexType rows, + const IndexType depth, const IndexType cols, float alpha, + float beta, int ldA = kComputeStrideFromBlockDimensions, + int ldB = kComputeStrideFromBlockDimensions, + char transposeA = 'N', char transposeB = 'N') { + static const int max_index = (std::numeric_limits::max)(); + + eigen_assert(max_index >= rows); + eigen_assert(max_index >= cols); + eigen_assert(max_index >= depth); + eigen_assert(max_index >= output.stride()); + + const int m = static_cast(rows); + const int n = static_cast(cols); + const int k = static_cast(depth); + + ldA = ldA == kComputeStrideFromBlockDimensions ? m : ldA; + ldB = ldB == kComputeStrideFromBlockDimensions ? k : ldB; + const int ldC = static_cast(output.stride()); + + // Currently we support only symmetric quantization with zero point at 0. + const int8_t ao = 0; + const int8_t bo = 0; + + // Don't add any offset to the result C. + const char offsetc = 'F'; + const int32_t co = 0; + + const auto* A = reinterpret_cast(blockA); + const auto* B = reinterpret_cast(blockB); + auto* C = reinterpret_cast(const_cast(output.data())); + + // DNNL takes row-major matrices. 
Our packed column-major matrices can be + // viewed as a transposed row-major matrix, i.e., C_colmajor = C_rowmajor^T. + // C_colmajor = C_rowmajor^T = (A_rowmajor * B_rowmajor)^T + // = B_rowmajor^T * A_rowmajor^T + // = B_colmajor * A_colmajor + // So we can just swap the input matrices A and B for DNNL. + // TODO(penporn): Switch to row-major packing instead. + dnnl_status_t st = dnnl_gemm_u8s8s32(transposeB, transposeA, offsetc, // + n, m, k, // + alpha, // + B, ldB, bo, // + A, ldA, ao, // + beta, // + C, ldC, &co); + eigen_assert(st == 0); + +#if DYNAMIC_ANNOTATIONS_ENABLED == 1 || defined(MEMORY_SANITIZER) + for (IndexType col = 0; col < cols; ++col) { + ResScalar* row_base = &output(0, col); + EIGEN_UNUSED_VARIABLE(row_base); // Suppress unused variable error. + TF_ANNOTATE_MEMORY_IS_INITIALIZED(row_base, sizeof(ResScalar) * rows); + } +#endif + + // eigen_assert is a no-op in optimized mode so we add these to avoid + // compiler's unused-variable errors. + EIGEN_UNUSED_VARIABLE(max_index); + EIGEN_UNUSED_VARIABLE(st); + } +}; + +// For mkldnn_sgemm having the right dimensions (especially for small matrices) +// is more important than fitting all the working set in L1/L2 caches. +// TODO(ezhulenev): Do better heuristics. +template +class TensorContractionBlocking { + // For now mkldnn has only mkldnn_sgemm (gemm for floats). + using Scalar = float; + + // Adjust the block sizes to work well with mkldnn kernels. + + // Multiply default choice of block size along M and N dimensions. + // TODO(ezhulenev): Explore if this can work in general (kScaleM=2.0 worked + // well in some of models). + static constexpr float kScaleM = 1.5; + static constexpr float kScaleN = 1.0; + + // Mkldnn Avx/Avx2/Avx512 unroll factors are: 8/16/48. + static constexpr StorageIndex kUnrollM = 48; + + // Mkldnn Avx/Avx2/Avx512 unroll factors are: 6/6/8. 
+ static constexpr StorageIndex kUnrollN = 24; + + public: + TensorContractionBlocking(StorageIndex k, StorageIndex m, StorageIndex n, + StorageIndex num_threads = 1) + : kc_(k), mc_(m), nc_(n) { + // 1. Compute block sizes using default Eigen heuristics. + if (sharding_type == ShardByCol) { + computeProductBlockingSizes(kc_, mc_, nc_, + num_threads); + } else { + computeProductBlockingSizes(kc_, nc_, mc_, + num_threads); + } + + // If dimensions do not pass basic sanity checks return immediately. + if (kc_ <= 0 || mc_ <= 0 || nc_ <= 0) return; + + // If we are using default Eigen gebp kernel there is no need to adjust the + // block sizes for DNNL. + if (!UseCustomContractionKernels()) return; + + // 2. And refine them to work well with mkldnn sgemm. + mc_ = (std::min)( + m, Eigen::divup(static_cast(mc_ * kScaleM), kUnrollM) * + kUnrollM); + nc_ = (std::min)( + n, Eigen::divup(static_cast(nc_ * kScaleN), kUnrollN) * + kUnrollN); + + // We split Kth dimensions in roughly equal slices. + StorageIndex target_k_slices = + (std::max)(StorageIndex(1), Eigen::divup(k, kc_)); + StorageIndex packet_size = internal::packet_traits::size; + if (packet_size < 8) packet_size = 8; + StorageIndex target_bk = + Eigen::divup(k / target_k_slices, packet_size) * packet_size; + kc_ = (std::min)(k, target_bk); + } + + EIGEN_ALWAYS_INLINE StorageIndex kc() const { return kc_; } + EIGEN_ALWAYS_INLINE StorageIndex mc() const { return mc_; } + EIGEN_ALWAYS_INLINE StorageIndex nc() const { return nc_; } + + private: + StorageIndex kc_; + StorageIndex mc_; + StorageIndex nc_; +}; + +template +class TensorContractionBlocking { + // TODO(ezhulenev): Define proper gebp_traits in Eigen for quantized types? + + // Default Eigen block heuristics for `QInt8xQUInt8 -> QInt32` are wrong. + // Mostly because gebp_traits are not correctly defined. But we know that we + // are going to use s8u8s32_gemm from DNNL, so we use float heuristics, and + // adjust them to work well with DNNL. 
+ using LhsScalar = Eigen::QInt8; + using RhsScalar = Eigen::QUInt8; + using ResScalar = Eigen::QInt32; + + // Multiply default choice of block size along M, N and K dimensions. + static constexpr float kScaleM = 1.5; + static constexpr float kScaleN = 1.5; + static constexpr float kScaleK = 1.5; + + public: + TensorContractionBlocking(StorageIndex k, StorageIndex m, StorageIndex n, + StorageIndex num_threads = 1) + : kc_(k), mc_(m), nc_(n) { + // Each dimension is a multiple of 32 (fits into _m256i). + mc_ = (std::min)(m, static_cast(192)); + nc_ = (std::min)(n, static_cast(288)); + kc_ = (std::min)(k, static_cast(320)); + } + + EIGEN_ALWAYS_INLINE StorageIndex kc() const { return kc_; } + EIGEN_ALWAYS_INLINE StorageIndex mc() const { return mc_; } + EIGEN_ALWAYS_INLINE StorageIndex nc() const { return nc_; } + + private: + StorageIndex kc_; + StorageIndex mc_; + StorageIndex nc_; +}; + +// If the Lhs or Rhs Tensor expressions are already evaluated and have access to +// raw data, we can skip packing step and setup pointers and a stride to the +// underlying memory buffer and pass them directly to Gemm. +template +struct ColMajorBlock { + bool is_direct_access; + + // Valid iff `is_direct_access == false` + Scalar* packed_data; + + // Valid iff `is_direct_access == true` + Scalar* raw_data; + StorageIndex stride; + char transpose; +}; + +template +struct DirectColMajorAccess { + enum { value = false }; + + template + static bool block(const typename DataMapper::SubMapper& data_mapper, + const StorageIndex rows, const StorageIndex cols, + const StorageIndex num_kernels, + ColMajorBlock* block) { + eigen_assert(false && "Not implemented"); + return false; + } +}; + +// If we have an access to raw memory of the contraction input, we can safely +// skip packing if: +// (1) Packing is a no-op. +// (2) Packed block will be used just once. 
+// +// If a packed block is used many times, it's more efficient to pack it into +// contiguous block of memory to reduce pressure on TLB. +// +// TODO(ezhulenev): Add support for more tensor expressions that matters. +#define REGISTER_DIRECT_COL_MAJOR_ACCESS(TENSOR_EXPR) \ + template \ + struct DirectColMajorAccess, \ + nocontract_t, contract_t, packet_size, /*inner_dim_contiguous=*/true, \ + /*inner_dim_reordered=*/false, Alignment>> { \ + enum { value = true }; \ + \ + using DataMapper = TensorContractionInputMapper< \ + Scalar, StorageIndex, Side, TensorEvaluator, \ + nocontract_t, contract_t, packet_size, /*inner_dim_contiguous=*/true, \ + /*inner_dim_reordered=*/false, Alignment>; \ + \ + static bool block(const typename DataMapper::SubMapper& data_mapper, \ + const StorageIndex rows, const StorageIndex cols, \ + const StorageIndex num_kernels, \ + ColMajorBlock* block) { \ + static_assert(DataMapper::DirectOffsets == true, \ + "DataMapper must support direct offsets"); \ + \ + const StorageIndex vert_offset = data_mapper.vert_offset(); \ + const StorageIndex horiz_offset = data_mapper.horiz_offset(); \ + const StorageIndex stride = \ + Side == Lhs ? data_mapper.base_mapper().stride() \ + : data_mapper.base_mapper().nocontract_strides()[0]; \ + const Scalar* data = data_mapper.base_mapper().tensor().data(); \ + data = Side == Lhs ? 
data : data + vert_offset + horiz_offset * stride; \ + \ + const bool is_no_op_packing = stride == rows; \ + const StorageIndex addressable_mem = (stride * cols * sizeof(Scalar)); \ + const bool use_direct_access = \ + is_no_op_packing || num_kernels == 1 /* used once */ || \ + ((num_kernels == 2) && \ + (addressable_mem < (256 << 10) /* 256 kb */)); \ + \ + if (use_direct_access) { \ + block->is_direct_access = true; \ + block->raw_data = const_cast(data); \ + block->stride = stride; \ + block->transpose = 'N'; \ + return true; \ + } \ + return false; \ + } \ + } + +#define SIMPLE_TENSOR const Tensor + +#define TENSOR_MAP_ROWMAJOR \ + const TensorMap, \ + Eigen::Aligned> + +#define TENSOR_MAP_COLMAJOR \ + const TensorMap, \ + Eigen::Aligned> + +#define TENSOR_MAP_CONST_ROWMAJOR \ + const TensorMap, \ + Eigen::Aligned> + +#define TENSOR_MAP_CONST_COLMAJOR \ + const TensorMap, \ + Eigen::Aligned> + +// This is reshaped convolution filter from `eigen_spatial_convolutions.h`. +#define TENSOR_RESHAPE \ + const TensorReshapingOp< \ + const Eigen::DSizes, \ + const TensorMap, \ + Eigen::Aligned>> + +REGISTER_DIRECT_COL_MAJOR_ACCESS(SIMPLE_TENSOR); +REGISTER_DIRECT_COL_MAJOR_ACCESS(TENSOR_MAP_ROWMAJOR); +REGISTER_DIRECT_COL_MAJOR_ACCESS(TENSOR_MAP_COLMAJOR); +REGISTER_DIRECT_COL_MAJOR_ACCESS(TENSOR_MAP_CONST_ROWMAJOR); +REGISTER_DIRECT_COL_MAJOR_ACCESS(TENSOR_MAP_CONST_COLMAJOR); +REGISTER_DIRECT_COL_MAJOR_ACCESS(TENSOR_RESHAPE); + +#undef SIMPLE_TENSOR +#undef TENSOR_MAP_ROWMAJOR +#undef TENSOR_MAP_COLMAJOR +#undef TENSOR_MAP_CONST_ROWMAJOR +#undef TENSOR_MAP_CONST_COLMAJOR +#undef TENSOR_RESHAPE +#undef REGISTER_DIRECT_COL_MAJOR_ACCESS + +template +struct GemmKernelProvider { + enum { Defined = 0 }; + using GemmKernel = void; +}; + +template +struct GemmKernelProvider { + enum { Defined = 1 }; + using GemmKernel = dnnl_gemm_kernel; +}; + +template +struct GemmKernelProvider { + enum { Defined = 1 }; + using GemmKernel = mkldnn_gemm_s8u8s32_kernel; +}; + +// NOTE: 
'std::enable_if' doesn't work for template specializations. See +// "default template argument in a class template partial specialization". + +// Tensor contraction kernel that can fallback on Eigen gebp_kernel at runtime. +#define REGISTER_TENSOR_CONTRACTION_KERNEL_WITH_FALLBACK( \ + RES_SCALAR, LHS_SCALAR, RHS_SCALAR) \ + \ + template \ + struct TensorContractionKernel { \ + TensorContractionKernel(StorageIndex m, StorageIndex k, StorageIndex n, \ + StorageIndex bm, StorageIndex bk, StorageIndex bn) \ + : m(m), k(k), n(n), bm(bm), bk(bk), bn(bn) {} \ + \ + enum { HasBeta = true }; \ + \ + using ResScalar = RES_SCALAR; \ + using LhsScalar = LHS_SCALAR; \ + using RhsScalar = RHS_SCALAR; \ + \ + using Traits = typename internal::gebp_traits; \ + \ + using LhsBlock = ColMajorBlock; \ + using RhsBlock = ColMajorBlock; \ + \ + using DirectLhsAccess = DirectColMajorAccess; \ + using DirectRhsAccess = DirectColMajorAccess; \ + \ + /* Packed Lhs/Rhs block memory allocator.*/ \ + typedef TensorContractionBlockMemAllocator \ + BlockMemAllocator; \ + typedef typename BlockMemAllocator::BlockMemHandle BlockMemHandle; \ + \ + using LhsPacker = \ + gemm_pack_colmajor_block; \ + using RhsPacker = \ + gemm_pack_colmajor_block; \ + \ + using GemmKernelProviderType = \ + GemmKernelProvider; \ + static_assert( \ + GemmKernelProviderType::Defined, \ + "Custom GEMM kernel is not registered for given scalar types"); \ + using GemmKernel = typename GemmKernelProviderType::GemmKernel; \ + \ + /* Fallback on default Eigen pack and GEBP kernel if custom contraction */ \ + /* kernels disabled at runtime. 
*/ \ + using EigenLhsPacker = \ + gemm_pack_lhs; \ + using EigenRhsPacker = \ + gemm_pack_rhs; \ + using GebpKernel = \ + gebp_kernel; \ + \ + template \ + EIGEN_DEVICE_FUNC BlockMemHandle allocate(Device& d, LhsBlock* lhs_block, \ + RhsBlock* rhs_block) { \ + return BlockMemAllocator::allocate( \ + d, bm, bk, bn, &lhs_block->packed_data, &rhs_block->packed_data); \ + } \ + \ + template \ + EIGEN_DEVICE_FUNC BlockMemHandle \ + allocateSlices(Device& d, const int num_lhs, const int num_rhs, \ + const int num_slices, std::vector* lhs_blocks, \ + std::vector* rhs_blocks) { \ + eigen_assert(num_slices > 0); \ + std::vector> lhs_mem(num_slices); \ + std::vector> rhs_mem(num_slices); \ + \ + BlockMemHandle block_mem = BlockMemAllocator::allocateSlices( \ + d, bm, bk, bn, num_lhs, num_rhs, num_slices, lhs_mem.data(), \ + rhs_mem.data()); \ + \ + for (Index x = 0; x < num_slices; x++) { \ + if (num_lhs > 0) lhs_blocks[x].resize(num_lhs); \ + for (Index m = 0; m < num_lhs; m++) { \ + lhs_blocks[x][m].packed_data = lhs_mem[x][m]; \ + } \ + if (num_rhs > 0) rhs_blocks[x].resize(num_rhs); \ + for (Index n = 0; n < num_rhs; n++) { \ + rhs_blocks[x][n].packed_data = rhs_mem[x][n]; \ + } \ + } \ + \ + return block_mem; \ + } \ + \ + template \ + EIGEN_DEVICE_FUNC static void deallocate(Device& d, \ + BlockMemHandle handle) { \ + BlockMemAllocator::deallocate(d, handle); \ + } \ + \ + EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packLhs( \ + LhsBlock* lhsBlock, const typename LhsMapper::SubMapper& data_mapper, \ + const StorageIndex depth, const StorageIndex rows) { \ + if (UseCustomContractionKernels()) { \ + const bool is_direct_access = \ + DirectLhsAccess::value && \ + DirectLhsAccess::block(data_mapper, rows, depth, \ + bn > 0 ? 
divup(n, bn) : 0, lhsBlock); \ + \ + if (!is_direct_access) { \ + lhsBlock->is_direct_access = false; \ + LhsPacker()(lhsBlock->packed_data, data_mapper, rows, depth); \ + } \ + } else { \ + lhsBlock->is_direct_access = false; \ + EigenLhsPacker()(lhsBlock->packed_data, data_mapper, depth, rows, \ + /*stride*/ 0, /*offset*/ 0); \ + } \ + } \ + \ + EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packRhs( \ + RhsBlock* rhsBlock, const typename RhsMapper::SubMapper& data_mapper, \ + const StorageIndex depth, const StorageIndex cols) { \ + if (UseCustomContractionKernels()) { \ + const bool is_direct_access = \ + DirectRhsAccess::value && \ + DirectRhsAccess::block(data_mapper, depth, cols, \ + bm > 0 ? divup(m, bm) : 0, rhsBlock); \ + \ + if (!is_direct_access) { \ + rhsBlock->is_direct_access = false; \ + RhsPacker()(rhsBlock->packed_data, data_mapper, depth, cols); \ + } \ + } else { \ + rhsBlock->is_direct_access = false; \ + EigenRhsPacker()(rhsBlock->packed_data, data_mapper, depth, cols); \ + } \ + } \ + \ + EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void invoke( \ + const OutputMapper& output_mapper, const LhsBlock& lhsBlock, \ + const RhsBlock& rhsBlock, const StorageIndex rows, \ + const StorageIndex depth, const StorageIndex cols, const float alpha, \ + const float beta) { \ + if (UseCustomContractionKernels()) { \ + if ((DirectLhsAccess::value && lhsBlock.is_direct_access) && \ + (DirectRhsAccess::value && rhsBlock.is_direct_access)) { \ + GemmKernel()(output_mapper, lhsBlock.raw_data, rhsBlock.raw_data, \ + rows, depth, cols, alpha, beta, \ + /*ldA=*/lhsBlock.stride, /*ldB=*/rhsBlock.stride, \ + /*transposeA=*/lhsBlock.transpose, \ + /*transposeB=*/rhsBlock.transpose); \ + \ + } else if (DirectLhsAccess::value && lhsBlock.is_direct_access) { \ + GemmKernel()(output_mapper, lhsBlock.raw_data, rhsBlock.packed_data, \ + rows, depth, cols, alpha, beta, \ + /*ldA=*/lhsBlock.stride, \ + /*ldB=*/GemmKernel::kComputeStrideFromBlockDimensions, \ + 
/*transposeA=*/lhsBlock.transpose, /*transposeB=*/'N'); \ + \ + } else if (DirectRhsAccess::value && rhsBlock.is_direct_access) { \ + GemmKernel()(output_mapper, lhsBlock.packed_data, rhsBlock.raw_data, \ + rows, depth, cols, alpha, beta, \ + /*ldA=*/GemmKernel::kComputeStrideFromBlockDimensions, \ + /*ldB=*/rhsBlock.stride, /*transposeA=*/'N', \ + /*transposeB=*/rhsBlock.transpose); \ + \ + } else { \ + GemmKernel()(output_mapper, lhsBlock.packed_data, \ + rhsBlock.packed_data, rows, depth, cols, alpha, beta); \ + } \ + } else { \ + /* Gebp kernel does not support beta, so we have to clear memory in */ \ + /* the output mapper manually. */ \ + /* WARNING(ezhulenev): This is optimized into a memset in a loop, */ \ + /* could be much slower for small matrices. Currently this code */ \ + /* path used only for testing, and performance does not matter. */ \ + if (beta == 0.0) { \ + for (StorageIndex col = 0; col < cols; ++col) { \ + ResScalar* output_base = &output_mapper(0, col); \ + typedef Array OutputRow; \ + typedef Map> OutputRowMap; \ + OutputRowMap(output_base, rows).setZero(); \ + } \ + } \ + \ + GebpKernel()( \ + output_mapper, lhsBlock.packed_data, rhsBlock.packed_data, rows, \ + depth, cols, alpha, \ + /*strideA*/ GemmKernel::kComputeStrideFromBlockDimensions, \ + /*strideB*/ GemmKernel::kComputeStrideFromBlockDimensions, \ + /*offsetA*/ 0, /*offsetB*/ 0); \ + } \ + } \ + \ + private: \ + /* These are dimensions of the original Tensors, and selected block */ \ + /* sizes. The actual block sizes passed to all function above might be */ \ + /* smaller because of the partial blocks at the end. */ \ + const StorageIndex m; \ + const StorageIndex k; \ + const StorageIndex n; \ + const StorageIndex bm; \ + const StorageIndex bk; \ + const StorageIndex bn; \ + } + +// Tensor contraction kernel that do not fallback on Eigen. Currently not all +// data types are supported by Eigen data packing and default gebp_kernel. 
+#define REGISTER_TENSOR_CONTRACTION_KERNEL_NO_FALLBACK(RES_SCALAR, LHS_SCALAR, \ + RHS_SCALAR) \ + \ + template \ + struct TensorContractionKernel { \ + TensorContractionKernel(StorageIndex m, StorageIndex k, StorageIndex n, \ + StorageIndex bm, StorageIndex bk, StorageIndex bn) \ + : m(m), k(k), n(n), bm(bm), bk(bk), bn(bn) {} \ + \ + enum { HasBeta = true }; \ + \ + using ResScalar = RES_SCALAR; \ + using LhsScalar = LHS_SCALAR; \ + using RhsScalar = RHS_SCALAR; \ + \ + using Traits = typename internal::gebp_traits; \ + \ + using LhsBlock = ColMajorBlock; \ + using RhsBlock = ColMajorBlock; \ + \ + using DirectLhsAccess = DirectColMajorAccess; \ + using DirectRhsAccess = DirectColMajorAccess; \ + \ + /* Packed Lhs/Rhs block memory allocator.*/ \ + typedef TensorContractionBlockMemAllocator \ + BlockMemAllocator; \ + typedef typename BlockMemAllocator::BlockMemHandle BlockMemHandle; \ + \ + using LhsPacker = \ + gemm_pack_colmajor_block; \ + using RhsPacker = \ + gemm_pack_colmajor_block; \ + \ + using GemmKernelProviderType = \ + GemmKernelProvider; \ + static_assert( \ + GemmKernelProviderType::Defined, \ + "Custom GEMM kernel is not registered for given scalar types"); \ + using GemmKernel = typename GemmKernelProviderType::GemmKernel; \ + \ + template \ + EIGEN_DEVICE_FUNC BlockMemHandle allocate(Device& d, LhsBlock* lhs_block, \ + RhsBlock* rhs_block) { \ + return BlockMemAllocator::allocate( \ + d, bm, bk, bn, &lhs_block->packed_data, &rhs_block->packed_data); \ + } \ + \ + template \ + EIGEN_DEVICE_FUNC BlockMemHandle \ + allocateSlices(Device& d, const int num_lhs, const int num_rhs, \ + const int num_slices, std::vector* lhs_blocks, \ + std::vector* rhs_blocks) { \ + eigen_assert(num_slices > 0); \ + std::vector> lhs_mem(num_slices); \ + std::vector> rhs_mem(num_slices); \ + \ + BlockMemHandle block_mem = BlockMemAllocator::allocateSlices( \ + d, bm, bk, bn, num_lhs, num_rhs, num_slices, lhs_mem.data(), \ + rhs_mem.data()); \ + \ + for (Index x = 0; x < 
num_slices; x++) { \ + if (num_lhs > 0) lhs_blocks[x].resize(num_lhs); \ + for (Index m = 0; m < num_lhs; m++) { \ + lhs_blocks[x][m].packed_data = lhs_mem[x][m]; \ + } \ + if (num_rhs > 0) rhs_blocks[x].resize(num_rhs); \ + for (Index n = 0; n < num_rhs; n++) { \ + rhs_blocks[x][n].packed_data = rhs_mem[x][n]; \ + } \ + } \ + \ + return block_mem; \ + } \ + \ + template \ + EIGEN_DEVICE_FUNC static void deallocate(Device& d, \ + BlockMemHandle handle) { \ + BlockMemAllocator::deallocate(d, handle); \ + } \ + \ + EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packLhs( \ + LhsBlock* lhsBlock, const typename LhsMapper::SubMapper& data_mapper, \ + const StorageIndex depth, const StorageIndex rows) { \ + const bool is_direct_access = \ + DirectLhsAccess::value && \ + DirectLhsAccess::block(data_mapper, rows, depth, \ + bn > 0 ? divup(n, bn) : 0, lhsBlock); \ + \ + if (!is_direct_access) { \ + lhsBlock->is_direct_access = false; \ + LhsPacker()(lhsBlock->packed_data, data_mapper, rows, depth); \ + } \ + } \ + \ + EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packRhs( \ + RhsBlock* rhsBlock, const typename RhsMapper::SubMapper& data_mapper, \ + const StorageIndex depth, const StorageIndex cols) { \ + const bool is_direct_access = \ + DirectRhsAccess::value && \ + DirectRhsAccess::block(data_mapper, depth, cols, \ + bm > 0 ? 
divup(m, bm) : 0, rhsBlock); \ + \ + if (!is_direct_access) { \ + rhsBlock->is_direct_access = false; \ + RhsPacker()(rhsBlock->packed_data, data_mapper, depth, cols); \ + } \ + } \ + \ + EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void invoke( \ + const OutputMapper& output_mapper, const LhsBlock& lhsBlock, \ + const RhsBlock& rhsBlock, const StorageIndex rows, \ + const StorageIndex depth, const StorageIndex cols, const float alpha, \ + const float beta) { \ + if ((DirectLhsAccess::value && lhsBlock.is_direct_access) && \ + (DirectRhsAccess::value && rhsBlock.is_direct_access)) { \ + GemmKernel()(output_mapper, lhsBlock.raw_data, rhsBlock.raw_data, \ + rows, depth, cols, alpha, beta, /*ldA=*/lhsBlock.stride, \ + /*ldB=*/rhsBlock.stride, \ + /*transposeA=*/lhsBlock.transpose, \ + /*transposeB=*/rhsBlock.transpose); \ + \ + } else if (DirectLhsAccess::value && lhsBlock.is_direct_access) { \ + GemmKernel()(output_mapper, lhsBlock.raw_data, rhsBlock.packed_data, \ + rows, depth, cols, alpha, beta, /*ldA=*/lhsBlock.stride, \ + /*ldB=*/GemmKernel::kComputeStrideFromBlockDimensions, \ + /*transposeA=*/lhsBlock.transpose, /*transposeB=*/'N'); \ + \ + } else if (DirectRhsAccess::value && rhsBlock.is_direct_access) { \ + GemmKernel()(output_mapper, lhsBlock.packed_data, rhsBlock.raw_data, \ + rows, depth, cols, alpha, beta, \ + /*ldA=*/GemmKernel::kComputeStrideFromBlockDimensions, \ + /*ldB=*/rhsBlock.stride, /*transposeA=*/'N', \ + /*transposeB=*/rhsBlock.transpose); \ + \ + } else { \ + GemmKernel()(output_mapper, lhsBlock.packed_data, \ + rhsBlock.packed_data, rows, depth, cols, alpha, beta); \ + } \ + } \ + \ + private: \ + /* These are dimensions of the original Tensors, and selected block */ \ + /* sizes. The actual block sizes passed to all function above might be */ \ + /* smaller because of the partial blocks at the end. 
*/ \ + const StorageIndex m; \ + const StorageIndex k; \ + const StorageIndex n; \ + const StorageIndex bm; \ + const StorageIndex bk; \ + const StorageIndex bn; \ + } + +REGISTER_TENSOR_CONTRACTION_KERNEL_WITH_FALLBACK(float, float, float); +REGISTER_TENSOR_CONTRACTION_KERNEL_NO_FALLBACK(Eigen::QInt32, Eigen::QInt8, + Eigen::QUInt8); + +#undef REGISTER_TENSOR_CONTRACTION_KERNEL + +#endif // defined(TENSORFLOW_USE_MKLDNN_CONTRACTION_KERNEL) + +} // namespace internal +} // namespace Eigen + +#endif // TENSORFLOW_TSL_FRAMEWORK_CONTRACTION_EIGEN_CONTRACTION_KERNEL_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/convolution/eigen_convolution_helpers.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/convolution/eigen_convolution_helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..5b94994d4084bb1610537c1019ce41d59bd02718 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/convolution/eigen_convolution_helpers.h @@ -0,0 +1,87 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_CONVOLUTION_EIGEN_CONVOLUTION_HELPERS_H_ +#define TENSORFLOW_TSL_FRAMEWORK_CONVOLUTION_EIGEN_CONVOLUTION_HELPERS_H_ + +#include "unsupported/Eigen/CXX11/Tensor" // from @eigen_archive + +namespace Eigen { +namespace internal { + +// TensorEvaluatorHasPartialPacket +// provides `value` that is true if TensorEvaluatorType has `PacketType +// partialPacket(IndexType, unpacket_traits::mask_t) +// const` and if the PacketType supports masked load. +// +// Partial packets are used to: +// +// 1) Split the packet over two columns in eigen based spatial convolution and +// use partial loads for each individual part before combining them to get the +// required packet. This class is used to pick the correct implementation of +// loadPacketStandard function. +// +// 2) Split the packet over two rows (within the same column) in eigen based +// cuboid convolution and use partial loads for each individual part before +// combining them to get the required packet. This class is used to pick the +// correct implementation of loadPacketStandard function. This usage is similar +// to the usage in eigen based spatial convolution described above. +// +// 3) Finalize packing of columns in gemm_pack_colmajor after processing +// vectorized part with full packets (see eigen_spatial_convolutions.h). +template +class TensorEvaluatorHasPartialPacket { + public: + template + static auto functionExistsSfinae( + typename std::enable_if< + unpacket_traits::masked_load_available && + std::is_same() + .template partialPacket( + std::declval(), + std::declval::mask_t>()))>::value>:: + type*) -> std::true_type; + + template + static auto functionExistsSfinae(...) 
-> std::false_type; + + typedef decltype(functionExistsSfinae(nullptr)) status; + + static constexpr bool value = status::value; +}; + +// Compute a mask for loading/storing coefficients in/from a packet in a +// [from, to) range. If the mask bit is 1, element will be loaded/stored. +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + typename std::enable_if::masked_load_available, + typename unpacket_traits::mask_t>::type + mask(int from, int to) { + const Index packet_size = internal::unpacket_traits::size; + eigen_assert(0 <= from && to <= (packet_size + 1) && from < to); + + using Mask = typename internal::unpacket_traits::mask_t; + const Mask mask_max = std::numeric_limits::max(); + + return (mask_max >> (packet_size - to)) ^ (mask_max >> (packet_size - from)); +} + +} // namespace internal +} // namespace Eigen + +#endif // TENSORFLOW_TSL_FRAMEWORK_CONVOLUTION_EIGEN_CONVOLUTION_HELPERS_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/convolution/eigen_spatial_convolutions-inl.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/convolution/eigen_spatial_convolutions-inl.h new file mode 100644 index 0000000000000000000000000000000000000000..fe689c9f40c95b18a822fc5ea62a05bf46c0a007 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/convolution/eigen_spatial_convolutions-inl.h @@ -0,0 +1,1772 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_CONVOLUTION_EIGEN_SPATIAL_CONVOLUTIONS_INL_H_ +#define TENSORFLOW_TSL_FRAMEWORK_CONVOLUTION_EIGEN_SPATIAL_CONVOLUTIONS_INL_H_ + +#include "tsl/framework/convolution/eigen_convolution_helpers.h" + +// Note this header is used in both TF and TFLite. +namespace Eigen { + +namespace internal { + +#if !EIGEN_ALTIVEC_USE_CUSTOM_PACK +// WARNING: Most of the code here implicitly assumes that the matrix is in +// ColMajor layout. This is guaranteed by the tensor contraction (see +// TensorContraction.h). +// +// Inside Eigen a tensor contraction is represented by a matrix multiplication. +// We don't want to actually extract image patches and reshape the result into +// a matrix (this involves allocating huge extra memory), so the patch +// extraction and reshape operations are implicit. +// +// TensorContractionInputMapper takes a matrix index and returns the coefficient +// (or the packet) of the "virtual tensor", that would be at that index if we +// were to actually reshape the result of patch extraction. +// +// TensorContractionSubMapper provides a similar view into the "virtual matrix" +// at the given vertical and horizontal offsets. +// +// "Virtual matrix" dimensions: +// *0: kernelChannels * kernelRows * kernelCols; +// 1: out_height * out_width; * OTHERS (e.g batches, etc...) +// +// *) extracted patches are continuous in memory (innermost dimension assuming +// col major layout) +// +// With this dimensions: +// row - offset within a single patch (in code: patchId) +// col - index of the extracted patch (in code: patchIndex) +// patchIndex ∈ [0..num_patches * OTHERS] (batch and other dimensions) +// +// TODO(ezhulenev): Consolidate this part of the code with the image patch +// extraction code since they are both very similar. 
+ +template +class TensorContractionInputMapper< + Scalar_, Index, Side, + TensorEvaluator< + const TensorReshapingOp >, + Device>, + nocontract_t, contract_t, packet_size, inner_dim_contiguous, + inner_dim_reordered, Alignment> { + public: + typedef Scalar_ Scalar; + + typedef TensorContractionInputMapper< + Scalar, Index, Side, + TensorEvaluator< + const TensorReshapingOp< + NewDimension, const TensorImagePatchOp >, + Device>, + nocontract_t, contract_t, packet_size, inner_dim_contiguous, + inner_dim_reordered, Alignment> + Self; + + typedef TensorContractionSubMapper< + Scalar, Index, Side, + TensorEvaluator< + const TensorReshapingOp< + NewDimension, const TensorImagePatchOp >, + Device>, + nocontract_t, contract_t, packet_size, inner_dim_contiguous, + inner_dim_reordered, Alignment> + SubMapper; + + typedef SubMapper VectorMapper; + typedef SubMapper LinearMapper; + typedef typename packet_traits::type Packet; + + typedef TensorEvaluator TensorEvaluatorT; + + EIGEN_DEVICE_FUNC + TensorContractionInputMapper( + const TensorEvaluator< + const TensorReshapingOp< + NewDimension, const TensorImagePatchOp >, + Device>& tensor, + const nocontract_t&, const nocontract_t&, const contract_t&, + const contract_t&) + : m_impl(tensor.impl().impl()) { + Index patch_rows; + Index patch_depth; + if (internal::traits::Layout == ColMajor) { + patch_depth = tensor.impl().dimensions()[0]; + patch_rows = tensor.impl().dimensions()[1]; + m_patch_cols = tensor.impl().dimensions()[2]; + m_num_patches = tensor.impl().dimensions()[3]; + } else { + const size_t NumDims = tensor.impl().dimensions().size(); + patch_depth = tensor.impl().dimensions()[NumDims - 1]; + patch_rows = tensor.impl().dimensions()[NumDims - 2]; + m_patch_cols = tensor.impl().dimensions()[NumDims - 3]; + m_num_patches = tensor.impl().dimensions()[NumDims - 4]; + } + + // Strides for navigating through the single patch. 
+ m_patch_row_stride = patch_depth; + m_patch_col_stride = patch_rows * m_patch_row_stride; + + m_patch_row_inflate_strides = tensor.impl().rowInflateStride(); + m_patch_col_inflate_strides = tensor.impl().colInflateStride(); + + m_colStride = patch_rows; + + m_outputRows = tensor.impl().outputRows(); + m_outputCols = tensor.impl().outputCols(); + m_row_strides = tensor.impl().userRowStride(); + m_col_strides = tensor.impl().userColStride(); + + m_in_row_strides = tensor.impl().userInRowStride(); + m_in_col_strides = tensor.impl().userInColStride(); + + if (internal::traits::Layout == ColMajor) { + m_inputRows = tensor.impl().impl().dimensions()[1]; + m_inputCols = tensor.impl().impl().dimensions()[2]; + } else { + const int NumDims = tensor.impl().impl().dimensions().size(); + m_inputRows = tensor.impl().impl().dimensions()[NumDims - 2]; + m_inputCols = tensor.impl().impl().dimensions()[NumDims - 3]; + } + + m_rowInputStride = patch_depth; + m_colInputStride = patch_depth * m_inputRows; + m_patchInputStride = patch_depth * m_inputRows * m_inputCols; + + m_rowPaddingTop = tensor.impl().rowPaddingTop(); + m_colPaddingLeft = tensor.impl().colPaddingLeft(); + + m_fastPatchRowStride = + internal::TensorIntDivisor(m_patch_row_stride); + m_fastPatchColStride = + internal::TensorIntDivisor(m_patch_col_stride); + m_fastInputRowStride = + internal::TensorIntDivisor(m_patch_row_inflate_strides); + m_fastInputColStride = + internal::TensorIntDivisor(m_patch_col_inflate_strides); + m_fastNumPatches = internal::TensorIntDivisor(m_num_patches); + m_fastColStride = internal::TensorIntDivisor(m_colStride); + m_fastOutputRows = internal::TensorIntDivisor(m_outputRows); + m_fastDimZero = internal::TensorIntDivisor(patch_depth); + } + + EIGEN_DEVICE_FUNC + TensorContractionInputMapper(const TensorContractionInputMapper& base_mapper) + : m_impl(base_mapper.m_impl) { + m_patch_cols = base_mapper.m_patch_cols; + m_num_patches = base_mapper.m_num_patches; + + m_patch_row_stride = 
base_mapper.m_patch_row_stride; + m_patch_col_stride = base_mapper.m_patch_col_stride; + + m_patch_row_inflate_strides = base_mapper.m_patch_row_inflate_strides; + m_patch_col_inflate_strides = base_mapper.m_patch_col_inflate_strides; + + m_colStride = base_mapper.m_colStride; + + m_rowInputStride = base_mapper.m_rowInputStride; + m_colInputStride = base_mapper.m_colInputStride; + m_patchInputStride = base_mapper.m_patchInputStride; + + m_inputRows = base_mapper.m_inputRows; + m_inputCols = base_mapper.m_inputCols; + + m_outputRows = base_mapper.m_outputRows; + m_outputCols = base_mapper.m_outputCols; + m_row_strides = base_mapper.m_row_strides; + m_col_strides = base_mapper.m_col_strides; + + m_in_row_strides = base_mapper.m_in_row_strides; + m_in_col_strides = base_mapper.m_in_col_strides; + + m_rowPaddingTop = base_mapper.m_rowPaddingTop; + m_colPaddingLeft = base_mapper.m_colPaddingLeft; + + m_fastPatchRowStride = base_mapper.m_fastPatchRowStride; + m_fastPatchColStride = base_mapper.m_fastPatchColStride; + m_fastInputRowStride = base_mapper.m_fastInputRowStride; + m_fastInputColStride = base_mapper.m_fastInputColStride; + m_fastNumPatches = base_mapper.m_fastNumPatches; + m_fastColStride = base_mapper.m_fastColStride; + m_fastOutputRows = base_mapper.m_fastOutputRows; + m_fastDimZero = base_mapper.m_fastDimZero; + } + + // If true, turns off some optimizations for loading packets since the image + // patches are "non-standard" such as there are non-trivial strides or + // inflations in the input. 
+ EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE bool nonStandardPatches() const { + return m_in_row_strides != 1 || m_in_col_strides != 1 || + m_patch_row_inflate_strides != 1 || m_patch_col_inflate_strides != 1; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE SubMapper getSubMapper(Index i, Index j) const { + return SubMapper(*this, i, j); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE LinearMapper getLinearMapper(Index i, Index j) const { + return LinearMapper(*this, i, j); + } + + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Scalar operator()(Index row) const { + Index rowIndex, colIndex, otherIndex; + computeBaseIndices(0, rowIndex, colIndex, otherIndex); + return loadCoeff(row, rowIndex, colIndex, otherIndex); + } + + // Load the coefficient at the patchIndex location instead of the usual + // m_rowIndex, + // m_colIndex, m_otherIndex. This is currently only used by the gpu code. + // EIGEN_DEVICE_FUNC + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar operator()(Index row, Index patchIndex) const { + Index rowIndex, colIndex, otherIndex; + computeBaseIndices(patchIndex, rowIndex, colIndex, otherIndex); + return loadCoeff(row, rowIndex, colIndex, otherIndex); + } + + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Packet loadPacket(Index row) const { + Index rowIndex, colIndex, otherIndex; + computeBaseIndices(0, rowIndex, colIndex, otherIndex); + return loadPacket(row, rowIndex, colIndex, otherIndex); + } + + // Load the packet at the patchIndex location instead of the usual m_rowIndex, + // m_colIndex, m_otherIndex. This is currently only used by the gpu code. 
+ EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Packet loadPacket(Index row, Index patchIndex) const { + Index rowIndex, colIndex, otherIndex; + computeBaseIndices(patchIndex, rowIndex, colIndex, otherIndex); + return loadPacket(row, rowIndex, colIndex, otherIndex); + } + + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE const TensorEvaluator& impl() const { + return m_impl; + } + + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Index patchDepth() const { return m_rowInputStride; } + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Index patchRows() const { return m_colStride; } + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Index patchCols() const { return m_patch_cols; } + + private: + friend class TensorContractionSubMapper< + Scalar, Index, Side, + TensorEvaluator< + const TensorReshapingOp< + NewDimension, const TensorImagePatchOp >, + Device>, + nocontract_t, contract_t, packet_size, inner_dim_contiguous, + inner_dim_reordered, Alignment>; + + // Load coefficient from a patch specified by the "within patch offset" + // (patchId) and the precomputed indices of the first element of the patch. + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar loadCoeff(Index patchId, Index rowIndex, + Index colIndex, Index otherIndex) const { + // Find the offset of the element wrt the location of the first element. + const Index patchOffset = patchId / m_fastDimZero; + + const Index colOffset = patchOffset / m_fastColStride; + const Index inputCol = colIndex + colOffset * m_in_col_strides; + const Index origInputCol = + (m_patch_col_inflate_strides == 1) + ? inputCol + : ((inputCol >= 0) ? (inputCol / m_fastInputColStride) : 0); + + const Index rowOffset = patchOffset - colOffset * m_colStride; + const Index inputRow = rowIndex + rowOffset * m_in_row_strides; + const Index origInputRow = + (m_patch_row_inflate_strides == 1) + ? inputRow + : ((inputRow >= 0) ? 
(inputRow / m_fastInputRowStride) : 0); + if (origInputCol < 0 || origInputRow < 0 || origInputCol >= m_inputCols || + origInputRow >= m_inputRows || + (inputCol != origInputCol * m_patch_col_inflate_strides) || + (inputRow != origInputRow * m_patch_row_inflate_strides)) { + return Scalar(0); + } + const Index depth = patchId - patchOffset * patchDepth(); + const Index inputIndex = depth + origInputRow * m_rowInputStride + + origInputCol * m_colInputStride + otherIndex; + return m_impl.coeff(inputIndex); + } + + // This is the same as loadCoeff(...), but optimized for all `inflate_strides` + // and `in_strides` equal to 1 (template specialization without templates). + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar loadCoeffStandard(Index patchId, Index rowIndex, + Index colIndex, + Index otherIndex) const { + eigen_assert(!nonStandardPatches()); + + // Find the offset of the element wrt the location of the first element. + const Index patchOffset = patchId / m_fastDimZero; + const Index colOffset = patchOffset / m_fastColStride; + const Index rowOffset = patchOffset - colOffset * m_colStride; + const Index inputCol = colIndex + colOffset; + const Index inputRow = rowIndex + rowOffset; + if (inputCol < 0 || inputCol >= m_inputCols || inputRow < 0 || + inputRow >= m_inputRows) { + return Scalar(0); + } + const Index depth = patchId - patchOffset * patchDepth(); + const Index inputIndex = depth + inputRow * m_rowInputStride + + inputCol * m_colInputStride + otherIndex; + return m_impl.coeff(inputIndex); + } + + // Load packet from a patch specified by the "within patch offset" + // (patchId) and the precomputed indices of the first element of the patch. 
+ EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Packet loadPacket(Index patchId, Index rowIndex, + Index colIndex, + Index otherIndex) const { + const Index packetSize = internal::unpacket_traits::size; + EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(patchId < patchDepth() * patchRows() * m_patch_cols); + + if (nonStandardPatches()) { + return packetWithPossibleZero(patchId, rowIndex, colIndex, otherIndex); + } + typedef decltype(m_impl) TensorEvaluatorT; + return loadPacketStandard(patchId, rowIndex, + colIndex, otherIndex); + } + + // Helper function to load a 'partial' packet - this is the single column + // part of a packet that is split across two columns. In the 'partial' packet, + // the elements corresponding to the column (specified through colOffset) are + // loaded and the rest of the elements are zero-filled into the 'partial' + // packet. This function is called from loadPacketStandardFromTwoColumns(). + // This code path is exercised only when the packet type supports masked load + // and when the partial packet load is available in the TensorEvaluator. + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Packet loadPartialPacketStandard( + Index rowIndex, Index colIndex, Index otherIndex, Index patchId, + const Index span[], const Index patchOffsets[], Index colOffset) const { + const Index inputCol = colIndex + colOffset; + const Index rowOffsets[2] = {patchOffsets[0] - colOffset * m_colStride, + patchOffsets[1] - colOffset * m_colStride}; + const Index inputRows[2] = {rowIndex + rowOffsets[0], + rowIndex + rowOffsets[1]}; + + if (inputRows[0] >= m_inputRows || inputRows[1] < 0 || + inputCol >= m_inputCols || inputCol < 0) { + // Partial packet is all zeros + return internal::pset1(Scalar(0)); + } else if (inputRows[0] >= 0 && inputRows[1] < m_inputRows) { + // From inputIndex-span[0], we need to load elements starting from index + // span[0] all the way upto (and including) span[1]. 
+ const Index depth = patchId - patchOffsets[0] * patchDepth(); + const Index inputIndex = depth + inputRows[0] * m_rowInputStride + + inputCol * m_colInputStride + otherIndex; + return m_impl.template partialPacket( + inputIndex - span[0], mask(span[0], span[1] + 1)); + } else { + // Using slow path for this partial packet. + // We need to load elements starting from index span[0] all the way upto + // (and including) span[1]. We split this load into 3 parts: + // 0 : span[0]-1 - Zeros will be loaded for these indices + // span[0] : span[1] - Elements will be loaded here for these indices + // span[1]+1 : packetSize-1 - Zeross will be loaded for these indices + const Index packetSize = internal::unpacket_traits::size; + EIGEN_ALIGN_MAX + std::remove_const_t values[packetSize]; + for (int i = 0; i < span[0]; ++i) values[i] = Scalar(0); + for (int i = span[0]; i < span[1] + 1; ++i) + values[i] = + loadCoeff(patchId - span[0] + i, rowIndex, colIndex, otherIndex); + for (int i = span[1] + 1; i < packetSize; ++i) values[i] = Scalar(0); + return internal::pload(values); + } + } + + // Helper function to load a packet that is split across two columns. + // If required, this function is called from loadPacketStandard() when the + // packet type supports masked load and when the partial packet load is + // available in the TensorEvaluator. + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Packet loadPacketStandardFromTwoColumns( + Index patchId, Index rowIndex, Index colIndex, Index otherIndex, + const Index patchOffsets[], const Index colOffsets[]) const { + eigen_assert(colOffsets[1] == colOffsets[0] + 1); + const Index packetSize = internal::unpacket_traits::size; + + // Packet to load will be split into 2 parts where each part spans a single + // column. First determine where to split. 
+ const Index patchIdSplit = + ((colOffsets[1] * m_colStride) * m_rowInputStride) - 1; + const Index patchOffsetSplit = patchIdSplit / m_fastDimZero; + + // patchIds[i]: patchId corresponding to partial packet i + // spans[i]: Start and end indices corresponding to the elements + // to be loaded for partial packet i + // patchOffsets2Cols[i]: patchOffsets corresponding to partial packet i + const Index patchIds[2] = {patchId, patchIdSplit + 1}; + const Index spans[2][2] = {{0, patchIdSplit - patchId}, + {patchIdSplit - patchId + 1, packetSize - 1}}; + const Index patchOffsets2Cols[2][2] = { + {patchOffsets[0], patchOffsetSplit}, + {patchOffsetSplit + 1, patchOffsets[1]}}; + + // Load partial packets and do bit-wise OR to generate required packet + return internal::por( + loadPartialPacketStandard(rowIndex, colIndex, otherIndex, patchIds[0], + spans[0], patchOffsets2Cols[0], + colOffsets[0]), + loadPartialPacketStandard(rowIndex, colIndex, otherIndex, patchIds[1], + spans[1], patchOffsets2Cols[1], + colOffsets[1])); + } + + // Helper function to load a packet that is present in a single columns. + // If required, this function is called from loadPacketStandard(). 
+ EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Packet loadPacketStandardFromSingleColumn( + Index patchId, Index rowIndex, Index colIndex, Index otherIndex, + const Index patchOffsets[], const Index colOffsets[], + const Index inputCols[]) const { + eigen_assert(colOffsets[0] == colOffsets[1]); + const Index rowOffsets[2] = {patchOffsets[0] - colOffsets[0] * m_colStride, + patchOffsets[1] - colOffsets[1] * m_colStride}; + eigen_assert(rowOffsets[0] <= rowOffsets[1]); + const Index inputRows[2] = {rowIndex + rowOffsets[0], + rowIndex + rowOffsets[1]}; + + if (inputRows[0] >= m_inputRows || inputRows[1] < 0) { + // all zeros + return internal::pset1(Scalar(0)); // all zeros + } + + if (inputRows[0] >= 0 && inputRows[1] < m_inputRows) { + // no padding + const Index depth = patchId - patchOffsets[0] * patchDepth(); + const Index inputIndex = depth + inputRows[0] * m_rowInputStride + + inputCols[0] * m_colInputStride + otherIndex; + return m_impl.template packet(inputIndex); + } + return packetWithPossibleZero(patchId, rowIndex, colIndex, otherIndex); + } + + // Load standard packet from a patch specified by the "within patch offset" + // (patchId) and the precomputed indices of the first element of the patch. + // This function will be called if partial packet loading is not available + // for the TensorEvaluator or if the packet type does not support masked + // load. 
+ template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename std::enable_if< + !TensorEvaluatorHasPartialPacket::value, + PacketT>::type + loadPacketStandard(Index patchId, Index rowIndex, Index colIndex, + Index otherIndex) const { + const Index packetSize = internal::unpacket_traits::size; + EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(patchId < patchDepth() * patchRows() * m_patch_cols); + + eigen_assert(!nonStandardPatches()); + + if ((patchDepth() % packetSize) == 0) { + return loadPacketFast(patchId, rowIndex, colIndex, otherIndex); + } + + // Offsets and input calculation here are identical to + // loadCoeffStandard(...), but repeated twice. + const Index patchOffsets[2] = {patchId / m_fastDimZero, + (patchId + packetSize - 1) / m_fastDimZero}; + const Index colOffsets[2] = {patchOffsets[0] / m_fastColStride, + patchOffsets[1] / m_fastColStride}; + const Index inputCols[2] = {colIndex + colOffsets[0], + colIndex + colOffsets[1]}; + + if (inputCols[0] >= m_inputCols || inputCols[1] < 0) { + // all zeros + return internal::pset1(Scalar(0)); + } + if (inputCols[0] == inputCols[1]) { + return loadPacketStandardFromSingleColumn(patchId, rowIndex, colIndex, + otherIndex, patchOffsets, + colOffsets, inputCols); + } + return packetWithPossibleZero(patchId, rowIndex, colIndex, otherIndex); + } + + // Load standard packet from a patch specified by the "within patch offset" + // (patchId) and the precomputed indices of the first element of the patch. + // This function will be called if partial packet loading is available for + // the TensorEvaluator and if the packet type supports masked load. 
+ // The only difference between this and the other case is that if the packet + // to load is split across two columns, then in this case instead of going to + // the slow (element-by-element) load, we load two packets - each containing + // elements from one of the columns (rest of the elements of the packets are + // zeroes), and then combine these two packets to generate the required + // packet. The idea is to enable fast load (if possible) of these 'partial' + // packets. + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename std::enable_if< + TensorEvaluatorHasPartialPacket::value, + PacketT>::type + loadPacketStandard(Index patchId, Index rowIndex, Index colIndex, + Index otherIndex) const { + const Index packetSize = internal::unpacket_traits::size; + EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(patchId < patchDepth() * patchRows() * m_patch_cols); + + eigen_assert(!nonStandardPatches()); + + if ((patchDepth() % packetSize) == 0) { + return loadPacketFast(patchId, rowIndex, colIndex, otherIndex); + } + + // Offsets and input calculation here are identical to + // loadCoeffStandard(...), but repeated twice. 
+ const Index patchOffsets[2] = {patchId / m_fastDimZero, + (patchId + packetSize - 1) / m_fastDimZero}; + const Index colOffsets[2] = {patchOffsets[0] / m_fastColStride, + patchOffsets[1] / m_fastColStride}; + const Index inputCols[2] = {colIndex + colOffsets[0], + colIndex + colOffsets[1]}; + + if (inputCols[0] >= m_inputCols || inputCols[1] < 0) { + // all zeros + return internal::pset1(Scalar(0)); + } + if (inputCols[0] == inputCols[1]) { + return loadPacketStandardFromSingleColumn(patchId, rowIndex, colIndex, + otherIndex, patchOffsets, + colOffsets, inputCols); + } + if (inputCols[1] == inputCols[0] + 1) { + return loadPacketStandardFromTwoColumns( + patchId, rowIndex, colIndex, otherIndex, patchOffsets, colOffsets); + } + return packetWithPossibleZero(patchId, rowIndex, colIndex, otherIndex); + } + + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Packet loadPacketFast(Index patchId, Index rowIndex, + Index colIndex, + Index otherIndex) const { + const Index packetSize = internal::unpacket_traits::size; + EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(patchId < patchDepth() * patchRows() * m_patch_cols); + + eigen_assert(!nonStandardPatches()); + eigen_assert((patchDepth() % packetSize) == 0); + // Find the offset of the element wrt the location of the first element. 
+ const Index patchOffset = patchId / m_fastDimZero; + eigen_assert((patchId + packetSize - 1) / m_fastDimZero == patchOffset); + + const Index colOffset = patchOffset / m_fastColStride; + const Index rowOffset = patchOffset - colOffset * m_colStride; + const Index inputCol = colIndex + colOffset; + const Index inputRow = rowIndex + rowOffset; + if (inputCol < 0 || inputRow < 0 || inputCol >= m_inputCols || + inputRow >= m_inputRows) { + // all zeros + return internal::pset1(Scalar(0)); + } + // no padding + const Index depth = patchId - patchOffset * patchDepth(); + const Index inputIndex = depth + inputRow * m_rowInputStride + + inputCol * m_colInputStride + otherIndex; + return m_impl.template packet(inputIndex); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet packetWithPossibleZero( + Index patchId, Index rowIndex, Index colIndex, Index otherIndex) const { + const int packetSize = internal::unpacket_traits::size; + EIGEN_ALIGN_MAX + std::remove_const_t values[packetSize]; + for (int i = 0; i < packetSize; ++i) { + values[i] = loadCoeff(patchId + i, rowIndex, colIndex, otherIndex); + } + Packet rslt = internal::pload(values); + return rslt; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void computeBaseIndices( + Index patchIndex, Index& rowIndex, Index& colIndex, + Index& otherIndex) const { + const size_t NumInputDims = array_size< + typename TensorEvaluator::Dimensions>::value; + otherIndex = (NumInputDims == 3) ? 0 : patchIndex / m_fastNumPatches; + const Index patch2DIndex = (NumInputDims == 3) + ? patchIndex + : (patchIndex - otherIndex * m_num_patches); + otherIndex *= m_patchInputStride; + colIndex = patch2DIndex / m_fastOutputRows; + rowIndex = patch2DIndex - colIndex * m_outputRows; + colIndex = colIndex * m_col_strides - m_colPaddingLeft; + rowIndex = rowIndex * m_row_strides - m_rowPaddingTop; + } + + Index m_patch_cols; // number of columns in the patch + Index m_num_patches; // number of patches to extract. 
+ + // Strides for navigating through the single patch. + Index m_patch_row_stride; + Index m_patch_col_stride; + internal::TensorIntDivisor m_fastPatchRowStride; + internal::TensorIntDivisor m_fastPatchColStride; + + Index m_patch_row_inflate_strides; // the strides for row inflation in the + // image patch + Index m_patch_col_inflate_strides; // the strides for col inflation in the + // image patch + // Fast representation of inflation strides. + internal::TensorIntDivisor m_fastInputRowStride; + internal::TensorIntDivisor m_fastInputColStride; + + Index m_otherStride; + Index m_colStride; + internal::TensorIntDivisor m_fastNumPatches; + internal::TensorIntDivisor m_fastColStride; + + Index m_rowInputStride; // row stride in the input tensor + Index m_colInputStride; // col stride in the input tensor + Index m_patchInputStride; // patch stride in the input tensor + + Index m_inputRows; // Number of rows in the input tensor + Index m_inputCols; // Number of cols in the input tensor + + Index m_outputRows; // Number of convolution output rows + Index m_outputCols; // Number of convolution output column + + Index m_row_strides; // User specified row stride + Index m_col_strides; // User specified col stride + + Index m_in_row_strides; // User specified input row stride + Index m_in_col_strides; // User specified input col stride + + Index m_rowPaddingTop; // Row padding + Index m_colPaddingLeft; // Column padding + + internal::TensorIntDivisor m_fastOutputRows; + internal::TensorIntDivisor m_fastDimZero; + + const TensorEvaluator m_impl; +}; + +template +class TensorContractionSubMapper< + Scalar, Index, Side, + TensorEvaluator< + const TensorReshapingOp >, + Device>, + nocontract_t, contract_t, packet_size, inner_dim_contiguous, + inner_dim_reordered, Alignment> { + public: + typedef typename packet_traits::type Packet; + typedef typename packet_traits::half HalfPacket; + + typedef TensorContractionInputMapper< + Scalar, Index, Side, + TensorEvaluator< + const 
TensorReshapingOp< + NewDimension, const TensorImagePatchOp >, + Device>, + nocontract_t, contract_t, packet_size, inner_dim_contiguous, + inner_dim_reordered, Alignment> + ParentMapper; + + typedef TensorContractionSubMapper< + Scalar, Index, Side, + TensorEvaluator< + const TensorReshapingOp< + NewDimension, const TensorImagePatchOp >, + Device>, + nocontract_t, contract_t, packet_size, inner_dim_contiguous, + inner_dim_reordered, Alignment> + Self; + + typedef Self LinearMapper; + + typedef typename ParentMapper::TensorEvaluatorT TensorEvaluatorT; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorContractionSubMapper( + const ParentMapper& base_mapper, Index vert_offset, Index horiz_offset) + : m_depth_offset(vert_offset), + m_col_offset(horiz_offset), + m_base_mapper(base_mapper) { + m_base_mapper.computeBaseIndices(m_col_offset, m_rowIndex, m_colIndex, + m_otherIndex); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorContractionSubMapper( + const Self& base_mapper, Index vert_offset, Index horiz_offset) + : m_depth_offset(vert_offset + base_mapper.m_depth_offset), + m_col_offset(horiz_offset + base_mapper.m_col_offset), + m_base_mapper(base_mapper.m_base_mapper) { + m_base_mapper.computeBaseIndices(m_col_offset, m_rowIndex, m_colIndex, + m_otherIndex); + } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar operator()(Index i) const { + return m_base_mapper.loadCoeff(i + m_depth_offset, m_rowIndex, m_colIndex, + m_otherIndex); + } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar operator()(Index i, + Index j) const { + return m_base_mapper(i + m_depth_offset, j + m_col_offset); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacket(Index i) const { + return m_base_mapper.loadPacket(i + m_depth_offset, m_rowIndex, m_colIndex, + m_otherIndex); + } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacket(Index i, + Index j) const { + return m_base_mapper.template loadPacket(i + m_depth_offset, + j + m_col_offset); + } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE 
Scalar + loadCoeffStandard(Index i) const { + return m_base_mapper.loadCoeffStandard(i + m_depth_offset, m_rowIndex, + m_colIndex, m_otherIndex); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacketFast(Index i) const { + return m_base_mapper.loadPacketFast(i + m_depth_offset, m_rowIndex, + m_colIndex, m_otherIndex); + } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet + loadPacketStandard(Index i) const { + typedef decltype(m_base_mapper.m_impl) TensorEvaluatorT; + return m_base_mapper.template loadPacketStandard( + i + m_depth_offset, m_rowIndex, m_colIndex, m_otherIndex); + } + template + EIGEN_DEVICE_FUNC bool aligned(Index) const { + return false; + } + + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE bool nonStandardPatches() const { + return m_base_mapper.nonStandardPatches(); + } + + // Max(Col|Row|Depth): compute the upper limit for the column, row and depth + // index respectively that fits into the peeled_k elements starting at + // m_depth_offset. + + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Index maxCol(const Index peeled_k) const { + const Index max_col = + (m_depth_offset + (peeled_k == 0 ? 0 : peeled_k - 1)) / + fastPatchColStride(); + return std::min(1 + max_col, patchCols()); + } + + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Index maxRow(const Index peeled_k, + const Index col) const { + const Index max_row = (m_depth_offset + (peeled_k == 0 ? 0 : peeled_k - 1) - + col * patchColStride()) / + fastPatchRowStride(); + return std::min(1 + max_row, patchRows()); + } + + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Index maxDepth(const Index peeled_k, const Index col, + Index row) const { + const Index max_depth = m_depth_offset + peeled_k - // + col * patchColStride() - // + row * patchRowStride(); + return std::min(max_depth, patchDepth()); + } + + // MaxDepth uses only the remaining number of elements in the peeled_k. 
+ EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Index maxDepth(const Index num_elements, + const Index start_depth) const { + return std::min(start_depth + num_elements, patchDepth()); + } + + // Every register matters in this code, so sometimes to prevent register + // spilling, instead of the variable that you would expect to see, we use + // another one, that is guaranteed to have the same value. E.g. patch depth is + // always the same as input depth, and it's also the same as input row stride. + // Bunch of other parameters have similar relations. + + typedef internal::TensorIntDivisor IndexDivisor; + + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Index patchDepth() const { + return m_base_mapper.m_rowInputStride; + } + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Index patchRows() const { + return m_base_mapper.m_colStride; + } + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Index patchCols() const { + return m_base_mapper.m_patch_cols; + } + + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Index patchRowStride() const { + eigen_assert(patchDepth() == m_base_mapper.m_patch_row_stride && + "Patch depth must be equal to patch row stride."); + return patchDepth(); + } + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Index patchColStride() const { + return m_base_mapper.m_patch_col_stride; + } + + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE IndexDivisor fastPatchRowStride() const { + eigen_assert(patchDepth() == m_base_mapper.m_patch_row_stride && + "Patch depth must be equal to patch row stride."); + return m_base_mapper.m_fastDimZero; // patch_depth + } + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE IndexDivisor fastPatchColStride() const { + return m_base_mapper.m_fastPatchColStride; + } + + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Packet packetNoPadding(const Index depth, + const Index baseIndex) const { + const Index inputIndex = depth + baseIndex; + return m_base_mapper.m_impl.template packet(inputIndex); + } + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Scalar coeffNoPadding(const Index depth, + const 
Index baseIndex) const { + const Index inputIndex = depth + baseIndex; + return m_base_mapper.m_impl.coeff(inputIndex); + } + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename std::enable_if< + TensorEvaluatorHasPartialPacket::value, + PacketT>::type + partialPacketNoPadding(const Index depth, const Index baseIndex, + Index num_coeffs) const { + const Index inputIndex = depth + baseIndex; + return m_base_mapper.m_impl.template partialPacket( + inputIndex, mask(0, num_coeffs)); + } + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE bool hasPadding() const { + // TODO(ezhulenev): It does seems that for inflated filter it's still + // possible to guarantee "no padding or skipping" for non-standard packing. + if (nonStandardPatches()) return true; + + // Non zero padding before. + if (m_base_mapper.m_rowPaddingTop > 0) return true; + if (m_base_mapper.m_colPaddingLeft > 0) return true; + + // Non zero padding after in rows. + const Index last_row = + (m_base_mapper.m_outputRows - 1) * m_base_mapper.m_row_strides; + if (last_row + (patchRows() - 1) >= m_base_mapper.m_inputRows) return true; + + // Non zero padding after in cols. + const Index last_col = + (m_base_mapper.m_outputCols - 1) * m_base_mapper.m_col_strides; + if (last_col + (patchCols() - 1) >= m_base_mapper.m_inputCols) return true; + + return false; + } + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE bool padRow(const Index row) const { + const Index r = m_rowIndex + row; + return r < 0 || r >= m_base_mapper.m_inputRows; + } + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE bool padAnyRow(const Index first_row, + const Index last_row) const { + return m_rowIndex + first_row < 0 || + m_rowIndex + last_row >= m_base_mapper.m_inputRows; + } + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE bool padOrSkipRow(const Index row, + Index* orig_row) const { + eigen_assert(nonStandardPatches()); + + const Index input_row = m_rowIndex + row * m_base_mapper.m_in_row_strides; + *orig_row = (m_base_mapper.m_patch_row_inflate_strides == 1) + ? 
input_row + : ((input_row >= 0) + ? (input_row / m_base_mapper.m_fastInputRowStride) + : 0); + + return (*orig_row < 0 || *orig_row >= m_base_mapper.m_inputRows) || + (input_row != *orig_row * m_base_mapper.m_patch_row_inflate_strides); + } + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE bool padCol(const Index col) const { + const Index c = m_colIndex + col; + return c < 0 || c >= m_base_mapper.m_inputCols; + } + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE bool padOrSkipCol(const Index col, + Index* orig_col) const { + eigen_assert(nonStandardPatches()); + + const Index input_col = m_colIndex + col * m_base_mapper.m_in_col_strides; + *orig_col = (m_base_mapper.m_patch_col_inflate_strides == 1) + ? input_col + : ((input_col >= 0) + ? (input_col / m_base_mapper.m_fastInputColStride) + : 0); + + return (*orig_col < 0 || *orig_col >= m_base_mapper.m_inputCols) || + (input_col != *orig_col * m_base_mapper.m_patch_col_inflate_strides); + } + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Index baseIndex(const Index row, const Index col) const { + const Index r = m_rowIndex + row; + const Index c = m_colIndex + col; + return r * m_base_mapper.m_rowInputStride + + c * m_base_mapper.m_colInputStride + m_otherIndex; + } + // Compute a base index when original input row and column were precomputed + // using padOrSkipRow and padOrSkipCol. Used only for non standard patches. 
+ EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Index origBaseIndex(const Index orig_row, + const Index orig_col) const { + return orig_row * m_base_mapper.m_rowInputStride + + orig_col * m_base_mapper.m_colInputStride + m_otherIndex; + } + + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Index rowStride() const { + return m_base_mapper.m_row_strides; + } + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Index colStride() const { + return m_base_mapper.m_col_strides; + } + + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Index rowOffset() const { + const Index patchOffset = m_depth_offset / m_base_mapper.m_fastDimZero; + const Index colOffset = patchOffset / m_base_mapper.m_fastColStride; + return patchOffset - colOffset * m_base_mapper.m_colStride; + } + + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Index colOffset() const { + const Index patchOffset = m_depth_offset / m_base_mapper.m_fastDimZero; + const Index colOffset = patchOffset / m_base_mapper.m_fastColStride; + return colOffset; + } + + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Index depthOffset() const { + return m_depth_offset % patchDepth(); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE LinearMapper + getLinearMapper(Index i, Index j) const { + return LinearMapper(m_base_mapper, i + m_depth_offset, j + m_col_offset); + } + + private: + Index m_depth_offset; // First row in the input matrix + Index m_col_offset; // First col in the input matrix + + // Knowing that: col_offset == patchIndex * OTHERS, we keep precomputed base + // indices for the first element in a patch specified by col_offset + // (see computeBaseIndices(...) for details). + Index m_rowIndex; + Index m_colIndex; + Index m_otherIndex; + + const ParentMapper m_base_mapper; // Keeping a copy instead of a reference + // performs better in benchmarks. +}; + +// Arrange a block of the right input matrix (in our case it's always a "virtual +// matrix" constructed from extracted image patches) in contiguous memory. 
+// +// Given column major input (A0 beside A1 in memory): +// A0 B0 C0 D0 E0 F0 G0 H0 ... Z0 +// A1 B1 C1 D1 E1 F1 G1 H1 ... Z1 +// A2 B2 C2 D2 E2 F2 G2 H2 ... Z2 +// A3 B3 C3 D3 E3 F3 G3 H3 ... Z3 +// A4 B4 C4 D4 E4 F4 G4 H4 ... Z4 +// A5 B5 C5 D5 E5 F5 G5 H5 ... Z5 +// A6 B6 C6 D6 E6 F6 G6 H6 ... Z6 +// A7 B7 C7 D7 E7 F7 G7 H7 ... Z7 +// A8 ... +// ... +// +// *) A, B, C, ... - patches extracted from the original input. +// *) A0, A1, A2 ... - values from the same patch at different offsets. +// +// The traversal (packed rhs memory) order (B0 besides A0 in memory): +// A0 B0 C0 D0 A1 B1 C1 D1 ... +// E0 F0 G0 H0 E1 F1 G1 H1 ... +// ... +// Z0 Z1 Z2 Z3 Z4 Z5 Z6 Z7 ... <- doesn't belong to any block (nr = 4) +// +// This traversal order must be the same as in default gemm_pack_rhs defined in +// GeneralBlockPanelKernel.h. +// +// *) nr - number of registers along the 'n' dimension. +// See GeneralBlockPanelKernel.h and "Anatomy of High-Performance Matrix +// Multiplication" paper. +template +struct gemm_pack_rhs< + Scalar, Index, + TensorContractionSubMapper< + Scalar, Index, Rhs, + TensorEvaluator< + const TensorReshapingOp< + NewDimension, const TensorImagePatchOp >, + Device>, + nocontract_t, contract_t, packet_size, inner_dim_contiguous, + inner_dim_reordered, Alignment>, + nr, ColMajor, false, false> { + typedef TensorContractionSubMapper< + Scalar, Index, Rhs, + TensorEvaluator< + const TensorReshapingOp< + NewDimension, const TensorImagePatchOp >, + Device>, + nocontract_t, contract_t, packet_size, inner_dim_contiguous, + inner_dim_reordered, Alignment> + SubMapper; + typedef SubMapper DataMapper; + typedef typename packet_traits::type Packet; + + EIGEN_STATIC_ASSERT((nr == 4), YOU_MADE_A_PROGRAMMING_MISTAKE) + + EIGEN_DEVICE_FUNC + EIGEN_DONT_INLINE void operator()(Scalar* block, const DataMapper& rhs, + Index depth, Index cols, Index stride = 0, + Index offset = 0) const { + eigen_assert(stride == 0); + eigen_assert(offset == 0); + + const Index 
packet_cols4 = (cols / 4) * 4; + const Index peeled_k = (depth / packet_size) * packet_size; + const bool non_standard_patches = rhs.nonStandardPatches(); + + for (Index j2 = 0; j2 < packet_cols4; j2 += 4) { + const SubMapper dm0 = rhs.getLinearMapper(0, j2 + 0); + const SubMapper dm1 = rhs.getLinearMapper(0, j2 + 1); + const SubMapper dm2 = rhs.getLinearMapper(0, j2 + 2); + const SubMapper dm3 = rhs.getLinearMapper(0, j2 + 3); + + Index k = 0; + if ((packet_size % 4) == 0 && !non_standard_patches) { + // FAST PATH: + // Iterate over patch columns and rows, if we know that a single + // packet do not span across multiple rows or columns. + if ((rhs.patchDepth() % packet_size) == 0) { + const Index start_col = rhs.colOffset(); + const Index max_col = rhs.maxCol(peeled_k); + + for (Index c = start_col; c < max_col; ++c) { + eigen_assert(k <= peeled_k); + + const Index start_row = (c == start_col) ? rhs.rowOffset() : 0; + const Index max_row = rhs.maxRow(peeled_k, c); + + const bool pad_col0 = dm0.padCol(c); + const bool pad_col1 = dm1.padCol(c); + const bool pad_col2 = dm2.padCol(c); + const bool pad_col3 = dm3.padCol(c); + + // Check if we can squeeze reads along the `row` and `depth` + // dimensions (two innermost dimensions). + if (!pad_col0 && !pad_col1 && !pad_col2 && !pad_col3 && // + !dm0.padRow(start_row) && !dm0.padRow(max_row - 1) && // + !dm1.padRow(start_row) && !dm1.padRow(max_row - 1) && // + !dm2.padRow(start_row) && !dm2.padRow(max_row - 1) && // + !dm3.padRow(start_row) && !dm3.padRow(max_row - 1)) { + // Compute how many elements we can squeeze read. + const Index start_depth = + (c == start_col) ? rhs.depthOffset() : 0; + + // Upper bound for the number of elements in the depth dimension + // that we can squeeze read. + const Index squeeze_length = + (max_row - start_row) * rhs.patchDepth() - start_depth; + + // Do not overshoot beyond the block size. 
+ const Index max_depth = + start_depth + std::min(peeled_k - k, squeeze_length); + eigen_assert((max_depth - start_depth) % packet_size == 0); + + const Index idx0 = dm0.baseIndex(start_row, c); + const Index idx1 = dm1.baseIndex(start_row, c); + const Index idx2 = dm2.baseIndex(start_row, c); + const Index idx3 = dm3.baseIndex(start_row, c); + + for (Index d = start_depth; d < max_depth; d += packet_size) { + eigen_assert(k < peeled_k); + PacketBlock kernel; + kernel.packet[0] = rhs.packetNoPadding(d, idx0); + kernel.packet[1] = rhs.packetNoPadding(d, idx1); + kernel.packet[2] = rhs.packetNoPadding(d, idx2); + kernel.packet[3] = rhs.packetNoPadding(d, idx3); + ptranspose(kernel); + pstoreu(block + 0 * packet_size, kernel.packet[0]); + pstoreu(block + 1 * packet_size, kernel.packet[1]); + pstoreu(block + 2 * packet_size, kernel.packet[2]); + pstoreu(block + 3 * packet_size, kernel.packet[3]); + block += 4 * packet_size; + k += packet_size; + } + + // Go to the next column. + continue; + } + + // If we can't squeeze reads, process rows one by one. + for (Index r = start_row; r < max_row; ++r) { + eigen_assert(k <= peeled_k); + + const bool pad0 = pad_col0 || dm0.padRow(r); + const bool pad1 = pad_col1 || dm1.padRow(r); + const bool pad2 = pad_col2 || dm2.padRow(r); + const bool pad3 = pad_col3 || dm3.padRow(r); + + const Index idx0 = dm0.baseIndex(r, c); + const Index idx1 = dm1.baseIndex(r, c); + const Index idx2 = dm2.baseIndex(r, c); + const Index idx3 = dm3.baseIndex(r, c); + + const Index start_depth = ((c == start_col) && (r == start_row)) + ? rhs.depthOffset() + : 0; + const Index max_depth = rhs.maxDepth(peeled_k - k, start_depth); + eigen_assert((max_depth - start_depth) % packet_size == 0); + + for (Index d = start_depth; d < max_depth; d += packet_size) { + eigen_assert(k < peeled_k); + PacketBlock kernel; + kernel.packet[0] = pad0 ? pset1(Scalar(0)) + : rhs.packetNoPadding(d, idx0); + kernel.packet[1] = pad1 ? 
pset1(Scalar(0)) + : rhs.packetNoPadding(d, idx1); + kernel.packet[2] = pad2 ? pset1(Scalar(0)) + : rhs.packetNoPadding(d, idx2); + kernel.packet[3] = pad3 ? pset1(Scalar(0)) + : rhs.packetNoPadding(d, idx3); + ptranspose(kernel); + pstoreu(block + 0 * packet_size, kernel.packet[0]); + pstoreu(block + 1 * packet_size, kernel.packet[1]); + pstoreu(block + 2 * packet_size, kernel.packet[2]); + pstoreu(block + 3 * packet_size, kernel.packet[3]); + block += 4 * packet_size; + k += packet_size; + } + } + } + + // The loop above should fill peeled_k elements. + eigen_assert(peeled_k == k); + + } else { + for (; k < peeled_k; k += packet_size) { + PacketBlock kernel; + kernel.packet[0] = dm0.loadPacketStandard(k); + kernel.packet[1] = dm1.loadPacketStandard(k); + kernel.packet[2] = dm2.loadPacketStandard(k); + kernel.packet[3] = dm3.loadPacketStandard(k); + ptranspose(kernel); + pstoreu(block + 0 * packet_size, kernel.packet[0]); + pstoreu(block + 1 * packet_size, kernel.packet[1]); + pstoreu(block + 2 * packet_size, kernel.packet[2]); + pstoreu(block + 3 * packet_size, kernel.packet[3]); + block += 4 * packet_size; + } + } + } + + // Copy the remaining coefficients of the column block after the peeled_k. + if (!rhs.nonStandardPatches()) { + for (; k < depth; k++) { + block[0] = dm0.loadCoeffStandard(k); + block[1] = dm1.loadCoeffStandard(k); + block[2] = dm2.loadCoeffStandard(k); + block[3] = dm3.loadCoeffStandard(k); + block += 4; + } + } else { + for (; k < depth; k++) { + block[0] = dm0(k); + block[1] = dm1(k); + block[2] = dm2(k); + block[3] = dm3(k); + block += 4; + } + } + } + + // copy the remaining columns one at a time (nr==1) + for (Index j2 = packet_cols4; j2 < cols; ++j2) { + const SubMapper dm0 = rhs.getLinearMapper(0, j2); + for (Index k = 0; k < depth; k++) { + *block = dm0(k); + block += 1; + } + } + } +}; + +// Template specialization for packet_size = 2. We must special-case packet +// blocks with nr > packet_size, e.g. PacketBlock. 
+template +struct gemm_pack_rhs< + Scalar, Index, + TensorContractionSubMapper< + Scalar, Index, Rhs, + TensorEvaluator< + const TensorReshapingOp< + NewDimension, const TensorImagePatchOp >, + Device>, + nocontract_t, contract_t, 2, inner_dim_contiguous, inner_dim_reordered, + Alignment>, + nr, ColMajor, false, false> { + typedef TensorContractionSubMapper< + Scalar, Index, Rhs, + TensorEvaluator< + const TensorReshapingOp< + NewDimension, const TensorImagePatchOp >, + Device>, + nocontract_t, contract_t, 2, inner_dim_contiguous, inner_dim_reordered, + Alignment> + SubMapper; + typedef SubMapper DataMapper; + typedef typename packet_traits::type Packet; + + EIGEN_STATIC_ASSERT((nr == 4), YOU_MADE_A_PROGRAMMING_MISTAKE) + + EIGEN_DEVICE_FUNC + EIGEN_DONT_INLINE void operator()(Scalar* block, const DataMapper& rhs, + Index depth, Index cols, Index stride = 0, + Index offset = 0) const { + eigen_assert(stride == 0); + eigen_assert(offset == 0); + + const int packet_size = 2; + const Index packet_cols4 = (cols / 4) * 4; + const Index peeled_k = (depth / packet_size) * packet_size; + const bool non_standard_patches = rhs.nonStandardPatches(); + + for (Index j2 = 0; j2 < packet_cols4; j2 += 4) { + const SubMapper dm0 = rhs.getLinearMapper(0, j2 + 0); + const SubMapper dm1 = rhs.getLinearMapper(0, j2 + 1); + const SubMapper dm2 = rhs.getLinearMapper(0, j2 + 2); + const SubMapper dm3 = rhs.getLinearMapper(0, j2 + 3); + + Index k = 0; + if (!non_standard_patches) { + // FAST PATH: + // Iterate over patch columns and rows if we know that a single + // packet do not span across multiple rows or columns. + if ((rhs.patchDepth() % packet_size) == 0) { + const Index start_col = rhs.colOffset(); + const Index max_col = rhs.maxCol(peeled_k); + + for (Index c = start_col; c < max_col; ++c) { + eigen_assert(k <= peeled_k); + + const Index start_row = (c == start_col) ? 
rhs.rowOffset() : 0; + const Index max_row = rhs.maxRow(peeled_k, c); + + const bool pad_col0 = dm0.padCol(c); + const bool pad_col1 = dm1.padCol(c); + const bool pad_col2 = dm2.padCol(c); + const bool pad_col3 = dm3.padCol(c); + + // We can squeeze reads along the `row` and `depth` dimensions if + // the row stride is `1`, which means that `row` and `depth` + // dimensions are contiguous (two innermost dimensions). + if (rhs.rowStride() == 1 && // + !pad_col0 && !pad_col1 && !pad_col2 && !pad_col3 && // + !dm0.padRow(start_row) && !dm0.padRow(max_row - 1) && // + !dm1.padRow(start_row) && !dm1.padRow(max_row - 1) && // + !dm2.padRow(start_row) && !dm2.padRow(max_row - 1) && // + !dm3.padRow(start_row) && !dm3.padRow(max_row - 1)) { + // Compute how many elements we can squeeze read. + const Index start_depth = + (c == start_col) ? rhs.depthOffset() : 0; + + // Upper bound for the number of elements in the depth dimension + // that we can squeeze read. + const Index squeeze_length = + (max_row - start_row) * rhs.patchDepth() - start_depth; + + // Do not overshoot beyond the block size. 
+ const Index max_depth = + start_depth + std::min(peeled_k - k, squeeze_length); + eigen_assert((max_depth - start_depth) % packet_size == 0); + + const Index idx0 = dm0.baseIndex(start_row, c); + const Index idx1 = dm1.baseIndex(start_row, c); + const Index idx2 = dm2.baseIndex(start_row, c); + const Index idx3 = dm3.baseIndex(start_row, c); + + for (Index d = start_depth; d < max_depth; d += packet_size) { + PacketBlock kernel0; + PacketBlock kernel1; + kernel0.packet[0] = rhs.packetNoPadding(d, idx0); + kernel0.packet[1] = rhs.packetNoPadding(d, idx1); + kernel1.packet[0] = rhs.packetNoPadding(d, idx2); + kernel1.packet[1] = rhs.packetNoPadding(d, idx3); + ptranspose(kernel0); + ptranspose(kernel1); + pstoreu(block + 0 * packet_size, kernel0.packet[0]); + pstoreu(block + 1 * packet_size, kernel1.packet[0]); + pstoreu(block + 2 * packet_size, kernel0.packet[1]); + pstoreu(block + 3 * packet_size, kernel1.packet[1]); + block += 4 * packet_size; + k += packet_size; + } + + // Go to the next column. + continue; + } + + // If we can't squeeze reads, process rows one by one. + for (Index r = start_row; r < max_row; ++r) { + eigen_assert(k <= peeled_k); + + const bool pad0 = pad_col0 || dm0.padRow(r); + const bool pad1 = pad_col1 || dm1.padRow(r); + const bool pad2 = pad_col2 || dm2.padRow(r); + const bool pad3 = pad_col3 || dm3.padRow(r); + + const Index idx0 = dm0.baseIndex(r, c); + const Index idx1 = dm1.baseIndex(r, c); + const Index idx2 = dm2.baseIndex(r, c); + const Index idx3 = dm3.baseIndex(r, c); + + const Index start_depth = ((c == start_col) && (r == start_row)) + ? rhs.depthOffset() + : 0; + const Index max_depth = rhs.maxDepth(peeled_k - k, start_depth); + eigen_assert((max_depth - start_depth) % packet_size == 0); + + for (Index d = start_depth; d < max_depth; d += packet_size) { + eigen_assert(k < peeled_k); + PacketBlock kernel0; + PacketBlock kernel1; + kernel0.packet[0] = pad0 ? 
pset1(Scalar(0)) + : rhs.packetNoPadding(d, idx0); + kernel0.packet[1] = pad1 ? pset1(Scalar(0)) + : rhs.packetNoPadding(d, idx1); + kernel1.packet[0] = pad2 ? pset1(Scalar(0)) + : rhs.packetNoPadding(d, idx2); + kernel1.packet[1] = pad3 ? pset1(Scalar(0)) + : rhs.packetNoPadding(d, idx3); + ptranspose(kernel0); + ptranspose(kernel1); + pstoreu(block + 0 * packet_size, kernel0.packet[0]); + pstoreu(block + 1 * packet_size, kernel1.packet[0]); + pstoreu(block + 2 * packet_size, kernel0.packet[1]); + pstoreu(block + 3 * packet_size, kernel1.packet[1]); + block += 4 * packet_size; + k += packet_size; + } + } + } + + // The loop above should fill peeled_k elements. + eigen_assert(peeled_k == k); + + } else { + // Packet can span multiple rows or columns, so we have to go + // though the slower "standard" path. + for (; k < peeled_k; k += packet_size) { + PacketBlock kernel0; + PacketBlock kernel1; + kernel0.packet[0] = dm0.loadPacketStandard(k); + kernel0.packet[1] = dm1.loadPacketStandard(k); + kernel1.packet[0] = dm2.loadPacketStandard(k); + kernel1.packet[1] = dm3.loadPacketStandard(k); + ptranspose(kernel0); + ptranspose(kernel1); + pstoreu(block + 0 * packet_size, kernel0.packet[0]); + pstoreu(block + 1 * packet_size, kernel1.packet[0]); + pstoreu(block + 2 * packet_size, kernel0.packet[1]); + pstoreu(block + 3 * packet_size, kernel1.packet[1]); + block += 4 * packet_size; + } + } + } + + // Copy the remaining coefficients of the column block after the peeled_k. + if (!non_standard_patches) { + for (; k < depth; k++) { + block[0] = dm0.loadCoeffStandard(k); + block[1] = dm1.loadCoeffStandard(k); + block[2] = dm2.loadCoeffStandard(k); + block[3] = dm3.loadCoeffStandard(k); + block += 4; + } + } else { + for (; k < depth; k++) { + block[0] = dm0(k); + block[1] = dm1(k); + block[2] = dm2(k); + block[3] = dm3(k); + block += 4; + } + } + } + + // Copy the remaining columns one at a time (nr==1). 
+ for (Index j2 = packet_cols4; j2 < cols; ++j2) { + const SubMapper dm0 = rhs.getLinearMapper(0, j2); + for (Index k = 0; k < depth; k++) { + *block = dm0(k); + block += 1; + } + } + } +}; + +// Special case for non-vectorized types such as float16. +template +struct gemm_pack_rhs< + Scalar, Index, + TensorContractionSubMapper< + Scalar, Index, Rhs, + TensorEvaluator< + const TensorReshapingOp< + NewDimension, const TensorImagePatchOp >, + Device>, + nocontract_t, contract_t, 1, inner_dim_contiguous, inner_dim_reordered, + Alignment>, + nr, ColMajor, false, false> { + typedef TensorContractionSubMapper< + Scalar, Index, Rhs, + TensorEvaluator< + const TensorReshapingOp< + NewDimension, const TensorImagePatchOp >, + Device>, + nocontract_t, contract_t, 1, inner_dim_contiguous, inner_dim_reordered, + Alignment> + SubMapper; + typedef SubMapper DataMapper; + + EIGEN_STATIC_ASSERT((nr == 4), YOU_MADE_A_PROGRAMMING_MISTAKE) + + EIGEN_DEVICE_FUNC + EIGEN_DONT_INLINE void operator()(Scalar* block, const DataMapper& rhs, + Index depth, Index cols, Index stride = 0, + Index offset = 0) const { + eigen_assert(stride == 0); + eigen_assert(offset == 0); + + const Index packet_cols4 = (cols / 4) * 4; + + for (Index j2 = 0; j2 < packet_cols4; j2 += 4) { + const SubMapper dm0 = rhs.getLinearMapper(0, j2 + 0); + const SubMapper dm1 = rhs.getLinearMapper(0, j2 + 1); + const SubMapper dm2 = rhs.getLinearMapper(0, j2 + 2); + const SubMapper dm3 = rhs.getLinearMapper(0, j2 + 3); + + if (!rhs.nonStandardPatches()) { + for (Index k = 0; k < depth; k++) { + block[0] = dm0.loadCoeffStandard(k); + block[1] = dm1.loadCoeffStandard(k); + block[2] = dm2.loadCoeffStandard(k); + block[3] = dm3.loadCoeffStandard(k); + block += 4; + } + } else { + for (Index k = 0; k < depth; k++) { + block[0] = dm0(k); + block[1] = dm1(k); + block[2] = dm2(k); + block[3] = dm3(k); + block += 4; + } + } + } + + // Copy the remaining columns one at a time (nr==1). 
+ for (Index j2 = packet_cols4; j2 < cols; ++j2) { + const SubMapper dm0 = rhs.getLinearMapper(0, j2); + for (Index k = 0; k < depth; k++) { + *block = dm0(k); + block += 1; + } + } + } +}; +#endif +} // end namespace internal + +/** SpatialConvolution + * \ingroup CXX11_NeuralNetworks_Module + * + * \brief Applies a 2D convolution over a multichannel input image. + * + * The input parameter is expected to be a tensor with a rank of 3 or more + * (channels, height, width, and optionally others) + * The kernel parameter is expected to be a 4D tensor (filters, channels, + * kernel_height, kernel_width) + * The input and the kernel must both be in col-major layout. The result will + * also be in col-major layout. + * + * If col_in_stride, row_in_stride > 1, then applies convolution with holes + * (aka atrous convolution), sampling every col_in_stride, row_in_stride input + * pixels. + * + * If padding_top, padding_bottom, padding_left, or padding_right is specified, + * then those paddings will be used to pad the input, and padding_type must be + * PADDING_VALID. + * + * The result can be assigned to a tensor of rank equal to the rank of the + * input. The dimensions of the result will be filters, height, width (and + * others if applicable). + * + * It is possible to swap the order of the width and height dimensions provided + * that the same order is used in the input, the kernel, and the output. + * + * It is also possible to add an output kernel to the contraction, output + * kernel is called by Eigen when it "finalizes" the block of an output tensor. 
+ * + */ +template +EIGEN_ALWAYS_INLINE static const std::conditional_t< + internal::traits::Layout == ColMajor, + TensorReshapingOp< + const DSizes::Index, + internal::traits::NumDimensions>, + const TensorContractionOp< + const array::Index>, 1>, + const TensorReshapingOp< + const DSizes::Index, 2>, + const Kernel>, + const TensorReshapingOp< + const DSizes::Index, 2>, + const TensorImagePatchOp >, + const OutputKernel> >, + TensorReshapingOp< + const DSizes::Index, + internal::traits::NumDimensions>, + const TensorContractionOp< + const array::Index>, 1>, + const TensorReshapingOp< + const DSizes::Index, 2>, + const TensorImagePatchOp >, + const TensorReshapingOp< + const DSizes::Index, 2>, + const Kernel>, + const OutputKernel> > > +SpatialConvolution(const Input& input, const Kernel& kernel, + const Index row_stride = 1, const Index col_stride = 1, + const PaddingType padding_type = PADDING_SAME, + const Index row_in_stride = 1, const Index col_in_stride = 1, + const OutputKernel& output_kernel = OutputKernel(), + Index padding_top = 0, Index padding_bottom = 0, + Index padding_left = 0, Index padding_right = 0) { + typedef typename internal::traits::Index TensorIndex; + typedef typename internal::traits::Scalar InputScalar; + TensorRef::NumDimensions, + internal::traits::Layout, TensorIndex> > + in(input); + TensorRef::Scalar, + internal::traits::NumDimensions, + internal::traits::Layout, TensorIndex> > + kern(kernel); + + EIGEN_STATIC_ASSERT( + internal::traits::Layout == internal::traits::Layout, + YOU_MADE_A_PROGRAMMING_MISTAKE) + const bool isColMajor = (internal::traits::Layout == ColMajor); + + const int NumDims = internal::traits::NumDimensions; + + // Number of filters to apply. This is the same as the output depth of the + // result + const TensorIndex kernelFilters = + isColMajor ? kern.dimensions()[0] : kern.dimensions()[3]; + // Number of channels. This is the same as the input depth. + const TensorIndex kernelChannels = + isColMajor ? 
kern.dimensions()[1] : kern.dimensions()[2]; + const TensorIndex kernelRows = + isColMajor ? kern.dimensions()[2] : kern.dimensions()[1]; + const TensorIndex kernelCols = + isColMajor ? kern.dimensions()[3] : kern.dimensions()[0]; + + const Index kernelRowsEff = + kernelRows + (kernelRows - 1) * (row_in_stride - 1); + const Index kernelColsEff = + kernelCols + (kernelCols - 1) * (col_in_stride - 1); + + array, 1> contract_dims; + contract_dims[0] = IndexPair(1, 0); + + const TensorIndex InputRows = + isColMajor ? in.dimension(1) : in.dimension(NumDims - 2); + const TensorIndex InputCols = + isColMajor ? in.dimension(2) : in.dimension(NumDims - 3); + const bool padding_explicit = + (padding_top || padding_bottom || padding_left || padding_right); + + TensorIndex out_height; + TensorIndex out_width; + switch (padding_type) { + case PADDING_VALID: { + const TensorIndex InputRowsEff = InputRows + padding_top + padding_bottom; + const TensorIndex InputColsEff = InputCols + padding_left + padding_right; + out_height = divup(InputRowsEff - kernelRowsEff + 1, row_stride); + out_width = divup(InputColsEff - kernelColsEff + 1, col_stride); + break; + } + case PADDING_SAME: { + eigen_assert(!padding_explicit); + out_height = divup(InputRows, row_stride); + out_width = divup(InputCols, col_stride); + break; + } + default: { + // Initialize unused variables to avoid a compiler warning + out_height = 0; + out_width = 0; + eigen_assert(false && "unexpected padding"); + } + } + + // Molds the output of the patch extraction code into a 2d tensor: + // - the first dimension (dims[0]): the patch values to be multiplied with the + // kernels + // - the second dimension (dims[1]): everything else + DSizes pre_contract_dims; + if (isColMajor) { + pre_contract_dims[0] = kernelChannels * kernelRows * kernelCols; + pre_contract_dims[1] = out_height * out_width; + for (int i = 3; i < NumDims; ++i) { + pre_contract_dims[1] *= in.dimension(i); + } + } else { + pre_contract_dims[1] = 
kernelChannels * kernelRows * kernelCols; + pre_contract_dims[0] = out_height * out_width; + for (int i = 0; i < NumDims - 3; ++i) { + pre_contract_dims[0] *= in.dimension(i); + } + } + + // Molds the output of the contraction into the shape expected by the used + // (assuming this is ColMajor): + // - 1st dim: kernel filters + // - 2nd dim: output height + // - 3rd dim: output width + // - 4th dim and beyond: everything else including batch size + DSizes post_contract_dims; + if (isColMajor) { + post_contract_dims[0] = kernelFilters; + post_contract_dims[1] = out_height; + post_contract_dims[2] = out_width; + for (int i = 3; i < NumDims; ++i) { + post_contract_dims[i] = in.dimension(i); + } + } else { + post_contract_dims[NumDims - 1] = kernelFilters; + post_contract_dims[NumDims - 2] = out_height; + post_contract_dims[NumDims - 3] = out_width; + for (int i = 0; i < NumDims - 3; ++i) { + post_contract_dims[i] = in.dimension(i); + } + } + + DSizes kernel_dims; + if (isColMajor) { + kernel_dims[0] = kernelFilters; + kernel_dims[1] = kernelChannels * kernelRows * kernelCols; + } else { + kernel_dims[0] = kernelChannels * kernelRows * kernelCols; + kernel_dims[1] = kernelFilters; + } + if (padding_explicit) { + return choose( + Cond::Layout == ColMajor>(), + kernel.reshape(kernel_dims) + .contract(input + .extract_image_patches( + kernelRows, kernelCols, row_stride, col_stride, + row_in_stride, col_in_stride, + /*row_inflate_stride=*/1, + /*col_inflate_stride=*/1, padding_top, + padding_bottom, padding_left, padding_right, + /*padding_value=*/static_cast(0)) + .reshape(pre_contract_dims), + contract_dims, output_kernel) + .reshape(post_contract_dims), + input + .extract_image_patches( + kernelRows, kernelCols, row_stride, col_stride, row_in_stride, + col_in_stride, + /*row_inflate_stride=*/1, + /*col_inflate_stride=*/1, padding_top, padding_bottom, + padding_left, padding_right, + /*padding_value=*/static_cast(0)) + .reshape(pre_contract_dims) + 
.contract(kernel.reshape(kernel_dims), contract_dims, output_kernel) + .reshape(post_contract_dims)); + } else { + return choose( + Cond::Layout == ColMajor>(), + kernel.reshape(kernel_dims) + .contract(input + .extract_image_patches( + kernelRows, kernelCols, row_stride, col_stride, + row_in_stride, col_in_stride, padding_type) + .reshape(pre_contract_dims), + contract_dims, output_kernel) + .reshape(post_contract_dims), + input + .extract_image_patches(kernelRows, kernelCols, row_stride, + col_stride, row_in_stride, col_in_stride, + padding_type) + .reshape(pre_contract_dims) + .contract(kernel.reshape(kernel_dims), contract_dims, output_kernel) + .reshape(post_contract_dims)); + } +} + +} // end namespace Eigen + +#endif // TENSORFLOW_TSL_FRAMEWORK_CONVOLUTION_EIGEN_SPATIAL_CONVOLUTIONS_INL_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/convolution/eigen_spatial_convolutions.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/convolution/eigen_spatial_convolutions.h new file mode 100644 index 0000000000000000000000000000000000000000..5ac4382d45131ab5347473855d884264cf2f78de --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/convolution/eigen_spatial_convolutions.h @@ -0,0 +1,445 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_CONVOLUTION_EIGEN_SPATIAL_CONVOLUTIONS_H_ +#define TENSORFLOW_TSL_FRAMEWORK_CONVOLUTION_EIGEN_SPATIAL_CONVOLUTIONS_H_ + +#include "unsupported/Eigen/CXX11/Tensor" // from @eigen_archive + +// Note the following header is used in both TF and TFLite. Particularly, it's +// used for float TFLite Conv2D. +#include "tsl/framework/convolution/eigen_spatial_convolutions-inl.h" + +#if defined(TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL) +#include "tsl/framework/contraction/eigen_contraction_kernel.h" + +namespace Eigen { +namespace internal { + +// After we vectorized all loads from the underlying tensor using Packet ops, we +// have to finalize coefficients that do not fit into a packet. +template +struct FinalizeDataMapperCoeffs { + EIGEN_ALWAYS_INLINE static Index finalize(Scalar* block, + const DataMapper& rhs, + Index base_idx, Index depth, + Index max_depth, bool pad = false) { + const Index num_coeffs = max_depth - depth; + eigen_assert(num_coeffs <= packet_size); + + for (; depth < max_depth; ++depth) { + *block = pad ? Scalar(0) : rhs.coeffNoPadding(depth, base_idx); + ++block; + } + + return num_coeffs; + } +}; + +template +struct FinalizeDataMapperCoeffs { + EIGEN_ALWAYS_INLINE static Index finalize(Scalar* block, + const DataMapper& rhs, + Index base_idx, Index depth, + Index max_depth, bool pad = false) { + Index num_coeffs = max_depth - depth; + eigen_assert(num_coeffs <= packet_size); + if (num_coeffs == 0) return 0; + + using Packet = typename packet_traits::type; + Packet p = pad ? pset1(Scalar(0)) + : rhs.partialPacketNoPadding(depth, base_idx, num_coeffs); + internal::pstoreu(block, p, mask(0, num_coeffs)); + + return num_coeffs; + } +}; + +// Pack a block of the right input matrix (in our case it's always a +// "virtual matrix" constructed from extracted image patches) in contiguous +// block in column-major storage order. 
Knowing the properties of the +// original patch op we can do it more efficient than the default +// gemm_pack_colmajor_block. +template +struct gemm_pack_colmajor_block< + Scalar, StorageIndex, + TensorContractionSubMapper< + Scalar, StorageIndex, Rhs, + TensorEvaluator< + const TensorReshapingOp< + NewDimension, const TensorImagePatchOp >, + Device>, + nocontract_t, contract_t, packet_size, inner_dim_contiguous, + inner_dim_reordered, Alignment>, + ColMajor> { + typedef TensorContractionSubMapper< + Scalar, StorageIndex, Rhs, + TensorEvaluator< + const TensorReshapingOp< + NewDimension, const TensorImagePatchOp >, + Device>, + nocontract_t, contract_t, packet_size, inner_dim_contiguous, + inner_dim_reordered, Alignment> + SubMapper; + + typedef SubMapper DataMapper; + typedef typename packet_traits::type Packet; + + using CoeffFinalizer = FinalizeDataMapperCoeffs< + Scalar, DataMapper, packet_size, + TensorEvaluatorHasPartialPacket::value && + unpacket_traits::masked_store_available>; + + EIGEN_DONT_INLINE + void operator()(Scalar* block, const DataMapper& rhs, StorageIndex rows, + StorageIndex cols) { + const bool standard_patches = !rhs.nonStandardPatches(); + + if (standard_patches && (rhs.patchDepth() % packet_size == 0)) { + // Single packet always belong to single patch (row, col). + if (rhs.hasPadding()) { + packStandardPatches(block, rhs, rows, cols); + } else { + packStandardPatches(block, rhs, rows, cols); + } + + } else if (standard_patches) { + // Single packet can span across multiple patch rows or columns. + if (rhs.hasPadding()) { + packStandardPatches(block, rhs, rows, cols); + } else { + packStandardPatches(block, rhs, rows, cols); + } + + } else if (rhs.patchDepth() % packet_size == 0) { + // Single packet always belong to single patch (row, col). + packNonStandardPatches(block, rhs, rows, cols); + + } else { + // Single packet can span across multiple patch rows or columns. 
+ packNonStandardPatches(block, rhs, rows, cols); + } + } + + private: + // (A) Standard image patches: + // + // (1) patch_row_inflate_strides == 1 AND + // (2) patch_col_inflate_strides == 1 + // + // Standard patches guarantee that two inner most dimensions (depth and rows) + // are contiguous in memory and we can try to squeeze reads from them. + // + // (B) Non standard image patches: in_row/in_col and patch_row/patch_col + // strides can be not equal to 1, and for each [row, col] inside a patch we + // have to do additional computations to find corresponding row and col in the + // input tensor. Also we can no longer squeeze reads from inner dimensions. + // + // Additional parameters: + // - patch_depth_is_multiple_of_packet_size=true: We are guaranteed to have + // depth dimension size to be a multiple of packet size, so we can skip all + // non vectorized loads and checks, because it's guaranteed that block size + // will be a multiple of a packet size (see TensorContractionBlocking). + // + // - has_padding: Input tensor has non-zero padding. In this case for each + // patch col and row we need to check that it doesn't correspond to the + // padded region of original input. + template + EIGEN_ALWAYS_INLINE void packStandardPatches(Scalar* __restrict block, + const DataMapper& rhs, + StorageIndex rows, + StorageIndex cols) { + eigen_assert(!rhs.nonStandardPatches()); + + // Give vectorized_rows the name used in all other gemm_pack_rhs above. + const StorageIndex peeled_k = (rows / packet_size) * packet_size; + + const StorageIndex start_col = rhs.colOffset(); + const StorageIndex max_col = rhs.maxCol(peeled_k); + const StorageIndex rhs_depth_offset = rhs.depthOffset(); + + for (StorageIndex col = 0; col < cols; ++col) { + SubMapper lm = rhs.getLinearMapper(0, col); + + StorageIndex k = 0; + for (Index c = start_col; c < max_col; ++c) { + eigen_assert(k <= peeled_k); + + const StorageIndex start_row = (c == start_col) ? 
rhs.rowOffset() : 0; + const StorageIndex max_row = rhs.maxRow(peeled_k, c); + const bool pad_col = has_padding && lm.padCol(c); + + eigen_assert(has_padding || !lm.padCol(c)); + eigen_assert(has_padding || !lm.padAnyRow(start_row, max_row - 1)); + + // We can squeeze reads for all rows in [start_row, max_row) range. + if (!has_padding || + (!pad_col && !lm.padAnyRow(start_row, max_row - 1))) { + const StorageIndex start_depth = + (c == start_col) ? rhs_depth_offset : 0; + + const StorageIndex max_depth = + std::min(start_depth + (peeled_k - k), + (max_row - start_row) * rhs.patchDepth()); + + const StorageIndex base_idx = lm.baseIndex(start_row, c); + + if (patch_depth_is_multiple_of_packet_size) { + // If patch depth is a multiple of packet size, it's guaranteed that + // we can process all values in depth dimension with packets. + eigen_assert((max_depth - start_depth) % packet_size == 0); + StorageIndex d = start_depth; + + const StorageIndex unrolled_depth = max_depth - 4 * packet_size; + for (; d <= unrolled_depth; d += 4 * packet_size) { + eigen_assert(k < peeled_k); + + Packet p0 = rhs.packetNoPadding(d + 0 * packet_size, base_idx); + Packet p1 = rhs.packetNoPadding(d + 1 * packet_size, base_idx); + Packet p2 = rhs.packetNoPadding(d + 2 * packet_size, base_idx); + Packet p3 = rhs.packetNoPadding(d + 3 * packet_size, base_idx); + + internal::pstoreu(block + 0 * packet_size, p0); + internal::pstoreu(block + 1 * packet_size, p1); + internal::pstoreu(block + 2 * packet_size, p2); + internal::pstoreu(block + 3 * packet_size, p3); + + block += 4 * packet_size; + k += 4 * packet_size; + } + + for (; d < max_depth; d += packet_size) { + eigen_assert(k < peeled_k); + internal::pstoreu(block, rhs.packetNoPadding(d, base_idx)); + block += packet_size; + k += packet_size; + } + + } else { + StorageIndex d = start_depth; + + const StorageIndex unrolled_depth = max_depth - 4 * packet_size; + for (; d <= unrolled_depth; d += 4 * packet_size) { + eigen_assert(k < 
peeled_k); + + Packet p0 = rhs.packetNoPadding(d + 0 * packet_size, base_idx); + Packet p1 = rhs.packetNoPadding(d + 1 * packet_size, base_idx); + Packet p2 = rhs.packetNoPadding(d + 2 * packet_size, base_idx); + Packet p3 = rhs.packetNoPadding(d + 3 * packet_size, base_idx); + + internal::pstoreu(block + 0 * packet_size, p0); + internal::pstoreu(block + 1 * packet_size, p1); + internal::pstoreu(block + 2 * packet_size, p2); + internal::pstoreu(block + 3 * packet_size, p3); + + block += 4 * packet_size; + k += 4 * packet_size; + } + + const StorageIndex vectorized_depth = max_depth - packet_size; + for (; d <= vectorized_depth; d += packet_size) { + eigen_assert(k < peeled_k); + internal::pstoreu(block, rhs.packetNoPadding(d, base_idx)); + block += packet_size; + k += packet_size; + } + + eigen_assert(k <= peeled_k); + const Index num_coeffs = + CoeffFinalizer::finalize(block, rhs, base_idx, d, max_depth); + + k += num_coeffs; + block += num_coeffs; + eigen_assert(k <= peeled_k); + } + + // Go to the next column. + continue; + } + + // If we are not allowed to squeeze reads along the `row` and `depth` + // dimensions, we must process rows one by one. + for (StorageIndex r = start_row; r < max_row; ++r) { + eigen_assert(k <= peeled_k); + + const StorageIndex start_depth = + ((c == start_col) && (r == start_row)) ? rhs_depth_offset : 0; + const StorageIndex max_depth = + rhs.maxDepth(peeled_k - k, start_depth); + + const bool pad = has_padding && (pad_col || lm.padRow(r)); + eigen_assert(has_padding || !lm.padRow(r)); + + const StorageIndex base_idx = lm.baseIndex(r, c); + + if (patch_depth_is_multiple_of_packet_size) { + // If patch depth is a multiple of packet size, it's guaranteed that + // we can process all values in depth dimension with packets. + eigen_assert((max_depth - start_depth) % packet_size == 0); + StorageIndex d = start_depth; + + for (; d < max_depth; d += packet_size) { + eigen_assert(k < peeled_k); + const Packet p = (has_padding && pad) + ? 
pset1(Scalar(0)) + : rhs.packetNoPadding(d, base_idx); + internal::pstoreu(block, p); + block += packet_size; + k += packet_size; + } + + } else { + StorageIndex d = start_depth; + + const StorageIndex vectorized_depth = max_depth - packet_size; + for (; d <= vectorized_depth; d += packet_size) { + eigen_assert(k < peeled_k); + const Packet p = (has_padding && pad) + ? pset1(Scalar(0)) + : rhs.packetNoPadding(d, base_idx); + internal::pstoreu(block, p); + block += packet_size; + k += packet_size; + } + + eigen_assert(k <= peeled_k); + const Index num_coeffs = CoeffFinalizer::finalize( + block, rhs, base_idx, d, max_depth, has_padding && pad); + + k += num_coeffs; + block += num_coeffs; + eigen_assert(k <= peeled_k); + } + } + } + + // The loop above should fill peeled_k elements. + eigen_assert(peeled_k == k); + + // Fill remaining elements using loadCoeffStandard. + for (; k < rows; ++k) { + *block = lm.loadCoeffStandard(k); + ++block; + } + } + } + + template + EIGEN_ALWAYS_INLINE void packNonStandardPatches(Scalar* __restrict block, + const DataMapper& rhs, + StorageIndex rows, + StorageIndex cols) { + eigen_assert(rhs.nonStandardPatches()); + + // Give vectorized_rows the name used in all other gemm_pack_rhs above. + const StorageIndex peeled_k = (rows / packet_size) * packet_size; + + const StorageIndex start_col = rhs.colOffset(); + const StorageIndex max_col = rhs.maxCol(peeled_k); + const StorageIndex rhs_depth_offset = rhs.depthOffset(); + + // Original input column and row after applying all non-standard strides and + // dilations. Computed by padOrSkip{Row,Col}. + Index orig_c = 0; + Index orig_r = 0; + + for (StorageIndex col = 0; col < cols; ++col) { + SubMapper lm = rhs.getLinearMapper(0, col); + + StorageIndex k = 0; + for (Index c = start_col; c < max_col; ++c) { + eigen_assert(k <= peeled_k); + + const StorageIndex start_row = (c == start_col) ? 
rhs.rowOffset() : 0; + const StorageIndex max_row = rhs.maxRow(peeled_k, c); + const bool pad_or_skip_col = lm.padOrSkipCol(c, &orig_c); + + for (StorageIndex r = start_row; r < max_row; ++r) { + eigen_assert(k <= peeled_k); + + const StorageIndex start_depth = + ((c == start_col) && (r == start_row)) ? rhs_depth_offset : 0; + const StorageIndex max_depth = + rhs.maxDepth(peeled_k - k, start_depth); + + const bool pad_or_skip = + pad_or_skip_col || lm.padOrSkipRow(r, &orig_r); + const StorageIndex base_idx = lm.origBaseIndex(orig_r, orig_c); + + if (patch_depth_is_multiple_of_packet_size) { + // If patch depth is a multiple of packet size, it's guaranteed that + // we can process all values in depth dimension with packets. + eigen_assert((max_depth - start_depth) % packet_size == 0); + StorageIndex d = start_depth; + + for (; d < max_depth; d += packet_size) { + eigen_assert(k < peeled_k); + const Packet p = pad_or_skip ? pset1(Scalar(0)) + : rhs.packetNoPadding(d, base_idx); + internal::pstoreu(block, p); + block += packet_size; + k += packet_size; + } + + } else { + const StorageIndex vectorized_depth = max_depth - packet_size; + StorageIndex d = start_depth; + for (; d <= vectorized_depth; d += packet_size) { + eigen_assert(k < peeled_k); + const Packet p = pad_or_skip ? pset1(Scalar(0)) + : rhs.packetNoPadding(d, base_idx); + internal::pstoreu(block, p); + block += packet_size; + k += packet_size; + } + + eigen_assert(k <= peeled_k); + const Index num_coeffs = CoeffFinalizer::finalize( + block, rhs, base_idx, d, max_depth, pad_or_skip); + + k += num_coeffs; + block += num_coeffs; + eigen_assert(k <= peeled_k); + } + } + } + + // The loop above should fill peeled_k elements. + eigen_assert(peeled_k == k); + + // Fill remaining elements using loadCoeff. 
+ for (; k < rows; ++k) { + *block = lm(k); + ++block; + } + } + } +}; +} // namespace internal +} // namespace Eigen +#endif // defined(TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL) +#endif // TENSORFLOW_TSL_FRAMEWORK_CONVOLUTION_EIGEN_SPATIAL_CONVOLUTIONS_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/device_id.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/device_id.h new file mode 100644 index 0000000000000000000000000000000000000000..533c63d1081c2f1b9d5e6e37743a796f932d7b7b --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/device_id.h @@ -0,0 +1,89 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_DEVICE_ID_H_ +#define TENSORFLOW_TSL_FRAMEWORK_DEVICE_ID_H_ + +#include "tsl/lib/gtl/int_type.h" + +namespace tsl { + +// There are three types of device ids: +// - *physical* device id: this is the integer index of a device in the +// physical machine, it can be filtered (for e.g. using environment variable +// CUDA_VISIBLE_DEVICES when using CUDA). Note that this id is not visible to +// Tensorflow, but result after filtering is visible to TF and is called +// platform device id as below. 
+// For CUDA, see +// http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#env-vars +// for more details. +// - *platform* device id (also called *visible* device id in +// third_party/tensorflow/core/protobuf/config.proto): this is the id that is +// visible to Tensorflow after filtering (for e.g. by CUDA_VISIBLE_DEVICES). +// For CUDA, this id is generated by the CUDA GPU driver. It starts from 0 +// and is used for CUDA API calls like cuDeviceGet(). +// - TF device id (also called *virtual* device id in +// third_party/tensorflow/core/protobuf/config.proto): this is the id that +// Tensorflow generates and exposes to its users. It is the id in the +// field of the device name "/device:GPU:", and is also the identifier of +// a BaseGPUDevice. Note that the configuration allows us to create multiple +// BaseGPUDevice per GPU hardware in order to use multi CUDA streams on the +// hardware, so the mapping between TF GPU id and platform GPU id is not a 1:1 +// mapping, see the example below. +// +// For example, assuming that in the machine we have GPU device with index 0, 1, +// 2 and 3 (physical GPU id). Setting "CUDA_VISIBLE_DEVICES=1,2,3" will create +// the following mapping between platform GPU id and physical GPU id: +// +// platform GPU id -> physical GPU id +// 0 -> 1 +// 1 -> 2 +// 2 -> 3 +// +// Note that physical GPU id 0 is invisible to TF so there is no mapping entry +// for it. +// +// Assuming we configure the Session to create one BaseGPUDevice per GPU +// hardware, then setting GPUOptions::visible_device_list to "2,0" will create +// the following mapping between TF device id and platform device id: +// +// TF GPU id -> platform GPU ID +// 0 (i.e. /device:GPU:0) -> 2 +// 1 (i.e. /device:GPU:1) -> 0 +// +// Note that platform device id 1 is filtered out by +// GPUOptions::visible_device_list, so it won't be used by the TF process. 
+// +// On the other hand, if we configure it to create 2 BaseGPUDevice per GPU +// hardware, then setting GPUOptions::visible_device_list to "2,0" will create +// the following mapping between TF device id and platform device id: +// +// TF GPU id -> platform GPU ID +// 0 (i.e. /device:GPU:0) -> 2 +// 1 (i.e. /device:GPU:1) -> 2 +// 2 (i.e. /device:GPU:2) -> 0 +// 3 (i.e. /device:GPU:3) -> 0 +// +// We create strong-typed integer classes for both TF device id and platform +// device id to minimize programming errors and improve code readability. Except +// for the StreamExecutor interface (as we don't change its API), whenever we +// need a TF device id (or platform device id) we should use TfDeviceId (or +// PlatformDeviceId) instead of a raw integer. +TSL_LIB_GTL_DEFINE_INT_TYPE(TfDeviceId, int32); +TSL_LIB_GTL_DEFINE_INT_TYPE(PlatformDeviceId, int32); + +} // namespace tsl + +#endif // TENSORFLOW_TSL_FRAMEWORK_DEVICE_ID_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/device_id_manager.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/device_id_manager.h new file mode 100644 index 0000000000000000000000000000000000000000..ca101758ee3ebb5c3b97ef52715888af13a9a16a --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/device_id_manager.h @@ -0,0 +1,53 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_DEVICE_ID_MANAGER_H_ +#define TENSORFLOW_TSL_FRAMEWORK_DEVICE_ID_MANAGER_H_ + +#include + +#include "tsl/framework/device_id.h" +#include "tsl/framework/device_type.h" +#include "tsl/platform/status.h" +#include "tsl/platform/statusor.h" + +namespace tsl { + +// Class that maintains a map from TfDeviceId to PlatformDeviceId, and manages +// the translation between them. +class DeviceIdManager { + public: + // Adds a mapping from tf_device_id to platform_device_id. + static Status InsertTfPlatformDeviceIdPair( + const DeviceType& type, TfDeviceId tf_device_id, + PlatformDeviceId platform_device_id); + + // Gets the platform_device_id associated with tf_device_id. Returns OK if + // found. + static Status TfToPlatformDeviceId(const DeviceType& type, + TfDeviceId tf_device_id, + PlatformDeviceId* platform_device_id); + + // Gets all tf_device_ids that are on the platform with `platform_device_id`. + static StatusOr> GetTfDevicesOnPlatform( + const DeviceType& type, PlatformDeviceId platform_device_id); + + // Clears the map. Used in unit tests only. + static void TestOnlyReset(); +}; + +} // namespace tsl + +#endif // TENSORFLOW_TSL_FRAMEWORK_DEVICE_ID_MANAGER_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/device_id_utils.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/device_id_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..c2479aded5fe0aaa88c3609e37878effe1cd8648 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/device_id_utils.h @@ -0,0 +1,72 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_DEVICE_ID_UTILS_H_ +#define TENSORFLOW_TSL_FRAMEWORK_DEVICE_ID_UTILS_H_ + +#include +#include + +#include "absl/container/flat_hash_map.h" +#include "tsl/framework/device_id.h" +#include "tsl/framework/device_type.h" +#include "tsl/platform/status.h" +#include "tsl/platform/statusor.h" +#include "tsl/util/device_name_utils.h" + +namespace tsl { + +// Utility methods for translation between TensorFlow device ids and platform +// device ids. + +// Verify that the platform_device_id associated with a TfDeviceId is +// legitimate. +void CheckValidTfDeviceId(const DeviceType& type, int visible_device_count, + TfDeviceId tf_device_id); + +// Parse `visible_device_list` into a list of platform Device ids. +Status ParseVisibleDeviceList( + const std::string& visible_device_list, int visible_device_count, + std::vector* visible_device_order); + +// Returns how many TF devices should be created, and generates the mapping +// between TfDeviceId and PlatformDeviceId. The number of TF devices is the +// minimum among the device count in `session_option_device_counts`, +// `visible_device_count` and the number of visible devices in +// `visible_device_list`. If `visible_device_list` is empty, the mapping +// between TfDeviceId and PlatformDeviceId is an identity mapping. 
+// Please refer to tensorflow/tsl/framework/device_id.h and +// tensorflow/core/protobuf/config.proto about the relationship between +// TfDeviceId and PlatformDeviceId, and how `visible_device_list` is used. +StatusOr GetNumberTfDevicesAndConfigurePlatformDeviceId( + const absl::flat_hash_map& + session_option_device_counts, + absl::string_view device_type, absl::string_view visible_device_list, + int visible_device_count); + +StatusOr GetPlatformDeviceIdFromDeviceParsedName( + const DeviceNameUtils::ParsedName& device_name, + const DeviceType& device_type); + +// TODO(b/293324740): support virtual devices. +// Returns the corresponding PlatformDeviceId if it is found. Otherwise returns +// the id in device_name. +StatusOr GetDeviceIdFromDeviceParsedName( + const DeviceNameUtils::ParsedName& device_name, + const DeviceType& device_type); + +} // namespace tsl + +#endif // TENSORFLOW_TSL_FRAMEWORK_DEVICE_ID_UTILS_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/device_type.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/device_type.h new file mode 100644 index 0000000000000000000000000000000000000000..148cb1686348b0ca86fd9c0b7289905df0f87590 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/device_type.h @@ -0,0 +1,50 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_DEVICE_TYPE_H_ +#define TENSORFLOW_TSL_FRAMEWORK_DEVICE_TYPE_H_ + +#include +#include + +#include "absl/strings/string_view.h" + +namespace tsl { + +// A DeviceType is just a string, but we wrap it up in a class to give +// some type checking as we're passing these around +class DeviceType { + public: + DeviceType(const char* type) // NOLINT + : type_(type) {} + + explicit DeviceType(absl::string_view type) + : type_(type.data(), type.size()) {} + + const char* type() const { return type_.c_str(); } + const std::string& type_string() const { return type_; } + + bool operator<(const DeviceType& other) const; + bool operator==(const DeviceType& other) const; + bool operator!=(const DeviceType& other) const { return !(*this == other); } + + private: + std::string type_; +}; +std::ostream& operator<<(std::ostream& os, const DeviceType& d); + +} // namespace tsl + +#endif // TENSORFLOW_TSL_FRAMEWORK_DEVICE_TYPE_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/FixedPoint.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/FixedPoint.h new file mode 100644 index 0000000000000000000000000000000000000000..5301914ad37cefdfebeb05b3320157b6955e657b --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/FixedPoint.h @@ -0,0 +1,53 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_FIXEDPOINT_H_ +#define TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_FIXEDPOINT_H_ + +#include "unsupported/Eigen/CXX11/Tensor" // from @eigen_archive +#include "tsl/framework/fixedpoint_types.h" + +// Use optimized implementations whenever available +#if defined(EIGEN_VECTORIZE_AVX512DQ) || defined(EIGEN_VECTORIZE_AVX512BW) +#include "tsl/framework/fixedpoint/PacketMathAVX512.h" +#include "tsl/framework/fixedpoint/TypeCastingAVX512.h" + +#elif defined EIGEN_VECTORIZE_AVX2 +#define EIGEN_USE_OPTIMIZED_INT8_UINT8_MAT_MAT_PRODUCT +#define EIGEN_USE_OPTIMIZED_INT16_INT16_MAT_MAT_PRODUCT +#include "tsl/framework/fixedpoint/PacketMathAVX2.h" +// Disable clang-format to prevent 'MatMatProductAVX2.h' header from being +// included before 'PacketMathAVX2' header on which it depends. 
+// clang-format off +#include "tsl/framework/fixedpoint/MatMatProductAVX2.h" +// clang-format on +#include "tsl/framework/fixedpoint/TypeCastingAVX2.h" + +#elif defined EIGEN_VECTORIZE_AVX +#include "tsl/framework/fixedpoint/PacketMathAVX.h" + +#elif defined EIGEN_VECTORIZE_NEON +#define EIGEN_USE_OPTIMIZED_INT8_INT8_MAT_MAT_PRODUCT +#define EIGEN_USE_OPTIMIZED_INT8_UINT8_MAT_MAT_PRODUCT +#define EIGEN_USE_OPTIMIZED_UINT8_INT8_MAT_MAT_PRODUCT +#define EIGEN_USE_OPTIMIZED_INT16_INT16_MAT_MAT_PRODUCT +#include "tsl/framework/fixedpoint/MatMatProductNEON.h" +#endif + +// Use the default implementation when no optimized code is available +#include "tsl/framework/fixedpoint/MatMatProduct.h" +#include "tsl/framework/fixedpoint/MatVecProduct.h" + +#endif // TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_FIXEDPOINT_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/MatMatProduct.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/MatMatProduct.h new file mode 100644 index 0000000000000000000000000000000000000000..c7823d86d5c0a983fea268d64fe3df7ac3e144f9 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/MatMatProduct.h @@ -0,0 +1,363 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATMATPRODUCT_H_ +#define TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATMATPRODUCT_H_ + +namespace Eigen { +namespace internal { + +// Accumulate the product of 2 QInt8 inputs on 32 bits to prevent +// overflows +template <> +struct scalar_product_traits { + enum { Defined = 1 }; + typedef QInt32 ReturnType; +}; + +// Accumulate the product of 2 QInt16 inputs on 32 bits to prevent +// overflows +template <> +struct scalar_product_traits { + enum { Defined = 1 }; + typedef QInt32 ReturnType; +}; + +// Accumulate the product of QInt8 inputs with QUint8 inputs on 32 bits +// to prevent overflows +template <> +struct scalar_product_traits { + enum { Defined = 1 }; + typedef QInt32 ReturnType; +}; + +// Accumulate the product of QUInt8 inputs with Qint8 inputs on 32 bits +// to prevent overflows +template <> +struct scalar_product_traits { + enum { Defined = 1 }; + typedef QInt32 ReturnType; +}; + +// Description of the product implementation. It's pretty simple now since +// nothing is vectorized yet. +// This definition tackle the case where both lhs and rhs are encoded using +// signed 8bit integers +#ifndef EIGEN_USE_OPTIMIZED_INT8_INT8_MAT_MAT_PRODUCT + +template +class gebp_traits { + public: + typedef QInt8 LhsScalar; + typedef QInt8 RhsScalar; + typedef QInt32 ResScalar; + + typedef typename packet_traits::type LhsPacket; + typedef LhsPacket LhsPacket4Packing; + + enum { + // register block size along the M and N directions + // One for the current implementation + nr = 1, + mr = 1, + // Progress made at each iteration of the product loop + // also 1 for the current implementation + LhsProgress = 1, + RhsProgress = 1 + }; +}; + +// The signed 8bit Mat-Mat product itself. 
+template +struct gebp_kernel { + EIGEN_DONT_INLINE + void operator()(const DataMapper& res, const QInt8* blockA, + const QInt8* blockB, Index rows, Index depth, Index cols, + QInt32 alpha, Index strideA = -1, Index strideB = -1, + Index offsetA = 0, Index offsetB = 0); +}; + +template +EIGEN_DONT_INLINE void +gebp_kernel::operator()(const DataMapper& res, + const QInt8* blockA, const QInt8* blockB, + Index rows, Index depth, Index cols, + QInt32 alpha, Index strideA, + Index strideB, Index offsetA, + Index offsetB) { + EIGEN_STATIC_ASSERT(!ConjugateLhs, YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT(!ConjugateRhs, YOU_MADE_A_PROGRAMMING_MISTAKE); + + eigen_assert(alpha.value == 1); + eigen_assert(strideA == -1); + eigen_assert(strideB == -1); + eigen_assert(offsetA == 0); + eigen_assert(offsetB == 0); + + eigen_assert(rows > 0); + eigen_assert(cols > 0); + eigen_assert(depth > 0); + eigen_assert(blockA); + eigen_assert(blockB); + + for (Index j = 0; j < cols; ++j) { + Index startB = j * depth; + + for (Index i = 0; i < rows; ++i) { + Index startA = i * depth; + + for (Index k = 0; k < depth; ++k) { + res(i, j) += blockA[startA + k] * blockB[startB + k]; + } + } + } +} +#endif + +// This definition tackle the case where the lhs is encoded using signed 8bit +// integers and the rhs using unsigned 8bit integers. 
+#ifndef EIGEN_USE_OPTIMIZED_INT8_UINT8_MAT_MAT_PRODUCT +template +class gebp_traits { + public: + typedef QInt8 LhsScalar; + typedef QUInt8 RhsScalar; + typedef QInt32 ResScalar; + + typedef typename packet_traits::type LhsPacket; + typedef LhsPacket LhsPacket4Packing; + + enum { + // register block size along the M and N directions + // One for the current implementation + nr = 1, + mr = 1, + // Progress made at each iteration of the product loop + // also 1 for the current implementation + LhsProgress = 1, + RhsProgress = 1 + }; +}; + +// Mat-Mat product of a signed 8bit lhs with an unsigned 8bit rhs +template +struct gebp_kernel { + EIGEN_DONT_INLINE + void operator()(const DataMapper& res, const QInt8* blockA, + const QUInt8* blockB, Index rows, Index depth, Index cols, + QInt32 alpha, Index strideA = -1, Index strideB = -1, + Index offsetA = 0, Index offsetB = 0); +}; + +template +EIGEN_DONT_INLINE void +gebp_kernel::operator()(const DataMapper& res, + const QInt8* blockA, const QUInt8* blockB, + Index rows, Index depth, Index cols, + QInt32 alpha, Index strideA, + Index strideB, Index offsetA, + Index offsetB) { + EIGEN_STATIC_ASSERT(!ConjugateLhs, YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT(!ConjugateRhs, YOU_MADE_A_PROGRAMMING_MISTAKE); + + eigen_assert(alpha.value == 1); + eigen_assert(strideA == -1); + eigen_assert(strideB == -1); + eigen_assert(offsetA == 0); + eigen_assert(offsetB == 0); + + eigen_assert(rows > 0); + eigen_assert(cols > 0); + eigen_assert(depth > 0); + eigen_assert(blockA); + eigen_assert(blockB); + + for (Index j = 0; j < cols; ++j) { + Index startB = j * depth; + + for (Index i = 0; i < rows; ++i) { + Index startA = i * depth; + + for (Index k = 0; k < depth; ++k) { + res(i, j) += blockA[startA + k] * blockB[startB + k]; + } + } + } +} +#endif + +// This definition tackle the case where the lhs is encoded using unsigned 8bit +// integers and the rhs using signed 8bit integers. 
+#ifndef EIGEN_USE_OPTIMIZED_UINT8_INT8_MAT_MAT_PRODUCT +template +class gebp_traits { + public: + typedef QUInt8 LhsScalar; + typedef QInt8 RhsScalar; + typedef QInt32 ResScalar; + + typedef typename packet_traits::type LhsPacket; + typedef LhsPacket LhsPacket4Packing; + + enum { + // register block size along the M and N directions + // One for the current implementation + nr = 1, + mr = 1, + // Progress made at each iteration of the product loop + // also 1 for the current implementation + LhsProgress = 1, + RhsProgress = 1 + }; +}; + +// Mat-Mat product of an unsigned 8bit lhs with a signed 8bit rhs +template +struct gebp_kernel { + EIGEN_DONT_INLINE + void operator()(const DataMapper& res, const QUInt8* blockA, + const QInt8* blockB, Index rows, Index depth, Index cols, + QInt32 alpha, Index strideA = -1, Index strideB = -1, + Index offsetA = 0, Index offsetB = 0); +}; + +template +EIGEN_DONT_INLINE void +gebp_kernel::operator()(const DataMapper& res, + const QUInt8* blockA, const QInt8* blockB, + Index rows, Index depth, Index cols, + QInt32 alpha, Index strideA, + Index strideB, Index offsetA, + Index offsetB) { + EIGEN_STATIC_ASSERT(!ConjugateLhs, YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT(!ConjugateRhs, YOU_MADE_A_PROGRAMMING_MISTAKE); + + eigen_assert(alpha.value == 1); + eigen_assert(strideA == -1); + eigen_assert(strideB == -1); + eigen_assert(offsetA == 0); + eigen_assert(offsetB == 0); + + eigen_assert(rows > 0); + eigen_assert(cols > 0); + eigen_assert(depth > 0); + eigen_assert(blockA); + eigen_assert(blockB); + + for (Index j = 0; j < cols; ++j) { + Index startB = j * depth; + + for (Index i = 0; i < rows; ++i) { + Index startA = i * depth; + + for (Index k = 0; k < depth; ++k) { + res(i, j) += blockA[startA + k] * blockB[startB + k]; + } + } + } +} +#endif + +#ifndef EIGEN_USE_OPTIMIZED_INT16_INT16_MAT_MAT_PRODUCT + +template +class gebp_traits { + public: + typedef QInt16 LhsScalar; + typedef QInt16 RhsScalar; + typedef QInt32 
ResScalar; + + typedef typename packet_traits::type LhsPacket; + typedef LhsPacket LhsPacket4Packing; + + enum { + // register block size along the M and N directions + // One for the current implementation + nr = 1, + mr = 1, + // Progress made at each iteration of the product loop + // also 1 for the current implementation + LhsProgress = 1, + RhsProgress = 1 + }; +}; + +// The signed 16bit Mat-Mat product itself. +template +struct gebp_kernel { + EIGEN_DONT_INLINE + void operator()(const DataMapper& res, const QInt16* blockA, + const QInt16* blockB, Index rows, Index depth, Index cols, + QInt32 alpha, Index strideA = -1, Index strideB = -1, + Index offsetA = 0, Index offsetB = 0); +}; + +template +EIGEN_DONT_INLINE void +gebp_kernel::operator()(const DataMapper& res, + const QInt16* blockA, + const QInt16* blockB, Index rows, + Index depth, Index cols, QInt32 alpha, + Index strideA, Index strideB, + Index offsetA, Index offsetB) { + EIGEN_STATIC_ASSERT(!ConjugateLhs, YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT(!ConjugateRhs, YOU_MADE_A_PROGRAMMING_MISTAKE); + + eigen_assert(alpha.value == 1); + eigen_assert(strideA == -1); + eigen_assert(strideB == -1); + eigen_assert(offsetA == 0); + eigen_assert(offsetB == 0); + + eigen_assert(rows > 0); + eigen_assert(cols > 0); + eigen_assert(depth > 0); + eigen_assert(blockA); + eigen_assert(blockB); + + for (Index j = 0; j < cols; ++j) { + Index startB = j * depth; + + for (Index i = 0; i < rows; ++i) { + Index startA = i * depth; + + for (Index k = 0; k < depth; ++k) { + res(i, j) += blockA[startA + k] * blockB[startB + k]; + } + } + } +} +#endif + +} // namespace internal +} // namespace Eigen + +#endif // TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATMATPRODUCT_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/MatMatProductAVX2.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/MatMatProductAVX2.h new file mode 100644 index 
0000000000000000000000000000000000000000..742666766977598fd90e53a036c42450109c1053 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/MatMatProductAVX2.h @@ -0,0 +1,2314 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATMATPRODUCTAVX2_H_ +#define TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATMATPRODUCTAVX2_H_ + +namespace Eigen { +namespace internal { + +// AVX2 optimized implementation of Mat-Mat product. +// LHS is encoded using signed 16-bit integers. +// RHS is encoded using signed 16-bit integers. +#ifdef EIGEN_USE_OPTIMIZED_INT16_INT16_MAT_MAT_PRODUCT + +// Define quantized traits +template +class gebp_traits { + public: + typedef QInt16 LhsScalar; + typedef QInt16 RhsScalar; + typedef QInt32 ResScalar; + + typedef typename packet_traits::type LhsPacket; + typedef LhsPacket LhsPacket4Packing; + + enum { + // Define register blocking scheme. + nr = 16, + mr = 16, + kr = 4, + // Ignore progress tracking per loop iteration. + LhsProgress = -1, + RhsProgress = -1 + }; +}; + +// Specialized blocking for quantized implementations. +// Used by TensorContractionThreadPool, inputs must have dimensions that are +// multiples of 32. 
+template +class TensorContractionBlocking { + public: + TensorContractionBlocking(Index k, Index m, Index n, Index num_threads = 1) + : kc_(((k + 15) / 16) * 16), + mc_(((m + 15) / 16) * 16), + nc_(((n + 15) / 16) * 16) { + eigen_assert(mc_ % 16 == 0); + eigen_assert(kc_ % 16 == 0); + if (!k || !m || !n) { + return; + } + + if (ShardingType == ShardByCol) { + eigen_assert(nc_ % 16 == 0); + nc_ = (((nc_ / num_threads) + 15) / 16) * 16; + } else { + eigen_assert(nc_ % 16 == 0); + mc_ = (((mc_ / num_threads) + 15) / 16) * 16; + } + } + + EIGEN_ALWAYS_INLINE Index kc() const { return kc_; } + EIGEN_ALWAYS_INLINE Index mc() const { return mc_; } + EIGEN_ALWAYS_INLINE Index nc() const { return nc_; } + + private: + Index kc_; + Index mc_; + Index nc_; +}; + +// Specialized blocking for quantized implementations. +// Used by TensorContraction and GeneralMatrixMatrix, inputs are padded to +// multiples of 32. +template +class gemm_blocking_space + : public level3_blocking { + DenseIndex m_sizeA; + DenseIndex m_sizeB; + + public: + gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth, + DenseIndex /*num_threads*/, bool /*l3_blocking*/) { + this->m_mc = ((rows + 15) / 16) * 16; + this->m_nc = ((cols + 15) / 16) * 16; + this->m_kc = ((depth + 15) / 16) * 16; + m_sizeA = this->m_mc * this->m_kc; + m_sizeB = this->m_kc * this->m_nc; + } + void allocateA() { + if (this->m_blockA == 0) this->m_blockA = aligned_new(m_sizeA); + } + void allocateB() { + if (this->m_blockB == 0) this->m_blockB = aligned_new(m_sizeB); + } + void allocateAll() { + allocateA(); + allocateB(); + } + ~gemm_blocking_space() { + aligned_delete(this->m_blockA, m_sizeA); + aligned_delete(this->m_blockB, m_sizeB); + } +}; + +// Below are the fully optimized versions that are correct only for sizes that +// are multiple of 16. It is about a 10% performance benefit to keep these +// implementations separate. + +// Arrange a block of the left input matrix in contiguous memory. 
+// +// Given column major input (A0 beside A1 in memory): +// A0 B0 C0 D0 E0 F0 G0 H0 ... +// A1 B1 C1 D1 E1 F1 G1 H1 ... +// A2 B2 C2 D2 E2 F2 G2 H2 ... +// A3 B3 C3 D3 E3 F3 G3 H3 ... +// A4 B4 C4 D4 E4 F4 G4 H4 ... +// A5 B5 C5 D5 E5 F5 G5 H5 ... +// A6 B6 C6 D6 E6 F6 G6 H6 ... +// A7 B7 C7 D7 E7 F7 G7 H7 ... +// A8 ... +// ... +// +// Packing with m = 8 yields row major output (A0 beside B0 in memory): +// A0 B0 +// A1 B1 +// A2 B2 +// A3 B3 +// A4 B4 +// A5 B5 +// A6 B6 +// A7 B7 +// ... +// +// The purpose is to collect m rows of size k. Two elements of the same +// row are arranged contiguously because madd performs an adjacent addition +// in the kernel. + +template +struct gemm_pack_lhs { + EIGEN_DONT_INLINE void operator()(QInt16* blockA, const DataMapper& lhs, + Index depth, Index rows, Index stride = 0, + Index offset = 0); +}; + +template +EIGEN_DONT_INLINE void +gemm_pack_lhs::operator()(QInt16* blockA, + const DataMapper& lhs, + Index depth, Index rows, + Index stride, Index offset) { + eigen_assert(stride == 0); + eigen_assert(offset == 0); + + typedef typename packet_traits::type Packet; + + // Use alternate function for weird sizes + if (rows % 16 != 0 || depth % 16 != 0) { + eigen_assert(false && + "only depths and rows that are a multiple of 16 are currently " + "supported"); + // gemm_pack_lhs_any lhs_pack; + // return lhs_pack(blockA, lhs, depth, rows, stride, offset); + } + + // Get vector pointer + __m256i* blockA_256 = reinterpret_cast<__m256i*>(blockA); + + // Pack rows in sets of 16 + for (Index m = 0; m < rows; m += 16) { + // Pack depth in sets of 4 + for (Index k = 0; k < depth; k += 4) { + // Load vectors + __m256i L_A = lhs.template loadPacket(m, k); + __m256i L_B = lhs.template loadPacket(m, k + 1); + __m256i L_C = lhs.template loadPacket(m, k + 2); + __m256i L_D = lhs.template loadPacket(m, k + 3); + + // Rearrange the inputs as required by the kernel + __m256i L_AB0_AB7 = _mm256_unpacklo_epi16(L_A, L_B); + __m256i L_AB8_AB15 = 
_mm256_unpackhi_epi16(L_A, L_B); + __m256i L_CD0_CD7 = _mm256_unpacklo_epi16(L_C, L_D); + __m256i L_CD8_CD15 = _mm256_unpackhi_epi16(L_C, L_D); + + __m256i L_AD0 = _mm256_permute2x128_si256(L_AB0_AB7, L_AB8_AB15, 0x20); + _mm256_store_si256(blockA_256++, L_AD0); + __m256i L_AD8 = _mm256_permute2x128_si256(L_CD0_CD7, L_CD8_CD15, 0x20); + _mm256_store_si256(blockA_256++, L_AD8); + __m256i L_AD16 = _mm256_permute2x128_si256(L_AB0_AB7, L_AB8_AB15, 0x31); + _mm256_store_si256(blockA_256++, L_AD16); + __m256i L_AD24 = _mm256_permute2x128_si256(L_CD0_CD7, L_CD8_CD15, 0x31); + _mm256_store_si256(blockA_256++, L_AD24); + } + } +} + +// Arrange a block of the right input matrix in contiguous memory. +// +// Given column major input (A0 beside A1 in memory): +// A0 B0 C0 D0 E0 F0 G0 H0 ... +// A1 B1 C1 D1 E1 F1 G1 H1 ... +// A2 B2 C2 D2 E2 F2 G2 H2 ... +// A3 B3 C3 D3 E3 F3 G3 H3 ... +// A4 B4 C4 D4 E4 F4 G4 H4 ... +// A5 B5 C5 D5 E5 F5 G5 H5 ... +// A6 B6 C6 D6 E6 F6 G6 H6 ... +// A7 B7 C7 D7 E7 F7 G7 H7 ... +// A8 ... +// ... +// Packing yields row major output (A0 beside A1 in memory): +// A0 A1 A2 A3 A4 A5 A6 A7 +// B0 B1 B2 B3 B4 B5 B6 B7 +// ... +// +// At least two elements of the same col are arranged contiguously because +// maddubs and madd both perform an adjacent addition in the kernel. We can +// save work by leaving 4 adjacent elements because kr = 4. +// The purpose is to collect n cols of size k. Two elements of the same +// col are arranged contiguously because madd performs an adjacent addition +// in the kernel. 
+template +struct gemm_pack_rhs { + EIGEN_DONT_INLINE void operator()(QInt16* blockB, const DataMapper& rhs, + Index depth, Index cols, Index stride = 0, + Index offset = 0); +}; + +template +EIGEN_DONT_INLINE void +gemm_pack_rhs::operator()(QInt16* blockB, const DataMapper& rhs, + Index depth, Index cols, Index stride, + Index offset) { + eigen_assert(stride == 0); + eigen_assert(offset == 0); + + typedef typename packet_traits::type Packet; + + // Use alternate function for weird sizes + if (cols % 16 != 0 || depth % 16 != 0) { + eigen_assert(false && + "only depths and cols that are a multiple of 16 are currently " + "supported"); + // gemm_pack_rhs_any rhs_pack; + // return rhs_pack(blockB, rhs, depth, cols, stride, offset); + } + + // Get vector pointer + __m256i* blockB_256 = reinterpret_cast<__m256i*>(blockB); + + // Perform a step of the packing for 4 columns + __m256i R_AB_L, R_AB_H, R_CD_L, R_CD_H, R_AD_0, R_AD_4, R_AD_8, R_AD_12; +#define PACK_STEP \ + R_AB_L = _mm256_unpacklo_epi64(R_A, R_B); \ + R_CD_L = _mm256_unpacklo_epi64(R_C, R_D); \ + R_AB_H = _mm256_unpackhi_epi64(R_A, R_B); \ + R_CD_H = _mm256_unpackhi_epi64(R_C, R_D); \ + R_AD_0 = _mm256_permute2x128_si256(R_AB_L, R_CD_L, 0x20); \ + R_AD_8 = _mm256_permute2x128_si256(R_AB_L, R_CD_L, 0x31); \ + R_AD_4 = _mm256_permute2x128_si256(R_AB_H, R_CD_H, 0x20); \ + R_AD_12 = _mm256_permute2x128_si256(R_AB_H, R_CD_H, 0x31); \ + _mm256_store_si256(blockB_256, R_AD_0); \ + _mm256_store_si256(blockB_256 + 4, R_AD_4); \ + _mm256_store_si256(blockB_256 + 8, R_AD_8); \ + _mm256_store_si256(blockB_256 + 12, R_AD_12); \ + blockB_256++; + + // Pack cols in sets of 16 + for (Index n = 0; n < cols; n += 16) { + // Pack depth in sets of 16 + for (Index k = 0; k < depth; k += 16) { + __m256i R_A = rhs.template loadPacket(k, n); + __m256i R_B = rhs.template loadPacket(k, n + 1); + __m256i R_C = rhs.template loadPacket(k, n + 2); + __m256i R_D = rhs.template loadPacket(k, n + 3); + PACK_STEP; + + R_A = rhs.template 
loadPacket(k, n + 4); + R_B = rhs.template loadPacket(k, n + 5); + R_C = rhs.template loadPacket(k, n + 6); + R_D = rhs.template loadPacket(k, n + 7); + PACK_STEP; + + R_A = rhs.template loadPacket(k, n + 8); + R_B = rhs.template loadPacket(k, n + 9); + R_C = rhs.template loadPacket(k, n + 10); + R_D = rhs.template loadPacket(k, n + 11); + PACK_STEP; + + R_A = rhs.template loadPacket(k, n + 12); + R_B = rhs.template loadPacket(k, n + 13); + R_C = rhs.template loadPacket(k, n + 14); + R_D = rhs.template loadPacket(k, n + 15); + PACK_STEP; + + blockB_256 += 12; + } + } +#undef PACK_STEP +} + +// Perform the actual multiplication on packed inputs +template +struct gebp_kernel { + typedef typename DataMapper::LinearMapper LinearMapper; + + EIGEN_DONT_INLINE + void operator()(const DataMapper& res, const QInt16* blockA, + const QInt16* blockB, Index rows, Index depth, Index cols, + QInt32 alpha, Index strideA = -1, Index strideB = -1, + Index offsetA = 0, Index offsetB = 0); +}; + +template +EIGEN_DONT_INLINE void +gebp_kernel::operator()(const DataMapper& res, + const QInt16* blockA, + const QInt16* blockB, Index rows, + Index depth, Index cols, QInt32 alpha, + Index strideA, Index strideB, + Index offsetA, Index offsetB) { + EIGEN_STATIC_ASSERT(!ConjugateLhs, YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT(!ConjugateRhs, YOU_MADE_A_PROGRAMMING_MISTAKE); + eigen_assert(alpha.value == 1); + eigen_assert(strideA == -1); + eigen_assert(strideB == -1); + eigen_assert(offsetA == 0); + eigen_assert(offsetB == 0); + eigen_assert(rows > 0); + eigen_assert(cols > 0); + eigen_assert(depth > 0); + eigen_assert(blockA); + eigen_assert(blockB); + + // Use alternate function for weird sizes + if (rows % 16 != 0 || cols % 16 != 0 || depth % 16 != 0) { + eigen_assert( + false && + "only depths, cols and rows that are a multiple of 16 are currently " + "supported"); + // gebp_kernel_any gebp; + // return gebp(res, blockA, blockB, rows, depth, cols, alpha, strideA, + // strideB, 
offsetA, offsetB); + } + + // Create result block + QInt32* blockO = aligned_new(16 * 16); + memset(blockO, 0, 16 * 16 * sizeof(QInt32)); + + // Get vectorized pointers + __m256i* blockO_256 = reinterpret_cast<__m256i*>(blockO); + const __m256i* blockA_256 = reinterpret_cast(blockA); + const __m256i* blockB_256 = reinterpret_cast(blockB); + + // Loop over blocks of 16 columns + for (Index n = 0; n < cols; n += 16) { + // Reset index into blockA + Index indexL = 0; + // Loop over blocks of 16 rows + for (Index m = 0; m < rows; m += 16) { + // Reset index into blockB + Index indexR = n / 16 * depth; + // Loop over blocks of 4 on depth + for (Index k = 0; k < depth; k += 4) { + // Load inputs + __m256i L_AD0 = blockA_256[indexL++]; + __m256i L_AD8 = blockA_256[indexL++]; + __m256i L_EH0 = blockA_256[indexL++]; + __m256i L_EH8 = blockA_256[indexL++]; + + __m256i R_AH0 = blockB_256[indexR++]; + __m256i R_AH4 = blockB_256[indexR++]; + __m256i R_AH8 = blockB_256[indexR++]; + __m256i R_AH12 = blockB_256[indexR++]; + + // Declare variables used in COMPUTE_STEP + __m256i P_32_A, P_32_B, P_32; + +#define COMPUTE_STEP(R_INPUT_A, R_INPUT_B, OFFSET) \ + P_32_A = _mm256_madd_epi16(R_INPUT_A, L_AD0); \ + P_32_B = _mm256_madd_epi16(R_INPUT_B, L_AD8); \ + P_32 = _mm256_add_epi32(P_32_A, P_32_B); \ + _mm256_store_si256( \ + blockO_256 + 2 * OFFSET, \ + _mm256_add_epi32(_mm256_load_si256(blockO_256 + 2 * OFFSET), P_32)); \ + \ + P_32_A = _mm256_madd_epi16(R_INPUT_A, L_EH0); \ + P_32_B = _mm256_madd_epi16(R_INPUT_B, L_EH8); \ + P_32 = _mm256_add_epi32(P_32_A, P_32_B); \ + _mm256_store_si256( \ + blockO_256 + 2 * OFFSET + 1, \ + _mm256_add_epi32(_mm256_load_si256(blockO_256 + 2 * OFFSET + 1), P_32)); + + // Permute and shuffle to copy a single value across the entire vector + // Then compute the multiplication + // Replicate lower 128-bits of R_AH0 across both lanes + __m256i R_AH0_ = _mm256_permute2x128_si256(R_AH0, R_AH0, 0x00); + // Copy first two elements of R_AH0 across entire 
vector + __m256i R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00); + // Copy second two elements of R_AH0 across entire vector + __m256i R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55); + + COMPUTE_STEP(R_AD0, R_EH0, 0); + __m256i R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + __m256i R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD1, R_EH1, 1); + + // Replicate upper 128-bits of R_AH0 across both lanes + R_AH0_ = _mm256_permute2x128_si256(R_AH0, R_AH0, 0x11); + __m256i R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00); + __m256i R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD2, R_EH2, 2); + __m256i R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + __m256i R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD3, R_EH3, 3); + + R_AH0_ = _mm256_permute2x128_si256(R_AH4, R_AH4, 0x00); + R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD0, R_EH0, 4); + R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD1, R_EH1, 5); + R_AH0_ = _mm256_permute2x128_si256(R_AH4, R_AH4, 0x11); + R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD2, R_EH2, 6); + R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD3, R_EH3, 7); + + R_AH0_ = _mm256_permute2x128_si256(R_AH8, R_AH8, 0x00); + R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD0, R_EH0, 8); + R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD1, R_EH1, 9); + R_AH0_ = _mm256_permute2x128_si256(R_AH8, R_AH8, 0x11); + R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD2, R_EH2, 10); + R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD3, R_EH3, 11); + + R_AH0_ 
= _mm256_permute2x128_si256(R_AH12, R_AH12, 0x00); + R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD0, R_EH0, 12); + R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD1, R_EH1, 13); + R_AH0_ = _mm256_permute2x128_si256(R_AH12, R_AH12, 0x11); + R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD2, R_EH2, 14); + R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD3, R_EH3, 15); + +#undef COMPUTE_STEP + } + + // Transfer the results to the result matrix + Index i = 0; + for (Index j = n; j < n + 16; j++) { + LinearMapper r0 = res.getLinearMapper(m, j); + LinearMapper r1 = res.getLinearMapper(m + 8, j); + typedef typename packet_traits::type Packet; + r0.template storePacket( + 0, _mm256_add_epi32(blockO_256[i++], + r0.template loadPacket(0))); + r1.template storePacket( + 0, _mm256_add_epi32(blockO_256[i++], + r1.template loadPacket(0))); + } + + // Zero the result block so it can be reused + memset(blockO, 0, 16 * 16 * sizeof(QInt32)); + } + } + aligned_delete(blockO, 16 * 16); +} + +#endif + +// AVX2 optimized implementation of Mat-Mat product. +// LHS is encoded using signed 8-bit integers. +// RHS is encoded using unsigned 8-bit integers. +#ifdef EIGEN_USE_OPTIMIZED_INT8_UINT8_MAT_MAT_PRODUCT + +// Define quantized traits +template +class gebp_traits { + public: + typedef QInt8 LhsScalar; + typedef QUInt8 RhsScalar; + typedef QInt32 ResScalar; + + typedef typename packet_traits::type LhsPacket; + typedef LhsPacket LhsPacket4Packing; + + enum { + // Define register blocking scheme. + nr = 32, + mr = 32, + kr = 8, + // Ignore progress tracking per loop iteration. + LhsProgress = -1, + RhsProgress = -1 + }; +}; + +// Specialized blocking for quantized implementations. 
+// Used by TensorContractionThreadPool, inputs must have dimensions that are +// multiples of 32. +template +class TensorContractionBlocking< + ResScalar, + TensorContractionInputMapper< + QInt8, Index, Lhs, LeftTensor, left_nocontract_t, left_contract_t, 32, + left_inner_dim_contiguous, left_inner_dim_reordered, LeftAlignment>, + TensorContractionInputMapper, + Index, ShardingType> { + public: + typedef QInt8 LhsScalar; + typedef QUInt8 RhsScalar; + + TensorContractionBlocking(Index k, Index m, Index n, Index num_threads = 1) + : kc_(k), mc_(m), nc_(n) { + eigen_assert(m % 32 == 0); + eigen_assert(k % 32 == 0); + if (!k || !m || !n) { + return; + } + + if (ShardingType == ShardByCol) { + eigen_assert(n % 32 == 0); + nc_ = (((n / num_threads) + 31) / 32) * 32; + } else { + eigen_assert(n % 32 == 0 || n == 1); + // Special case to avoid breaking the unimplemented matrix-vector case + if (n == 1) { + nc_ = 32; + } + mc_ = (((m / num_threads) + 31) / 32) * 32; + } + } + + EIGEN_ALWAYS_INLINE Index kc() const { return kc_; } + EIGEN_ALWAYS_INLINE Index mc() const { return mc_; } + EIGEN_ALWAYS_INLINE Index nc() const { return nc_; } + + private: + Index kc_; + Index mc_; + Index nc_; +}; + +// Specialized blocking for quantized implementations. +// Used by TensorContraction and GeneralMatrixMatrix, inputs are padded to +// multiples of 32. 
+template +class gemm_blocking_space + : public level3_blocking { + DenseIndex m_sizeA; + DenseIndex m_sizeB; + + public: + gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth, + DenseIndex /*num_threads*/, bool /*l3_blocking*/) { + this->m_mc = ((rows + 31) / 32) * 32; + this->m_nc = ((cols + 31) / 32) * 32; + this->m_kc = ((depth + 31) / 32) * 32; + m_sizeA = this->m_mc * this->m_kc; + m_sizeB = this->m_kc * this->m_nc; + } + void allocateA() { + if (this->m_blockA == 0) this->m_blockA = aligned_new(m_sizeA); + } + void allocateB() { + if (this->m_blockB == 0) this->m_blockB = aligned_new(m_sizeB); + } + void allocateAll() { + allocateA(); + allocateB(); + } + ~gemm_blocking_space() { + aligned_delete(this->m_blockA, m_sizeA); + aligned_delete(this->m_blockB, m_sizeB); + } +}; + +template +class gemm_blocking_space + : public level3_blocking { + DenseIndex m_sizeA; + DenseIndex m_sizeB; + + public: + gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth, + DenseIndex /*num_threads*/, bool /*l3_blocking*/) { + this->m_mc = ((rows + 31) / 32) * 32; + this->m_nc = ((cols + 31) / 32) * 32; + this->m_kc = ((depth + 31) / 32) * 32; + m_sizeA = this->m_mc * this->m_kc; + m_sizeB = this->m_kc * this->m_nc; + } + void allocateA() { + if (this->m_blockA == 0) this->m_blockA = aligned_new(m_sizeA); + } + void allocateB() { + if (this->m_blockB == 0) this->m_blockB = aligned_new(m_sizeB); + } + void allocateAll() { + allocateA(); + allocateB(); + } + ~gemm_blocking_space() { + aligned_delete(this->m_blockA, m_sizeA); + aligned_delete(this->m_blockB, m_sizeB); + } +}; + +// Alternate templates for any input sizes +template +struct gemm_pack_lhs_any; +template +struct gemm_pack_lhs_any { + EIGEN_DONT_INLINE void operator()(QInt8* blockA, const DataMapper& lhs, + Index depth, Index rows, Index stride = 0, + Index offset = 0); +}; + +template +struct gemm_pack_rhs_any; +template +struct gemm_pack_rhs_any { + EIGEN_DONT_INLINE void 
operator()(QUInt8* blockB, const DataMapper& rhs, + Index depth, Index cols, Index stride = 0, + Index offset = 0); +}; + +template +struct gebp_kernel_any; +template +struct gebp_kernel_any { + typedef typename DataMapper::LinearMapper LinearMapper; + + EIGEN_DONT_INLINE + void operator()(const DataMapper& res, const QInt8* blockA, + const QUInt8* blockB, Index rows, Index depth, Index cols, + QInt32 alpha, Index strideA = -1, Index strideB = -1, + Index offsetA = 0, Index offsetB = 0); +}; + +// Alternate implementations for any input sizes +template +EIGEN_DONT_INLINE void +gemm_pack_lhs_any::operator()(QInt8* blockA, const DataMapper& lhs, + Index depth, Index rows, Index stride, + Index offset) { + eigen_assert(stride == 0); + eigen_assert(offset == 0); + + typedef typename packet_traits::type Packet; + + // Get vector pointer + __m256i* blockA_256 = reinterpret_cast<__m256i*>(blockA); + + // Get even multiples of the dimensions + Index rows_32 = (rows / 32) * 32; + Index depth_8 = (depth / 8) * 8; + + // Get padding for when depth is not a multiple of 32 + int padding = 0; + if (depth % 32 != 0) { + int depth_32 = (depth / 32) * 32; + int extra_depth = depth - depth_32; + int extra_depth_8 = ((extra_depth + 7) / 8) * 8; + padding = 32 - extra_depth_8; + } + + // Pack rows in sets of 32 + for (Index m = 0; m < rows_32; m += 32) { + // Pack depth in sets of 8 + for (Index k = 0; k < depth_8; k += 8) { + // Load vectors + __m256i L_A = lhs.template loadPacket(m, k); + __m256i L_B = lhs.template loadPacket(m, k + 1); + + // Interleave 8-bit elements + __m256i L_AB0_AB16 = _mm256_unpacklo_epi8(L_A, L_B); + __m256i L_AB8_AB24 = _mm256_unpackhi_epi8(L_A, L_B); + + __m256i L_C = lhs.template loadPacket(m, k + 2); + __m256i L_D = lhs.template loadPacket(m, k + 3); + __m256i L_CD0_CD16 = _mm256_unpacklo_epi8(L_C, L_D); + __m256i L_CD8_CD24 = _mm256_unpackhi_epi8(L_C, L_D); + + // Interleave 16-bit elements + __m256i L_AD0_AD16 = _mm256_unpacklo_epi16(L_AB0_AB16, 
L_CD0_CD16); + __m256i L_AD4_AD20 = _mm256_unpackhi_epi16(L_AB0_AB16, L_CD0_CD16); + + // Use permute before we store to cross 128-bit lanes + __m256i L_AD0 = _mm256_permute2x128_si256(L_AD0_AD16, L_AD4_AD20, 0x20); + _mm256_store_si256(blockA_256++, L_AD0); + + // Complete packing for 32 x 8 block + __m256i L_AD16 = _mm256_permute2x128_si256(L_AD0_AD16, L_AD4_AD20, 0x31); + __m256i L_AD8_AD24 = _mm256_unpacklo_epi16(L_AB8_AB24, L_CD8_CD24); + __m256i L_AD12_AD28 = _mm256_unpackhi_epi16(L_AB8_AB24, L_CD8_CD24); + __m256i L_AD8 = _mm256_permute2x128_si256(L_AD8_AD24, L_AD12_AD28, 0x20); + _mm256_store_si256(blockA_256++, L_AD8); + _mm256_store_si256(blockA_256++, L_AD16); + __m256i L_AD24 = _mm256_permute2x128_si256(L_AD8_AD24, L_AD12_AD28, 0x31); + _mm256_store_si256(blockA_256++, L_AD24); + __m256i L_E = lhs.template loadPacket(m, k + 4); + __m256i L_F = lhs.template loadPacket(m, k + 5); + __m256i L_EF0_EF16 = _mm256_unpacklo_epi8(L_E, L_F); + __m256i L_EF8_EF24 = _mm256_unpackhi_epi8(L_E, L_F); + __m256i L_G = lhs.template loadPacket(m, k + 6); + __m256i L_H = lhs.template loadPacket(m, k + 7); + __m256i L_GH0_GH16 = _mm256_unpacklo_epi8(L_G, L_H); + __m256i L_GH8_GH24 = _mm256_unpackhi_epi8(L_G, L_H); + __m256i L_EH0_EH16 = _mm256_unpacklo_epi16(L_EF0_EF16, L_GH0_GH16); + __m256i L_EH4_EH20 = _mm256_unpackhi_epi16(L_EF0_EF16, L_GH0_GH16); + __m256i L_EH0 = _mm256_permute2x128_si256(L_EH0_EH16, L_EH4_EH20, 0x20); + _mm256_store_si256(blockA_256++, L_EH0); + __m256i L_EH16 = _mm256_permute2x128_si256(L_EH0_EH16, L_EH4_EH20, 0x31); + __m256i L_EH8_EH24 = _mm256_unpacklo_epi16(L_EF8_EF24, L_GH8_GH24); + __m256i L_EH12_EH28 = _mm256_unpackhi_epi16(L_EF8_EF24, L_GH8_GH24); + __m256i L_EH8 = _mm256_permute2x128_si256(L_EH8_EH24, L_EH12_EH28, 0x20); + _mm256_store_si256(blockA_256++, L_EH8); + _mm256_store_si256(blockA_256++, L_EH16); + __m256i L_EH24 = _mm256_permute2x128_si256(L_EH8_EH24, L_EH12_EH28, 0x31); + _mm256_store_si256(blockA_256++, L_EH24); + } + + // 
Finish the k dimension, padding with zeros + if (depth_8 < depth) { + __m256i L_A, L_B, L_C, L_D, L_E, L_F, L_G, L_H; + switch (depth - depth_8) { + case 1: + L_A = lhs.template loadPacket(m, depth_8); + L_B = _mm256_setzero_si256(); + L_C = _mm256_setzero_si256(); + L_D = _mm256_setzero_si256(); + L_E = _mm256_setzero_si256(); + L_F = _mm256_setzero_si256(); + L_G = _mm256_setzero_si256(); + L_H = _mm256_setzero_si256(); + break; + case 2: + L_A = lhs.template loadPacket(m, depth_8); + L_B = lhs.template loadPacket(m, depth_8 + 1); + L_C = _mm256_setzero_si256(); + L_D = _mm256_setzero_si256(); + L_E = _mm256_setzero_si256(); + L_F = _mm256_setzero_si256(); + L_G = _mm256_setzero_si256(); + L_H = _mm256_setzero_si256(); + break; + case 3: + L_A = lhs.template loadPacket(m, depth_8); + L_B = lhs.template loadPacket(m, depth_8 + 1); + L_C = lhs.template loadPacket(m, depth_8 + 2); + L_D = _mm256_setzero_si256(); + L_E = _mm256_setzero_si256(); + L_F = _mm256_setzero_si256(); + L_G = _mm256_setzero_si256(); + L_H = _mm256_setzero_si256(); + break; + case 4: + L_A = lhs.template loadPacket(m, depth_8); + L_B = lhs.template loadPacket(m, depth_8 + 1); + L_C = lhs.template loadPacket(m, depth_8 + 2); + L_D = lhs.template loadPacket(m, depth_8 + 3); + L_E = _mm256_setzero_si256(); + L_F = _mm256_setzero_si256(); + L_G = _mm256_setzero_si256(); + L_H = _mm256_setzero_si256(); + break; + case 5: + L_A = lhs.template loadPacket(m, depth_8); + L_B = lhs.template loadPacket(m, depth_8 + 1); + L_C = lhs.template loadPacket(m, depth_8 + 2); + L_D = lhs.template loadPacket(m, depth_8 + 3); + L_E = lhs.template loadPacket(m, depth_8 + 4); + L_F = _mm256_setzero_si256(); + L_G = _mm256_setzero_si256(); + L_H = _mm256_setzero_si256(); + break; + case 6: + L_A = lhs.template loadPacket(m, depth_8); + L_B = lhs.template loadPacket(m, depth_8 + 1); + L_C = lhs.template loadPacket(m, depth_8 + 2); + L_D = lhs.template loadPacket(m, depth_8 + 3); + L_E = lhs.template loadPacket(m, 
depth_8 + 4); + L_F = lhs.template loadPacket(m, depth_8 + 5); + L_G = _mm256_setzero_si256(); + L_H = _mm256_setzero_si256(); + break; + case 7: + L_A = lhs.template loadPacket(m, depth_8); + L_B = lhs.template loadPacket(m, depth_8 + 1); + L_C = lhs.template loadPacket(m, depth_8 + 2); + L_D = lhs.template loadPacket(m, depth_8 + 3); + L_E = lhs.template loadPacket(m, depth_8 + 4); + L_F = lhs.template loadPacket(m, depth_8 + 5); + L_G = lhs.template loadPacket(m, depth_8 + 6); + L_H = _mm256_setzero_si256(); + break; + } + + // Interleave 8-bit elements + __m256i L_AB0_AB16 = _mm256_unpacklo_epi8(L_A, L_B); + __m256i L_AB8_AB24 = _mm256_unpackhi_epi8(L_A, L_B); + + __m256i L_CD0_CD16 = _mm256_unpacklo_epi8(L_C, L_D); + __m256i L_CD8_CD24 = _mm256_unpackhi_epi8(L_C, L_D); + + // Interleave 16-bit elements + __m256i L_AD0_AD16 = _mm256_unpacklo_epi16(L_AB0_AB16, L_CD0_CD16); + __m256i L_AD4_AD20 = _mm256_unpackhi_epi16(L_AB0_AB16, L_CD0_CD16); + + // Use permute before we store to cross 128-bit lanes + __m256i L_AD0 = _mm256_permute2x128_si256(L_AD0_AD16, L_AD4_AD20, 0x20); + _mm256_store_si256(blockA_256++, L_AD0); + + // Complete packing + __m256i L_AD16 = _mm256_permute2x128_si256(L_AD0_AD16, L_AD4_AD20, 0x31); + __m256i L_AD8_AD24 = _mm256_unpacklo_epi16(L_AB8_AB24, L_CD8_CD24); + __m256i L_AD12_AD28 = _mm256_unpackhi_epi16(L_AB8_AB24, L_CD8_CD24); + __m256i L_AD8 = _mm256_permute2x128_si256(L_AD8_AD24, L_AD12_AD28, 0x20); + _mm256_store_si256(blockA_256++, L_AD8); + _mm256_store_si256(blockA_256++, L_AD16); + __m256i L_AD24 = _mm256_permute2x128_si256(L_AD8_AD24, L_AD12_AD28, 0x31); + _mm256_store_si256(blockA_256++, L_AD24); + __m256i L_EF0_EF16 = _mm256_unpacklo_epi8(L_E, L_F); + __m256i L_EF8_EF24 = _mm256_unpackhi_epi8(L_E, L_F); + __m256i L_GH0_GH16 = _mm256_unpacklo_epi8(L_G, L_H); + __m256i L_GH8_GH24 = _mm256_unpackhi_epi8(L_G, L_H); + __m256i L_EH0_EH16 = _mm256_unpacklo_epi16(L_EF0_EF16, L_GH0_GH16); + __m256i L_EH4_EH20 = 
_mm256_unpackhi_epi16(L_EF0_EF16, L_GH0_GH16); + __m256i L_EH0 = _mm256_permute2x128_si256(L_EH0_EH16, L_EH4_EH20, 0x20); + _mm256_store_si256(blockA_256++, L_EH0); + __m256i L_EH16 = _mm256_permute2x128_si256(L_EH0_EH16, L_EH4_EH20, 0x31); + __m256i L_EH8_EH24 = _mm256_unpacklo_epi16(L_EF8_EF24, L_GH8_GH24); + __m256i L_EH12_EH28 = _mm256_unpackhi_epi16(L_EF8_EF24, L_GH8_GH24); + __m256i L_EH8 = _mm256_permute2x128_si256(L_EH8_EH24, L_EH12_EH28, 0x20); + _mm256_store_si256(blockA_256++, L_EH8); + _mm256_store_si256(blockA_256++, L_EH16); + __m256i L_EH24 = _mm256_permute2x128_si256(L_EH8_EH24, L_EH12_EH28, 0x31); + _mm256_store_si256(blockA_256++, L_EH24); + } + blockA_256 += padding; + } + + // Finish the m dimension, padding with zeros + if (rows_32 < rows) { + // Pack depth in sets of 8 + for (Index k = 0; k < depth_8; k += 8) { + // Load vectors + __m256i L_A = _mm256_setzero_si256(); + __m256i L_B = _mm256_setzero_si256(); + __m256i L_C = _mm256_setzero_si256(); + __m256i L_D = _mm256_setzero_si256(); + __m256i L_E = _mm256_setzero_si256(); + __m256i L_F = _mm256_setzero_si256(); + __m256i L_G = _mm256_setzero_si256(); + __m256i L_H = _mm256_setzero_si256(); + for (Index m = 0; m < rows - rows_32; m++) { + QInt8* ptr = (QInt8*)&L_A; + ptr[m] = lhs(rows_32 + m, k); + ptr = (QInt8*)&L_B; + ptr[m] = lhs(rows_32 + m, k + 1); + ptr = (QInt8*)&L_C; + ptr[m] = lhs(rows_32 + m, k + 2); + ptr = (QInt8*)&L_D; + ptr[m] = lhs(rows_32 + m, k + 3); + ptr = (QInt8*)&L_E; + ptr[m] = lhs(rows_32 + m, k + 4); + ptr = (QInt8*)&L_F; + ptr[m] = lhs(rows_32 + m, k + 5); + ptr = (QInt8*)&L_G; + ptr[m] = lhs(rows_32 + m, k + 6); + ptr = (QInt8*)&L_H; + ptr[m] = lhs(rows_32 + m, k + 7); + } + + // Interleave 8-bit elements + __m256i L_AB0_AB16 = _mm256_unpacklo_epi8(L_A, L_B); + __m256i L_AB8_AB24 = _mm256_unpackhi_epi8(L_A, L_B); + __m256i L_CD0_CD16 = _mm256_unpacklo_epi8(L_C, L_D); + __m256i L_CD8_CD24 = _mm256_unpackhi_epi8(L_C, L_D); + + // Interleave 16-bit elements + __m256i 
L_AD0_AD16 = _mm256_unpacklo_epi16(L_AB0_AB16, L_CD0_CD16); + __m256i L_AD4_AD20 = _mm256_unpackhi_epi16(L_AB0_AB16, L_CD0_CD16); + + // Use permute before we store to cross 128-bit lanes + __m256i L_AD0 = _mm256_permute2x128_si256(L_AD0_AD16, L_AD4_AD20, 0x20); + _mm256_store_si256(blockA_256++, L_AD0); + + // Complete packing for 32 x 8 block + __m256i L_AD16 = _mm256_permute2x128_si256(L_AD0_AD16, L_AD4_AD20, 0x31); + __m256i L_AD8_AD24 = _mm256_unpacklo_epi16(L_AB8_AB24, L_CD8_CD24); + __m256i L_AD12_AD28 = _mm256_unpackhi_epi16(L_AB8_AB24, L_CD8_CD24); + __m256i L_AD8 = _mm256_permute2x128_si256(L_AD8_AD24, L_AD12_AD28, 0x20); + _mm256_store_si256(blockA_256++, L_AD8); + _mm256_store_si256(blockA_256++, L_AD16); + __m256i L_AD24 = _mm256_permute2x128_si256(L_AD8_AD24, L_AD12_AD28, 0x31); + _mm256_store_si256(blockA_256++, L_AD24); + __m256i L_EF0_EF16 = _mm256_unpacklo_epi8(L_E, L_F); + __m256i L_EF8_EF24 = _mm256_unpackhi_epi8(L_E, L_F); + __m256i L_GH0_GH16 = _mm256_unpacklo_epi8(L_G, L_H); + __m256i L_GH8_GH24 = _mm256_unpackhi_epi8(L_G, L_H); + __m256i L_EH0_EH16 = _mm256_unpacklo_epi16(L_EF0_EF16, L_GH0_GH16); + __m256i L_EH4_EH20 = _mm256_unpackhi_epi16(L_EF0_EF16, L_GH0_GH16); + __m256i L_EH0 = _mm256_permute2x128_si256(L_EH0_EH16, L_EH4_EH20, 0x20); + _mm256_store_si256(blockA_256++, L_EH0); + __m256i L_EH16 = _mm256_permute2x128_si256(L_EH0_EH16, L_EH4_EH20, 0x31); + __m256i L_EH8_EH24 = _mm256_unpacklo_epi16(L_EF8_EF24, L_GH8_GH24); + __m256i L_EH12_EH28 = _mm256_unpackhi_epi16(L_EF8_EF24, L_GH8_GH24); + __m256i L_EH8 = _mm256_permute2x128_si256(L_EH8_EH24, L_EH12_EH28, 0x20); + _mm256_store_si256(blockA_256++, L_EH8); + _mm256_store_si256(blockA_256++, L_EH16); + __m256i L_EH24 = _mm256_permute2x128_si256(L_EH8_EH24, L_EH12_EH28, 0x31); + _mm256_store_si256(blockA_256++, L_EH24); + } + + // Finish the k dimension, padding with zeros + if (depth_8 < depth) { + __m256i L_A, L_B, L_C, L_D, L_E, L_F, L_G, L_H; + QInt8* ptr; + switch (depth - depth_8) { 
+ case 1: + L_A = _mm256_setzero_si256(); + L_B = _mm256_setzero_si256(); + L_C = _mm256_setzero_si256(); + L_D = _mm256_setzero_si256(); + L_E = _mm256_setzero_si256(); + L_F = _mm256_setzero_si256(); + L_G = _mm256_setzero_si256(); + L_H = _mm256_setzero_si256(); + for (Index m = 0; m < rows - rows_32; m++) { + QInt8* ptr = (QInt8*)&L_A; + ptr[m] = lhs(rows_32 + m, depth_8); + } + break; + case 2: + L_A = _mm256_setzero_si256(); + L_B = _mm256_setzero_si256(); + L_C = _mm256_setzero_si256(); + L_D = _mm256_setzero_si256(); + L_E = _mm256_setzero_si256(); + L_F = _mm256_setzero_si256(); + L_G = _mm256_setzero_si256(); + L_H = _mm256_setzero_si256(); + for (Index m = 0; m < rows - rows_32; m++) { + ptr = (QInt8*)&L_A; + ptr[m] = lhs(rows_32 + m, depth_8); + ptr = (QInt8*)&L_B; + ptr[m] = lhs(rows_32 + m, depth_8 + 1); + } + break; + case 3: + L_A = _mm256_setzero_si256(); + L_B = _mm256_setzero_si256(); + L_C = _mm256_setzero_si256(); + L_D = _mm256_setzero_si256(); + L_E = _mm256_setzero_si256(); + L_F = _mm256_setzero_si256(); + L_G = _mm256_setzero_si256(); + L_H = _mm256_setzero_si256(); + for (Index m = 0; m < rows - rows_32; m++) { + ptr = (QInt8*)&L_A; + ptr[m] = lhs(rows_32 + m, depth_8); + ptr = (QInt8*)&L_B; + ptr[m] = lhs(rows_32 + m, depth_8 + 1); + ptr = (QInt8*)&L_C; + ptr[m] = lhs(rows_32 + m, depth_8 + 2); + } + break; + case 4: + L_A = _mm256_setzero_si256(); + L_B = _mm256_setzero_si256(); + L_C = _mm256_setzero_si256(); + L_D = _mm256_setzero_si256(); + L_E = _mm256_setzero_si256(); + L_F = _mm256_setzero_si256(); + L_G = _mm256_setzero_si256(); + L_H = _mm256_setzero_si256(); + for (Index m = 0; m < rows - rows_32; m++) { + ptr = (QInt8*)&L_A; + ptr[m] = lhs(rows_32 + m, depth_8); + ptr = (QInt8*)&L_B; + ptr[m] = lhs(rows_32 + m, depth_8 + 1); + ptr = (QInt8*)&L_C; + ptr[m] = lhs(rows_32 + m, depth_8 + 2); + ptr = (QInt8*)&L_D; + ptr[m] = lhs(rows_32 + m, depth_8 + 3); + } + break; + case 5: + L_A = _mm256_setzero_si256(); + L_B = 
_mm256_setzero_si256(); + L_C = _mm256_setzero_si256(); + L_D = _mm256_setzero_si256(); + L_E = _mm256_setzero_si256(); + L_F = _mm256_setzero_si256(); + L_G = _mm256_setzero_si256(); + L_H = _mm256_setzero_si256(); + for (Index m = 0; m < rows - rows_32; m++) { + ptr = (QInt8*)&L_A; + ptr[m] = lhs(rows_32 + m, depth_8); + ptr = (QInt8*)&L_B; + ptr[m] = lhs(rows_32 + m, depth_8 + 1); + ptr = (QInt8*)&L_C; + ptr[m] = lhs(rows_32 + m, depth_8 + 2); + ptr = (QInt8*)&L_D; + ptr[m] = lhs(rows_32 + m, depth_8 + 3); + ptr = (QInt8*)&L_E; + ptr[m] = lhs(rows_32 + m, depth_8 + 4); + } + break; + case 6: + L_A = _mm256_setzero_si256(); + L_B = _mm256_setzero_si256(); + L_C = _mm256_setzero_si256(); + L_D = _mm256_setzero_si256(); + L_E = _mm256_setzero_si256(); + L_F = _mm256_setzero_si256(); + L_G = _mm256_setzero_si256(); + L_H = _mm256_setzero_si256(); + for (Index m = 0; m < rows - rows_32; m++) { + ptr = (QInt8*)&L_A; + ptr[m] = lhs(rows_32 + m, depth_8); + ptr = (QInt8*)&L_B; + ptr[m] = lhs(rows_32 + m, depth_8 + 1); + ptr = (QInt8*)&L_C; + ptr[m] = lhs(rows_32 + m, depth_8 + 2); + ptr = (QInt8*)&L_D; + ptr[m] = lhs(rows_32 + m, depth_8 + 3); + ptr = (QInt8*)&L_E; + ptr[m] = lhs(rows_32 + m, depth_8 + 4); + ptr = (QInt8*)&L_F; + ptr[m] = lhs(rows_32 + m, depth_8 + 5); + } + break; + case 7: + L_A = _mm256_setzero_si256(); + L_B = _mm256_setzero_si256(); + L_C = _mm256_setzero_si256(); + L_D = _mm256_setzero_si256(); + L_E = _mm256_setzero_si256(); + L_F = _mm256_setzero_si256(); + L_G = _mm256_setzero_si256(); + L_H = _mm256_setzero_si256(); + for (Index m = 0; m < rows - rows_32; m++) { + ptr = (QInt8*)&L_A; + ptr[m] = lhs(rows_32 + m, depth_8); + ptr = (QInt8*)&L_B; + ptr[m] = lhs(rows_32 + m, depth_8 + 1); + ptr = (QInt8*)&L_C; + ptr[m] = lhs(rows_32 + m, depth_8 + 2); + ptr = (QInt8*)&L_D; + ptr[m] = lhs(rows_32 + m, depth_8 + 3); + ptr = (QInt8*)&L_E; + ptr[m] = lhs(rows_32 + m, depth_8 + 4); + ptr = (QInt8*)&L_F; + ptr[m] = lhs(rows_32 + m, depth_8 + 5); + ptr = 
(QInt8*)&L_G; + ptr[m] = lhs(rows_32 + m, depth_8 + 6); + } + break; + } + + // Interleave 8-bit elements + __m256i L_AB0_AB16 = _mm256_unpacklo_epi8(L_A, L_B); + __m256i L_AB8_AB24 = _mm256_unpackhi_epi8(L_A, L_B); + __m256i L_CD0_CD16 = _mm256_unpacklo_epi8(L_C, L_D); + __m256i L_CD8_CD24 = _mm256_unpackhi_epi8(L_C, L_D); + + // Interleave 16-bit elements + __m256i L_AD0_AD16 = _mm256_unpacklo_epi16(L_AB0_AB16, L_CD0_CD16); + __m256i L_AD4_AD20 = _mm256_unpackhi_epi16(L_AB0_AB16, L_CD0_CD16); + + // Use permute before we store to cross 128-bit lanes + __m256i L_AD0 = _mm256_permute2x128_si256(L_AD0_AD16, L_AD4_AD20, 0x20); + _mm256_store_si256(blockA_256++, L_AD0); + + // Complete packing + __m256i L_AD16 = _mm256_permute2x128_si256(L_AD0_AD16, L_AD4_AD20, 0x31); + __m256i L_AD8_AD24 = _mm256_unpacklo_epi16(L_AB8_AB24, L_CD8_CD24); + __m256i L_AD12_AD28 = _mm256_unpackhi_epi16(L_AB8_AB24, L_CD8_CD24); + __m256i L_AD8 = _mm256_permute2x128_si256(L_AD8_AD24, L_AD12_AD28, 0x20); + _mm256_store_si256(blockA_256++, L_AD8); + _mm256_store_si256(blockA_256++, L_AD16); + __m256i L_AD24 = _mm256_permute2x128_si256(L_AD8_AD24, L_AD12_AD28, 0x31); + _mm256_store_si256(blockA_256++, L_AD24); + __m256i L_EF0_EF16 = _mm256_unpacklo_epi8(L_E, L_F); + __m256i L_EF8_EF24 = _mm256_unpackhi_epi8(L_E, L_F); + __m256i L_GH0_GH16 = _mm256_unpacklo_epi8(L_G, L_H); + __m256i L_GH8_GH24 = _mm256_unpackhi_epi8(L_G, L_H); + __m256i L_EH0_EH16 = _mm256_unpacklo_epi16(L_EF0_EF16, L_GH0_GH16); + __m256i L_EH4_EH20 = _mm256_unpackhi_epi16(L_EF0_EF16, L_GH0_GH16); + __m256i L_EH0 = _mm256_permute2x128_si256(L_EH0_EH16, L_EH4_EH20, 0x20); + _mm256_store_si256(blockA_256++, L_EH0); + __m256i L_EH16 = _mm256_permute2x128_si256(L_EH0_EH16, L_EH4_EH20, 0x31); + __m256i L_EH8_EH24 = _mm256_unpacklo_epi16(L_EF8_EF24, L_GH8_GH24); + __m256i L_EH12_EH28 = _mm256_unpackhi_epi16(L_EF8_EF24, L_GH8_GH24); + __m256i L_EH8 = _mm256_permute2x128_si256(L_EH8_EH24, L_EH12_EH28, 0x20); + 
_mm256_store_si256(blockA_256++, L_EH8); + _mm256_store_si256(blockA_256++, L_EH16); + __m256i L_EH24 = _mm256_permute2x128_si256(L_EH8_EH24, L_EH12_EH28, 0x31); + _mm256_store_si256(blockA_256++, L_EH24); + } + } +} + +template +EIGEN_DONT_INLINE void +gemm_pack_rhs_any::operator()(QUInt8* blockB, const DataMapper& rhs, + Index depth, Index cols, Index stride, + Index offset) { + eigen_assert(stride == 0); + eigen_assert(offset == 0); + + typedef typename packet_traits::type Packet; + + // Get vector pointer + __m256i* blockB_256 = reinterpret_cast<__m256i*>(blockB); + + // Get even multiples of the dimensions + Index cols_32 = (cols / 32) * 32; + Index depth_32 = (depth / 32) * 32; + + // Perform a step of the packing for 4 columns + __m256i R_AB_L, R_AB_H, R_CD_L, R_CD_H, R_AD_0, R_AD_8, R_AD_16, R_AD_24; +#define PACK_STEP \ + R_AB_L = _mm256_unpacklo_epi64(R_A, R_B); \ + R_CD_L = _mm256_unpacklo_epi64(R_C, R_D); \ + R_AB_H = _mm256_unpackhi_epi64(R_A, R_B); \ + R_CD_H = _mm256_unpackhi_epi64(R_C, R_D); \ + R_AD_0 = _mm256_permute2x128_si256(R_AB_L, R_CD_L, 0x20); \ + R_AD_16 = _mm256_permute2x128_si256(R_AB_L, R_CD_L, 0x31); \ + R_AD_8 = _mm256_permute2x128_si256(R_AB_H, R_CD_H, 0x20); \ + R_AD_24 = _mm256_permute2x128_si256(R_AB_H, R_CD_H, 0x31); \ + _mm256_store_si256(blockB_256, R_AD_0); \ + _mm256_store_si256(blockB_256 + 8, R_AD_8); \ + _mm256_store_si256(blockB_256 + 16, R_AD_16); \ + _mm256_store_si256(blockB_256 + 24, R_AD_24); \ + blockB_256++; + + // Pack cols in sets of 32 + for (Index n = 0; n < cols_32; n += 32) { + // Pack depth in sets of 32 + for (Index k = 0; k < depth_32; k += 32) { + __m256i R_A = rhs.template loadPacket(k, n); + __m256i R_B = rhs.template loadPacket(k, n + 1); + __m256i R_C = rhs.template loadPacket(k, n + 2); + __m256i R_D = rhs.template loadPacket(k, n + 3); + PACK_STEP; + + R_A = rhs.template loadPacket(k, n + 4); + R_B = rhs.template loadPacket(k, n + 5); + R_C = rhs.template loadPacket(k, n + 6); + R_D = rhs.template 
loadPacket(k, n + 7); + PACK_STEP; + + R_A = rhs.template loadPacket(k, n + 8); + R_B = rhs.template loadPacket(k, n + 9); + R_C = rhs.template loadPacket(k, n + 10); + R_D = rhs.template loadPacket(k, n + 11); + PACK_STEP; + + R_A = rhs.template loadPacket(k, n + 12); + R_B = rhs.template loadPacket(k, n + 13); + R_C = rhs.template loadPacket(k, n + 14); + R_D = rhs.template loadPacket(k, n + 15); + PACK_STEP; + + R_A = rhs.template loadPacket(k, n + 16); + R_B = rhs.template loadPacket(k, n + 17); + R_C = rhs.template loadPacket(k, n + 18); + R_D = rhs.template loadPacket(k, n + 19); + PACK_STEP; + + R_A = rhs.template loadPacket(k, n + 20); + R_B = rhs.template loadPacket(k, n + 21); + R_C = rhs.template loadPacket(k, n + 22); + R_D = rhs.template loadPacket(k, n + 23); + PACK_STEP; + + R_A = rhs.template loadPacket(k, n + 24); + R_B = rhs.template loadPacket(k, n + 25); + R_C = rhs.template loadPacket(k, n + 26); + R_D = rhs.template loadPacket(k, n + 27); + PACK_STEP; + + R_A = rhs.template loadPacket(k, n + 28); + R_B = rhs.template loadPacket(k, n + 29); + R_C = rhs.template loadPacket(k, n + 30); + R_D = rhs.template loadPacket(k, n + 31); + PACK_STEP; + + blockB_256 += 24; + } + + if (depth_32 < depth) { + QUInt8* ptr; + __m256i R_A = _mm256_setzero_si256(); + __m256i R_B = _mm256_setzero_si256(); + __m256i R_C = _mm256_setzero_si256(); + __m256i R_D = _mm256_setzero_si256(); + for (Index k = depth_32; k < depth; k++) { + ptr = (QUInt8*)&R_A; + ptr[k - depth_32] = rhs(k, n); + ptr = (QUInt8*)&R_B; + ptr[k - depth_32] = rhs(k, n + 1); + ptr = (QUInt8*)&R_C; + ptr[k - depth_32] = rhs(k, n + 2); + ptr = (QUInt8*)&R_D; + ptr[k - depth_32] = rhs(k, n + 3); + } + PACK_STEP; + + R_A = _mm256_setzero_si256(); + R_B = _mm256_setzero_si256(); + R_C = _mm256_setzero_si256(); + R_D = _mm256_setzero_si256(); + for (Index k = depth_32; k < depth; k++) { + ptr = (QUInt8*)&R_A; + ptr[k - depth_32] = rhs(k, n + 4); + ptr = (QUInt8*)&R_B; + ptr[k - depth_32] = rhs(k, n + 
5); + ptr = (QUInt8*)&R_C; + ptr[k - depth_32] = rhs(k, n + 6); + ptr = (QUInt8*)&R_D; + ptr[k - depth_32] = rhs(k, n + 7); + } + PACK_STEP; + + R_A = _mm256_setzero_si256(); + R_B = _mm256_setzero_si256(); + R_C = _mm256_setzero_si256(); + R_D = _mm256_setzero_si256(); + for (Index k = depth_32; k < depth; k++) { + ptr = (QUInt8*)&R_A; + ptr[k - depth_32] = rhs(k, n + 8); + ptr = (QUInt8*)&R_B; + ptr[k - depth_32] = rhs(k, n + 9); + ptr = (QUInt8*)&R_C; + ptr[k - depth_32] = rhs(k, n + 10); + ptr = (QUInt8*)&R_D; + ptr[k - depth_32] = rhs(k, n + 11); + } + PACK_STEP; + + R_A = _mm256_setzero_si256(); + R_B = _mm256_setzero_si256(); + R_C = _mm256_setzero_si256(); + R_D = _mm256_setzero_si256(); + for (Index k = depth_32; k < depth; k++) { + ptr = (QUInt8*)&R_A; + ptr[k - depth_32] = rhs(k, n + 12); + ptr = (QUInt8*)&R_B; + ptr[k - depth_32] = rhs(k, n + 13); + ptr = (QUInt8*)&R_C; + ptr[k - depth_32] = rhs(k, n + 14); + ptr = (QUInt8*)&R_D; + ptr[k - depth_32] = rhs(k, n + 15); + } + PACK_STEP; + + R_A = _mm256_setzero_si256(); + R_B = _mm256_setzero_si256(); + R_C = _mm256_setzero_si256(); + R_D = _mm256_setzero_si256(); + for (Index k = depth_32; k < depth; k++) { + ptr = (QUInt8*)&R_A; + ptr[k - depth_32] = rhs(k, n + 16); + ptr = (QUInt8*)&R_B; + ptr[k - depth_32] = rhs(k, n + 17); + ptr = (QUInt8*)&R_C; + ptr[k - depth_32] = rhs(k, n + 18); + ptr = (QUInt8*)&R_D; + ptr[k - depth_32] = rhs(k, n + 19); + } + PACK_STEP; + + R_A = _mm256_setzero_si256(); + R_B = _mm256_setzero_si256(); + R_C = _mm256_setzero_si256(); + R_D = _mm256_setzero_si256(); + for (Index k = depth_32; k < depth; k++) { + ptr = (QUInt8*)&R_A; + ptr[k - depth_32] = rhs(k, n + 20); + ptr = (QUInt8*)&R_B; + ptr[k - depth_32] = rhs(k, n + 21); + ptr = (QUInt8*)&R_C; + ptr[k - depth_32] = rhs(k, n + 22); + ptr = (QUInt8*)&R_D; + ptr[k - depth_32] = rhs(k, n + 23); + } + PACK_STEP; + + R_A = _mm256_setzero_si256(); + R_B = _mm256_setzero_si256(); + R_C = _mm256_setzero_si256(); + R_D = 
_mm256_setzero_si256(); + for (Index k = depth_32; k < depth; k++) { + ptr = (QUInt8*)&R_A; + ptr[k - depth_32] = rhs(k, n + 24); + ptr = (QUInt8*)&R_B; + ptr[k - depth_32] = rhs(k, n + 25); + ptr = (QUInt8*)&R_C; + ptr[k - depth_32] = rhs(k, n + 26); + ptr = (QUInt8*)&R_D; + ptr[k - depth_32] = rhs(k, n + 27); + } + PACK_STEP; + + R_A = _mm256_setzero_si256(); + R_B = _mm256_setzero_si256(); + R_C = _mm256_setzero_si256(); + R_D = _mm256_setzero_si256(); + for (Index k = depth_32; k < depth; k++) { + ptr = (QUInt8*)&R_A; + ptr[k - depth_32] = rhs(k, n + 28); + ptr = (QUInt8*)&R_B; + ptr[k - depth_32] = rhs(k, n + 29); + ptr = (QUInt8*)&R_C; + ptr[k - depth_32] = rhs(k, n + 30); + ptr = (QUInt8*)&R_D; + ptr[k - depth_32] = rhs(k, n + 31); + } + PACK_STEP; + blockB_256 += 24; + } + } + + // Finish packing cols + if (cols_32 < cols) { + // Pack depth in sets of 32 + for (Index k = 0; k < depth_32; k += 32) { + __m256i R_A, R_B, R_C, R_D; + Index n; + for (n = cols_32; n < cols; n += 4) { + switch (cols - n) { + case 1: + R_A = rhs.template loadPacket(k, n); + R_B = _mm256_setzero_si256(); + R_C = _mm256_setzero_si256(); + R_D = _mm256_setzero_si256(); + PACK_STEP; + break; + case 2: + R_A = rhs.template loadPacket(k, n); + R_B = rhs.template loadPacket(k, n + 1); + R_C = _mm256_setzero_si256(); + R_D = _mm256_setzero_si256(); + PACK_STEP; + break; + case 3: + R_A = rhs.template loadPacket(k, n); + R_B = rhs.template loadPacket(k, n + 1); + R_C = rhs.template loadPacket(k, n + 2); + R_D = _mm256_setzero_si256(); + PACK_STEP; + break; + default: + R_A = rhs.template loadPacket(k, n); + R_B = rhs.template loadPacket(k, n + 1); + R_C = rhs.template loadPacket(k, n + 2); + R_D = rhs.template loadPacket(k, n + 3); + PACK_STEP; + break; + } + } + + // Increment the block pointer. + // We must pad if cols is not a multiple of 32. 
+ blockB_256 += 32 - (n - cols_32) / 4; + } + + if (depth_32 < depth) { + for (Index n = cols_32; n < cols; n += 4) { + QUInt8* ptr; + __m256i R_A = _mm256_setzero_si256(); + __m256i R_B = _mm256_setzero_si256(); + __m256i R_C = _mm256_setzero_si256(); + __m256i R_D = _mm256_setzero_si256(); + switch (cols - n) { + case 1: + for (Index k = depth_32; k < depth; k++) { + ptr = (QUInt8*)&R_A; + ptr[k - depth_32] = rhs(k, n); + } + PACK_STEP; + break; + case 2: + for (Index k = depth_32; k < depth; k++) { + ptr = (QUInt8*)&R_A; + ptr[k - depth_32] = rhs(k, n); + ptr = (QUInt8*)&R_B; + ptr[k - depth_32] = rhs(k, n + 1); + } + PACK_STEP; + break; + case 3: + for (Index k = depth_32; k < depth; k++) { + ptr = (QUInt8*)&R_A; + ptr[k - depth_32] = rhs(k, n); + ptr = (QUInt8*)&R_B; + ptr[k - depth_32] = rhs(k, n + 1); + ptr = (QUInt8*)&R_C; + ptr[k - depth_32] = rhs(k, n + 2); + } + PACK_STEP; + break; + default: + for (Index k = depth_32; k < depth; k++) { + ptr = (QUInt8*)&R_A; + ptr[k - depth_32] = rhs(k, n); + ptr = (QUInt8*)&R_B; + ptr[k - depth_32] = rhs(k, n + 1); + ptr = (QUInt8*)&R_C; + ptr[k - depth_32] = rhs(k, n + 2); + ptr = (QUInt8*)&R_D; + ptr[k - depth_32] = rhs(k, n + 3); + } + PACK_STEP; + break; + } + } + } + } +#undef PACK_STEP +} + +template +EIGEN_DONT_INLINE void +gebp_kernel_any::operator()(const DataMapper& res, + const QInt8* blockA, + const QUInt8* blockB, Index rows, + Index depth, Index cols, QInt32 alpha, + Index strideA, Index strideB, + Index offsetA, Index offsetB) { + EIGEN_STATIC_ASSERT(!ConjugateLhs, YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT(!ConjugateRhs, YOU_MADE_A_PROGRAMMING_MISTAKE); + eigen_assert(alpha.value == 1); + eigen_assert(strideA == -1); + eigen_assert(strideB == -1); + eigen_assert(offsetA == 0); + eigen_assert(offsetB == 0); + eigen_assert(rows > 0); + eigen_assert(cols > 0); + eigen_assert(depth > 0); + eigen_assert(blockA); + eigen_assert(blockB); + + Index rows_32 = ((rows + 31) / 32) * 32; + Index cols_32 
= ((cols + 31) / 32) * 32; + Index depth_32 = ((depth + 31) / 32) * 32; + + // Create result block + ei_declare_aligned_stack_constructed_variable(QInt32, blockO, 32 * 32, 0); + memset(blockO, 0, 32 * 32 * sizeof(QInt32)); + + // Get vectorized pointers + __m256i* blockO_256 = reinterpret_cast<__m256i*>(blockO); + const __m256i* blockA_256 = reinterpret_cast(blockA); + const __m256i* blockB_256 = reinterpret_cast(blockB); + + // Loop over blocks of 32 columns + for (Index n = 0; n < cols_32; n += 32) { + // Reset index into blockA + Index indexL = 0; + // Loop over blocks of 32 rows + for (Index m = 0; m < rows_32; m += 32) { + // Reset index into blockB + Index indexR = n / 32 * depth_32; + // Loop over blocks of 8 on depth + for (Index k = 0; k < depth_32; k += 8) { + // Load inputs + __m256i L_AD0 = blockA_256[indexL++]; + __m256i L_AD8 = blockA_256[indexL++]; + __m256i L_AD16 = blockA_256[indexL++]; + __m256i L_AD24 = blockA_256[indexL++]; + __m256i L_EH0 = blockA_256[indexL++]; + __m256i L_EH8 = blockA_256[indexL++]; + __m256i L_EH16 = blockA_256[indexL++]; + __m256i L_EH24 = blockA_256[indexL++]; + __m256i R_AH0 = blockB_256[indexR++]; + __m256i R_AH4 = blockB_256[indexR++]; + __m256i R_AH8 = blockB_256[indexR++]; + __m256i R_AH12 = blockB_256[indexR++]; + __m256i R_AH16 = blockB_256[indexR++]; + __m256i R_AH20 = blockB_256[indexR++]; + __m256i R_AH24 = blockB_256[indexR++]; + __m256i R_AH28 = blockB_256[indexR++]; + + // This constant is used with madd to convert 16 bit to 32 bit + const __m256i ONE = _mm256_set1_epi32(0x00010001); + + // Declare variables used in COMPUTE_STEP + __m256i P_16_A, P_16_B, P_32_A, P_32_B, P_32; + +#define COMPUTE_STEP(R_INPUT_A, R_INPUT_B, OFFSET) \ + P_16_A = _mm256_maddubs_epi16(R_INPUT_A, L_AD0); \ + P_32_A = _mm256_madd_epi16(P_16_A, ONE); \ + P_16_B = _mm256_maddubs_epi16(R_INPUT_B, L_EH0); \ + P_32_B = _mm256_madd_epi16(P_16_B, ONE); \ + P_32 = _mm256_add_epi32(P_32_A, P_32_B); \ + _mm256_store_si256( \ + blockO_256 + 4 * 
OFFSET, \ + _mm256_add_epi32(_mm256_load_si256(blockO_256 + 4 * OFFSET), P_32)); \ + \ + P_16_A = _mm256_maddubs_epi16(R_INPUT_A, L_AD8); \ + P_32_A = _mm256_madd_epi16(P_16_A, ONE); \ + P_16_B = _mm256_maddubs_epi16(R_INPUT_B, L_EH8); \ + P_32_B = _mm256_madd_epi16(P_16_B, ONE); \ + P_32 = _mm256_add_epi32(P_32_A, P_32_B); \ + _mm256_store_si256( \ + blockO_256 + 4 * OFFSET + 1, \ + _mm256_add_epi32(_mm256_load_si256(blockO_256 + 4 * OFFSET + 1), P_32)); \ + \ + P_16_A = _mm256_maddubs_epi16(R_INPUT_A, L_AD16); \ + P_32_A = _mm256_madd_epi16(P_16_A, ONE); \ + P_16_B = _mm256_maddubs_epi16(R_INPUT_B, L_EH16); \ + P_32_B = _mm256_madd_epi16(P_16_B, ONE); \ + P_32 = _mm256_add_epi32(P_32_A, P_32_B); \ + _mm256_store_si256( \ + blockO_256 + 4 * OFFSET + 2, \ + _mm256_add_epi32(_mm256_load_si256(blockO_256 + 4 * OFFSET + 2), P_32)); \ + \ + P_16_A = _mm256_maddubs_epi16(R_INPUT_A, L_AD24); \ + P_32_A = _mm256_madd_epi16(P_16_A, ONE); \ + P_16_B = _mm256_maddubs_epi16(R_INPUT_B, L_EH24); \ + P_32_B = _mm256_madd_epi16(P_16_B, ONE); \ + P_32 = _mm256_add_epi32(P_32_A, P_32_B); \ + _mm256_store_si256( \ + blockO_256 + 4 * OFFSET + 3, \ + _mm256_add_epi32(_mm256_load_si256(blockO_256 + 4 * OFFSET + 3), P_32)); + + // Permute and shuffle to copy a single value across the entire vector + // Then compute the multiplication + __m256i R_AH0_ = _mm256_permute2x128_si256(R_AH0, R_AH0, 0x00); + __m256i R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00); + __m256i R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD0, R_EH0, 0); + __m256i R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + __m256i R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD1, R_EH1, 1); + R_AH0_ = _mm256_permute2x128_si256(R_AH0, R_AH0, 0x11); + __m256i R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00); + __m256i R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD2, R_EH2, 2); + __m256i R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + __m256i R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + 
COMPUTE_STEP(R_AD3, R_EH3, 3); + + R_AH0_ = _mm256_permute2x128_si256(R_AH4, R_AH4, 0x00); + R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD0, R_EH0, 4); + R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD1, R_EH1, 5); + R_AH0_ = _mm256_permute2x128_si256(R_AH4, R_AH4, 0x11); + R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD2, R_EH2, 6); + R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD3, R_EH3, 7); + + R_AH0_ = _mm256_permute2x128_si256(R_AH8, R_AH8, 0x00); + R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD0, R_EH0, 8); + R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD1, R_EH1, 9); + R_AH0_ = _mm256_permute2x128_si256(R_AH8, R_AH8, 0x11); + R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD2, R_EH2, 10); + R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD3, R_EH3, 11); + + R_AH0_ = _mm256_permute2x128_si256(R_AH12, R_AH12, 0x00); + R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD0, R_EH0, 12); + R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD1, R_EH1, 13); + R_AH0_ = _mm256_permute2x128_si256(R_AH12, R_AH12, 0x11); + R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD2, R_EH2, 14); + R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD3, R_EH3, 15); + + R_AH0_ = _mm256_permute2x128_si256(R_AH16, R_AH16, 0x00); + R_AD0 = _mm256_shuffle_epi32(R_AH0_, 
0x00); + R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD0, R_EH0, 16); + R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD1, R_EH1, 17); + R_AH0_ = _mm256_permute2x128_si256(R_AH16, R_AH16, 0x11); + R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD2, R_EH2, 18); + R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD3, R_EH3, 19); + + R_AH0_ = _mm256_permute2x128_si256(R_AH20, R_AH20, 0x00); + R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD0, R_EH0, 20); + R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD1, R_EH1, 21); + R_AH0_ = _mm256_permute2x128_si256(R_AH20, R_AH20, 0x11); + R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD2, R_EH2, 22); + R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD3, R_EH3, 23); + + R_AH0_ = _mm256_permute2x128_si256(R_AH24, R_AH24, 0x00); + R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD0, R_EH0, 24); + R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD1, R_EH1, 25); + R_AH0_ = _mm256_permute2x128_si256(R_AH24, R_AH24, 0x11); + R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD2, R_EH2, 26); + R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD3, R_EH3, 27); + + R_AH0_ = _mm256_permute2x128_si256(R_AH28, R_AH28, 0x00); + R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD0, R_EH0, 28); + R_AD1 = 
_mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD1, R_EH1, 29); + R_AH0_ = _mm256_permute2x128_si256(R_AH28, R_AH28, 0x11); + R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD2, R_EH2, 30); + R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD3, R_EH3, 31); + +#undef COMPUTE_STEP + } + + // Transfer the results to the result matrix. + if (m + 32 <= rows && n + 32 <= cols) { + Index i = 0; + for (Index j = n; j < n + 32; j++) { + LinearMapper r0 = res.getLinearMapper(m, j); + LinearMapper r1 = res.getLinearMapper(m + 8, j); + LinearMapper r2 = res.getLinearMapper(m + 16, j); + LinearMapper r3 = res.getLinearMapper(m + 24, j); + typedef typename packet_traits::type Packet; + r0.template storePacket( + 0, _mm256_add_epi32(blockO_256[i++], + r0.template loadPacket(0))); + r1.template storePacket( + 0, _mm256_add_epi32(blockO_256[i++], + r1.template loadPacket(0))); + r2.template storePacket( + 0, _mm256_add_epi32(blockO_256[i++], + r2.template loadPacket(0))); + r3.template storePacket( + 0, _mm256_add_epi32(blockO_256[i++], + r3.template loadPacket(0))); + } + } else { + for (Index j = n; j < cols; j++) { + for (Index i = m; i < rows; i++) { + res(i, j) = blockO[(j - n) * 32 + (i - m)]; + } + } + } + + // Zero the result block so it can be reused + memset(blockO, 0, 32 * 32 * sizeof(QInt32)); + } + } +} + +// Below are the fully optimized versions that are correct only for sizes that +// are multiple of 32. It is about a 10% performance benefit to keep these +// implementations separate. + +// Arrange a block of the left input matrix in contiguous memory. +// +// Given column major input (A0 beside A1 in memory): +// A0 B0 C0 D0 E0 F0 G0 H0 ... +// A1 B1 C1 D1 E1 F1 G1 H1 ... +// A2 B2 C2 D2 E2 F2 G2 H2 ... +// A3 B3 C3 D3 E3 F3 G3 H3 ... +// A4 B4 C4 D4 E4 F4 G4 H4 ... +// A5 B5 C5 D5 E5 F5 G5 H5 ... 
+// A6 B6 C6 D6 E6 F6 G6 H6 ... +// A7 B7 C7 D7 E7 F7 G7 H7 ... +// A8 ... +// ... +// +// Packing yields output (A0 beside B0 in memory): +// A0 B0 C0 D0 +// A1 B1 C1 D1 +// A2 B2 C2 D2 +// A3 B3 C3 D3 +// A4 B4 C4 D4 +// A5 B5 C5 D5 +// A6 B6 C6 D6 +// A7 B7 C7 D7 +// ... +// A31 B31 C31 D31 +// E0 F0 G0 H0 +// E1 F1 G1 H1 +// E2 F2 G2 H2 +// E3 F3 G3 H3 +// E4 F4 G4 H4 +// E5 F5 G5 H5 +// E6 F6 G6 H6 +// E7 F7 G7 H7 +// ... +// +// Four elements of the same row are arranged contiguously because maddubs and +// madd both perform an adjacent addition in the kernel. +template +struct gemm_pack_lhs { + EIGEN_DONT_INLINE void operator()(QInt8* blockA, const DataMapper& lhs, + Index depth, Index rows, Index stride = 0, + Index offset = 0); +}; + +template +EIGEN_DONT_INLINE void +gemm_pack_lhs::operator()(QInt8* blockA, + const DataMapper& lhs, + Index depth, Index rows, + Index stride, Index offset) { + eigen_assert(stride == 0); + eigen_assert(offset == 0); + + typedef typename packet_traits::type Packet; + + // Use alternate function for weird sizes + if (rows % 32 != 0 || depth % 32 != 0) { + gemm_pack_lhs_any + lhs_pack; + return lhs_pack(blockA, lhs, depth, rows, stride, offset); + } + + // Get vector pointer + __m256i* blockA_256 = reinterpret_cast<__m256i*>(blockA); + + // Pack rows in sets of 32 + for (Index m = 0; m < rows; m += 32) { + // Pack depth in sets of 8 + for (Index k = 0; k < depth; k += 8) { + // Load vectors + __m256i L_A = lhs.template loadPacket(m, k); + __m256i L_B = lhs.template loadPacket(m, k + 1); + + // Interleave 8-bit elements + __m256i L_AB0_AB16 = _mm256_unpacklo_epi8(L_A, L_B); + __m256i L_AB8_AB24 = _mm256_unpackhi_epi8(L_A, L_B); + + __m256i L_C = lhs.template loadPacket(m, k + 2); + __m256i L_D = lhs.template loadPacket(m, k + 3); + __m256i L_CD0_CD16 = _mm256_unpacklo_epi8(L_C, L_D); + __m256i L_CD8_CD24 = _mm256_unpackhi_epi8(L_C, L_D); + + // Interleave 16-bit elements + __m256i L_AD0_AD16 = _mm256_unpacklo_epi16(L_AB0_AB16, 
L_CD0_CD16); + __m256i L_AD4_AD20 = _mm256_unpackhi_epi16(L_AB0_AB16, L_CD0_CD16); + + // Use permute before we store to cross 128-bit lanes + __m256i L_AD0 = _mm256_permute2x128_si256(L_AD0_AD16, L_AD4_AD20, 0x20); + _mm256_store_si256(blockA_256++, L_AD0); + + // Complete packing for 32 x 8 block + __m256i L_AD16 = _mm256_permute2x128_si256(L_AD0_AD16, L_AD4_AD20, 0x31); + __m256i L_AD8_AD24 = _mm256_unpacklo_epi16(L_AB8_AB24, L_CD8_CD24); + __m256i L_AD12_AD28 = _mm256_unpackhi_epi16(L_AB8_AB24, L_CD8_CD24); + __m256i L_AD8 = _mm256_permute2x128_si256(L_AD8_AD24, L_AD12_AD28, 0x20); + _mm256_store_si256(blockA_256++, L_AD8); + _mm256_store_si256(blockA_256++, L_AD16); + __m256i L_AD24 = _mm256_permute2x128_si256(L_AD8_AD24, L_AD12_AD28, 0x31); + _mm256_store_si256(blockA_256++, L_AD24); + __m256i L_E = lhs.template loadPacket(m, k + 4); + __m256i L_F = lhs.template loadPacket(m, k + 5); + __m256i L_EF0_EF16 = _mm256_unpacklo_epi8(L_E, L_F); + __m256i L_EF8_EF24 = _mm256_unpackhi_epi8(L_E, L_F); + __m256i L_G = lhs.template loadPacket(m, k + 6); + __m256i L_H = lhs.template loadPacket(m, k + 7); + __m256i L_GH0_GH16 = _mm256_unpacklo_epi8(L_G, L_H); + __m256i L_GH8_GH24 = _mm256_unpackhi_epi8(L_G, L_H); + __m256i L_EH0_EH16 = _mm256_unpacklo_epi16(L_EF0_EF16, L_GH0_GH16); + __m256i L_EH4_EH20 = _mm256_unpackhi_epi16(L_EF0_EF16, L_GH0_GH16); + __m256i L_EH0 = _mm256_permute2x128_si256(L_EH0_EH16, L_EH4_EH20, 0x20); + _mm256_store_si256(blockA_256++, L_EH0); + __m256i L_EH16 = _mm256_permute2x128_si256(L_EH0_EH16, L_EH4_EH20, 0x31); + __m256i L_EH8_EH24 = _mm256_unpacklo_epi16(L_EF8_EF24, L_GH8_GH24); + __m256i L_EH12_EH28 = _mm256_unpackhi_epi16(L_EF8_EF24, L_GH8_GH24); + __m256i L_EH8 = _mm256_permute2x128_si256(L_EH8_EH24, L_EH12_EH28, 0x20); + _mm256_store_si256(blockA_256++, L_EH8); + _mm256_store_si256(blockA_256++, L_EH16); + __m256i L_EH24 = _mm256_permute2x128_si256(L_EH8_EH24, L_EH12_EH28, 0x31); + _mm256_store_si256(blockA_256++, L_EH24); + } + } +} + 
+// Arrange a block of the right input matrix in contiguous memory. +// +// Given column major input (A0 beside A1 in memory): +// A0 B0 C0 D0 E0 F0 G0 H0 ... +// A1 B1 C1 D1 E1 F1 G1 H1 ... +// A2 B2 C2 D2 E2 F2 G2 H2 ... +// A3 B3 C3 D3 E3 F3 G3 H3 ... +// A4 B4 C4 D4 E4 F4 G4 H4 ... +// A5 B5 C5 D5 E5 F5 G5 H5 ... +// A6 B6 C6 D6 E6 F6 G6 H6 ... +// A7 B7 C7 D7 E7 F7 G7 H7 ... +// A8 ... +// ... +// +// Packing yields row major output (A0 beside A1 in memory): +// A0 A1 A2 A3 A4 A5 A6 A7 +// B0 B1 B2 B3 B4 B5 B6 B7 +// ... +// +// At least four elements of the same col are arranged contiguously because +// maddubs and madd both perform an adjacent addition in the kernel. We can +// save work by leaving 8 adjacent elements because kr = 8. +template +struct gemm_pack_rhs { + EIGEN_DONT_INLINE void operator()(QUInt8* blockB, const DataMapper& rhs, + Index depth, Index cols, Index stride = 0, + Index offset = 0); +}; + +template +EIGEN_DONT_INLINE void +gemm_pack_rhs::operator()(QUInt8* blockB, const DataMapper& rhs, + Index depth, Index cols, Index stride, + Index offset) { + eigen_assert(stride == 0); + eigen_assert(offset == 0); + + typedef typename packet_traits::type Packet; + + // Use alternate function for weird sizes + if (cols % 32 != 0 || depth % 32 != 0) { + gemm_pack_rhs_any + rhs_pack; + return rhs_pack(blockB, rhs, depth, cols, stride, offset); + } + + // Get vector pointer + __m256i* blockB_256 = reinterpret_cast<__m256i*>(blockB); + + // Perform a step of the packing for 4 columns + __m256i R_AB_L, R_AB_H, R_CD_L, R_CD_H, R_AD_0, R_AD_8, R_AD_16, R_AD_24; +#define PACK_STEP \ + R_AB_L = _mm256_unpacklo_epi64(R_A, R_B); \ + R_CD_L = _mm256_unpacklo_epi64(R_C, R_D); \ + R_AB_H = _mm256_unpackhi_epi64(R_A, R_B); \ + R_CD_H = _mm256_unpackhi_epi64(R_C, R_D); \ + R_AD_0 = _mm256_permute2x128_si256(R_AB_L, R_CD_L, 0x20); \ + R_AD_16 = _mm256_permute2x128_si256(R_AB_L, R_CD_L, 0x31); \ + R_AD_8 = _mm256_permute2x128_si256(R_AB_H, R_CD_H, 0x20); \ + R_AD_24 
= _mm256_permute2x128_si256(R_AB_H, R_CD_H, 0x31); \ + _mm256_store_si256(blockB_256, R_AD_0); \ + _mm256_store_si256(blockB_256 + 8, R_AD_8); \ + _mm256_store_si256(blockB_256 + 16, R_AD_16); \ + _mm256_store_si256(blockB_256 + 24, R_AD_24); \ + blockB_256++; + + // Pack cols in sets of 32 + for (Index n = 0; n < cols; n += 32) { + // Pack depth in sets of 32 + for (Index k = 0; k < depth; k += 32) { + __m256i R_A = rhs.template loadPacket(k, n); + __m256i R_B = rhs.template loadPacket(k, n + 1); + __m256i R_C = rhs.template loadPacket(k, n + 2); + __m256i R_D = rhs.template loadPacket(k, n + 3); + PACK_STEP; + + R_A = rhs.template loadPacket(k, n + 4); + R_B = rhs.template loadPacket(k, n + 5); + R_C = rhs.template loadPacket(k, n + 6); + R_D = rhs.template loadPacket(k, n + 7); + PACK_STEP; + + R_A = rhs.template loadPacket(k, n + 8); + R_B = rhs.template loadPacket(k, n + 9); + R_C = rhs.template loadPacket(k, n + 10); + R_D = rhs.template loadPacket(k, n + 11); + PACK_STEP; + + R_A = rhs.template loadPacket(k, n + 12); + R_B = rhs.template loadPacket(k, n + 13); + R_C = rhs.template loadPacket(k, n + 14); + R_D = rhs.template loadPacket(k, n + 15); + PACK_STEP; + + R_A = rhs.template loadPacket(k, n + 16); + R_B = rhs.template loadPacket(k, n + 17); + R_C = rhs.template loadPacket(k, n + 18); + R_D = rhs.template loadPacket(k, n + 19); + PACK_STEP; + + R_A = rhs.template loadPacket(k, n + 20); + R_B = rhs.template loadPacket(k, n + 21); + R_C = rhs.template loadPacket(k, n + 22); + R_D = rhs.template loadPacket(k, n + 23); + PACK_STEP; + + R_A = rhs.template loadPacket(k, n + 24); + R_B = rhs.template loadPacket(k, n + 25); + R_C = rhs.template loadPacket(k, n + 26); + R_D = rhs.template loadPacket(k, n + 27); + PACK_STEP; + + R_A = rhs.template loadPacket(k, n + 28); + R_B = rhs.template loadPacket(k, n + 29); + R_C = rhs.template loadPacket(k, n + 30); + R_D = rhs.template loadPacket(k, n + 31); + PACK_STEP; + + blockB_256 += 24; + } + } +#undef PACK_STEP +} 
+ +// Perform the actual multiplication on packed inputs +template +struct gebp_kernel { + typedef typename DataMapper::LinearMapper LinearMapper; + + EIGEN_DONT_INLINE + void operator()(const DataMapper& res, const QInt8* blockA, + const QUInt8* blockB, Index rows, Index depth, Index cols, + QInt32 alpha, Index strideA = -1, Index strideB = -1, + Index offsetA = 0, Index offsetB = 0); +}; + +template +EIGEN_DONT_INLINE void +gebp_kernel::operator()(const DataMapper& res, + const QInt8* blockA, const QUInt8* blockB, + Index rows, Index depth, Index cols, + QInt32 alpha, Index strideA, + Index strideB, Index offsetA, + Index offsetB) { + EIGEN_STATIC_ASSERT(!ConjugateLhs, YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT(!ConjugateRhs, YOU_MADE_A_PROGRAMMING_MISTAKE); + eigen_assert(alpha.value == 1); + eigen_assert(strideA == -1); + eigen_assert(strideB == -1); + eigen_assert(offsetA == 0); + eigen_assert(offsetB == 0); + eigen_assert(rows > 0); + eigen_assert(cols > 0); + eigen_assert(depth > 0); + eigen_assert(blockA); + eigen_assert(blockB); + + // Use alternate function for weird sizes + if (rows % 32 != 0 || cols % 32 != 0 || depth % 32 != 0) { + gebp_kernel_any + gebp; + return gebp(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, + offsetA, offsetB); + } + + // Create result block + QInt32* blockO = aligned_new(32 * 32); + // Allocating the result block is about 5-10% faster than declaring stack + // space. It is unclear why this is the case. 
+ // ei_declare_aligned_stack_constructed_variable(QInt32, blockO, 32 * 32, 0); + memset(blockO, 0, 32 * 32 * sizeof(QInt32)); + + // Get vectorized pointers + __m256i* blockO_256 = reinterpret_cast<__m256i*>(blockO); + const __m256i* blockA_256 = reinterpret_cast(blockA); + const __m256i* blockB_256 = reinterpret_cast(blockB); + + // Loop over blocks of 32 columns + for (Index n = 0; n < cols; n += 32) { + // Reset index into blockA + Index indexL = 0; + // Loop over blocks of 32 rows + for (Index m = 0; m < rows; m += 32) { + // Reset index into blockB + Index indexR = n / 32 * depth; + // Loop over blocks of 8 on depth + for (Index k = 0; k < depth; k += 8) { + // Load inputs + __m256i L_AD0 = blockA_256[indexL++]; + __m256i L_AD8 = blockA_256[indexL++]; + __m256i L_AD16 = blockA_256[indexL++]; + __m256i L_AD24 = blockA_256[indexL++]; + __m256i L_EH0 = blockA_256[indexL++]; + __m256i L_EH8 = blockA_256[indexL++]; + __m256i L_EH16 = blockA_256[indexL++]; + __m256i L_EH24 = blockA_256[indexL++]; + __m256i R_AH0 = blockB_256[indexR++]; + __m256i R_AH4 = blockB_256[indexR++]; + __m256i R_AH8 = blockB_256[indexR++]; + __m256i R_AH12 = blockB_256[indexR++]; + __m256i R_AH16 = blockB_256[indexR++]; + __m256i R_AH20 = blockB_256[indexR++]; + __m256i R_AH24 = blockB_256[indexR++]; + __m256i R_AH28 = blockB_256[indexR++]; + + // This constant is used with madd to convert 16 bit to 32 bit + const __m256i ONE = _mm256_set1_epi32(0x00010001); + + // Declare variables used in COMPUTE_STEP + __m256i P_16_A, P_16_B, P_32_A, P_32_B, P_32; + +#define COMPUTE_STEP(R_INPUT_A, R_INPUT_B, OFFSET) \ + P_16_A = _mm256_maddubs_epi16(R_INPUT_A, L_AD0); \ + P_32_A = _mm256_madd_epi16(P_16_A, ONE); \ + P_16_B = _mm256_maddubs_epi16(R_INPUT_B, L_EH0); \ + P_32_B = _mm256_madd_epi16(P_16_B, ONE); \ + P_32 = _mm256_add_epi32(P_32_A, P_32_B); \ + _mm256_store_si256( \ + blockO_256 + 4 * OFFSET, \ + _mm256_add_epi32(_mm256_load_si256(blockO_256 + 4 * OFFSET), P_32)); \ + \ + P_16_A = 
_mm256_maddubs_epi16(R_INPUT_A, L_AD8); \ + P_32_A = _mm256_madd_epi16(P_16_A, ONE); \ + P_16_B = _mm256_maddubs_epi16(R_INPUT_B, L_EH8); \ + P_32_B = _mm256_madd_epi16(P_16_B, ONE); \ + P_32 = _mm256_add_epi32(P_32_A, P_32_B); \ + _mm256_store_si256( \ + blockO_256 + 4 * OFFSET + 1, \ + _mm256_add_epi32(_mm256_load_si256(blockO_256 + 4 * OFFSET + 1), P_32)); \ + \ + P_16_A = _mm256_maddubs_epi16(R_INPUT_A, L_AD16); \ + P_32_A = _mm256_madd_epi16(P_16_A, ONE); \ + P_16_B = _mm256_maddubs_epi16(R_INPUT_B, L_EH16); \ + P_32_B = _mm256_madd_epi16(P_16_B, ONE); \ + P_32 = _mm256_add_epi32(P_32_A, P_32_B); \ + _mm256_store_si256( \ + blockO_256 + 4 * OFFSET + 2, \ + _mm256_add_epi32(_mm256_load_si256(blockO_256 + 4 * OFFSET + 2), P_32)); \ + \ + P_16_A = _mm256_maddubs_epi16(R_INPUT_A, L_AD24); \ + P_32_A = _mm256_madd_epi16(P_16_A, ONE); \ + P_16_B = _mm256_maddubs_epi16(R_INPUT_B, L_EH24); \ + P_32_B = _mm256_madd_epi16(P_16_B, ONE); \ + P_32 = _mm256_add_epi32(P_32_A, P_32_B); \ + _mm256_store_si256( \ + blockO_256 + 4 * OFFSET + 3, \ + _mm256_add_epi32(_mm256_load_si256(blockO_256 + 4 * OFFSET + 3), P_32)); + + // Permute and shuffle to copy a single value across the entire vector + // Then compute the multiplication + __m256i R_AH0_ = _mm256_permute2x128_si256(R_AH0, R_AH0, 0x00); + __m256i R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00); + __m256i R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD0, R_EH0, 0); + __m256i R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + __m256i R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD1, R_EH1, 1); + R_AH0_ = _mm256_permute2x128_si256(R_AH0, R_AH0, 0x11); + __m256i R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00); + __m256i R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD2, R_EH2, 2); + __m256i R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + __m256i R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD3, R_EH3, 3); + + R_AH0_ = _mm256_permute2x128_si256(R_AH4, R_AH4, 0x00); + R_AD0 = 
_mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD0, R_EH0, 4); + R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD1, R_EH1, 5); + R_AH0_ = _mm256_permute2x128_si256(R_AH4, R_AH4, 0x11); + R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD2, R_EH2, 6); + R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD3, R_EH3, 7); + + R_AH0_ = _mm256_permute2x128_si256(R_AH8, R_AH8, 0x00); + R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD0, R_EH0, 8); + R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD1, R_EH1, 9); + R_AH0_ = _mm256_permute2x128_si256(R_AH8, R_AH8, 0x11); + R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD2, R_EH2, 10); + R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD3, R_EH3, 11); + + R_AH0_ = _mm256_permute2x128_si256(R_AH12, R_AH12, 0x00); + R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD0, R_EH0, 12); + R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD1, R_EH1, 13); + R_AH0_ = _mm256_permute2x128_si256(R_AH12, R_AH12, 0x11); + R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD2, R_EH2, 14); + R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD3, R_EH3, 15); + + R_AH0_ = _mm256_permute2x128_si256(R_AH16, R_AH16, 0x00); + R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD0, R_EH0, 16); + R_AD1 = 
_mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD1, R_EH1, 17); + R_AH0_ = _mm256_permute2x128_si256(R_AH16, R_AH16, 0x11); + R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD2, R_EH2, 18); + R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD3, R_EH3, 19); + + R_AH0_ = _mm256_permute2x128_si256(R_AH20, R_AH20, 0x00); + R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD0, R_EH0, 20); + R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD1, R_EH1, 21); + R_AH0_ = _mm256_permute2x128_si256(R_AH20, R_AH20, 0x11); + R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD2, R_EH2, 22); + R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD3, R_EH3, 23); + + R_AH0_ = _mm256_permute2x128_si256(R_AH24, R_AH24, 0x00); + R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD0, R_EH0, 24); + R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD1, R_EH1, 25); + R_AH0_ = _mm256_permute2x128_si256(R_AH24, R_AH24, 0x11); + R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD2, R_EH2, 26); + R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD3, R_EH3, 27); + + R_AH0_ = _mm256_permute2x128_si256(R_AH28, R_AH28, 0x00); + R_AD0 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH0 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD0, R_EH0, 28); + R_AD1 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH1 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD1, R_EH1, 29); + 
R_AH0_ = _mm256_permute2x128_si256(R_AH28, R_AH28, 0x11); + R_AD2 = _mm256_shuffle_epi32(R_AH0_, 0x00); + R_EH2 = _mm256_shuffle_epi32(R_AH0_, 0x55); + COMPUTE_STEP(R_AD2, R_EH2, 30); + R_AD3 = _mm256_shuffle_epi32(R_AH0_, 0xAA); + R_EH3 = _mm256_shuffle_epi32(R_AH0_, 0xFF); + COMPUTE_STEP(R_AD3, R_EH3, 31); + +#undef COMPUTE_STEP + } + + // Transfer the results to the result matrix + Index i = 0; + for (Index j = n; j < n + 32; j++) { + LinearMapper r0 = res.getLinearMapper(m, j); + LinearMapper r1 = res.getLinearMapper(m + 8, j); + LinearMapper r2 = res.getLinearMapper(m + 16, j); + LinearMapper r3 = res.getLinearMapper(m + 24, j); + typedef typename packet_traits::type Packet; + r0.template storePacket( + 0, _mm256_add_epi32(blockO_256[i++], + r0.template loadPacket(0))); + r1.template storePacket( + 0, _mm256_add_epi32(blockO_256[i++], + r1.template loadPacket(0))); + r2.template storePacket( + 0, _mm256_add_epi32(blockO_256[i++], + r2.template loadPacket(0))); + r3.template storePacket( + 0, _mm256_add_epi32(blockO_256[i++], + r3.template loadPacket(0))); + } + + // Zero the result block so it can be reused + memset(blockO, 0, 32 * 32 * sizeof(QInt32)); + } + } + aligned_delete(blockO, 32 * 32); +} + +#endif // EIGEN_USE_OPTIMIZED_INT8_UINT8_MAT_MAT_PRODUCT + +} // namespace internal +} // namespace Eigen + +#endif // TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATMATPRODUCTAVX2_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/MatMatProductNEON.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/MatMatProductNEON.h new file mode 100644 index 0000000000000000000000000000000000000000..6166b41ad30f482f77d590f9e3004664fa026b57 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/MatMatProductNEON.h @@ -0,0 +1,316 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATMATPRODUCTNEON_H_ +#define TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATMATPRODUCTNEON_H_ + +namespace Eigen { +namespace internal { + +// Neon optimized implementation where both lhs and rhs are encoded using +// signed 8bit integers +#ifdef EIGEN_USE_OPTIMIZED_INT8_INT8_MAT_MAT_PRODUCT + +template +class gebp_traits { + public: + typedef QInt8 LhsScalar; + typedef QInt8 RhsScalar; + typedef QInt32 ResScalar; + + enum { + // register block size along the M and N directions + // One for the current implementation + nr = 4, + mr = 1, + // Progress made at each iteration of the product loop + // also 1 for the current implementation + LhsProgress = 1, + RhsProgress = 1 + }; +}; + +// The signed 8bit Mat-Mat product itself. 
+template +struct gebp_kernel { + EIGEN_DONT_INLINE + void operator()(const DataMapper& res, const QInt8* blockA, + const QInt8* blockB, Index rows, Index depth, Index cols, + QInt32 alpha, Index strideA = -1, Index strideB = -1, + Index offsetA = 0, Index offsetB = 0); +}; + +template +EIGEN_DONT_INLINE void +gebp_kernel::operator()(const DataMapper& res, + const QInt8* blockA, const QInt8* blockB, + Index rows, Index depth, Index cols, + QInt32 alpha, Index strideA, + Index strideB, Index offsetA, + Index offsetB) { + EIGEN_STATIC_ASSERT(!ConjugateLhs, YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT(!ConjugateRhs, YOU_MADE_A_PROGRAMMING_MISTAKE); + + eigen_assert(alpha.value == 1); + eigen_assert(strideA == -1); + eigen_assert(strideB == -1); + eigen_assert(offsetA == 0); + eigen_assert(offsetB == 0); + + eigen_assert(rows > 0); + eigen_assert(cols > 0); + eigen_assert(depth > 0); + eigen_assert(blockA); + eigen_assert(blockB); + + for (Index j = 0; j < cols; ++j) { + Index startB = j * depth; + + for (Index i = 0; i < rows; ++i) { + Index startA = i * depth; + + for (Index k = 0; k < depth; ++k) { + res(i, j) += blockA[startA + k] * blockB[startB + k]; + } + } + } +} +#endif + +// Neon optimized implementation of the case where the lhs is encoded using +// signed 8bit integers and the rhs using unsigned 8bit integers. 
+#ifdef EIGEN_USE_OPTIMIZED_INT8_UINT8_MAT_MAT_PRODUCT + +template +class gebp_traits { + public: + typedef QInt8 LhsScalar; + typedef QUInt8 RhsScalar; + typedef QInt32 ResScalar; + + enum { + // register block size along the M and N directions + nr = 4, + mr = 1, + // Progress made at each iteration of the product loop + // 1 for the current implementation + LhsProgress = 1, + RhsProgress = 1 + }; +}; + +// Mat-Mat product of a signed 8bit lhs with an unsigned 8bit rhs +template +struct gebp_kernel { + EIGEN_DONT_INLINE + void operator()(const DataMapper& res, const QInt8* blockA, + const QUInt8* blockB, Index rows, Index depth, Index cols, + QInt32 alpha, Index strideA = -1, Index strideB = -1, + Index offsetA = 0, Index offsetB = 0); +}; + +template +EIGEN_DONT_INLINE void +gebp_kernel::operator()(const DataMapper& res, + const QInt8* blockA, const QUInt8* blockB, + Index rows, Index depth, Index cols, + QInt32 alpha, Index strideA, + Index strideB, Index offsetA, + Index offsetB) { + EIGEN_STATIC_ASSERT(!ConjugateLhs, YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT(!ConjugateRhs, YOU_MADE_A_PROGRAMMING_MISTAKE); + + eigen_assert(alpha.value == 1); + eigen_assert(strideA == -1); + eigen_assert(strideB == -1); + eigen_assert(offsetA == 0); + eigen_assert(offsetB == 0); + + eigen_assert(rows > 0); + eigen_assert(cols > 0); + eigen_assert(depth > 0); + eigen_assert(blockA); + eigen_assert(blockB); + + for (Index j = 0; j < cols; ++j) { + Index startB = j * depth; + + for (Index i = 0; i < rows; ++i) { + Index startA = i * depth; + + for (Index k = 0; k < depth; ++k) { + res(i, j) += blockA[startA + k] * blockB[startB + k]; + } + } + } +} +#endif + +// Neon optimized implementation where the lhs is encoded using unsigned 8bit +// integers and the rhs using signed 8bit integers. 
+#ifdef EIGEN_USE_OPTIMIZED_UINT8_INT8_MAT_MAT_PRODUCT +template +class gebp_traits { + public: + typedef QUInt8 LhsScalar; + typedef QInt8 RhsScalar; + typedef QInt32 ResScalar; + + enum { + // register block size along the M and N directions + nr = 4, + mr = 1, + // Progress made at each iteration of the product loop + // 1 for the current implementation + LhsProgress = 1, + RhsProgress = 1 + }; +}; + +// Mat-Mat product of an unsigned 8bit lhs with a signed 8bit rhs +template +struct gebp_kernel { + EIGEN_DONT_INLINE + void operator()(const DataMapper& res, const QUInt8* blockA, + const QInt8* blockB, Index rows, Index depth, Index cols, + QInt32 alpha, Index strideA = -1, Index strideB = -1, + Index offsetA = 0, Index offsetB = 0); +}; + +template +EIGEN_DONT_INLINE void +gebp_kernel::operator()(const DataMapper& res, + const QUInt8* blockA, const QInt8* blockB, + Index rows, Index depth, Index cols, + QInt32 alpha, Index strideA, + Index strideB, Index offsetA, + Index offsetB) { + EIGEN_STATIC_ASSERT(!ConjugateLhs, YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT(!ConjugateRhs, YOU_MADE_A_PROGRAMMING_MISTAKE); + + eigen_assert(alpha.value == 1); + eigen_assert(strideA == -1); + eigen_assert(strideB == -1); + eigen_assert(offsetA == 0); + eigen_assert(offsetB == 0); + + eigen_assert(rows > 0); + eigen_assert(cols > 0); + eigen_assert(depth > 0); + eigen_assert(blockA); + eigen_assert(blockB); + + for (Index j = 0; j < cols; ++j) { + Index startB = j * depth; + + for (Index i = 0; i < rows; ++i) { + Index startA = i * depth; + + for (Index k = 0; k < depth; ++k) { + res(i, j) += blockA[startA + k] * blockB[startB + k]; + } + } + } +} +#endif + +#ifdef EIGEN_USE_OPTIMIZED_INT16_INT16_MAT_MAT_PRODUCT + +template +class gebp_traits { + public: + typedef QInt16 LhsScalar; + typedef QInt16 RhsScalar; + typedef QInt32 ResScalar; + + enum { + // register block size along the M and N directions + // One for the current implementation + nr = 4, + mr = 1, + // 
Progress made at each iteration of the product loop + // also 1 for the current implementation + LhsProgress = 1, + RhsProgress = 1 + }; +}; + +// The signed 16bit Mat-Mat product itself. +template +struct gebp_kernel { + EIGEN_DONT_INLINE + void operator()(const DataMapper& res, const QInt16* blockA, + const QInt16* blockB, Index rows, Index depth, Index cols, + QInt32 alpha, Index strideA = -1, Index strideB = -1, + Index offsetA = 0, Index offsetB = 0); +}; + +template +EIGEN_DONT_INLINE void +gebp_kernel::operator()(const DataMapper& res, + const QInt16* blockA, + const QInt16* blockB, Index rows, + Index depth, Index cols, QInt32 alpha, + Index strideA, Index strideB, + Index offsetA, Index offsetB) { + EIGEN_STATIC_ASSERT(!ConjugateLhs, YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT(!ConjugateRhs, YOU_MADE_A_PROGRAMMING_MISTAKE); + + eigen_assert(alpha.value == 1); + eigen_assert(strideA == -1); + eigen_assert(strideB == -1); + eigen_assert(offsetA == 0); + eigen_assert(offsetB == 0); + + eigen_assert(rows > 0); + eigen_assert(cols > 0); + eigen_assert(depth > 0); + eigen_assert(blockA); + eigen_assert(blockB); + + for (Index j = 0; j < cols; ++j) { + Index startB = j * depth; + + for (Index i = 0; i < rows; ++i) { + Index startA = i * depth; + + for (Index k = 0; k < depth; ++k) { + res(i, j) += blockA[startA + k] * blockB[startB + k]; + } + } + } +} +#endif + +} // namespace internal +} // namespace Eigen + +#endif // TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATMATPRODUCTNEON_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/MatVecProduct.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/MatVecProduct.h new file mode 100644 index 0000000000000000000000000000000000000000..1653df856f01ce4b1a40976cbc5e000e94e555a4 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/MatVecProduct.h @@ -0,0 +1,151 @@ +/* Copyright 2022 
The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATVECPRODUCT_H_ +#define TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATVECPRODUCT_H_ + +namespace Eigen { +namespace internal { + +// Mat-Vec product +// Both lhs and rhs are encoded as 8bit signed integers +template +struct general_matrix_vector_product { + EIGEN_DONT_INLINE static void run(Index rows, Index cols, + const LhsMapper& lhs, const RhsMapper& rhs, + QInt32* res, Index resIncr, QInt8 alpha); +}; + +template +EIGEN_DONT_INLINE void general_matrix_vector_product< + Index, QInt8, LhsMapper, ColMajor, ConjugateLhs, QInt8, RhsMapper, + ConjugateRhs, Version>::run(Index rows, Index cols, const LhsMapper& lhs, + const RhsMapper& rhs, QInt32* res, + Index resIncr, QInt8 alpha) { + eigen_assert(alpha.value == 1); + eigen_assert(resIncr == 1); + eigen_assert(rows > 0); + eigen_assert(cols > 0); + + for (Index i = 0; i < rows; ++i) { + for (Index j = 0; j < cols; ++j) { + res[i] += lhs(i, j) * rhs(j, 0); + } + } +} + +// Mat-Vec product +// Both lhs and rhs are encoded as 16bit signed integers +template +struct general_matrix_vector_product { + EIGEN_DONT_INLINE static void run(Index rows, Index cols, + const LhsMapper& lhs, const RhsMapper& rhs, + QInt32* res, Index resIncr, QInt16 alpha); +}; + +template +EIGEN_DONT_INLINE void general_matrix_vector_product< + Index, 
QInt16, LhsMapper, ColMajor, ConjugateLhs, QInt16, RhsMapper, + ConjugateRhs, Version>::run(Index rows, Index cols, const LhsMapper& lhs, + const RhsMapper& rhs, QInt32* res, + Index resIncr, QInt16 alpha) { + eigen_assert(alpha.value == 1); + eigen_assert(resIncr == 1); + eigen_assert(rows > 0); + eigen_assert(cols > 0); + + for (Index i = 0; i < rows; ++i) { + for (Index j = 0; j < cols; ++j) { + res[i] += lhs(i, j) * rhs(j, 0); + } + } +} + +// Mat-Vec product +// The lhs is encoded using 8bit signed integers, the rhs using 8bit unsigned +// integers +template +struct general_matrix_vector_product { + EIGEN_DONT_INLINE static void run(Index rows, Index cols, + const LhsMapper& lhs, const RhsMapper& rhs, + QInt32* res, Index resIncr, QUInt8 alpha); +}; + +template +EIGEN_DONT_INLINE void general_matrix_vector_product< + Index, QInt8, LhsMapper, ColMajor, ConjugateLhs, QUInt8, RhsMapper, + ConjugateRhs, Version>::run(Index rows, Index cols, const LhsMapper& lhs, + const RhsMapper& rhs, QInt32* res, + Index resIncr, QUInt8 alpha) { + eigen_assert(alpha.value == 1); + eigen_assert(resIncr == 1); + eigen_assert(rows > 0); + eigen_assert(cols > 0); + + for (Index i = 0; i < rows; ++i) { + for (Index j = 0; j < cols; ++j) { + res[i] += lhs(i, j) * rhs(j, 0); + } + } +} + +// Mat-Vec product +// The lhs is encoded using bit unsigned integers, the rhs using 8bit signed +// integers +template +struct general_matrix_vector_product { + EIGEN_DONT_INLINE static void run(Index rows, Index cols, + const LhsMapper& lhs, const RhsMapper& rhs, + QInt32* res, Index resIncr, QInt8 alpha); +}; + +template +EIGEN_DONT_INLINE void general_matrix_vector_product< + Index, QUInt8, LhsMapper, ColMajor, ConjugateLhs, QInt8, RhsMapper, + ConjugateRhs, Version>::run(Index rows, Index cols, const LhsMapper& lhs, + const RhsMapper& rhs, QInt32* res, + Index resIncr, QInt8 alpha) { + eigen_assert(alpha.value == 1); + eigen_assert(resIncr == 1); + eigen_assert(rows > 0); + eigen_assert(cols > 
0); + + for (Index i = 0; i < rows; ++i) { + for (Index j = 0; j < cols; ++j) { + res[i] += lhs(i, j) * rhs(j, 0); + } + } +} + +} // namespace internal +} // namespace Eigen + +#endif // TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_MATVECPRODUCT_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/PacketMathAVX.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/PacketMathAVX.h new file mode 100644 index 0000000000000000000000000000000000000000..12c60405045df809d491c349345cf5aca4b6ad31 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/PacketMathAVX.h @@ -0,0 +1,164 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_PACKETMATHAVX_H_ +#define TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_PACKETMATHAVX_H_ +#ifdef _MSC_VER + +#include +#include +#include + +#endif + +namespace Eigen { +namespace internal { + +typedef eigen_packet_wrapper<__m256i, 10> Packet32q8i; +typedef eigen_packet_wrapper<__m128i, 11> Packet16q8i; + +template <> +struct packet_traits : default_packet_traits { + typedef Packet32q8i type; + typedef Packet16q8i half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 32, + }; + enum { + HasAdd = 0, + HasSub = 0, + HasMul = 0, + HasNegate = 0, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasConj = 0, + HasSetLinear = 0 + }; +}; + +template <> +struct unpacket_traits { + typedef QInt8 type; + typedef Packet16q8i half; + enum { + size = 32, + alignment = Aligned32, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; + +template <> +struct unpacket_traits { + typedef QInt8 type; + typedef Packet16q8i half; + enum { + size = 16, + alignment = Aligned32, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; +template <> +EIGEN_STRONG_INLINE Packet32q8i pset1(const QInt8& from) { + return _mm256_set1_epi8(from.value); +} +template <> +EIGEN_STRONG_INLINE Packet32q8i ploadu(const QInt8* from) { + EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256( + reinterpret_cast(from)); +} +template <> +EIGEN_STRONG_INLINE Packet16q8i ploadu(const QInt8* from) { + EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_si128( + reinterpret_cast(from)); +} + +template <> +EIGEN_STRONG_INLINE Packet32q8i pload(const QInt8* from) { + EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256( + reinterpret_cast(from)); +} +template <> +EIGEN_STRONG_INLINE Packet16q8i pload(const QInt8* from) { + EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128( + reinterpret_cast(from)); +} + 
+template <> +EIGEN_STRONG_INLINE void pstoreu(QInt8* to, const Packet32q8i& from) { + EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256( + reinterpret_cast<__m256i*>(to), from.m_val); +} +template <> +EIGEN_STRONG_INLINE void pstoreu(QInt8* to, const Packet16q8i& from) { + EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to), + from.m_val); +} + +template <> +EIGEN_STRONG_INLINE void pstore(QInt8* to, const Packet32q8i& from) { + EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(reinterpret_cast<__m256i*>(to), + from.m_val); +} +template <> +EIGEN_STRONG_INLINE void pstore(QInt8* to, const Packet16q8i& from) { + EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to), + from.m_val); +} + +typedef __m256 Packet8f; + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 }; +}; + +template <> +EIGEN_STRONG_INLINE Packet32q8i +pcast(const Packet8f& a, const Packet8f& b, + const Packet8f& c, const Packet8f& d) { + const __m256i a_conv = _mm256_cvtps_epi32(a); + const __m256i b_conv = _mm256_cvtps_epi32(b); + const __m256i c_conv = _mm256_cvtps_epi32(c); + const __m256i d_conv = _mm256_cvtps_epi32(d); + __m128i low = _mm256_castsi256_si128(a_conv); + __m128i high = _mm256_extractf128_si256(a_conv, 1); + __m128i tmp = _mm_packs_epi32(low, high); + __m128i low2 = _mm256_castsi256_si128(b_conv); + __m128i high2 = _mm256_extractf128_si256(b_conv, 1); + __m128i tmp2 = _mm_packs_epi32(low2, high2); + __m128i converted_low = _mm_packs_epi16(tmp, tmp2); + low = _mm256_castsi256_si128(c_conv); + high = _mm256_extractf128_si256(c_conv, 1); + tmp = _mm_packs_epi32(low, high); + low2 = _mm256_castsi256_si128(d_conv); + high2 = _mm256_extractf128_si256(d_conv, 1); + tmp2 = _mm_packs_epi32(low2, high2); + __m128i converted_high = _mm_packs_epi16(tmp, tmp2); + return _mm256_insertf128_si256(_mm256_castsi128_si256(converted_low), + converted_high, 1); +} + +} // end namespace internal +} // end 
namespace Eigen + +#endif // TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_PACKETMATHAVX_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/PacketMathAVX2.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/PacketMathAVX2.h new file mode 100644 index 0000000000000000000000000000000000000000..1c9139fd1d95a58f1a199596f422fc93b2c08a2a --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/PacketMathAVX2.h @@ -0,0 +1,560 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_PACKETMATHAVX2_H_ +#define TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_PACKETMATHAVX2_H_ +#ifdef _MSC_VER + +#include +#include +#include + +#endif + +inline int _mm256_extract_epi16_N0(const __m256i X) { + return _mm_extract_epi16(_mm256_extractf128_si256(X, 0 >> 3), 0 % 8); +} + +inline int _mm256_extract_epi16_N1(const __m256i X) { + return _mm_extract_epi16(_mm256_extractf128_si256(X, 1 >> 3), 1 % 8); +} + +inline int _mm256_extract_epi8_N0(const __m256i X) { + return _mm_extract_epi8(_mm256_extractf128_si256((X), 0 >> 4), 0 % 16); +} + +inline int _mm256_extract_epi8_N1(const __m256i X) { + return _mm_extract_epi8(_mm256_extractf128_si256((X), 1 >> 4), 1 % 16); +} + +namespace Eigen { +namespace internal { + +typedef eigen_packet_wrapper<__m256i, 20> Packet32q8i; +typedef eigen_packet_wrapper<__m256i, 21> Packet16q16i; +typedef eigen_packet_wrapper<__m256i, 22> Packet32q8u; +typedef eigen_packet_wrapper<__m128i, 23> Packet16q8i; +typedef eigen_packet_wrapper<__m128i, 25> Packet16q8u; +typedef eigen_packet_wrapper<__m128i, 26> Packet8q16i; +typedef eigen_packet_wrapper<__m256i, 27> Packet8q32i; +typedef eigen_packet_wrapper<__m128i, 28> Packet4q32i; + +#ifndef EIGEN_VECTORIZE_AVX512 +template <> +struct packet_traits : default_packet_traits { + typedef Packet32q8i type; + typedef Packet16q8i half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 32, + }; + enum { + HasAdd = 0, + HasSub = 0, + HasMul = 0, + HasNegate = 0, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 1, + HasMax = 1, + HasConj = 0, + HasSetLinear = 0 + }; +}; +template <> +struct packet_traits : default_packet_traits { + typedef Packet32q8u type; + typedef Packet16q8u half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 32, + }; + enum { + HasAdd = 0, + HasSub = 0, + HasMul = 0, + HasNegate = 0, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 1, + HasMax = 1, + 
HasConj = 0, + HasSetLinear = 0 + }; +}; +template <> +struct packet_traits : default_packet_traits { + typedef Packet16q16i type; + typedef Packet8q16i half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 16, + }; + enum { + HasAdd = 0, + HasSub = 0, + HasMul = 0, + HasNegate = 0, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 1, + HasMax = 1, + HasConj = 0, + HasSetLinear = 0 + }; +}; +template <> +struct packet_traits : default_packet_traits { + typedef Packet8q32i type; + typedef Packet4q32i half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 8, + }; + enum { + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasNegate = 1, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 1, + HasMax = 1, + HasConj = 0, + HasSetLinear = 0 + }; +}; +#endif + +template <> +struct unpacket_traits { + typedef QInt8 type; + typedef Packet16q8i half; + enum { + size = 32, + alignment = Aligned32, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; +template <> +struct unpacket_traits { + typedef QInt8 type; + typedef Packet16q8i half; + enum { + size = 16, + alignment = Aligned32, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; +template <> +struct unpacket_traits { + typedef QInt16 type; + typedef Packet8q16i half; + enum { + size = 16, + alignment = Aligned32, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; +template <> +struct unpacket_traits { + typedef QInt16 type; + typedef Packet8q16i half; + enum { + size = 8, + alignment = Aligned32, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; +template <> +struct unpacket_traits { + typedef QUInt8 type; + typedef Packet16q8u half; + enum { + size = 32, + alignment = Aligned32, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; +template <> +struct unpacket_traits { + typedef QInt32 type; + 
typedef Packet4q32i half; + enum { + size = 8, + alignment = Aligned32, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; + +// Unaligned load +template <> +EIGEN_STRONG_INLINE Packet32q8i ploadu(const QInt8* from) { + EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256( + reinterpret_cast(from)); +} +template <> +EIGEN_STRONG_INLINE Packet16q8i ploadu(const QInt8* from) { + EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_si128( + reinterpret_cast(from)); +} +template <> +EIGEN_STRONG_INLINE Packet32q8u ploadu(const QUInt8* from) { + EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256( + reinterpret_cast(from)); +} +template <> +EIGEN_STRONG_INLINE Packet16q16i ploadu(const QInt16* from) { + EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256( + reinterpret_cast(from)); +} +template <> +EIGEN_STRONG_INLINE Packet8q16i ploadu(const QInt16* from) { + EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_si128( + reinterpret_cast(from)); +} +template <> +EIGEN_STRONG_INLINE Packet8q32i ploadu(const QInt32* from) { + EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256( + reinterpret_cast(from)); +} + +// Aligned load +template <> +EIGEN_STRONG_INLINE Packet32q8i pload(const QInt8* from) { + EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256( + reinterpret_cast(from)); +} +template <> +EIGEN_STRONG_INLINE Packet16q8i pload(const QInt8* from) { + EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128( + reinterpret_cast(from)); +} +template <> +EIGEN_STRONG_INLINE Packet32q8u pload(const QUInt8* from) { + EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256( + reinterpret_cast(from)); +} +template <> +EIGEN_STRONG_INLINE Packet16q16i pload(const QInt16* from) { + EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256( + reinterpret_cast(from)); +} +template <> +EIGEN_STRONG_INLINE Packet8q16i pload(const QInt16* from) { + EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128( + reinterpret_cast(from)); +} +template <> +EIGEN_STRONG_INLINE Packet8q32i 
pload(const QInt32* from) { + EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256( + reinterpret_cast(from)); +} + +// Unaligned store +template <> +EIGEN_STRONG_INLINE void pstoreu(QInt8* to, const Packet32q8i& from) { + EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256( + reinterpret_cast<__m256i*>(to), from.m_val); +} +template <> +EIGEN_STRONG_INLINE void pstoreu(QInt8* to, const Packet16q8i& from) { + EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to), + from.m_val); +} +template <> +EIGEN_STRONG_INLINE void pstoreu(QUInt8* to, const Packet32q8u& from) { + EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256( + reinterpret_cast<__m256i*>(to), from.m_val); +} +template <> +EIGEN_STRONG_INLINE void pstoreu(QInt16* to, const Packet16q16i& from) { + EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256( + reinterpret_cast<__m256i*>(to), from.m_val); +} +template <> +EIGEN_STRONG_INLINE void pstoreu(QInt16* to, const Packet8q16i& from) { + EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to), + from.m_val); +} +template <> +EIGEN_STRONG_INLINE void pstoreu(QInt32* to, const Packet8q32i& from) { + EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256( + reinterpret_cast<__m256i*>(to), from.m_val); +} + +// Aligned store +template <> +EIGEN_STRONG_INLINE void pstore(QInt32* to, const Packet8q32i& from) { + EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(reinterpret_cast<__m256i*>(to), + from.m_val); +} +template <> +EIGEN_STRONG_INLINE void pstore(QInt16* to, const Packet16q16i& from) { + EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(reinterpret_cast<__m256i*>(to), + from.m_val); +} +template <> +EIGEN_STRONG_INLINE void pstore(QInt16* to, const Packet8q16i& from) { + EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to), + from.m_val); +} +template <> +EIGEN_STRONG_INLINE void pstore(QUInt8* to, const Packet32q8u& from) { + EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(reinterpret_cast<__m256i*>(to), + from.m_val); +} 
+template <> +EIGEN_STRONG_INLINE void pstore(QInt8* to, const Packet32q8i& from) { + EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(reinterpret_cast<__m256i*>(to), + from.m_val); +} +template <> +EIGEN_STRONG_INLINE void pstore(QInt8* to, const Packet16q8i& from) { + EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to), + from.m_val); +} + +// Extract first element. +template <> +EIGEN_STRONG_INLINE QInt32 pfirst(const Packet8q32i& a) { + return _mm_cvtsi128_si32(_mm256_castsi256_si128(a)); +} +template <> +EIGEN_STRONG_INLINE QInt16 pfirst(const Packet16q16i& a) { + return _mm256_extract_epi16_N0(a.m_val); +} +template <> +EIGEN_STRONG_INLINE QUInt8 pfirst(const Packet32q8u& a) { + return static_cast(_mm256_extract_epi8_N0(a.m_val)); +} +template <> +EIGEN_STRONG_INLINE QInt8 pfirst(const Packet32q8i& a) { + return _mm256_extract_epi8_N0(a.m_val); +} + +// Initialize to constant value. +template <> +EIGEN_STRONG_INLINE Packet32q8i pset1(const QInt8& from) { + return _mm256_set1_epi8(from.value); +} +template <> +EIGEN_STRONG_INLINE Packet32q8u pset1(const QUInt8& from) { + return _mm256_set1_epi8(static_cast(from.value)); +} +template <> +EIGEN_STRONG_INLINE Packet8q32i pset1(const QInt32& from) { + return _mm256_set1_epi32(from.value); +} + +// Basic arithmetic packet ops for QInt32. +template <> +EIGEN_STRONG_INLINE Packet8q32i padd(const Packet8q32i& a, + const Packet8q32i& b) { + return _mm256_add_epi32(a.m_val, b.m_val); +} +template <> +EIGEN_STRONG_INLINE Packet16q16i pset1(const QInt16& from) { + return _mm256_set1_epi16(from.value); +} +template <> +EIGEN_STRONG_INLINE Packet8q32i psub(const Packet8q32i& a, + const Packet8q32i& b) { + return _mm256_sub_epi32(a.m_val, b.m_val); +} +// Note: mullo truncates the result to 32 bits. 
+template <> +EIGEN_STRONG_INLINE Packet8q32i pmul(const Packet8q32i& a, + const Packet8q32i& b) { + return _mm256_mullo_epi32(a.m_val, b.m_val); +} +template <> +EIGEN_STRONG_INLINE Packet8q32i pnegate(const Packet8q32i& a) { + return _mm256_sub_epi32(_mm256_setzero_si256(), a.m_val); +} + +// Min and max. +template <> +EIGEN_STRONG_INLINE Packet8q32i pmin(const Packet8q32i& a, + const Packet8q32i& b) { + return _mm256_min_epi32(a.m_val, b.m_val); +} +template <> +EIGEN_STRONG_INLINE Packet8q32i pmax(const Packet8q32i& a, + const Packet8q32i& b) { + return _mm256_max_epi32(a.m_val, b.m_val); +} + +template <> +EIGEN_STRONG_INLINE Packet16q16i pmin(const Packet16q16i& a, + const Packet16q16i& b) { + return _mm256_min_epi16(a.m_val, b.m_val); +} +template <> +EIGEN_STRONG_INLINE Packet16q16i pmax(const Packet16q16i& a, + const Packet16q16i& b) { + return _mm256_max_epi16(a.m_val, b.m_val); +} + +template <> +EIGEN_STRONG_INLINE Packet32q8u pmin(const Packet32q8u& a, + const Packet32q8u& b) { + return _mm256_min_epu8(a.m_val, b.m_val); +} +template <> +EIGEN_STRONG_INLINE Packet32q8u pmax(const Packet32q8u& a, + const Packet32q8u& b) { + return _mm256_max_epu8(a.m_val, b.m_val); +} + +template <> +EIGEN_STRONG_INLINE Packet32q8i pmin(const Packet32q8i& a, + const Packet32q8i& b) { + return _mm256_min_epi8(a.m_val, b.m_val); +} +template <> +EIGEN_STRONG_INLINE Packet32q8i pmax(const Packet32q8i& a, + const Packet32q8i& b) { + return _mm256_max_epi8(a.m_val, b.m_val); +} + +// Reductions. 
+template <> +EIGEN_STRONG_INLINE QInt32 predux_min(const Packet8q32i& a) { + __m256i tmp = _mm256_min_epi32(a, _mm256_permute2f128_si256(a, a, 1)); + tmp = + _mm256_min_epi32(tmp, _mm256_shuffle_epi32(tmp, _MM_SHUFFLE(1, 0, 3, 2))); + return pfirst( + _mm256_min_epi32(tmp, _mm256_shuffle_epi32(tmp, 1))); +} +template <> +EIGEN_STRONG_INLINE QInt32 predux_max(const Packet8q32i& a) { + __m256i tmp = _mm256_max_epi32(a, _mm256_permute2f128_si256(a, a, 1)); + tmp = + _mm256_max_epi32(tmp, _mm256_shuffle_epi32(tmp, _MM_SHUFFLE(1, 0, 3, 2))); + return pfirst( + _mm256_max_epi32(tmp, _mm256_shuffle_epi32(tmp, 1))); +} + +template <> +EIGEN_STRONG_INLINE QInt16 predux_min(const Packet16q16i& a) { + __m256i tmp = _mm256_min_epi16(a, _mm256_permute2f128_si256(a, a, 1)); + tmp = + _mm256_min_epi16(tmp, _mm256_shuffle_epi32(tmp, _MM_SHUFFLE(1, 0, 3, 2))); + tmp = _mm256_min_epi16(tmp, _mm256_shuffle_epi32(tmp, 1)); + return std::min(_mm256_extract_epi16_N0(tmp), _mm256_extract_epi16_N1(tmp)); +} +template <> +EIGEN_STRONG_INLINE QInt16 predux_max(const Packet16q16i& a) { + __m256i tmp = _mm256_max_epi16(a, _mm256_permute2f128_si256(a, a, 1)); + tmp = + _mm256_max_epi16(tmp, _mm256_shuffle_epi32(tmp, _MM_SHUFFLE(1, 0, 3, 2))); + tmp = _mm256_max_epi16(tmp, _mm256_shuffle_epi32(tmp, 1)); + return std::max(_mm256_extract_epi16_N0(tmp), _mm256_extract_epi16_N1(tmp)); +} + +template <> +EIGEN_STRONG_INLINE QUInt8 predux_min(const Packet32q8u& a) { + __m256i tmp = _mm256_min_epu8(a, _mm256_permute2f128_si256(a, a, 1)); + tmp = + _mm256_min_epu8(tmp, _mm256_shuffle_epi32(tmp, _MM_SHUFFLE(1, 0, 3, 2))); + tmp = _mm256_min_epu8(tmp, _mm256_shuffle_epi32(tmp, 1)); + tmp = _mm256_min_epu8(tmp, + _mm256_shufflelo_epi16(tmp, _MM_SHUFFLE(1, 0, 3, 2))); + return std::min(static_cast(_mm256_extract_epi8_N0(tmp)), + static_cast(_mm256_extract_epi8_N1(tmp))); +} +template <> +EIGEN_STRONG_INLINE QUInt8 predux_max(const Packet32q8u& a) { + __m256i tmp = _mm256_max_epu8(a, 
_mm256_permute2f128_si256(a, a, 1)); + tmp = + _mm256_max_epu8(tmp, _mm256_shuffle_epi32(tmp, _MM_SHUFFLE(1, 0, 3, 2))); + tmp = _mm256_max_epu8(tmp, _mm256_shuffle_epi32(tmp, 1)); + tmp = _mm256_max_epu8(tmp, + _mm256_shufflelo_epi16(tmp, _MM_SHUFFLE(1, 0, 3, 2))); + return std::max(static_cast(_mm256_extract_epi8_N0(tmp)), + static_cast(_mm256_extract_epi8_N1(tmp))); +} + +template <> +EIGEN_STRONG_INLINE QInt8 predux_min(const Packet32q8i& a) { + __m256i tmp = _mm256_min_epi8(a, _mm256_permute2f128_si256(a, a, 1)); + tmp = + _mm256_min_epi8(tmp, _mm256_shuffle_epi32(tmp, _MM_SHUFFLE(1, 0, 3, 2))); + tmp = _mm256_min_epi8(tmp, _mm256_shuffle_epi32(tmp, 1)); + tmp = _mm256_min_epi8(tmp, + _mm256_shufflelo_epi16(tmp, _MM_SHUFFLE(1, 0, 3, 2))); + return std::min(_mm256_extract_epi8_N0(tmp), _mm256_extract_epi8_N1(tmp)); +} +template <> +EIGEN_STRONG_INLINE QInt8 predux_max(const Packet32q8i& a) { + __m256i tmp = _mm256_max_epi8(a, _mm256_permute2f128_si256(a, a, 1)); + tmp = + _mm256_max_epi8(tmp, _mm256_shuffle_epi32(tmp, _MM_SHUFFLE(1, 0, 3, 2))); + tmp = _mm256_max_epi8(tmp, _mm256_shuffle_epi32(tmp, 1)); + tmp = _mm256_max_epi8(tmp, + _mm256_shufflelo_epi16(tmp, _MM_SHUFFLE(1, 0, 3, 2))); + return std::max(_mm256_extract_epi8_N0(tmp), _mm256_extract_epi8_N1(tmp)); +} + +// Vectorized scaling of Packet32q8i by float. 
+template <> +struct scalar_product_op : binary_op_base { + typedef typename ScalarBinaryOpTraits::ReturnType result_type; +#ifdef EIGEN_SCALAR_BINARY_OP_PLUGIN + scalar_product_op(){EIGEN_SCALAR_BINARY_OP_PLUGIN} +#endif + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type + operator()(const QInt32& a, const double& b) const { + return a * b; + } + + EIGEN_STRONG_INLINE const Packet8q32i packetOp(const Packet8q32i& a, + const double& b) const { + __m256d scale = _mm256_set1_pd(b); + __m256d a_lo = _mm256_cvtepi32_pd(_mm256_castsi256_si128(a)); + __m128i result_lo = _mm256_cvtpd_epi32(_mm256_mul_pd(scale, a_lo)); + __m256d a_hi = _mm256_cvtepi32_pd(_mm256_extracti128_si256(a, 1)); + __m128i result_hi = _mm256_cvtpd_epi32(_mm256_mul_pd(scale, a_hi)); + return _mm256_insertf128_si256(_mm256_castsi128_si256(result_lo), result_hi, + 1); + } +}; + +template <> +struct functor_traits> { + enum { Cost = 4 * NumTraits::MulCost, PacketAccess = true }; +}; + +} // end namespace internal +} // end namespace Eigen + +#endif // TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_PACKETMATHAVX2_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/PacketMathAVX512.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/PacketMathAVX512.h new file mode 100644 index 0000000000000000000000000000000000000000..af02ac0ed2fd1d1ed5a2634ea63a7be3e572fdb9 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/PacketMathAVX512.h @@ -0,0 +1,531 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_PACKETMATHAVX512_H_ +#define TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_PACKETMATHAVX512_H_ + +#include "PacketMathAVX2.h" + +namespace Eigen { +namespace internal { + +typedef eigen_packet_wrapper<__m512i, 30> Packet64q8i; +typedef eigen_packet_wrapper<__m512i, 31> Packet32q16i; +typedef eigen_packet_wrapper<__m512i, 32> Packet64q8u; +typedef eigen_packet_wrapper<__m512i, 33> Packet16q32i; + +template <> +struct packet_traits : default_packet_traits { + typedef Packet64q8i type; + typedef Packet32q8i half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 64, + }; + enum { + HasAdd = 0, + HasSub = 0, + HasMul = 0, + HasNegate = 0, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 1, + HasMax = 1, + HasConj = 0, + HasSetLinear = 0 + }; +}; +template <> +struct packet_traits : default_packet_traits { + typedef Packet64q8u type; + typedef Packet32q8u half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 64, + }; + enum { + HasAdd = 0, + HasSub = 0, + HasMul = 0, + HasNegate = 0, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 1, + HasMax = 1, + HasConj = 0, + HasSetLinear = 0 + }; +}; +template <> +struct packet_traits : default_packet_traits { + typedef Packet32q16i type; + typedef Packet16q16i half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 32, + }; + enum { + HasAdd = 0, + HasSub = 0, + HasMul = 0, + HasNegate = 0, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 1, + HasMax = 1, + HasConj = 0, + HasSetLinear = 0 + }; +}; +template <> 
+struct packet_traits : default_packet_traits { + typedef Packet16q32i type; + typedef Packet8q32i half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 16, + }; + enum { + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasNegate = 1, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 1, + HasMax = 1, + HasConj = 0, + HasSetLinear = 0 + }; +}; + +template <> +struct unpacket_traits { + typedef QInt8 type; + typedef Packet32q8i half; + enum { + size = 64, + alignment = Aligned64, + masked_load_available = false, + masked_store_available = false + }; +}; +template <> +struct unpacket_traits { + typedef QInt16 type; + typedef Packet16q16i half; + enum { + size = 32, + alignment = Aligned64, + masked_load_available = false, + masked_store_available = false + }; +}; +template <> +struct unpacket_traits { + typedef QUInt8 type; + typedef Packet32q8u half; + enum { + size = 64, + alignment = Aligned64, + masked_load_available = false, + masked_store_available = false + }; +}; +template <> +struct unpacket_traits { + typedef QInt32 type; + typedef Packet8q32i half; + enum { + size = 16, + alignment = Aligned64, + masked_load_available = false, + masked_store_available = false + }; +}; + +// Unaligned load +template <> +EIGEN_STRONG_INLINE Packet64q8i ploadu(const QInt8* from) { + EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_si512( + reinterpret_cast(from)); +} +template <> +EIGEN_STRONG_INLINE Packet32q16i ploadu(const QInt16* from) { + EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_si512( + reinterpret_cast(from)); +} +template <> +EIGEN_STRONG_INLINE Packet64q8u ploadu(const QUInt8* from) { + EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_si512( + reinterpret_cast(from)); +} +template <> +EIGEN_STRONG_INLINE Packet16q32i ploadu(const QInt32* from) { + EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_si512( + reinterpret_cast(from)); +} + +// Aligned load +template <> +EIGEN_STRONG_INLINE Packet64q8i pload(const QInt8* from) { + EIGEN_DEBUG_ALIGNED_LOAD return 
_mm512_load_si512( + reinterpret_cast(from)); +} +template <> +EIGEN_STRONG_INLINE Packet32q16i pload(const QInt16* from) { + EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_si512( + reinterpret_cast(from)); +} +template <> +EIGEN_STRONG_INLINE Packet64q8u pload(const QUInt8* from) { + EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_si512( + reinterpret_cast(from)); +} +template <> +EIGEN_STRONG_INLINE Packet16q32i pload(const QInt32* from) { + EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_si512( + reinterpret_cast(from)); +} + +// Unaligned store +template <> +EIGEN_STRONG_INLINE void pstoreu(QInt8* to, const Packet64q8i& from) { + EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_si512( + reinterpret_cast<__m512i*>(to), from.m_val); +} +template <> +EIGEN_STRONG_INLINE void pstoreu(QInt16* to, const Packet32q16i& from) { + EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_si512( + reinterpret_cast<__m512i*>(to), from.m_val); +} +template <> +EIGEN_STRONG_INLINE void pstoreu(QUInt8* to, const Packet64q8u& from) { + EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_si512( + reinterpret_cast<__m512i*>(to), from.m_val); +} +template <> +EIGEN_STRONG_INLINE void pstoreu(QInt32* to, const Packet16q32i& from) { + EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_si512( + reinterpret_cast<__m512i*>(to), from.m_val); +} + +// Aligned store +template <> +EIGEN_STRONG_INLINE void pstore(QInt32* to, const Packet16q32i& from) { + EIGEN_DEBUG_ALIGNED_STORE _mm512_store_si512(reinterpret_cast<__m512i*>(to), + from.m_val); +} +template <> +EIGEN_STRONG_INLINE void pstore(QUInt8* to, const Packet64q8u& from) { + EIGEN_DEBUG_ALIGNED_STORE _mm512_store_si512(reinterpret_cast<__m512i*>(to), + from.m_val); +} +template <> +EIGEN_STRONG_INLINE void pstore(QInt8* to, const Packet64q8i& from) { + EIGEN_DEBUG_ALIGNED_STORE _mm512_store_si512(reinterpret_cast<__m512i*>(to), + from.m_val); +} +template <> +EIGEN_STRONG_INLINE void pstore(QInt16* to, const Packet32q16i& from) { + EIGEN_DEBUG_ALIGNED_STORE 
_mm512_store_si512(reinterpret_cast<__m512i*>(to), + from.m_val); +} + +// Extract first element. +template <> +EIGEN_STRONG_INLINE QInt32 pfirst(const Packet16q32i& a) { + return _mm_cvtsi128_si32(_mm512_extracti32x4_epi32(a, 0)); +} +template <> +EIGEN_STRONG_INLINE QUInt8 pfirst(const Packet64q8u& a) { + return static_cast( + _mm_extract_epi8(_mm512_extracti32x4_epi32(a.m_val, 0), 0)); +} +template <> +EIGEN_STRONG_INLINE QInt8 pfirst(const Packet64q8i& a) { + return _mm_extract_epi8(_mm512_extracti32x4_epi32(a.m_val, 0), 0); +} +template <> +EIGEN_STRONG_INLINE QInt16 pfirst(const Packet32q16i& a) { + return _mm_extract_epi16(_mm512_extracti32x4_epi32(a.m_val, 0), 0); +} + +// Initialize to constant value. +template <> +EIGEN_STRONG_INLINE Packet64q8i pset1(const QInt8& from) { + return _mm512_set1_epi8(from.value); +} +template <> +EIGEN_STRONG_INLINE Packet32q16i pset1(const QInt16& from) { + return _mm512_set1_epi16(from.value); +} +template <> +EIGEN_STRONG_INLINE Packet64q8u pset1(const QUInt8& from) { + return _mm512_set1_epi8(static_cast(from.value)); +} +template <> +EIGEN_STRONG_INLINE Packet16q32i pset1(const QInt32& from) { + return _mm512_set1_epi32(from.value); +} + +// Basic arithmetic packet ops for QInt32. +template <> +EIGEN_STRONG_INLINE Packet16q32i padd(const Packet16q32i& a, + const Packet16q32i& b) { + return _mm512_add_epi32(a.m_val, b.m_val); +} +template <> +EIGEN_STRONG_INLINE Packet16q32i psub(const Packet16q32i& a, + const Packet16q32i& b) { + return _mm512_sub_epi32(a.m_val, b.m_val); +} +// Note: mullo truncates the result to 32 bits. +template <> +EIGEN_STRONG_INLINE Packet16q32i pmul(const Packet16q32i& a, + const Packet16q32i& b) { + return _mm512_mullo_epi32(a.m_val, b.m_val); +} +template <> +EIGEN_STRONG_INLINE Packet16q32i pnegate(const Packet16q32i& a) { + return _mm512_sub_epi32(_mm512_setzero_si512(), a.m_val); +} + +// Min and max. 
+template <> +EIGEN_STRONG_INLINE Packet16q32i pmin(const Packet16q32i& a, + const Packet16q32i& b) { + return _mm512_min_epi32(a.m_val, b.m_val); +} +template <> +EIGEN_STRONG_INLINE Packet16q32i pmax(const Packet16q32i& a, + const Packet16q32i& b) { + return _mm512_max_epi32(a.m_val, b.m_val); +} + +template <> +EIGEN_STRONG_INLINE Packet64q8u pmin(const Packet64q8u& a, + const Packet64q8u& b) { +#ifdef EIGEN_VECTORIZE_AVX512BW + return _mm512_min_epu8(a.m_val, b.m_val); +#else + __m256i ap0 = _mm512_extracti32x8_epi32(a.m_val, 0); + __m256i ap1 = _mm512_extracti32x8_epi32(a.m_val, 1); + __m256i bp0 = _mm512_extracti32x8_epi32(b.m_val, 0); + __m256i bp1 = _mm512_extracti32x8_epi32(b.m_val, 1); + __m256i r0 = _mm256_min_epu8(ap0, bp0); + __m256i r1 = _mm256_min_epu8(ap1, bp1); + return _mm512_inserti32x8(_mm512_castsi256_si512(r0), r1, 1); +#endif +} +template <> +EIGEN_STRONG_INLINE Packet64q8u pmax(const Packet64q8u& a, + const Packet64q8u& b) { +#ifdef EIGEN_VECTORIZE_AVX512BW + return _mm512_max_epu8(a.m_val, b.m_val); +#else + __m256i ap0 = _mm512_extracti32x8_epi32(a.m_val, 0); + __m256i ap1 = _mm512_extracti32x8_epi32(a.m_val, 1); + __m256i bp0 = _mm512_extracti32x8_epi32(b.m_val, 0); + __m256i bp1 = _mm512_extracti32x8_epi32(b.m_val, 1); + __m256i r0 = _mm256_max_epu8(ap0, bp0); + __m256i r1 = _mm256_max_epu8(ap1, bp1); + return _mm512_inserti32x8(_mm512_castsi256_si512(r0), r1, 1); +#endif +} + +template <> +EIGEN_STRONG_INLINE Packet64q8i pmin(const Packet64q8i& a, + const Packet64q8i& b) { +#ifdef EIGEN_VECTORIZE_AVX512BW + return _mm512_min_epi8(a.m_val, b.m_val); +#else + __m256i ap0 = _mm512_extracti32x8_epi32(a.m_val, 0); + __m256i ap1 = _mm512_extracti32x8_epi32(a.m_val, 1); + __m256i bp0 = _mm512_extracti32x8_epi32(b.m_val, 0); + __m256i bp1 = _mm512_extracti32x8_epi32(b.m_val, 1); + __m256i r0 = _mm256_min_epi8(ap0, bp0); + __m256i r1 = _mm256_min_epi8(ap1, bp1); + return _mm512_inserti32x8(_mm512_castsi256_si512(r0), r1, 1); +#endif +} +template 
<> +EIGEN_STRONG_INLINE Packet32q16i pmin(const Packet32q16i& a, + const Packet32q16i& b) { +#ifdef EIGEN_VECTORIZE_AVX512BW + return _mm512_min_epi16(a.m_val, b.m_val); +#else + __m256i ap0 = _mm512_extracti32x8_epi32(a.m_val, 0); + __m256i ap1 = _mm512_extracti32x8_epi32(a.m_val, 1); + __m256i bp0 = _mm512_extracti32x8_epi32(b.m_val, 0); + __m256i bp1 = _mm512_extracti32x8_epi32(b.m_val, 1); + __m256i r0 = _mm256_min_epi16(ap0, bp0); + __m256i r1 = _mm256_min_epi16(ap1, bp1); + return _mm512_inserti32x8(_mm512_castsi256_si512(r0), r1, 1); +#endif +} +template <> +EIGEN_STRONG_INLINE Packet64q8i pmax(const Packet64q8i& a, + const Packet64q8i& b) { +#ifdef EIGEN_VECTORIZE_AVX512BW + return _mm512_max_epi8(a.m_val, b.m_val); +#else + __m256i ap0 = _mm512_extracti32x8_epi32(a.m_val, 0); + __m256i ap1 = _mm512_extracti32x8_epi32(a.m_val, 1); + __m256i bp0 = _mm512_extracti32x8_epi32(b.m_val, 0); + __m256i bp1 = _mm512_extracti32x8_epi32(b.m_val, 1); + __m256i r0 = _mm256_max_epi8(ap0, bp0); + __m256i r1 = _mm256_max_epi8(ap1, bp1); + return _mm512_inserti32x8(_mm512_castsi256_si512(r0), r1, 1); +#endif +} +template <> +EIGEN_STRONG_INLINE Packet32q16i pmax(const Packet32q16i& a, + const Packet32q16i& b) { +#ifdef EIGEN_VECTORIZE_AVX512BW + return _mm512_max_epi16(a.m_val, b.m_val); +#else + __m256i ap0 = _mm512_extracti32x8_epi32(a.m_val, 0); + __m256i ap1 = _mm512_extracti32x8_epi32(a.m_val, 1); + __m256i bp0 = _mm512_extracti32x8_epi32(b.m_val, 0); + __m256i bp1 = _mm512_extracti32x8_epi32(b.m_val, 1); + __m256i r0 = _mm256_max_epi16(ap0, bp0); + __m256i r1 = _mm256_max_epi16(ap1, bp1); + return _mm512_inserti32x8(_mm512_castsi256_si512(r0), r1, 1); +#endif +} + +// Reductions. 
+template <> +EIGEN_STRONG_INLINE QInt32 predux_min(const Packet16q32i& a) { + Packet4i lane0 = _mm512_extracti32x4_epi32(a.m_val, 0); + Packet4i lane1 = _mm512_extracti32x4_epi32(a.m_val, 1); + Packet4i lane2 = _mm512_extracti32x4_epi32(a.m_val, 2); + Packet4i lane3 = _mm512_extracti32x4_epi32(a.m_val, 3); + Packet4i res = + _mm_min_epi32(_mm_min_epi32(lane0, lane1), _mm_min_epi32(lane2, lane3)); + res = _mm_min_epi32(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 3, 2))); + res = _mm_min_epi32(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 0, 1))); + return pfirst(res); +} +template <> +EIGEN_STRONG_INLINE QInt32 predux_max(const Packet16q32i& a) { + Packet4i lane0 = _mm512_extracti32x4_epi32(a.m_val, 0); + Packet4i lane1 = _mm512_extracti32x4_epi32(a.m_val, 1); + Packet4i lane2 = _mm512_extracti32x4_epi32(a.m_val, 2); + Packet4i lane3 = _mm512_extracti32x4_epi32(a.m_val, 3); + Packet4i res = + _mm_max_epi32(_mm_max_epi32(lane0, lane1), _mm_max_epi32(lane2, lane3)); + res = _mm_max_epi32(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 3, 2))); + res = _mm_max_epi32(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 0, 1))); + return pfirst(res); +} +template <> +EIGEN_STRONG_INLINE QInt16 predux_min(const Packet32q16i& a) { + Packet4i lane0 = _mm512_extracti32x4_epi32(a.m_val, 0); + Packet4i lane1 = _mm512_extracti32x4_epi32(a.m_val, 1); + Packet4i lane2 = _mm512_extracti32x4_epi32(a.m_val, 2); + Packet4i lane3 = _mm512_extracti32x4_epi32(a.m_val, 3); + Packet4i res = + _mm_min_epi16(_mm_min_epi16(lane0, lane1), _mm_min_epi16(lane2, lane3)); + res = _mm_min_epi16(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 3, 2))); + res = _mm_min_epi16(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 0, 1))); + std::uint32_t w = pfirst(res); + return std::min( + {static_cast(w >> 16), static_cast(w)}); +} +template <> +EIGEN_STRONG_INLINE QInt16 predux_max(const Packet32q16i& a) { + Packet4i lane0 = _mm512_extracti32x4_epi32(a.m_val, 0); + Packet4i lane1 = 
_mm512_extracti32x4_epi32(a.m_val, 1); + Packet4i lane2 = _mm512_extracti32x4_epi32(a.m_val, 2); + Packet4i lane3 = _mm512_extracti32x4_epi32(a.m_val, 3); + Packet4i res = + _mm_max_epi16(_mm_max_epi16(lane0, lane1), _mm_max_epi16(lane2, lane3)); + res = _mm_max_epi16(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 3, 2))); + res = _mm_max_epi16(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 0, 1))); + std::uint32_t w = pfirst(res); + return std::max( + {static_cast(w >> 16), static_cast(w)}); +} +template <> +EIGEN_STRONG_INLINE QUInt8 predux_min(const Packet64q8u& a) { + Packet4i lane0 = _mm512_extracti32x4_epi32(a.m_val, 0); + Packet4i lane1 = _mm512_extracti32x4_epi32(a.m_val, 1); + Packet4i lane2 = _mm512_extracti32x4_epi32(a.m_val, 2); + Packet4i lane3 = _mm512_extracti32x4_epi32(a.m_val, 3); + Packet4i res = + _mm_min_epu8(_mm_min_epu8(lane0, lane1), _mm_min_epu8(lane2, lane3)); + res = _mm_min_epu8(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 3, 2))); + res = _mm_min_epu8(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 0, 1))); + std::uint32_t w = pfirst(res); + return std::min( + {static_cast(w >> 24), static_cast(w >> 16), + static_cast(w >> 8), static_cast(w)}); +} +template <> +EIGEN_STRONG_INLINE QUInt8 predux_max(const Packet64q8u& a) { + Packet4i lane0 = _mm512_extracti32x4_epi32(a.m_val, 0); + Packet4i lane1 = _mm512_extracti32x4_epi32(a.m_val, 1); + Packet4i lane2 = _mm512_extracti32x4_epi32(a.m_val, 2); + Packet4i lane3 = _mm512_extracti32x4_epi32(a.m_val, 3); + Packet4i res = + _mm_max_epu8(_mm_max_epu8(lane0, lane1), _mm_max_epu8(lane2, lane3)); + res = _mm_max_epu8(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 3, 2))); + res = _mm_max_epu8(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 0, 1))); + std::uint32_t w = pfirst(res); + return std::max( + {static_cast(w >> 24), static_cast(w >> 16), + static_cast(w >> 8), static_cast(w)}); +} +template <> +EIGEN_STRONG_INLINE QInt8 predux_min(const Packet64q8i& a) { + Packet4i lane0 = 
_mm512_extracti32x4_epi32(a.m_val, 0); + Packet4i lane1 = _mm512_extracti32x4_epi32(a.m_val, 1); + Packet4i lane2 = _mm512_extracti32x4_epi32(a.m_val, 2); + Packet4i lane3 = _mm512_extracti32x4_epi32(a.m_val, 3); + Packet4i res = + _mm_min_epi8(_mm_min_epi8(lane0, lane1), _mm_min_epi8(lane2, lane3)); + res = _mm_min_epi8(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 3, 2))); + res = _mm_min_epi8(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 0, 1))); + std::uint32_t w = pfirst(res); + return std::min( + {static_cast(w >> 24), static_cast(w >> 16), + static_cast(w >> 8), static_cast(w)}); +} +template <> +EIGEN_STRONG_INLINE QInt8 predux_max(const Packet64q8i& a) { + Packet4i lane0 = _mm512_extracti32x4_epi32(a.m_val, 0); + Packet4i lane1 = _mm512_extracti32x4_epi32(a.m_val, 1); + Packet4i lane2 = _mm512_extracti32x4_epi32(a.m_val, 2); + Packet4i lane3 = _mm512_extracti32x4_epi32(a.m_val, 3); + Packet4i res = + _mm_max_epi8(_mm_max_epi8(lane0, lane1), _mm_max_epi8(lane2, lane3)); + res = _mm_max_epi8(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 3, 2))); + res = _mm_max_epi8(res, _mm_shuffle_epi32(res, _MM_SHUFFLE(0, 0, 0, 1))); + std::uint32_t w = pfirst(res); + return std::min( + {static_cast(w >> 24), static_cast(w >> 16), + static_cast(w >> 8), static_cast(w)}); +} + +} // end namespace internal +} // end namespace Eigen + +#endif // TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_PACKETMATHAVX512_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/TypeCastingAVX2.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/TypeCastingAVX2.h new file mode 100644 index 0000000000000000000000000000000000000000..1e0fa8f3fe3259776f6b4135767276a825106cb1 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/TypeCastingAVX2.h @@ -0,0 +1,108 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_TYPECASTINGAVX2_H_ +#define TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_TYPECASTINGAVX2_H_ + +namespace Eigen { +namespace internal { + +typedef __m256 Packet8f; + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; + +template <> +EIGEN_STRONG_INLINE Packet8f pcast(const Packet8q32i& a) { + return _mm256_cvtepi32_ps(a.m_val); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; + +template <> +EIGEN_STRONG_INLINE Packet8q32i pcast(const Packet8f& a) { + return _mm256_cvtps_epi32(a); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 }; +}; + +template <> +EIGEN_STRONG_INLINE Packet32q8i +pcast(const Packet8q32i& a, const Packet8q32i& b, + const Packet8q32i& c, const Packet8q32i& d) { + __m256i converted = _mm256_packs_epi16(_mm256_packs_epi32(a.m_val, b.m_val), + _mm256_packs_epi32(c.m_val, d.m_val)); + // Since packs does not cross 128 bit lane boundaries, + // we have to permute to properly order the final result. 
+ const __m256i permute_mask = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0); + return _mm256_permutevar8x32_epi32(converted, permute_mask); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 }; +}; + +template <> +EIGEN_STRONG_INLINE Packet32q8i +pcast(const Packet8f& a, const Packet8f& b, + const Packet8f& c, const Packet8f& d) { + const __m256i a_conv = _mm256_cvtps_epi32(a); + const __m256i b_conv = _mm256_cvtps_epi32(b); + const __m256i c_conv = _mm256_cvtps_epi32(c); + const __m256i d_conv = _mm256_cvtps_epi32(d); + __m256i converted = _mm256_packs_epi16(_mm256_packs_epi32(a_conv, b_conv), + _mm256_packs_epi32(c_conv, d_conv)); + const __m256i permute_mask = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0); + return _mm256_permutevar8x32_epi32(converted, permute_mask); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 }; +}; + +template <> +EIGEN_STRONG_INLINE Packet32q8u +pcast(const Packet8q32i& a, const Packet8q32i& b, + const Packet8q32i& c, const Packet8q32i& d) { + // _mm256_packus_epi32 trims negative numbers to 0 but we can't allow numbers + // that are too large because _mm256_packus_epi16 expects signed input + // (example of problem input: 0x11111111, which saturates to 0xffff = -1, + // which saturates to 0). + const __m256i a_clip = _mm256_min_epi32(a, _mm256_set1_epi32(255)); + const __m256i b_clip = _mm256_min_epi32(b, _mm256_set1_epi32(255)); + const __m256i c_clip = _mm256_min_epi32(c, _mm256_set1_epi32(255)); + const __m256i d_clip = _mm256_min_epi32(d, _mm256_set1_epi32(255)); + const __m256i converted = _mm256_packus_epi16( + _mm256_packus_epi32(a_clip, b_clip), _mm256_packus_epi32(c_clip, d_clip)); + // Since packus does not cross 128 bit lane boundaries, + // we have to permute to properly order the final result. 
+ const __m256i permute_mask = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0); + return _mm256_permutevar8x32_epi32(converted, permute_mask); +} + +} // end namespace internal +} // end namespace Eigen + +#endif // TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_TYPECASTINGAVX2_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/TypeCastingAVX512.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/TypeCastingAVX512.h new file mode 100644 index 0000000000000000000000000000000000000000..54c052d1671bd07a5f30af8984b56e560922dd39 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint/TypeCastingAVX512.h @@ -0,0 +1,206 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_TYPECASTINGAVX512_H_ +#define TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_TYPECASTINGAVX512_H_ + +namespace Eigen { +namespace internal { + +typedef __m512 Packet16f; +typedef __m512i Packet16i; + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; + +template <> +EIGEN_STRONG_INLINE Packet16f pcast(const Packet16q32i& a) { + return _mm512_cvtepi32_ps(a.m_val); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; + +template <> +EIGEN_STRONG_INLINE Packet16q32i pcast(const Packet16f& a) { + return _mm512_cvtps_epi32(a); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; +}; + +template <> +EIGEN_STRONG_INLINE Packet32q16i pcast(const Packet16f& a, + const Packet16f& b) { + Packet16i a_int = _mm512_cvtps_epi32(a); + Packet16i b_int = _mm512_cvtps_epi32(b); +#ifdef EIGEN_VECTORIZE_AVX512BW + return _mm512_packs_epi32(a_int, b_int); +#else + Packet8i ab_int16_low = _mm256_permute4x64_epi64( + _mm256_packs_epi32(_mm512_castsi512_si256(a_int), + _mm512_castsi512_si256(b_int)), + _MM_SHUFFLE(0, 2, 1, 3)); + Packet8i ab_int16_high = _mm256_permute4x64_epi64( + _mm256_packs_epi32(_mm512_extracti32x8_epi32(a_int, 1), + _mm512_extracti32x8_epi32(b_int, 1)), + _MM_SHUFFLE(0, 2, 1, 3)); + return _mm512_inserti32x8(_mm512_castsi256_si512(ab_int16_low), ab_int16_high, + 1); +#endif +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 }; +}; + +template <> +EIGEN_STRONG_INLINE Packet64q8i pcast(const Packet16f& a, + const Packet16f& b, + const Packet16f& c, + const Packet16f& d) { + Packet16i a_int = _mm512_cvtps_epi32(a); + Packet16i b_int = _mm512_cvtps_epi32(b); + Packet16i c_int = _mm512_cvtps_epi32(c); 
+ Packet16i d_int = _mm512_cvtps_epi32(d); +#ifdef EIGEN_VECTORIZE_AVX512BW + return _mm512_packs_epi16(_mm512_packs_epi32(a_int, b_int), + _mm512_packs_epi32(c_int, d_int)); +#else + Packet8i ab_int16_low = _mm256_permute4x64_epi64( + _mm256_packs_epi32(_mm512_castsi512_si256(a_int), + _mm512_castsi512_si256(b_int)), + _MM_SHUFFLE(0, 2, 1, 3)); + Packet8i cd_int16_low = _mm256_permute4x64_epi64( + _mm256_packs_epi32(_mm512_castsi512_si256(c_int), + _mm512_castsi512_si256(d_int)), + _MM_SHUFFLE(0, 2, 1, 3)); + Packet8i ab_int16_high = _mm256_permute4x64_epi64( + _mm256_packs_epi32(_mm512_extracti32x8_epi32(a_int, 1), + _mm512_extracti32x8_epi32(b_int, 1)), + _MM_SHUFFLE(0, 2, 1, 3)); + Packet8i cd_int16_high = _mm256_permute4x64_epi64( + _mm256_packs_epi32(_mm512_extracti32x8_epi32(c_int, 1), + _mm512_extracti32x8_epi32(d_int, 1)), + _MM_SHUFFLE(0, 2, 1, 3)); + Packet8i abcd_int8_low = _mm256_permute4x64_epi64( + _mm256_packs_epi16(ab_int16_low, cd_int16_low), _MM_SHUFFLE(0, 2, 1, 3)); + Packet8i abcd_int8_high = + _mm256_permute4x64_epi64(_mm256_packs_epi16(ab_int16_high, cd_int16_high), + _MM_SHUFFLE(0, 2, 1, 3)); + return _mm512_inserti32x8(_mm512_castsi256_si512(abcd_int8_low), + abcd_int8_high, 1); +#endif +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 }; +}; + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; +}; + +template <> +EIGEN_STRONG_INLINE Packet64q8i +pcast(const Packet16q32i& a, const Packet16q32i& b, + const Packet16q32i& c, const Packet16q32i& d) { + __m128i a_part = _mm512_cvtsepi32_epi8(a); + __m128i b_part = _mm512_cvtsepi32_epi8(b); + __m128i c_part = _mm512_cvtsepi32_epi8(c); + __m128i d_part = _mm512_cvtsepi32_epi8(d); + __m256i ab = + _mm256_inserti128_si256(_mm256_castsi128_si256(a_part), b_part, 1); + __m256i cd = + _mm256_inserti128_si256(_mm256_castsi128_si256(c_part), d_part, 1); + __m512i converted = 
_mm512_inserti64x4(_mm512_castsi256_si512(ab), cd, 1); + return converted; +} + +template <> +EIGEN_STRONG_INLINE Packet32q16i pcast( + const Packet16q32i& a, const Packet16q32i& b) { + __m256i a_part = _mm512_cvtsepi32_epi16(a); + __m256i b_part = _mm512_cvtsepi32_epi16(b); + __m512i converted = + _mm512_inserti64x4(_mm512_castsi256_si512(a_part), b_part, 1); + return converted; +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 }; +}; + +template <> +EIGEN_STRONG_INLINE Packet64q8u +pcast(const Packet16q32i& a, const Packet16q32i& b, + const Packet16q32i& c, const Packet16q32i& d) { + // Brute-force saturation since there isn't a pack operation for unsigned + // numbers that keeps the elements in order. + __m128i a_part = _mm512_cvtepi32_epi8(_mm512_max_epi32( + _mm512_min_epi32(a, _mm512_set1_epi32(255)), _mm512_setzero_si512())); + __m128i b_part = _mm512_cvtepi32_epi8(_mm512_max_epi32( + _mm512_min_epi32(b, _mm512_set1_epi32(255)), _mm512_setzero_si512())); + __m128i c_part = _mm512_cvtepi32_epi8(_mm512_max_epi32( + _mm512_min_epi32(c, _mm512_set1_epi32(255)), _mm512_setzero_si512())); + __m128i d_part = _mm512_cvtepi32_epi8(_mm512_max_epi32( + _mm512_min_epi32(d, _mm512_set1_epi32(255)), _mm512_setzero_si512())); + __m256i ab = + _mm256_inserti128_si256(_mm256_castsi128_si256(a_part), b_part, 1); + __m256i cd = + _mm256_inserti128_si256(_mm256_castsi128_si256(c_part), d_part, 1); + __m512i converted = _mm512_inserti64x4(_mm512_castsi256_si512(ab), cd, 1); + return converted; +} + +#if 0 +// The type Packet32q16u does not exist for AVX-512 yet +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; +}; + +template <> +EIGEN_STRONG_INLINE Packet32q16u +pcast(const Packet16q32i& a, + const Packet16q32i& b) { + // Brute-force saturation since there isn't a pack operation for unsigned + // numbers that keeps the elements in order. 
+ __m256i a_part = + _mm512_cvtepi32_epi16(_mm512_max_epi32( + _mm512_min_epi32(a, _mm512_set1_epi32(65535)), _mm512_setzero_si512())); + __m256i b_part = _mm512_cvtepi32_epi16( + _mm512_max_epi32(_mm512_min_epi32(b, _mm512_set1_epi32(65535)), + _mm512_setzero_si512())); + __m512i converted = + _mm512_inserti64x4(_mm512_castsi256_si512(a_part), b_part, 1); + return converted; +} +#endif + +} // end namespace internal +} // end namespace Eigen + +#endif // TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_TYPECASTINGAVX512_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint_types.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint_types.h new file mode 100644 index 0000000000000000000000000000000000000000..5ade0b38be477de291d815eafe7cbb5089c07889 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/fixedpoint_types.h @@ -0,0 +1,354 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_TYPES_H_ +#define TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_TYPES_H_ + +#include + +#include +#include +#include + +namespace Eigen { + +// The mantissa part of the fixed point representation. 
See +// go/tensorfixedpoint for details +struct QInt8; +struct QUInt8; +struct QInt16; +struct QUInt16; +struct QInt32; + +template <> +struct NumTraits : GenericNumTraits {}; +template <> +struct NumTraits : GenericNumTraits {}; +template <> +struct NumTraits : GenericNumTraits {}; +template <> +struct NumTraits : GenericNumTraits {}; +template <> +struct NumTraits : GenericNumTraits {}; + +namespace internal { +template <> +struct scalar_product_traits { + enum { + // Cost = NumTraits::MulCost, + Defined = 1 + }; + typedef QInt32 ReturnType; +}; +} // namespace internal + +// Wrap the 8bit int into a QInt8 struct instead of using a typedef to prevent +// the compiler from silently type cast the mantissa into a bigger or a smaller +// representation. +struct QInt8 { + QInt8() : value(0) {} + QInt8(const int8_t v) : value(v) {} + QInt8(const QInt32 v); + + operator int() const { return static_cast(value); } + + int8_t value; +}; + +struct QUInt8 { + QUInt8() : value(0) {} + QUInt8(const uint8_t v) : value(v) {} + QUInt8(const QInt32 v); + + operator int() const { return static_cast(value); } + + uint8_t value; +}; + +struct QInt16 { + QInt16() : value(0) {} + QInt16(const int16_t v) : value(v) {} + QInt16(const QInt32 v); + operator int() const { return static_cast(value); } + + int16_t value; +}; + +struct QUInt16 { + QUInt16() : value(0) {} + QUInt16(const uint16_t v) : value(v) {} + QUInt16(const QInt32 v); + operator int() const { return static_cast(value); } + + uint16_t value; +}; + +struct QInt32 { + QInt32() : value(0) {} + QInt32(const int8_t v) : value(v) {} + QInt32(const int32_t v) : value(v) {} + QInt32(const uint32_t v) : value(static_cast(v)) {} + QInt32(const QInt8 v) : value(v.value) {} + QInt32(const float v) : value(static_cast(lrint(v))) {} +#ifdef EIGEN_MAKING_DOCS + // Workaround to fix build on PPC. 
+ QInt32(unsigned long v) : value(v) {} +#endif + + operator float() const { return static_cast(value); } + + int32_t value; +}; + +EIGEN_STRONG_INLINE QInt8::QInt8(const QInt32 v) + : value(static_cast( + v.value > 127 ? 127 : (v.value < -128 ? -128 : v.value))) {} +EIGEN_STRONG_INLINE QUInt8::QUInt8(const QInt32 v) + : value(static_cast(v.value > 255 ? 255 + : (v.value < 0 ? 0 : v.value))) { +} +EIGEN_STRONG_INLINE QInt16::QInt16(const QInt32 v) + : value(static_cast( + v.value > 32767 ? 32767 : (v.value < -32768 ? -32768 : v.value))) {} +EIGEN_STRONG_INLINE QUInt16::QUInt16(const QInt32 v) + : value(static_cast( + v.value > 65535 ? 65535 : (v.value < 0 ? 0 : v.value))) {} + +// Basic widening 8-bit operations: This will be vectorized in future CLs. +EIGEN_STRONG_INLINE QInt32 operator*(const QInt8 a, const QInt8 b) { + return QInt32(static_cast(a.value) * static_cast(b.value)); +} +EIGEN_STRONG_INLINE QInt32 operator*(const QInt8 a, const QUInt8 b) { + return QInt32(static_cast(a.value) * static_cast(b.value)); +} +EIGEN_STRONG_INLINE QInt32 operator+(const QInt8 a, const QInt8 b) { + return QInt32(static_cast(a.value) + static_cast(b.value)); +} +EIGEN_STRONG_INLINE QInt32 operator-(const QInt8 a, const QInt8 b) { + return QInt32(static_cast(a.value) - static_cast(b.value)); +} + +// Basic widening 16-bit operations: This will be vectorized in future CLs. +EIGEN_STRONG_INLINE QInt32 operator*(const QInt16 a, const QInt16 b) { + return QInt32(static_cast(a.value) * static_cast(b.value)); +} +EIGEN_STRONG_INLINE QInt32 operator*(const QInt16 a, const QUInt16 b) { + return QInt32(static_cast(a.value) * static_cast(b.value)); +} +EIGEN_STRONG_INLINE QInt32 operator+(const QInt16 a, const QInt16 b) { + return QInt32(static_cast(a.value) + static_cast(b.value)); +} +EIGEN_STRONG_INLINE QInt32 operator-(const QInt16 a, const QInt16 b) { + return QInt32(static_cast(a.value) - static_cast(b.value)); +} + +// Mixed QInt32 op QInt8 operations. 
This will be vectorized in future CLs. +EIGEN_STRONG_INLINE QInt32 operator+(const QInt32 a, const QInt8 b) { + return QInt32(a.value + static_cast(b.value)); +} +EIGEN_STRONG_INLINE QInt32 operator+(const QInt8 a, const QInt32 b) { + return QInt32(static_cast(a.value) + b.value); +} +EIGEN_STRONG_INLINE QInt32 operator-(const QInt32 a, const QInt8 b) { + return QInt32(a.value - static_cast(b.value)); +} +EIGEN_STRONG_INLINE QInt32 operator-(const QInt8 a, const QInt32 b) { + return QInt32(static_cast(a.value) - b.value); +} +EIGEN_STRONG_INLINE QInt32 operator*(const QInt32 a, const QInt8 b) { + return QInt32(a.value * static_cast(b.value)); +} +EIGEN_STRONG_INLINE QInt32 operator*(const QInt8 a, const QInt32 b) { + return QInt32(static_cast(a.value) * b.value); +} + +// Mixed QInt32 op QInt16 operations. This will be vectorized in future CLs. +EIGEN_STRONG_INLINE QInt32 operator+(const QInt32 a, const QInt16 b) { + return QInt32(a.value + static_cast(b.value)); +} +EIGEN_STRONG_INLINE QInt32 operator+(const QInt16 a, const QInt32 b) { + return QInt32(static_cast(a.value) + b.value); +} +EIGEN_STRONG_INLINE QInt32 operator-(const QInt32 a, const QInt16 b) { + return QInt32(a.value - static_cast(b.value)); +} +EIGEN_STRONG_INLINE QInt32 operator-(const QInt16 a, const QInt32 b) { + return QInt32(static_cast(a.value) - b.value); +} +EIGEN_STRONG_INLINE QInt32 operator*(const QInt32 a, const QInt16 b) { + return QInt32(a.value * static_cast(b.value)); +} +EIGEN_STRONG_INLINE QInt32 operator*(const QInt16 a, const QInt32 b) { + return QInt32(static_cast(a.value) * b.value); +} + +// Mixed QInt32 op QUInt8 operations. This will be vectorized in future CLs. 
+EIGEN_STRONG_INLINE QInt32 operator+(const QInt32 a, const QUInt8 b) { + return QInt32(a.value + static_cast(b.value)); +} +EIGEN_STRONG_INLINE QInt32 operator+(const QUInt8 a, const QInt32 b) { + return QInt32(static_cast(a.value) + b.value); +} +EIGEN_STRONG_INLINE QInt32 operator-(const QInt32 a, const QUInt8 b) { + return QInt32(a.value - static_cast(b.value)); +} +EIGEN_STRONG_INLINE QInt32 operator-(const QUInt8 a, const QInt32 b) { + return QInt32(static_cast(a.value) - b.value); +} +EIGEN_STRONG_INLINE QInt32 operator*(const QInt32 a, const QUInt8 b) { + return QInt32(a.value * static_cast(b.value)); +} +EIGEN_STRONG_INLINE QInt32 operator*(const QUInt8 a, const QInt32 b) { + return QInt32(static_cast(a.value) * b.value); +} + +// Mixed QInt32 op QUInt16 operations. This will be vectorized in future CLs. +EIGEN_STRONG_INLINE QInt32 operator+(const QInt32 a, const QUInt16 b) { + return QInt32(a.value + static_cast(b.value)); +} +EIGEN_STRONG_INLINE QInt32 operator+(const QUInt16 a, const QInt32 b) { + return QInt32(static_cast(a.value) + b.value); +} +EIGEN_STRONG_INLINE QInt32 operator-(const QInt32 a, const QUInt16 b) { + return QInt32(a.value - static_cast(b.value)); +} +EIGEN_STRONG_INLINE QInt32 operator-(const QUInt16 a, const QInt32 b) { + return QInt32(static_cast(a.value) - b.value); +} +EIGEN_STRONG_INLINE QInt32 operator*(const QInt32 a, const QUInt16 b) { + return QInt32(a.value * static_cast(b.value)); +} +EIGEN_STRONG_INLINE QInt32 operator*(const QUInt16 a, const QInt32 b) { + return QInt32(static_cast(a.value) * b.value); +} + +// Basic arithmetic operations on QInt32, which behaves like a int32_t. 
+EIGEN_STRONG_INLINE QInt32 operator+(const QInt32 a, const QInt32 b) { + return a.value + b.value; +} +EIGEN_STRONG_INLINE QInt32 operator-(const QInt32 a, const QInt32 b) { + return a.value - b.value; +} +EIGEN_STRONG_INLINE QInt32 operator*(const QInt32 a, const QInt32 b) { + return a.value * b.value; +} +EIGEN_STRONG_INLINE QInt32 operator/(const QInt32 a, const QInt32 b) { + return a.value / b.value; +} +EIGEN_STRONG_INLINE QInt32& operator+=(QInt32& a, const QInt32 b) { + a.value += b.value; + return a; +} +EIGEN_STRONG_INLINE QInt32& operator-=(QInt32& a, const QInt32 b) { + a.value -= b.value; + return a; +} +EIGEN_STRONG_INLINE QInt32& operator*=(QInt32& a, const QInt32 b) { + a.value *= b.value; + return a; +} +EIGEN_STRONG_INLINE QInt32& operator/=(QInt32& a, const QInt32 b) { + a.value /= b.value; + return a; +} +EIGEN_STRONG_INLINE QInt32 operator-(const QInt32 a) { return -a.value; } + +// Scaling QInt32 by double. We do the arithmetic in double because +// float only has 23 bits of mantissa, so casting QInt32 to float might reduce +// accuracy by discarding up to 7 (least significant) bits. 
+EIGEN_STRONG_INLINE QInt32 operator*(const QInt32 a, const double b) { + return static_cast(lrint(static_cast(a.value) * b)); +} +EIGEN_STRONG_INLINE QInt32 operator*(const double a, const QInt32 b) { + return static_cast(lrint(a * static_cast(b.value))); +} +EIGEN_STRONG_INLINE QInt32& operator*=(QInt32& a, const double b) { + a.value = static_cast(lrint(static_cast(a.value) * b)); + return a; +} + +// Comparisons +EIGEN_STRONG_INLINE bool operator==(const QInt8 a, const QInt8 b) { + return a.value == b.value; +} +EIGEN_STRONG_INLINE bool operator==(const QUInt8 a, const QUInt8 b) { + return a.value == b.value; +} +EIGEN_STRONG_INLINE bool operator==(const QInt16 a, const QInt16 b) { + return a.value == b.value; +} +EIGEN_STRONG_INLINE bool operator==(const QUInt16 a, const QUInt16 b) { + return a.value == b.value; +} +EIGEN_STRONG_INLINE bool operator==(const QInt32 a, const QInt32 b) { + return a.value == b.value; +} + +EIGEN_STRONG_INLINE bool operator<(const QInt8 a, const QInt8 b) { + return a.value < b.value; +} +EIGEN_STRONG_INLINE bool operator<(const QUInt8 a, const QUInt8 b) { + return a.value < b.value; +} +EIGEN_STRONG_INLINE bool operator<(const QInt16 a, const QInt16 b) { + return a.value < b.value; +} +EIGEN_STRONG_INLINE bool operator<(const QUInt16 a, const QUInt16 b) { + return a.value < b.value; +} +EIGEN_STRONG_INLINE bool operator<(const QInt32 a, const QInt32 b) { + return a.value < b.value; +} + +EIGEN_STRONG_INLINE bool operator>(const QInt8 a, const QInt8 b) { + return a.value > b.value; +} +EIGEN_STRONG_INLINE bool operator>(const QUInt8 a, const QUInt8 b) { + return a.value > b.value; +} +EIGEN_STRONG_INLINE bool operator>(const QInt16 a, const QInt16 b) { + return a.value > b.value; +} +EIGEN_STRONG_INLINE bool operator>(const QUInt16 a, const QUInt16 b) { + return a.value > b.value; +} +EIGEN_STRONG_INLINE bool operator>(const QInt32 a, const QInt32 b) { + return a.value > b.value; +} + +EIGEN_STRONG_INLINE std::ostream& 
operator<<(std::ostream& os, QInt8 a) { + os << static_cast(a.value); + return os; +} +EIGEN_STRONG_INLINE std::ostream& operator<<(std::ostream& os, QUInt8 a) { + os << static_cast(a.value); + return os; +} +EIGEN_STRONG_INLINE std::ostream& operator<<(std::ostream& os, QInt16 a) { + os << static_cast(a.value); + return os; +} +EIGEN_STRONG_INLINE std::ostream& operator<<(std::ostream& os, QUInt16 a) { + os << static_cast(a.value); + return os; +} +EIGEN_STRONG_INLINE std::ostream& operator<<(std::ostream& os, QInt32 a) { + os << a.value; + return os; +} + +} // namespace Eigen + +#endif // TENSORFLOW_TSL_FRAMEWORK_FIXEDPOINT_TYPES_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/metrics.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/metrics.h new file mode 100644 index 0000000000000000000000000000000000000000..0c087f0bd3922a8f35e6d31f63be3ecb367d4acb --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/metrics.h @@ -0,0 +1,30 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_METRICS_H_ +#define TENSORFLOW_TSL_FRAMEWORK_METRICS_H_ + +#include + +namespace tsl { +namespace metrics { + +// Updates the metrics stored about time BFC allocator spents during delay. 
+void UpdateBfcAllocatorDelayTime(const uint64_t delay_usecs); + +} // namespace metrics +} // namespace tsl + +#endif // TENSORFLOW_TSL_FRAMEWORK_METRICS_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/numeric_types.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/numeric_types.h new file mode 100644 index 0000000000000000000000000000000000000000..fc5c7a6c62144e8a7c7185ec31915792091a21f1 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/numeric_types.h @@ -0,0 +1,74 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_NUMERIC_TYPES_H_ +#define TENSORFLOW_TSL_FRAMEWORK_NUMERIC_TYPES_H_ + +#include + +#include "tsl/framework/fixedpoint_types.h" +#include "tsl/platform/types.h" + +namespace tsl { + +// Single precision complex. +typedef std::complex complex64; +// Double precision complex. +typedef std::complex complex128; + +// We use Eigen's QInt implementations for our quantized int types. 
+typedef Eigen::QInt8 qint8; +typedef Eigen::QUInt8 quint8; +typedef Eigen::QInt32 qint32; +typedef Eigen::QInt16 qint16; +typedef Eigen::QUInt16 quint16; + +} // namespace tsl + +static inline tsl::bfloat16 FloatToBFloat16(float float_val) { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + return *reinterpret_cast( + reinterpret_cast(&float_val)); +#else + return *reinterpret_cast( + &(reinterpret_cast(&float_val)[1])); +#endif +} + +namespace Eigen { +template <> +struct NumTraits : GenericNumTraits { + enum { + RequireInitialization = 1, + ReadCost = HugeCost, + AddCost = HugeCost, + MulCost = HugeCost + }; + + static constexpr inline int digits10() { return 0; } + static constexpr inline int max_digits10() { return 0; } + + private: + static inline tsl::tstring epsilon(); + static inline tsl::tstring dummy_precision(); + static inline tsl::tstring lowest(); + static inline tsl::tstring highest(); + static inline tsl::tstring infinity(); + static inline tsl::tstring quiet_NaN(); +}; + +} // namespace Eigen + +#endif // TENSORFLOW_TSL_FRAMEWORK_NUMERIC_TYPES_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/shared_counter.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/shared_counter.h new file mode 100644 index 0000000000000000000000000000000000000000..7fb584ca8a3cbef90cba76d0613a3c9cfcf1149b --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/shared_counter.h @@ -0,0 +1,35 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_TSL_FRAMEWORK_SHARED_COUNTER_H_ +#define TENSORFLOW_TSL_FRAMEWORK_SHARED_COUNTER_H_ + +#include + +#include "tsl/platform/types.h" + +namespace tsl { +// A lightweight thread-safe monotone counter for establishing +// temporal ordering. +class SharedCounter { + public: + int64_t get() { return value_; } + int64_t next() { return ++value_; } + + private: + std::atomic value_{0}; +}; + +} // namespace tsl +#endif // TENSORFLOW_TSL_FRAMEWORK_SHARED_COUNTER_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/tracking_allocator.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/tracking_allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..c31cb96e9aaac3f2bcd1ba58a32ad20062db8154 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/tracking_allocator.h @@ -0,0 +1,137 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_TRACKING_ALLOCATOR_H_ +#define TENSORFLOW_TSL_FRAMEWORK_TRACKING_ALLOCATOR_H_ + +#include + +#include "tsl/framework/allocator.h" +#include "tsl/lib/gtl/inlined_vector.h" +#include "tsl/platform/mutex.h" +#include "tsl/platform/thread_annotations.h" +#include "tsl/platform/types.h" + +namespace tsl { + +// TrackingAllocator is a wrapper for an Allocator. It keeps a running +// count of the number of bytes allocated through the wrapper. It is +// used by the Executor to "charge" allocations to particular Op +// executions. Each Op gets a separate TrackingAllocator wrapper +// around the underlying allocator. +// +// The implementation assumes the invariant that all calls to +// AllocateRaw by an Op (or work items spawned by the Op) will occur +// before the Op's Compute method returns. Thus the high watermark is +// established once Compute returns. +// +// DeallocateRaw can be called long after the Op has finished, +// e.g. when an output tensor is deallocated, and the wrapper cannot +// be deleted until the last of these calls has occurred. The +// TrackingAllocator keeps track of outstanding calls using a +// reference count, and deletes itself once the last call has been +// received and the high watermark has been retrieved. 
+struct AllocRecord { + AllocRecord(int64_t a_btyes, int64_t a_micros) + : alloc_bytes(a_btyes), alloc_micros(a_micros) {} + AllocRecord() : AllocRecord(0, 0) {} + + int64_t alloc_bytes; + int64_t alloc_micros; +}; + +class TrackingAllocator : public Allocator { + public: + explicit TrackingAllocator(Allocator* allocator, bool track_ids); + std::string Name() override { return allocator_->Name(); } + void* AllocateRaw(size_t alignment, size_t num_bytes) override { + return AllocateRaw(alignment, num_bytes, AllocationAttributes()); + } + void* AllocateRaw(size_t alignment, size_t num_bytes, + const AllocationAttributes& allocation_attr) override; + void DeallocateRaw(void* ptr) override; + bool TracksAllocationSizes() const override; + size_t RequestedSize(const void* ptr) const override; + size_t AllocatedSize(const void* ptr) const override; + int64_t AllocationId(const void* ptr) const override; + absl::optional GetStats() override; + bool ClearStats() override; + + AllocatorMemoryType GetMemoryType() const override { + return allocator_->GetMemoryType(); + } + + // If the underlying allocator tracks allocation sizes, this returns + // a tuple where the first value is the total number of bytes + // allocated through this wrapper, the second value is the high + // watermark of bytes allocated through this wrapper and the third value is + // the allocated bytes through this wrapper that are still alive. If the + // underlying allocator does not track allocation sizes the first + // value is the total number of bytes requested through this wrapper + // and the second and the third are 0. + // + std::tuple GetSizes(); + // After GetRecordsAndUnRef is called, the only further calls allowed + // on this wrapper are calls to DeallocateRaw with pointers that + // were allocated by this wrapper and have not yet been + // deallocated. After this call completes and all allocated pointers + // have been deallocated the wrapper will delete itself. 
+ gtl::InlinedVector GetRecordsAndUnRef(); + // Returns a copy of allocation records collected so far. + gtl::InlinedVector GetCurrentRecords(); + + protected: + ~TrackingAllocator() override {} + + private: + bool UnRef() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_); + + Allocator* allocator_; // not owned. + mutable mutex mu_; + // the number of calls to AllocateRaw that have not yet been matched + // by a corresponding call to DeAllocateRaw, plus 1 if the Executor + // has not yet read out the high watermark. + int ref_ TF_GUARDED_BY(mu_); + // the current number of outstanding bytes that have been allocated + // by this wrapper, or 0 if the underlying allocator does not track + // allocation sizes. + size_t allocated_ TF_GUARDED_BY(mu_); + // the maximum number of outstanding bytes that have been allocated + // by this wrapper, or 0 if the underlying allocator does not track + // allocation sizes. + size_t high_watermark_ TF_GUARDED_BY(mu_); + // the total number of bytes that have been allocated by this + // wrapper if the underlying allocator tracks allocation sizes, + // otherwise the total number of bytes that have been requested by + // this allocator. + size_t total_bytes_ TF_GUARDED_BY(mu_); + + gtl::InlinedVector allocations_ TF_GUARDED_BY(mu_); + + // Track allocations locally if requested in the constructor and the + // underlying allocator doesn't already do it for us. 
+ const bool track_sizes_locally_; + struct Chunk { + size_t requested_size; + size_t allocated_size; + int64_t allocation_id; + }; + std::unordered_map in_use_ TF_GUARDED_BY(mu_); + int64_t next_allocation_id_ TF_GUARDED_BY(mu_); +}; + +} // end namespace tsl + +#endif // TENSORFLOW_TSL_FRAMEWORK_TRACKING_ALLOCATOR_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/type_traits.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/type_traits.h new file mode 100644 index 0000000000000000000000000000000000000000..e96334d0027f2bcd3de680347384d6fa640c4efd --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/framework/type_traits.h @@ -0,0 +1,109 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_FRAMEWORK_TYPE_TRAITS_H_ +#define TENSORFLOW_TSL_FRAMEWORK_TYPE_TRAITS_H_ + +#include +#include +#include + +#include "tsl/framework/numeric_types.h" +#include "tsl/platform/types.h" + +namespace tsl { + +// Functions to define quantization attribute of types. +struct true_type { + static constexpr bool value = true; +}; +struct false_type { + static constexpr bool value = false; +}; + +// Default is_quantized is false. +template +struct is_quantized : false_type {}; + +// Specialize the quantized types. 
+template <> +struct is_quantized : true_type {}; +template <> +struct is_quantized : true_type {}; +template <> +struct is_quantized : true_type {}; +template <> +struct is_quantized : true_type {}; +template <> +struct is_quantized : true_type {}; + +// Default is_complex is false. +template +struct is_complex : false_type {}; + +// Specialize std::complex and std::complex types. +template <> +struct is_complex> : true_type {}; +template <> +struct is_complex> : true_type {}; + +// is_simple_type::value if T[] can be safely constructed and destructed +// without running T() and ~T(). We do not use std::is_trivial +// directly because std::complex and std::complex are +// not trivial, but their arrays can be constructed and destructed +// without running their default ctors and dtors. +template +struct is_simple_type { + static constexpr bool value = + std::is_trivial::value || std::is_same::value || + std::is_same::value || std::is_same::value || + is_quantized::value || std::is_same::value || + std::is_same::value || + std::is_same::value || + std::is_same::value || std::is_same::value || + std::is_same::value; +}; + +} // namespace tsl + +// Define numeric limits for our quantized as subclasses of the +// standard types. +namespace std { +template <> +class numeric_limits : public numeric_limits {}; +template <> +class numeric_limits : public numeric_limits {}; +template <> +class numeric_limits : public numeric_limits {}; +template <> +class numeric_limits : public numeric_limits {}; +template <> +class numeric_limits : public numeric_limits {}; + +// Specialize is_signed for quantized types. 
+template <> +struct is_signed : public is_signed {}; +template <> +struct is_signed : public is_signed {}; +template <> +struct is_signed : public is_signed {}; +template <> +struct is_signed : public is_signed {}; +template <> +struct is_signed : public is_signed {}; + +} // namespace std + +#endif // TENSORFLOW_TSL_FRAMEWORK_TYPE_TRAITS_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/connected_traceme.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/connected_traceme.h new file mode 100644 index 0000000000000000000000000000000000000000..e6e5bfed1493cc36b08fbc67a0e686c10e653596 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/connected_traceme.h @@ -0,0 +1,118 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_TSL_PROFILER_LIB_CONNECTED_TRACEME_H_ +#define TENSORFLOW_TSL_PROFILER_LIB_CONNECTED_TRACEME_H_ + +#include +#include + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "tsl/profiler/lib/context_types.h" +#include "tsl/profiler/lib/traceme.h" +#include "tsl/profiler/lib/traceme_encode.h" + +namespace tsl { +namespace profiler { + +/* + * TraceMeProducer and TraceMeConsumer are used to correlate TraceMe events on + * different threads. 
TraceMeProducer generates the context information to be + * passed to TraceMeConsumer, which consists of the context id and optionally + * the context type. They may be provided by the user. Then, the events of the + * same context information can be correlated during the analysis. + * + * Example Usages: + * (1) Using the user-provided context type and id. The user is responsible for + * providing the same context type and id to TraceMeProducer and + * TraceMeConsumer. + * [Producer Thread] + * // user_context_id is provided by the user. + * TraceMeProducer producer( + * [&] { return TraceMeEncode("op_dispatch", {{"op_type", "matmul"}}); }, + * ContextType::kTfExecutor, user_context_id); + * [Consumer Thread] + * // user_context_id is provided by the user. + * TraceMeConsumer consumer( + * [&] { return "op_execute"; }, ContextType::kTfExecutor, user_context_id); + * + * (2) Using the user-provided context type and generic id. The user is + * responsible for passing the TraceMeProducer's context id to + * TraceMeConsumer as well as providing the same context type to + * TraceMeProducer and TraceMeConsumer. + * [Producer Thread] + * TraceMeProducer producer( + * [&] { return TraceMeEncode("op_dispatch", {{"op_type", "matmul"}}); }, + * ContextType::kTfExecutor); + * context_id = producer.GetContextId(); + * // Pass context_id to the consumer thread. + * [Consumer Thread] + * // context_id is passed from the producer thread. + * TraceMeConsumer consumer( + * [&] { return "op_execute"; }, ContextType::kTfExecutor, context_id); + * + * (3) Using the generic context information. The user is responsible for + * passing the TraceMeProducer's context id to TraceMeConsumer. + * [Producer Thread] + * TraceMeProducer producer( + * [&] { return TraceMeEncode("op_dispatch", {{"op_type", "matmul"}}); }); + * context_id = producer.GetContextId(); + * // Pass context_id to the consumer thread. + * [Consumer Thread] + * // context_id is passed from the producer thread. 
+ * TraceMeConsumer consumer([&] { return "op_execute"; }, context_id); + */ +class TraceMeProducer : public TraceMe { + public: + template + explicit TraceMeProducer(NameT&& name, + ContextType context_type = ContextType::kGeneric, + absl::optional context_id = absl::nullopt, + int level = 2) + : TraceMe(std::forward(name), level), + context_id_(context_id.has_value() ? context_id.value() + : TraceMe::NewActivityId()) { + AppendMetadata([&] { + return TraceMeEncode({{"_pt", context_type}, {"_p", context_id_}}); + }); + } + + uint64 GetContextId() const { return context_id_; } + + private: + uint64 context_id_; +}; + +class TraceMeConsumer : public TraceMe { + public: + template + TraceMeConsumer(NameT&& name, ContextType context_type, uint64 context_id, + int level = 2) + : TraceMe(std::forward(name), level) { + AppendMetadata([&] { + return TraceMeEncode({{"_ct", context_type}, {"_c", context_id}}); + }); + } + + template + TraceMeConsumer(NameT&& name, uint64 context_id, int level = 2) + : TraceMeConsumer(std::forward(name), ContextType::kGeneric, + context_id, level) {} +}; + +} // namespace profiler +} // namespace tsl + +#endif // TENSORFLOW_TSL_PROFILER_LIB_CONNECTED_TRACEME_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/context_types.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/context_types.h new file mode 100644 index 0000000000000000000000000000000000000000..621f35462fdae2028f961eb74fb6dd32d2c77b33 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/context_types.h @@ -0,0 +1,59 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_TSL_PROFILER_LIB_CONTEXT_TYPES_H_ +#define TENSORFLOW_TSL_PROFILER_LIB_CONTEXT_TYPES_H_ + +#include + +namespace tsl { +namespace profiler { + +// Note: Please add new context type after all existing ones. +enum class ContextType : int { + kGeneric = 0, + kLegacy, + kTfExecutor, + kTfrtExecutor, + kSharedBatchScheduler, + kPjRt, + kAdaptiveSharedBatchScheduler, + kTfrtTpuRuntime, + kTpuEmbeddingEngine, + kGpuLaunch, + kBatcher, + kTpuStream, + kTpuLaunch, + kPathwaysExecutor, + kPjrtLibraryCall, + kLastContextType = ContextType::kTpuLaunch, +}; + +// In XFlow we encode context type as flow category as 6 bits. 
+static_assert(static_cast(ContextType::kLastContextType) < 64, + "Should have less than 64 categories."); + +const char* GetContextTypeString(ContextType context_type); + +inline ContextType GetSafeContextType(uint32_t context_type) { + if (context_type > static_cast(ContextType::kLastContextType)) { + return ContextType::kGeneric; + } + return static_cast(context_type); +} + +} // namespace profiler +} // namespace tsl + +#endif // TENSORFLOW_TSL_PROFILER_LIB_CONTEXT_TYPES_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/nvtx_utils.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/nvtx_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..e3eaaa08af79e8e932b212b2792ba5431034485f --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/nvtx_utils.h @@ -0,0 +1,105 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PROFILER_LIB_NVTX_UTILS_H_ +#define TENSORFLOW_TSL_PROFILER_LIB_NVTX_UTILS_H_ + +#include + +#include "absl/strings/string_view.h" +#include "tsl/platform/logging.h" +#include "tsl/platform/macros.h" + +#if GOOGLE_CUDA +#include "nvtx3/nvToolsExt.h" +#else +// Some typedef to help build without NVTX. 
+typedef void* nvtxEventAttributes_t; +typedef void* nvtxDomainHandle_t; +typedef void* nvtxStringHandle_t; +#endif + +namespace tsl { +namespace profiler { +namespace nvtx { + +// A helper function that return the domains to use if NVTX profiling +// is enabled. +inline std::optional GetNVTXDomain() { +#if GOOGLE_CUDA + static nvtxDomainHandle_t domain; + static bool is_enabled = [] { + bool _is_enabled = false; + // Force NVTX marker if a tool triggered the profiler. + domain = nvtxDomainCreateA("TSL"); + if (domain) { + _is_enabled = true; + } + VLOG(1) << "Is NVTX marker enabled? " << _is_enabled; + return _is_enabled; + }(); + if (is_enabled) return domain; +#endif + return {}; +} + +// A helper function to decide whether to enable CUDA NVTX profiling ranges. +inline bool RangesEnabled() { +#if GOOGLE_CUDA + return GetNVTXDomain().has_value(); +#else + return false; +#endif +} + +// Two types of NVTX range annotation are supported, the older/simpler option +// is to use std::string and have the NVTX implementation copy a C-style +// string every time. The other option is to pass a struct implementing two +// methods: +// +// std::string_view Title() const; +// nvtxStringHandle_t NvtxRegisteredTitle() const; +// +// in which case NvtxRegisteredTitle() will be used when starting NVTX ranges, +// avoiding this string copy. +// The Title() method is needed because AnnotationStack::PushAnnotation(...) is +// the backend for some annotations when NVTX is not enabled, and it does not +// recognise registered strings. has_annotation_api_v +// distinguishes between the two types of annotation. 
+template +inline constexpr bool has_annotation_api_v = + !std::is_same_v; + +template +void RangePush(nvtxDomainHandle_t domain, const AnnotationType& annotation) { +#if GOOGLE_CUDA + nvtxEventAttributes_t attrs{}; + attrs.version = NVTX_VERSION; + attrs.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; + if constexpr (has_annotation_api_v>) { + attrs.messageType = NVTX_MESSAGE_TYPE_REGISTERED; + attrs.message.registered = annotation.NvtxRegisteredTitle(); + } else { + attrs.messageType = NVTX_MESSAGE_TYPE_ASCII; + attrs.message.ascii = annotation.c_str(); + } + ::nvtxDomainRangePushEx(domain, &attrs); +#endif +} + +} // namespace nvtx +} // namespace profiler +} // namespace tsl +#endif // TENSORFLOW_TSL_PROFILER_LIB_NVTX_UTILS_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/profiler_factory.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/profiler_factory.h new file mode 100644 index 0000000000000000000000000000000000000000..7e6c64c39a702f4b98e4638d364de8f7c59d07b3 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/profiler_factory.h @@ -0,0 +1,47 @@ +/* Copyright 2019 The TensorFlow Authors All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_TSL_PROFILER_LIB_PROFILER_FACTORY_H_ +#define TENSORFLOW_TSL_PROFILER_LIB_PROFILER_FACTORY_H_ + +#include +#include +#include + +#include "tsl/profiler/lib/profiler_interface.h" +#include "tsl/profiler/protobuf/profiler_options.pb.h" + +namespace tsl { +namespace profiler { + +// A ProfilerFactory returns an instance of ProfilerInterface if ProfileOptions +// require it. Otherwise, it might return nullptr. +using ProfilerFactory = std::function( + const tensorflow::ProfileOptions&)>; + +// Registers a profiler factory. Should be invoked at most once per factory. +void RegisterProfilerFactory(ProfilerFactory factory); + +// Invokes all registered profiler factories with the given options, and +// returns the instantiated (non-null) profiler interfaces. +std::vector> CreateProfilers( + const tensorflow::ProfileOptions& options); + +// For testing only. +void ClearRegisteredProfilersForTest(); + +} // namespace profiler +} // namespace tsl + +#endif // TENSORFLOW_TSL_PROFILER_LIB_PROFILER_FACTORY_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/profiler_interface.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/profiler_interface.h new file mode 100644 index 0000000000000000000000000000000000000000..8aa693cb844618f30de6a9441899d26132a81c94 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/profiler_interface.h @@ -0,0 +1,49 @@ +/* Copyright 2016 The TensorFlow Authors All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_TSL_PROFILER_LIB_PROFILER_INTERFACE_H_ +#define TENSORFLOW_TSL_PROFILER_LIB_PROFILER_INTERFACE_H_ + +#include "tsl/platform/status.h" +#include "tsl/profiler/protobuf/xplane.pb.h" + +namespace tsl { +namespace profiler { + +// Interface for tensorflow profiler plugins. +// +// ProfileSession calls each of these methods at most once per instance, and +// implementations can rely on that guarantee for simplicity. +// +// Thread-safety: Implementations are only required to be go/thread-compatible. +// ProfileSession is go/thread-safe and synchronizes access to ProfilerInterface +// instances. +class ProfilerInterface { + public: + virtual ~ProfilerInterface() = default; + + // Starts profiling. + virtual Status Start() = 0; + + // Stops profiling. + virtual Status Stop() = 0; + + // Saves collected profile data into XSpace. 
+ virtual Status CollectData(tensorflow::profiler::XSpace* space) = 0; +}; + +} // namespace profiler +} // namespace tsl + +#endif // TENSORFLOW_TSL_PROFILER_LIB_PROFILER_INTERFACE_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/profiler_lock.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/profiler_lock.h new file mode 100644 index 0000000000000000000000000000000000000000..aead8353b2304fcaefc2413276d22795914ca71e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/profiler_lock.h @@ -0,0 +1,73 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_TSL_PROFILER_LIB_PROFILER_LOCK_H_ +#define TENSORFLOW_TSL_PROFILER_LIB_PROFILER_LOCK_H_ + +#include + +#include "tsl/platform/statusor.h" + +namespace tsl { +namespace profiler { + +constexpr absl::string_view kProfilerLockContention = + "Another profiling session active."; + +// Handle for the profiler lock. At most one instance of this class, the +// "active" instance, owns the profiler lock. +class ProfilerLock { + public: + // Returns true if the process has active profiling session. + static bool HasActiveSession(); + + // Acquires the profiler lock if no other profiler session is currently + // active. 
+ static StatusOr Acquire(); + + // Default constructor creates an inactive instance. + ProfilerLock() = default; + + // Non-copyable. + ProfilerLock(const ProfilerLock&) = delete; + ProfilerLock& operator=(const ProfilerLock&) = delete; + + // Movable. + ProfilerLock(ProfilerLock&& other) + : active_(std::exchange(other.active_, false)) {} + ProfilerLock& operator=(ProfilerLock&& other) { + active_ = std::exchange(other.active_, false); + return *this; + } + + ~ProfilerLock() { ReleaseIfActive(); } + + // Allow creating another active instance. + void ReleaseIfActive(); + + // Returns true if this is the active instance. + bool Active() const { return active_; } + + private: + // Explicit constructor allows creating an active instance, private so it can + // only be called by Acquire. + explicit ProfilerLock(bool active) : active_(active) {} + + bool active_ = false; +}; + +} // namespace profiler +} // namespace tsl + +#endif // TENSORFLOW_TSL_PROFILER_LIB_PROFILER_LOCK_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/profiler_session.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/profiler_session.h new file mode 100644 index 0000000000000000000000000000000000000000..424e5c87d0b4ef760dbfa7757ee8932cd7444d70 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/profiler_session.h @@ -0,0 +1,93 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_TSL_PROFILER_LIB_PROFILER_SESSION_H_ +#define TENSORFLOW_TSL_PROFILER_LIB_PROFILER_SESSION_H_ + +#include +#include +#include + +#include "tsl/platform/mutex.h" +#include "tsl/platform/platform.h" +#include "tsl/platform/status.h" +#include "tsl/platform/thread_annotations.h" +#include "tsl/platform/types.h" +#include "tsl/profiler/protobuf/profiler_options.pb.h" +#include "tsl/profiler/protobuf/xplane.pb.h" + +#if !defined(IS_MOBILE_PLATFORM) +#include "tsl/profiler/lib/profiler_interface.h" +#include "tsl/profiler/lib/profiler_lock.h" +#endif + +namespace tsl { + +// A profiler which will start profiling when creating the object and will stop +// when either the object is destroyed or CollectData is called. +// Multiple instances can be created, but at most one of them will profile. +// Status() will return OK only for the instance that is profiling. +// Thread-safety: ProfilerSession is thread-safe. +class ProfilerSession { + public: + // Creates a ProfilerSession and starts profiling. + static std::unique_ptr Create( + const tensorflow::ProfileOptions& options); + + static tensorflow::ProfileOptions DefaultOptions() { + tensorflow::ProfileOptions options; + options.set_version(1); + options.set_device_tracer_level(1); + options.set_host_tracer_level(2); + options.set_device_type(tensorflow::ProfileOptions::UNSPECIFIED); + options.set_python_tracer_level(0); + options.set_enable_hlo_proto(true); + options.set_include_dataset_ops(true); + return options; + } + + // Deletes an existing Profiler and enables starting a new one. + ~ProfilerSession(); + + tsl::Status Status() TF_LOCKS_EXCLUDED(mutex_); + + // Collects profile data into XSpace. 
+ tsl::Status CollectData(tensorflow::profiler::XSpace* space) + TF_LOCKS_EXCLUDED(mutex_); + + private: + // Constructs an instance of the class and starts profiling + explicit ProfilerSession(const tensorflow::ProfileOptions& options); + + // ProfilerSession is neither copyable or movable. + ProfilerSession(const ProfilerSession&) = delete; + ProfilerSession& operator=(const ProfilerSession&) = delete; + +#if !defined(IS_MOBILE_PLATFORM) + // Collects profile data into XSpace without post-processsing. + tsl::Status CollectDataInternal(tensorflow::profiler::XSpace* space); + + profiler::ProfilerLock profiler_lock_ TF_GUARDED_BY(mutex_); + + std::unique_ptr profilers_ TF_GUARDED_BY(mutex_); + + uint64 start_time_ns_; + tensorflow::ProfileOptions options_; +#endif + tsl::Status status_ TF_GUARDED_BY(mutex_); + mutex mutex_; +}; + +} // namespace tsl +#endif // TENSORFLOW_TSL_PROFILER_LIB_PROFILER_SESSION_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/scoped_annotation.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/scoped_annotation.h new file mode 100644 index 0000000000000000000000000000000000000000..b07ae6c8947a99c530323f75f9279d0f3abece4b --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/scoped_annotation.h @@ -0,0 +1,159 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_TSL_PROFILER_LIB_SCOPED_ANNOTATION_H_ +#define TENSORFLOW_TSL_PROFILER_LIB_SCOPED_ANNOTATION_H_ + +#include + +#include +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "tsl/platform/macros.h" +#include "tsl/platform/types.h" + +#if !defined(IS_MOBILE_PLATFORM) +#include "tsl/profiler/backends/cpu/annotation_stack.h" +#include "tsl/profiler/lib/nvtx_utils.h" +#endif + +namespace tsl { +namespace profiler { + +// Adds an annotation to all activities for the duration of the instance +// lifetime through the currently registered TraceCollector. +// +// Usage: { +// ScopedAnnotation annotation("my kernels"); +// Kernel1<<>>; +// LaunchKernel2(); // Launches a CUDA kernel. +// } +// This will add 'my kernels' to both kernels in the profiler UI +class ScopedAnnotation { + public: + explicit ScopedAnnotation(absl::string_view name) { +#if !defined(IS_MOBILE_PLATFORM) +#if GOOGLE_CUDA + std::optional domain = + tsl::profiler::nvtx::GetNVTXDomain(); + if (TF_PREDICT_FALSE(domain.has_value())) { + tsl::profiler::nvtx::RangePush(domain.value(), std::string{name}); + } else // NOLINT +#endif + if (TF_PREDICT_FALSE(AnnotationStack::IsEnabled())) { + old_length_ = AnnotationStack::PushAnnotation(name); + } +#endif + } + + explicit ScopedAnnotation(const char* name) + : ScopedAnnotation(absl::string_view(name)) {} + + explicit ScopedAnnotation(const string& name) { +#if !defined(IS_MOBILE_PLATFORM) +#if GOOGLE_CUDA + std::optional domain = + tsl::profiler::nvtx::GetNVTXDomain(); + if (TF_PREDICT_FALSE(domain.has_value())) { + tsl::profiler::nvtx::RangePush(domain.value(), name); + } else // NOLINT +#endif + if (TF_PREDICT_FALSE(AnnotationStack::IsEnabled())) { + old_length_ = AnnotationStack::PushAnnotation(name); + } +#endif + } + + explicit ScopedAnnotation(string&& name) { +#if !defined(IS_MOBILE_PLATFORM) +#if GOOGLE_CUDA + 
std::optional domain = + tsl::profiler::nvtx::GetNVTXDomain(); + if (TF_PREDICT_FALSE(domain.has_value())) { + tsl::profiler::nvtx::RangePush(domain.value(), name); + } else // NOLINT +#endif + if (TF_PREDICT_FALSE(AnnotationStack::IsEnabled())) { + old_length_ = AnnotationStack::PushAnnotation(std::move(name)); + } +#endif + } + + template + explicit ScopedAnnotation(NameGeneratorT name_generator) { +#if !defined(IS_MOBILE_PLATFORM) +#if GOOGLE_CUDA + std::optional domain = + tsl::profiler::nvtx::GetNVTXDomain(); + if (TF_PREDICT_FALSE(domain.has_value())) { + tsl::profiler::nvtx::RangePush(domain.value(), name_generator()); + } else // NOLINT +#endif + if (TF_PREDICT_FALSE(AnnotationStack::IsEnabled())) { + auto annotation = name_generator(); + if constexpr (tsl::profiler::nvtx::has_annotation_api_v< + std::decay_t>) { + old_length_ = AnnotationStack::PushAnnotation(annotation.Title()); + } else { + old_length_ = AnnotationStack::PushAnnotation(std::move(annotation)); + } + } +#endif + } + + // Pops the name passed in the constructor from the current annotation. + ~ScopedAnnotation() { + // TODO(b/137971921): without this memory fence, two presubmit tests will + // fail probably due to compiler in that presubmit config. + std::atomic_thread_fence(std::memory_order_acquire); +#if !defined(IS_MOBILE_PLATFORM) +#if GOOGLE_CUDA + std::optional domain = + tsl::profiler::nvtx::GetNVTXDomain(); + if (TF_PREDICT_FALSE(domain.has_value())) { + ::nvtxDomainRangePop(domain.value()); + } else // NOLINT +#endif + if (TF_PREDICT_FALSE(old_length_ != kInvalidLength)) { + AnnotationStack::PopAnnotation(old_length_); + } +#endif + } + + static bool IsEnabled() { +#if !defined(IS_MOBILE_PLATFORM) + return AnnotationStack::IsEnabled(); +#else + return false; +#endif + } + + private: + // signals that annotation is disabled at the constructor. 
+ static constexpr size_t kInvalidLength = static_cast(-1); + + ScopedAnnotation(const ScopedAnnotation&) = delete; + void operator=(const ScopedAnnotation&) = delete; + + size_t old_length_ = kInvalidLength; +}; + +} // namespace profiler +} // namespace tsl + +#endif // TENSORFLOW_TSL_PROFILER_LIB_SCOPED_ANNOTATION_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/scoped_annotation_stack.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/scoped_annotation_stack.h new file mode 100644 index 0000000000000000000000000000000000000000..db46f7c99135e4d40913679767dcec7d9abbdd68 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/scoped_annotation_stack.h @@ -0,0 +1,118 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_TSL_PROFILER_LIB_SCOPED_ANNOTATION_STACK_H_ +#define TENSORFLOW_TSL_PROFILER_LIB_SCOPED_ANNOTATION_STACK_H_ + +#include + +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#if !defined(IS_MOBILE_PLATFORM) +#include "tsl/profiler/backends/cpu/annotation_stack.h" +#include "tsl/profiler/lib/nvtx_utils.h" +#endif + +namespace tsl { +namespace profiler { + +// ScopedAnnotation for clients that can't use RAII for managing the lifetime +// of annotations. 
It provides an API similar to the `TraceMe::ActivityStart` +// and `TraceMe::ActivityEnd`. +// +// Usage: +// int64_t id = ScopedAnnotationStack::ActivityStart("foo"); +// foo(); +// ScopedAnnotationStack::ActivityEnd(id); +// +// Prefer a regular `ScopedAnnotation`. The name of this class is a misnomer, +// because it doesn't do any automatic destruction at the scope end, it's just +// for the sake of consistency. +class ScopedAnnotationStack { + static constexpr size_t kInvalidActivity = static_cast(-1); + + public: + static bool IsEnabled() { return AnnotationStack::IsEnabled(); } + + static int64_t ActivityStart(std::string name) { +#if !defined(IS_MOBILE_PLATFORM) +#if GOOGLE_CUDA + std::optional domain = + tsl::profiler::nvtx::GetNVTXDomain(); + if (TF_PREDICT_FALSE(domain.has_value())) { + tsl::profiler::nvtx::RangePush(domain.value(), name); + } else // NOLINT +#endif + if (TF_PREDICT_FALSE(AnnotationStack::IsEnabled())) { + return AnnotationStack::PushAnnotation(std::move(name)); + } +#endif + return kInvalidActivity; + } + + static int64_t ActivityStart(std::string_view name) { + return ActivityStart(std::string(name)); + } + + static int64_t ActivityStart(const char* name) { + return ActivityStart(std::string_view(name)); + } + + template + static int64_t ActivityStart(NameGeneratorT name_generator) { +#if !defined(IS_MOBILE_PLATFORM) +#if GOOGLE_CUDA + std::optional domain = + tsl::profiler::nvtx::GetNVTXDomain(); + if (TF_PREDICT_FALSE(domain.has_value())) { + tsl::profiler::nvtx::RangePush(domain.value(), name_generator()); + } else // NOLINT +#endif + if (TF_PREDICT_FALSE(AnnotationStack::IsEnabled())) { + auto annotation = name_generator(); + if constexpr (tsl::profiler::nvtx::has_annotation_api_v< + std::decay_t>) { + return AnnotationStack::PushAnnotation(annotation.Title()); + } else { + return AnnotationStack::PushAnnotation(std::move(annotation)); + } + } +#endif + return kInvalidActivity; + } + + static void ActivityEnd(int64_t activity_id) { 
+#if !defined(IS_MOBILE_PLATFORM) +#if GOOGLE_CUDA + std::optional domain = + tsl::profiler::nvtx::GetNVTXDomain(); + if (TF_PREDICT_FALSE(domain.has_value())) { + ::nvtxDomainRangePop(domain.value()); + } else // NOLINT +#endif + if (TF_PREDICT_FALSE(activity_id != kInvalidActivity)) { + AnnotationStack::PopAnnotation(activity_id); + } +#endif + } +}; + +} // namespace profiler +} // namespace tsl + +#endif // TENSORFLOW_TSL_PROFILER_LIB_SCOPED_ANNOTATION_STACK_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/scoped_memory_debug_annotation.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/scoped_memory_debug_annotation.h new file mode 100644 index 0000000000000000000000000000000000000000..5f86bf1b2e4ddc634a78a374b6f128e9556ddf1d --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/scoped_memory_debug_annotation.h @@ -0,0 +1,112 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_TSL_PROFILER_LIB_SCOPED_MEMORY_DEBUG_ANNOTATION_H_ +#define TENSORFLOW_TSL_PROFILER_LIB_SCOPED_MEMORY_DEBUG_ANNOTATION_H_ + +#include +#include +#include +#include + +namespace tsl { +namespace profiler { + +// Annotations for memory profiling and debugging purpose. 
+// ScopedMemoryDebugAnnotation will cache the annotations in thread-local +// memory, and some allocators will try to tag allocations with the annotations. +struct MemoryDebugAnnotation { + const char* pending_op_name = nullptr; + int64_t pending_step_id = 0; + const char* pending_region_type = nullptr; + int32_t pending_data_type = 0; + // A lambda function, when invoked, it will generate the string that describe + // the shape of the pending tensor. By default, the TensorShape string is an + // empty string. + std::function pending_shape_func = []() { return ""; }; +}; + +// Wrapper class of MemoryDebugAnnotation for RAII. +class ScopedMemoryDebugAnnotation { + public: + static const MemoryDebugAnnotation& CurrentAnnotation() { + return *ThreadMemoryDebugAnnotation(); + } + + explicit ScopedMemoryDebugAnnotation(const char* op_name) { + MemoryDebugAnnotation* thread_local_annotation = + ThreadMemoryDebugAnnotation(); + last_annotation_ = *thread_local_annotation; + *thread_local_annotation = MemoryDebugAnnotation(); + thread_local_annotation->pending_op_name = op_name; + } + + explicit ScopedMemoryDebugAnnotation(const char* op_name, int64_t step_id) { + MemoryDebugAnnotation* thread_local_annotation = + ThreadMemoryDebugAnnotation(); + last_annotation_ = *thread_local_annotation; + *thread_local_annotation = MemoryDebugAnnotation(); + thread_local_annotation->pending_op_name = op_name; + thread_local_annotation->pending_step_id = step_id; + } + + // This constructor keeps the pending_op_name and pending_step_id from parent + // (if any). Otherwise it overwrites with op_name. 
+ explicit ScopedMemoryDebugAnnotation( + const char* op_name, const char* region_type, int32_t data_type, + std::function&& pending_shape_func) { + MemoryDebugAnnotation* thread_local_annotation = + ThreadMemoryDebugAnnotation(); + last_annotation_ = *thread_local_annotation; + if (!thread_local_annotation->pending_op_name) { + thread_local_annotation->pending_op_name = op_name; + } + thread_local_annotation->pending_region_type = region_type; + thread_local_annotation->pending_data_type = data_type; + thread_local_annotation->pending_shape_func = std::move(pending_shape_func); + } + + explicit ScopedMemoryDebugAnnotation( + const char* op_name, int64_t step_id, const char* region_type, + int32_t data_type, std::function&& pending_shape_func) { + MemoryDebugAnnotation* thread_local_annotation = + ThreadMemoryDebugAnnotation(); + last_annotation_ = *thread_local_annotation; + thread_local_annotation->pending_op_name = op_name; + thread_local_annotation->pending_step_id = step_id; + thread_local_annotation->pending_region_type = region_type; + thread_local_annotation->pending_data_type = data_type; + thread_local_annotation->pending_shape_func = std::move(pending_shape_func); + } + + ~ScopedMemoryDebugAnnotation() { + *ThreadMemoryDebugAnnotation() = last_annotation_; + } + + private: + // Returns a pointer to the MemoryDebugAnnotation for the current thread. + static MemoryDebugAnnotation* ThreadMemoryDebugAnnotation(); + + // Stores the previous values in case the annotations are nested. 
+ MemoryDebugAnnotation last_annotation_; + + ScopedMemoryDebugAnnotation(const ScopedMemoryDebugAnnotation&) = delete; + ScopedMemoryDebugAnnotation& operator=(const ScopedMemoryDebugAnnotation&) = + delete; +}; + +} // namespace profiler +} // namespace tsl + +#endif // TENSORFLOW_TSL_PROFILER_LIB_SCOPED_MEMORY_DEBUG_ANNOTATION_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/traceme.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/traceme.h new file mode 100644 index 0000000000000000000000000000000000000000..323659eadb62fe50321b3ba3d1ad579fda70ce27 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/traceme.h @@ -0,0 +1,333 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_TSL_PROFILER_LIB_TRACEME_H_ +#define TENSORFLOW_TSL_PROFILER_LIB_TRACEME_H_ + +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "tsl/platform/logging.h" +#include "tsl/platform/macros.h" +#include "tsl/platform/platform.h" +#include "tsl/platform/types.h" +#include "tsl/profiler/lib/traceme_encode.h" // IWYU pragma: export + +#if !defined(IS_MOBILE_PLATFORM) +#include "tsl/profiler/backends/cpu/traceme_recorder.h" +#include "tsl/profiler/utils/time_utils.h" +#endif + +namespace tsl { +namespace profiler { + +// NOTE: Borrowed from boost C++ libraries. When TF embrace C++17 this should +// be replaced with std::is_invocable; +template +struct is_invocable + : std::is_constructible< + std::function, + std::reference_wrapper::type> > {}; + +// Predefined levels: +// - Level 1 (kCritical) is the default and used only for user instrumentation. +// - Level 2 (kInfo) is used by profiler for instrumenting high level program +// execution details (expensive TF ops, XLA ops, etc). +// - Level 3 (kVerbose) is also used by profiler to instrument more verbose +// (low-level) program execution details (cheap TF ops, etc). +enum TraceMeLevel { + kCritical = 1, + kInfo = 2, + kVerbose = 3, +}; + +// This is specifically used for instrumenting Tensorflow ops. +// Takes input as whether a TF op is expensive or not and returns the TraceMe +// level to be assigned to trace that particular op. Assigns level 2 for +// expensive ops (these are high-level details and shown by default in profiler +// UI). Assigns level 3 for cheap ops (low-level details not shown by default). +inline int GetTFTraceMeLevel(bool is_expensive) { + return is_expensive ? kInfo : kVerbose; +} + +// This class permits user-specified (CPU) tracing activities. A trace activity +// is started when an object of this class is created and stopped when the +// object is destroyed. 
+// +// CPU tracing can be useful when trying to understand what parts of GPU +// computation (e.g., kernels and memcpy) correspond to higher level activities +// in the overall program. For instance, a collection of kernels maybe +// performing one "step" of a program that is better visualized together than +// interspersed with kernels from other "steps". Therefore, a TraceMe object +// can be created at each "step". +// +// Two APIs are provided: +// (1) Scoped object: a TraceMe object starts tracing on construction, and +// stops tracing when it goes out of scope. +// { +// TraceMe trace("step"); +// ... do some work ... +// } +// TraceMe objects can be members of a class, or allocated on the heap. +// (2) Static methods: ActivityStart and ActivityEnd may be called in pairs. +// auto id = ActivityStart("step"); +// ... do some work ... +// ActivityEnd(id); +// The two static methods should be called within the same thread. +class TraceMe { + public: + // Constructor that traces a user-defined activity labeled with name + // in the UI. Level defines the trace priority, used for filtering TraceMe + // events. By default, traces with TraceMe level <= 2 are recorded. Levels: + // - Must be a positive integer. + // - Can be a value in enum TraceMeLevel. + // Users are welcome to use level > 3 in their code, if they wish to filter + // out their host traces based on verbosity. + explicit TraceMe(absl::string_view name, int level = 1) { + DCHECK_GE(level, 1); +#if !defined(IS_MOBILE_PLATFORM) + if (TF_PREDICT_FALSE(TraceMeRecorder::Active(level))) { + new (&no_init_.name) std::string(name); + start_time_ = GetCurrentTimeNanos(); + } +#endif + } + + // Do not allow passing a temporary string as the overhead of generating that + // string should only be incurred when tracing is enabled. Wrap the temporary + // string generation (e.g., StrCat) in a lambda and use the name_generator + // template instead. 
+ explicit TraceMe(std::string&& name, int level = 1) = delete; + + // Do not allow passing strings by reference or value since the caller + // may unintentionally maintain ownership of the name. + // Explicitly wrap the name in a string_view if you really wish to maintain + // ownership of a string already generated for other purposes. For temporary + // strings (e.g., result of StrCat) use the name_generator template. + explicit TraceMe(const std::string& name, int level = 1) = delete; + + // This overload is necessary to make TraceMe's with string literals work. + // Otherwise, the name_generator template would be used. + explicit TraceMe(const char* raw, int level = 1) + : TraceMe(absl::string_view(raw), level) {} + + // This overload only generates the name (and possibly metadata) if tracing is + // enabled. Useful for avoiding expensive operations (e.g., string + // concatenation) when tracing is disabled. + // name_generator may be a lambda or functor that returns a type that the + // string() constructor can take, e.g., the result of TraceMeEncode. + // name_generator is templated, rather than a std::function to avoid + // allocations std::function might make even if never called. + // Example Usage: + // TraceMe trace_me([&]() { + // return StrCat("my_trace", id); + // } + // TraceMe op_trace_me([&]() { + // return TraceMeOp(op_name, op_type); + // } + // TraceMe trace_me_with_metadata([&value1]() { + // return TraceMeEncode("my_trace", {{"key1", value1}, {"key2", 42}}); + // }); + template ::value, bool> = true> + explicit TraceMe(NameGeneratorT&& name_generator, int level = 1) { + DCHECK_GE(level, 1); +#if !defined(IS_MOBILE_PLATFORM) + if (TF_PREDICT_FALSE(TraceMeRecorder::Active(level))) { + new (&no_init_.name) + std::string(std::forward(name_generator)()); + start_time_ = GetCurrentTimeNanos(); + } +#endif + } + + // Movable. 
+ TraceMe(TraceMe&& other) { *this = std::move(other); } + TraceMe& operator=(TraceMe&& other) { +#if !defined(IS_MOBILE_PLATFORM) + if (TF_PREDICT_FALSE(other.start_time_ != kUntracedActivity)) { + new (&no_init_.name) std::string(std::move(other.no_init_.name)); + other.no_init_.name.~string(); + start_time_ = std::exchange(other.start_time_, kUntracedActivity); + } +#endif + return *this; + } + + ~TraceMe() { Stop(); } + + // Stop tracing the activity. Called by the destructor, but exposed to allow + // stopping tracing before the object goes out of scope. Only has an effect + // the first time it is called. + void Stop() { + // We do not need to check the trace level again here. + // - If tracing wasn't active to start with, we have kUntracedActivity. + // - If tracing was active and was stopped, we have + // TraceMeRecorder::Active(). + // - If tracing was active and was restarted at a lower level, we may + // spuriously record the event. This is extremely rare, and acceptable as + // event will be discarded when its start timestamp fall outside of the + // start/stop session timestamp. +#if !defined(IS_MOBILE_PLATFORM) + if (TF_PREDICT_FALSE(start_time_ != kUntracedActivity)) { + if (TF_PREDICT_TRUE(TraceMeRecorder::Active())) { + TraceMeRecorder::Record( + {std::move(no_init_.name), start_time_, GetCurrentTimeNanos()}); + } + no_init_.name.~string(); + start_time_ = kUntracedActivity; + } +#endif + } + + // Appends new_metadata to the TraceMe name passed to the constructor. + // metadata_generator may be a lambda or functor that returns a type that the + // string() constructor can take, e.g., the result of TraceMeEncode. + // metadata_generator is only evaluated when tracing is enabled. + // metadata_generator is templated, rather than a std::function to avoid + // allocations std::function might make even if never called. 
+ // Example Usage: + // trace_me.AppendMetadata([&value1]() { + // return TraceMeEncode({{"key1", value1}, {"key2", 42}}); + // }); + template < + typename MetadataGeneratorT, + std::enable_if_t::value, bool> = true> + void AppendMetadata(MetadataGeneratorT&& metadata_generator) { +#if !defined(IS_MOBILE_PLATFORM) + if (TF_PREDICT_FALSE(start_time_ != kUntracedActivity)) { + if (TF_PREDICT_TRUE(TraceMeRecorder::Active())) { + traceme_internal::AppendMetadata( + &no_init_.name, + std::forward(metadata_generator)()); + } + } +#endif + } + + // Static API, for use when scoped objects are inconvenient. + + // Record the start time of an activity. + // Returns the activity ID, which is used to stop the activity. + // Calls `name_generator` to get the name for activity. + template ::value, bool> = true> + static int64_t ActivityStart(NameGeneratorT&& name_generator, int level = 1) { +#if !defined(IS_MOBILE_PLATFORM) + if (TF_PREDICT_FALSE(TraceMeRecorder::Active(level))) { + int64_t activity_id = TraceMeRecorder::NewActivityId(); + TraceMeRecorder::Record({std::forward(name_generator)(), + GetCurrentTimeNanos(), -activity_id}); + return activity_id; + } +#endif + return kUntracedActivity; + } + + // Record the start time of an activity. + // Returns the activity ID, which is used to stop the activity. 
+ static int64_t ActivityStart(absl::string_view name, int level = 1) { +#if !defined(IS_MOBILE_PLATFORM) + if (TF_PREDICT_FALSE(TraceMeRecorder::Active(level))) { + int64_t activity_id = TraceMeRecorder::NewActivityId(); + TraceMeRecorder::Record( + {std::string(name), GetCurrentTimeNanos(), -activity_id}); + return activity_id; + } +#endif + return kUntracedActivity; + } + + // Same as ActivityStart above, an overload for "const std::string&" + static int64_t ActivityStart(const std::string& name, int level = 1) { + return ActivityStart(absl::string_view(name), level); + } + + // Same as ActivityStart above, an overload for "const char*" + static int64_t ActivityStart(const char* name, int level = 1) { + return ActivityStart(absl::string_view(name), level); + } + + // Record the end time of an activity started by ActivityStart(). + static void ActivityEnd(int64_t activity_id) { +#if !defined(IS_MOBILE_PLATFORM) + // We don't check the level again (see TraceMe::Stop()). + if (TF_PREDICT_FALSE(activity_id != kUntracedActivity)) { + if (TF_PREDICT_TRUE(TraceMeRecorder::Active())) { + TraceMeRecorder::Record( + {std::string(), -activity_id, GetCurrentTimeNanos()}); + } + } +#endif + } + + // Records the time of an instant activity. + template ::value, bool> = true> + static void InstantActivity(NameGeneratorT&& name_generator, int level = 1) { +#if !defined(IS_MOBILE_PLATFORM) + if (TF_PREDICT_FALSE(TraceMeRecorder::Active(level))) { + int64_t now = GetCurrentTimeNanos(); + TraceMeRecorder::Record({std::forward(name_generator)(), + /*start_time=*/now, /*end_time=*/now}); + } +#endif + } + + static bool Active(int level = 1) { +#if !defined(IS_MOBILE_PLATFORM) + return TraceMeRecorder::Active(level); +#else + return false; +#endif + } + + static int64_t NewActivityId() { +#if !defined(IS_MOBILE_PLATFORM) + return TraceMeRecorder::NewActivityId(); +#else + return 0; +#endif + } + + private: + // Start time used when tracing is disabled. 
+ constexpr static int64_t kUntracedActivity = 0; + + TraceMe(const TraceMe&) = delete; + void operator=(const TraceMe&) = delete; + + // Wrap the name into a union so that we can avoid the cost of string + // initialization when tracing is disabled. + union NoInit { + NoInit() {} + ~NoInit() {} + std::string name; + } no_init_; + + int64_t start_time_ = kUntracedActivity; +}; + +// Whether OpKernel::TraceString will populate additional information for +// profiler, such as tensor shapes. +inline bool TfOpDetailsEnabled() { + return TraceMe::Active(TraceMeLevel::kVerbose); +} + +} // namespace profiler +} // namespace tsl + +#endif // TENSORFLOW_TSL_PROFILER_LIB_TRACEME_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/traceme_encode.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/traceme_encode.h new file mode 100644 index 0000000000000000000000000000000000000000..76c5f301e7d703a5577527770ecd0b1e631551cc --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/lib/traceme_encode.h @@ -0,0 +1,174 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_TSL_PROFILER_LIB_TRACEME_ENCODE_H_ +#define TENSORFLOW_TSL_PROFILER_LIB_TRACEME_ENCODE_H_ + +#include + +#include +#include + +#include "absl/base/attributes.h" +#include "absl/strings/match.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/string_view.h" +#include "tsl/platform/logging.h" +#include "tsl/platform/macros.h" + +namespace tsl { +namespace profiler { + +// An argument passed to TraceMeEncode. +struct TraceMeArg { + // String conversions of value types are supported via AlphaNum. We keep a + // reference to the AlphaNum's internal buffer here, so it must remain valid + // for the lifetime of this object. We cannot store it by value because it is + // not safe to construct an AlphaNum as a member of a class, particularly when + // AbslStringify is being used (it may reference default arguments that are on + // the caller's stack, if we constructed it here those default arguments would + // be destroyed before they are used). + TraceMeArg(absl::string_view k, + const absl::AlphaNum& v ABSL_ATTRIBUTE_LIFETIME_BOUND) + : key(k), value(v.Piece()) {} + + TraceMeArg(const TraceMeArg&) = delete; + void operator=(const TraceMeArg&) = delete; + + absl::string_view key; + absl::string_view value; +}; + +namespace traceme_internal { + +// Copies the contents of str to the address pointed by out. +// Returns the address after the copy. +// REQUIRED: The address range [out, out + str.size()] must have been allocated. +TF_ATTRIBUTE_ALWAYS_INLINE inline char* Append(char* out, + absl::string_view str) { + DCHECK(!absl::StrContains(str, '#')) + << "'#' is not a valid character in TraceMeEncode"; + const size_t str_size = str.size(); + if (TF_PREDICT_TRUE(str_size > 0)) { + memcpy(out, str.data(), str_size); + out += str_size; + } + return out; +} + +// Appends args encoded as TraceMe metadata to name. 
+TF_ATTRIBUTE_ALWAYS_INLINE inline std::string AppendArgs( + std::string name, std::initializer_list args) { + if (TF_PREDICT_TRUE(args.size() > 0)) { + const auto old_size = name.size(); + auto new_size = old_size + args.size() * 2 + 1; + for (const auto& arg : args) { + new_size += arg.key.size() + arg.value.size(); + } + name.resize(new_size); + char* const begin = &name[0]; + char* out = begin + old_size; + *out++ = '#'; + for (const auto& arg : args) { + out = Append(out, arg.key); + *out++ = '='; + out = Append(out, arg.value); + *out++ = ','; + } + *(out - 1) = '#'; + DCHECK_EQ(out, begin + new_size); + } + return name; +} + +// Appends new_metadata to the metadata part of name. +TF_ATTRIBUTE_ALWAYS_INLINE inline void AppendMetadata( + std::string* name, absl::string_view new_metadata) { + if (!TF_PREDICT_FALSE(new_metadata.empty())) { + if (!name->empty() && name->back() == '#') { // name already has metadata + name->back() = ','; + if (TF_PREDICT_TRUE(new_metadata.front() == '#')) { + new_metadata.remove_prefix(1); + } + } + name->append(new_metadata.data(), new_metadata.size()); + } +} + +} // namespace traceme_internal + +// Encodes an event name and arguments into TraceMe metadata. +// Use within a lambda to avoid expensive operations when tracing is disabled. 
+// Example Usage: +// TraceMe trace_me([value1]() { +// return TraceMeEncode("my_trace", {{"key1", value1}, {"key2", 42}}); +// }); +TF_ATTRIBUTE_ALWAYS_INLINE inline std::string TraceMeEncode( + std::string name, std::initializer_list args) { + return traceme_internal::AppendArgs(std::move(name), args); +} +TF_ATTRIBUTE_ALWAYS_INLINE inline std::string TraceMeEncode( + absl::string_view name, std::initializer_list args) { + return traceme_internal::AppendArgs(std::string(name), args); +} +TF_ATTRIBUTE_ALWAYS_INLINE inline std::string TraceMeEncode( + const char* name, std::initializer_list args) { + return traceme_internal::AppendArgs(std::string(name), args); +} + +// Encodes arguments into TraceMe metadata. +// Use within a lambda to avoid expensive operations when tracing is disabled. +// Example Usage: +// TraceMe trace_me("my_trace"); +// ... +// trace_me.AppendMetadata([value1]() { +// return TraceMeEncode({{"key1", value1}, {"key2", 42}}); +// }); +TF_ATTRIBUTE_ALWAYS_INLINE inline std::string TraceMeEncode( + std::initializer_list args) { + return traceme_internal::AppendArgs(std::string(), args); +} + +// Concatenates op_name and op_type. +TF_ATTRIBUTE_ALWAYS_INLINE inline std::string TraceMeOp( + absl::string_view op_name, absl::string_view op_type) { + return absl::StrCat(op_name, ":", op_type); +} + +TF_ATTRIBUTE_ALWAYS_INLINE inline std::string TraceMeOp(const char* op_name, + const char* op_type) { + return absl::StrCat(op_name, ":", op_type); +} + +TF_ATTRIBUTE_ALWAYS_INLINE inline std::string TraceMeOp( + std::string&& op_name, absl::string_view op_type) { + absl::StrAppend(&op_name, ":", op_type); + return op_name; +} + +// Concatenates op_name and op_type. 
+TF_ATTRIBUTE_ALWAYS_INLINE inline std::string TraceMeOpOverride( + absl::string_view op_name, absl::string_view op_type) { + return absl::StrCat("#tf_op=", op_name, ":", op_type, "#"); +} + +TF_ATTRIBUTE_ALWAYS_INLINE inline std::string TraceMeOpOverride( + const char* op_name, const char* op_type) { + return absl::StrCat("#tf_op=", op_name, ":", op_type, "#"); +} + +} // namespace profiler +} // namespace tsl + +#endif // TENSORFLOW_TSL_PROFILER_LIB_TRACEME_ENCODE_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/rpc/client/save_profile.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/rpc/client/save_profile.h new file mode 100644 index 0000000000000000000000000000000000000000..bd342a86030cf4d8061a874569f943705ff8ab4a --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/rpc/client/save_profile.h @@ -0,0 +1,58 @@ +/* Copyright 2017 The TensorFlow Authors All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PROFILER_RPC_CLIENT_SAVE_PROFILE_H_ +#define TENSORFLOW_TSL_PROFILER_RPC_CLIENT_SAVE_PROFILE_H_ + +#include +#include + +#include "tsl/platform/status.h" +#include "tsl/platform/types.h" +#include "tsl/profiler/protobuf/profiler_service.pb.h" +#include "tsl/profiler/protobuf/xplane.pb.h" + +namespace tsl { +namespace profiler { + +std::string GetCurrentTimeStampAsString(); + +// Returns the profile plugin directory given a logdir to TensorBoard. +std::string GetTensorBoardProfilePluginDir(const std::string& logdir); + +// Saves all profiling tool data in a profile to //. +// This writes user-facing log messages to `os`. +// Note: this function creates a directory even when all fields in +// ProfileResponse are unset/empty. +Status SaveProfile(const std::string& repository_root, const std::string& run, + const std::string& host, + const tensorflow::ProfileResponse& response, + std::ostream* os); + +// Gzip the data and save to //. +Status SaveGzippedToolData(const std::string& repository_root, + const std::string& run, const std::string& host, + const std::string& tool_name, + const std::string& data); + +// Save XSpace to //_.. 
+Status SaveXSpace(const std::string& repository_root, const std::string& run, + const std::string& host, + const tensorflow::profiler::XSpace& xspace); + +} // namespace profiler +} // namespace tsl + +#endif // TENSORFLOW_TSL_PROFILER_RPC_CLIENT_SAVE_PROFILE_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/rpc/profiler_service_impl.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/rpc/profiler_service_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..2442213870f7be39e4ae18fe57400d0db7420244 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/rpc/profiler_service_impl.h @@ -0,0 +1,31 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_TSL_PROFILER_RPC_PROFILER_SERVICE_IMPL_H_ +#define TENSORFLOW_TSL_PROFILER_RPC_PROFILER_SERVICE_IMPL_H_ + +#include + +#include "tsl/profiler/protobuf/profiler_service.grpc.pb.h" + +namespace tsl { +namespace profiler { + +std::unique_ptr +CreateProfilerService(); + +} // namespace profiler +} // namespace tsl + +#endif // TENSORFLOW_TSL_PROFILER_RPC_PROFILER_SERVICE_IMPL_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/utils/math_utils.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/utils/math_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..cce73c2961c2234f0ed58259dfbf757c2d55ff23 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/utils/math_utils.h @@ -0,0 +1,72 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PROFILER_UTILS_MATH_UTILS_H_ +#define TENSORFLOW_TSL_PROFILER_UTILS_MATH_UTILS_H_ + +#include + +namespace tsl { +namespace profiler { + +// Converts among different SI units. +// https://en.wikipedia.org/wiki/International_System_of_Units +// NOTE: We use uint64 for picos and nanos, which are used in +// storage, and double for other units that are used in the UI. 
+inline double PicoToNano(uint64_t p) { return p / 1E3; } +inline double PicoToMicro(uint64_t p) { return p / 1E6; } +inline double PicoToMilli(uint64_t p) { return p / 1E9; } +inline double PicoToUni(uint64_t p) { return p / 1E12; } +inline uint64_t NanoToPico(uint64_t n) { return n * 1000; } +inline double NanoToMicro(uint64_t n) { return n / 1E3; } +inline double NanoToMilli(uint64_t n) { return n / 1E6; } +inline double MicroToNano(double u) { return u * 1E3; } +inline double MicroToMilli(double u) { return u / 1E3; } +inline uint64_t MilliToPico(double m) { return m * 1E9; } +inline uint64_t MilliToNano(double m) { return m * 1E6; } +inline double MilliToUni(double m) { return m / 1E3; } +inline uint64_t UniToPico(double uni) { return uni * 1E12; } +inline uint64_t UniToNano(double uni) { return uni * 1E9; } +inline double UniToMicro(double uni) { return uni * 1E6; } +inline double UniToGiga(double uni) { return uni / 1E9; } +inline double GigaToUni(double giga) { return giga * 1E9; } +inline double GigaToTera(double giga) { return giga / 1E3; } +inline double TeraToGiga(double tera) { return tera * 1E3; } + +// Convert from clock cycles to seconds. +inline double CyclesToSeconds(double cycles, double frequency_hz) { + // cycles / (cycles/s) = s. + return cycles / frequency_hz; +} + +// Checks the divisor and returns 0 to avoid divide by zero. +inline double SafeDivide(double dividend, double divisor) { + constexpr double kEpsilon = 1.0E-10; + if ((-kEpsilon < divisor) && (divisor < kEpsilon)) return 0.0; + return dividend / divisor; +} + +inline double GibiToGiga(double gibi) { return gibi * ((1 << 30) / 1.0e9); } +inline double GigaToGibi(double giga) { return giga / ((1 << 30) / 1.0e9); } + +// Calculates GiB/s. 
+inline double GibibytesPerSecond(double gigabytes, double ns) { + return GigaToGibi(SafeDivide(gigabytes, ns)); +} + +} // namespace profiler +} // namespace tsl + +#endif // TENSORFLOW_TSL_PROFILER_UTILS_MATH_UTILS_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/utils/time_utils.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/utils/time_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..3cd30214f49975e361b82daa52d60cc3645c6135 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/utils/time_utils.h @@ -0,0 +1,43 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PROFILER_UTILS_TIME_UTILS_H_ +#define TENSORFLOW_TSL_PROFILER_UTILS_TIME_UTILS_H_ + +#include + +#include "tsl/profiler/utils/math_utils.h" + +namespace tsl { +namespace profiler { + +// Returns the current CPU wallclock time in nanoseconds. +int64_t GetCurrentTimeNanos(); + +// Sleeps for the specified duration. 
+void SleepForNanos(int64_t ns); +inline void SleepForMicros(int64_t us) { SleepForNanos(MicroToNano(us)); } +inline void SleepForMillis(int64_t ms) { SleepForNanos(MilliToNano(ms)); } +inline void SleepForSeconds(int64_t s) { SleepForNanos(UniToNano(s)); } + +// Spins to simulate doing some work instead of sleeping, because sleep +// precision is poor. For testing only. +void SpinForNanos(int64_t ns); +inline void SpinForMicros(int64_t us) { SpinForNanos(us * 1000); } + +} // namespace profiler +} // namespace tsl + +#endif // TENSORFLOW_TSL_PROFILER_UTILS_TIME_UTILS_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/utils/trace_utils.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/utils/trace_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..27d27c5b9966a9a783913e16c6b6c815616313ef --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/utils/trace_utils.h @@ -0,0 +1,79 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PROFILER_UTILS_TRACE_UTILS_H_ +#define TENSORFLOW_TSL_PROFILER_UTILS_TRACE_UTILS_H_ + +#include + +#include "absl/strings/numbers.h" +#include "absl/strings/string_view.h" +#include "tsl/platform/types.h" + +namespace tsl { +namespace profiler { + +// Constants used as trace_viewer PID (device_id in trace_events.proto). +// PID 0 is unused. +// Support up to 500 accelerator devices. +constexpr uint32 kFirstDeviceId = 1; +constexpr uint32 kLastDeviceId = 500; +// Support Upto 200 custom planes as fake devices (i.e., planes with a +// "/custom:" prefix). See `::kCustomPlanePrefix` for more +// information +constexpr uint32 kFirstCustomPlaneDeviceId = kLastDeviceId + 1; +constexpr uint32 kMaxCustomPlaneDevicesPerHost = 200; +constexpr uint32 kLastCustomPlaneDeviceId = + kFirstCustomPlaneDeviceId + kMaxCustomPlaneDevicesPerHost - 1; +// Host threads are shown as a single fake device. +constexpr uint32 kHostThreadsDeviceId = kLastCustomPlaneDeviceId + 1; + +// Constants used as trace_viewer TID (resource_id in trace_events.proto). +constexpr int kThreadIdDerivedMin = 0xdeadbeef; +constexpr int kThreadIdStepInfo = kThreadIdDerivedMin; +constexpr int kThreadIdKernelLaunch = kThreadIdDerivedMin + 1; +constexpr int kThreadIdTfNameScope = kThreadIdDerivedMin + 2; +constexpr int kThreadIdTfOp = kThreadIdDerivedMin + 3; +constexpr int kThreadIdHloModule = kThreadIdDerivedMin + 4; +constexpr int kThreadIdHloOp = kThreadIdDerivedMin + 5; +constexpr int kThreadIdOverhead = kThreadIdDerivedMin + 6; +constexpr int kThreadIdSource = kThreadIdDerivedMin + 7; +constexpr int kThreadIdDerivedMax = kThreadIdSource; + +static inline bool IsDerivedThreadId(int thread_id) { + return thread_id >= kThreadIdDerivedMin && thread_id <= kThreadIdDerivedMax; +} + +// Parses the device ordinal (N) from device names that use TensorFlow +// convention: "hostname /device:xPU:N". 
+static inline std::optional ParseDeviceOrdinal( + absl::string_view device_name) { + if (auto pos = device_name.find_last_of(':'); + pos != absl::string_view::npos) { + device_name.remove_prefix(pos + 1); + } + if (auto pos = device_name.find_first_of(' '); + pos != absl::string_view::npos) { + device_name.remove_suffix(device_name.size() - pos); + } + uint32_t device_id; + if (absl::SimpleAtoi(device_name, &device_id)) return device_id; + return std::nullopt; +} + +} // namespace profiler +} // namespace tsl + +#endif // TENSORFLOW_TSL_PROFILER_UTILS_TRACE_UTILS_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/utils/xplane_schema.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/utils/xplane_schema.h new file mode 100644 index 0000000000000000000000000000000000000000..df8084eb92542dd8d0502a299cb6aa82552ae2eb --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/profiler/utils/xplane_schema.h @@ -0,0 +1,500 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PROFILER_UTILS_XPLANE_SCHEMA_H_ +#define TENSORFLOW_TSL_PROFILER_UTILS_XPLANE_SCHEMA_H_ + +#include +#include +#include +#include + +#include "absl/hash/hash.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "tsl/platform/logging.h" +#include "tsl/platform/macros.h" +#include "tsl/platform/types.h" +#include "tsl/profiler/lib/context_types.h" + +namespace tsl { +namespace profiler { + +// Name of XPlane that contains TraceMe events. +TF_CONST_INIT extern const absl::string_view kHostThreadsPlaneName; +// Name prefix of XPlane that contains GPU events. +TF_CONST_INIT extern const absl::string_view kGpuPlanePrefix; +// Name prefix of XPlane that contains TPU events. +TF_CONST_INIT extern const absl::string_view kTpuPlanePrefix; +// Regex for XPlanes that contain TensorCore planes. +TF_CONST_INIT extern const char kTpuPlaneRegex[]; +// Name prefix of XPlane that contains custom device events. +TF_CONST_INIT extern const absl::string_view kCustomPlanePrefix; +// Name prefix of XPlane that contains TPU non-core events such as HBM, ICI etc. +TF_CONST_INIT extern const absl::string_view kTpuNonCorePlaneNamePrefix; +// Name prefix of XPlane that contains TPU runtime events. +TF_CONST_INIT extern const absl::string_view kTpuRuntimePlaneName; +// Name of XPlane that contains CUPTI driver API generated events. +TF_CONST_INIT extern const absl::string_view kCuptiDriverApiPlaneName; +// Name of XPlane that contains Roctracer API generated events. +TF_CONST_INIT extern const absl::string_view kRoctracerApiPlaneName; +// Name of XPlane that contains profile metadata such as XLA debug info. +TF_CONST_INIT extern const absl::string_view kMetadataPlaneName; +// Name of XPlane that contains kpi related metrics. 
+TF_CONST_INIT extern const absl::string_view kTFStreamzPlaneName; +// Name of XPlane that contains events from python tracer. +TF_CONST_INIT extern const absl::string_view kPythonTracerPlaneName; +// Name of XPlane that contains kTrace thread-switch events +TF_CONST_INIT extern const absl::string_view kHostCpusPlaneName; +// Name of XPlane that contains kTrace system calls. +TF_CONST_INIT extern const absl::string_view kSyscallsPlaneName; + +// Names of XLines that contain ML-level events. +TF_CONST_INIT extern const absl::string_view kStepLineName; +TF_CONST_INIT extern const absl::string_view kTensorFlowNameScopeLineName; +TF_CONST_INIT extern const absl::string_view kTensorFlowOpLineName; +TF_CONST_INIT extern const absl::string_view kXlaModuleLineName; +TF_CONST_INIT extern const absl::string_view kXlaOpLineName; +TF_CONST_INIT extern const absl::string_view kXlaAsyncOpLineName; +TF_CONST_INIT extern const absl::string_view kKernelLaunchLineName; +TF_CONST_INIT extern const absl::string_view kSourceLineName; +TF_CONST_INIT extern const absl::string_view kCounterEventsLineName; + +// GPU device vendors. +TF_CONST_INIT extern const absl::string_view kDeviceVendorNvidia; +TF_CONST_INIT extern const absl::string_view kDeviceVendorAMD; + +// Name of Xplane that contains environment information +TF_CONST_INIT extern const absl::string_view kTaskEnvPlaneName; + +// Max collectives to display per TPU. +// Since in most cases there will be more than 9 collectives, the last line +// contains all collectives that did not qualify to get their own line. +static constexpr uint32_t kMaxCollectivesToDisplay = 9; + +// Interesting event types (i.e., TraceMe names). 
+enum HostEventType { + kFirstHostEventType = 0, + kUnknownHostEventType = kFirstHostEventType, + kTraceContext, + kSessionRun, + kFunctionRun, + kRunGraph, + kRunGraphDone, + kTfOpRun, + kEagerKernelExecute, + kExecutorStateProcess, + kExecutorDoneCallback, + kMemoryAllocation, + kMemoryDeallocation, + // Performance counter related. + kRemotePerf, + // tf.data captured function events. + kTfDataCapturedFunctionRun, + kTfDataCapturedFunctionRunWithBorrowedArgs, + kTfDataCapturedFunctionRunInstantiated, + kTfDataCapturedFunctionRunAsync, + // Loop ops. + kParallelForOp, + kForeverOp, + kWhileOpEvalCond, + kWhileOpStartBody, + kForOp, + // tf.data related. + kIteratorGetNextOp, + kIteratorGetNextAsOptionalOp, + kIterator, + kDeviceInputPipelineSecondIterator, + kPrefetchProduce, + kPrefetchConsume, + kParallelInterleaveProduce, + kParallelInterleaveConsume, + kParallelInterleaveInitializedInput, + kParallelMapProduce, + kParallelMapConsume, + kMapAndBatchProduce, + kMapAndBatchConsume, + kParseExampleProduce, + kParseExampleConsume, + kParallelBatchProduce, + kParallelBatchConsume, + // Batching related. + kBatchingSessionRun, + kProcessBatch, + kConcatInputTensors, + kMergeInputTensors, + kScheduleWithoutSplit, + kScheduleWithSplit, + kScheduleWithEagerSplit, + kASBSQueueSchedule, + // TFRT related. + kTfrtModelRun, + // Serving related. + kServingModelRun, + // GPU related. 
+ kKernelLaunch, + kKernelExecute, + // TPU related + kEnqueueRequestLocked, + kRunProgramRequest, + kHostCallbackRequest, + kTransferH2DRequest, + kTransferPreprocessedH2DRequest, + kTransferD2HRequest, + kOnDeviceSendRequest, + kOnDeviceRecvRequest, + kOnDeviceSendRecvLocalRequest, + kCustomWait, + kOnDeviceSendRequestMulti, + kOnDeviceRecvRequestMulti, + kPjrtAsyncWait, + kDoEnqueueProgram, + kDoEnqueueContinuationProgram, + kWriteHbm, + kReadHbm, + kTpuExecuteOp, + kCompleteCallbacks, + kTransferToDeviceIssueEvent, + kTransferToDeviceDone, + kTransferFromDeviceIssueEvent, + kTransferFromDeviceDone, + kTpuSystemExecute, + kTpuPartitionedCallOpInitializeVarOnTpu, + kTpuPartitionedCallOpExecuteRemote, + kTpuPartitionedCallOpExecuteLocal, + kLinearize, + kDelinearize, + kTransferBufferFromDeviceFastPath, + kLastHostEventType = kTransferBufferFromDeviceFastPath, +}; + +enum StatType { + kFirstStatType = 0, + kUnknownStatType = kFirstStatType, + // TraceMe arguments. + kStepId, + kDeviceOrdinal, + kChipOrdinal, + kNodeOrdinal, + kModelId, + kQueueId, + kQueueAddr, + kRequestId, + kRunId, + kReplicaId, + kGraphType, + kStepNum, + kIterNum, + kIndexOnHost, + kAllocatorName, + kBytesReserved, + kBytesAllocated, + kBytesAvailable, + kFragmentation, + kPeakBytesInUse, + kRequestedBytes, + kAllocationBytes, + kAddress, + kRegionType, + kDataType, + kTensorShapes, + kTensorLayout, + kKpiName, + kKpiValue, + kElementId, + kParentId, + kCoreType, + // XPlane semantics related. + kProducerType, + kConsumerType, + kProducerId, + kConsumerId, + kIsRoot, + kIsAsync, + // Device trace arguments. + kDeviceId, + kDeviceTypeString, + kContextId, + kCorrelationId, + // TODO(b/176137043): These "details" should differentiate between activity + // and API event sources. + kMemcpyDetails, + kMemallocDetails, + kMemFreeDetails, + kMemsetDetails, + kMemoryResidencyDetails, + kNVTXRange, + kKernelDetails, + kStream, + // Stats added when processing traces. 
+ kGroupId, + kFlow, + kStepName, + kTfOp, + kHloOp, + kDeduplicatedName, + kHloCategory, + kHloModule, + kProgramId, + kEquation, + kIsEager, + kIsFunc, + kTfFunctionCall, + kTfFunctionTracingCount, + kFlops, + kModelFlops, + kBytesAccessed, + kMemoryAccessBreakdown, + kSourceInfo, + kModelName, + kModelVersion, + kBytesTransferred, + kDmaQueue, + kDcnCollectiveInfo, + // Performance counter related. + kRawValue, + kScaledValue, + kThreadId, + kMatrixUnitUtilizationPercent, + // XLA metadata map related. + kHloProto, + // Device capability related. + kDevCapClockRateKHz, + kDevCapCoreCount, + kDevCapMemoryBandwidth, + kDevCapMemorySize, + kDevCapComputeCapMajor, + kDevCapComputeCapMinor, + kDevCapPeakTeraflopsPerSecond, + kDevCapPeakHbmBwGigabytesPerSecond, + kDevCapPeakSramRdBwGigabytesPerSecond, + kDevCapPeakSramWrBwGigabytesPerSecond, + kDevVendor, + // Batching related. + kBatchSizeAfterPadding, + kPaddingAmount, + kBatchingInputTaskSize, + // GPU occupancy metrics + kTheoreticalOccupancyPct, + kOccupancyMinGridSize, + kOccupancySuggestedBlockSize, + // Aggregated Stats + kSelfDurationPs, + kMinDurationPs, + kTotalProfileDurationPs, + kMaxIterationNum, + kDeviceType, + kUsesMegaCore, + kSymbolId, + kTfOpName, + kDmaStallDurationPs, + kKey, + kPayloadSizeBytes, + kDuration, + kBufferSize, + kTransfers, + // Dcn message Stats + kDcnLabel, + kDcnSourceSliceId, + kDcnSourcePerSliceDeviceId, + kDcnDestinationSliceId, + kDcnDestinationPerSliceDeviceId, + kDcnChunk, + kDcnLoopIndex, + kEdgeTpuModelInfo, + kEdgeTpuModelProfileInfo, + kEdgeTpuMlir, + kLastStatType = kEdgeTpuMlir, +}; + +enum MegaScaleStatType : uint8_t { + kMegaScaleGraphKey, + kFirstMegaScaleStatType = kMegaScaleGraphKey, + kMegaScaleLocalDeviceId, + kMegaScaleNumActions, + kMegaScaleCollectiveType, + kMegaScaleInputSize, + kMegaScaleSlackUs, + kMegaScaleActionType, + kMegaScaleStartEndType, + kMegaScaleActionIndex, + kMegaScaleActionDurationNs, + kMegaScaleActionInputs, + kMegaScaleTransferSource, + 
kMegaScaleTransferDestinations, + kMegaScaleBufferSizes, + kMegaScaleComputeOperation, + kMegaScaleChunk, + kMegaScaleLaunchId, + kMegaScaleLoopIteration, + kMegaScaleGraphProtos, + kLastMegaScaleStatType = kMegaScaleGraphProtos, +}; + +static constexpr uint32_t kLineIdOffset = 10000; + +enum LineIdType { + kFirstLineIdType = kLineIdOffset, + kUnknownLineIdType = kFirstLineIdType, + // DCN Traffic + kDcnHostTraffic, + kDcnCollectiveTraffic, + // kDcnCollectiveTrafficMax reserves id's from kDcnCollectiveTraffic to + // (kDcnCollectiveTraffic + kMaxCollectivesToDisplay) for DcnCollective lines. + kDcnCollectiveTrafficMax = kDcnCollectiveTraffic + kMaxCollectivesToDisplay, + kLastLineIdType = kDcnCollectiveTrafficMax, +}; + +inline std::string TpuPlaneName(int32_t device_ordinal) { + return absl::StrCat(kTpuPlanePrefix, device_ordinal); +} + +inline std::string GpuPlaneName(int32_t device_ordinal) { + return absl::StrCat(kGpuPlanePrefix, device_ordinal); +} + +absl::string_view GetHostEventTypeStr(HostEventType event_type); + +bool IsHostEventType(HostEventType event_type, absl::string_view event_name); + +inline bool IsHostEventType(HostEventType event_type, + absl::string_view event_name) { + return GetHostEventTypeStr(event_type) == event_name; +} + +std::optional FindHostEventType(absl::string_view event_name); + +std::optional FindTfOpEventType(absl::string_view event_name); + +absl::string_view GetStatTypeStr(StatType stat_type); + +bool IsStatType(StatType stat_type, absl::string_view stat_name); + +inline bool IsStatType(StatType stat_type, absl::string_view stat_name) { + return GetStatTypeStr(stat_type) == stat_name; +} + +std::optional FindStatType(absl::string_view stat_name); + +absl::string_view GetMegaScaleStatTypeStr(MegaScaleStatType stat_type); + +inline bool IsMegaScaleStatType(MegaScaleStatType stat_type, + absl::string_view stat_name) { + return GetMegaScaleStatTypeStr(stat_type) == stat_name; +} + +std::optional 
FindMegaScaleStatType(absl::string_view stat_name); + +// Returns true if the given event shouldn't be shown in the trace viewer. +bool IsInternalEvent(std::optional event_type); + +// Returns true if the given stat shouldn't be shown in the trace viewer. +bool IsInternalStat(std::optional stat_type); + +// Support for flow events: +// This class enables encoding/decoding the flow id and direction, stored as +// XStat value. The flow id are limited to 56 bits. +class XFlow { + public: + enum FlowDirection { + kFlowUnspecified = 0x0, + kFlowIn = 0x1, + kFlowOut = 0x2, + kFlowInOut = 0x3, + }; + + XFlow(uint64_t flow_id, FlowDirection direction, + ContextType category = ContextType::kGeneric) { + DCHECK_NE(direction, kFlowUnspecified); + encoded_.parts.direction = direction; + encoded_.parts.flow_id = flow_id; + encoded_.parts.category = static_cast(category); + } + + // Encoding + uint64 ToStatValue() const { return encoded_.whole; } + + // Decoding + static XFlow FromStatValue(uint64_t encoded) { return XFlow(encoded); } + + /* NOTE: absl::HashOf is not consistent across processes (some process level + * salt is added), even different executions of the same program. + * However we are not tracking cross-host flows, i.e. A single flow's + * participating events are from the same XSpace. On the other hand, + * events from the same XSpace is always processed in the same profiler + * process. Flows from different hosts are unlikely to collide because of + * 2^56 hash space. Therefore, we can consider this is good for now. We should + * revisit the hash function when cross-hosts flows became more popular. + */ + template + static uint64_t GetFlowId(Args&&... args) { + return absl::HashOf(std::forward(args)...) 
& kFlowMask; + } + + uint64_t Id() const { return encoded_.parts.flow_id; } + ContextType Category() const { + return GetSafeContextType(encoded_.parts.category); + } + FlowDirection Direction() const { + return FlowDirection(encoded_.parts.direction); + } + + static uint64_t GetUniqueId() { // unique in current process. + return next_flow_id_.fetch_add(1); + } + + private: + explicit XFlow(uint64_t encoded) { encoded_.whole = encoded; } + static constexpr uint64_t kFlowMask = (1ULL << 56) - 1; + static std::atomic next_flow_id_; + + union { + // Encoded representation. + uint64_t whole; + struct { + uint64_t direction : 2; + uint64_t flow_id : 56; + uint64_t category : 6; + } parts; + } encoded_ ABSL_ATTRIBUTE_PACKED; + + static_assert(sizeof(encoded_) == sizeof(uint64_t), "Must be 64 bits."); +}; + +// String constants for XProf TraceMes for DCN Messages. +TF_CONST_INIT extern const absl::string_view kMegaScaleDcnReceive; +TF_CONST_INIT extern const absl::string_view kMegaScaleDcnSend; +TF_CONST_INIT extern const absl::string_view kMegaScaleDcnSendFinished; +TF_CONST_INIT extern const absl::string_view kMegaScaleDcnMemAllocate; +TF_CONST_INIT extern const absl::string_view kMegaScaleDcnMemCopy; +TF_CONST_INIT extern const absl::string_view kMegaScaleTopologyDiscovery; +TF_CONST_INIT extern const absl::string_view kMegaScaleBarrier; +TF_CONST_INIT extern const absl::string_view kMegaScaleHostCommand; +TF_CONST_INIT extern const absl::string_view kMegaScaleD2HTransferStart; +TF_CONST_INIT extern const absl::string_view kMegaScaleD2HTransferFinished; +TF_CONST_INIT extern const absl::string_view kMegaScaleH2DTransferStart; +TF_CONST_INIT extern const absl::string_view kMegaScaleH2DTransferFinished; +TF_CONST_INIT extern const absl::string_view kMegaScaleReductionStart; +TF_CONST_INIT extern const absl::string_view kMegaScaleReductionFinished; +TF_CONST_INIT extern const char kXProfMetadataKey[]; +TF_CONST_INIT extern const char kXProfMetadataFlow[]; +TF_CONST_INIT 
extern const char kXProfMetadataTransfers[]; +TF_CONST_INIT extern const char kXProfMetadataBufferSize[]; + +} // namespace profiler +} // namespace tsl + +#endif // TENSORFLOW_TSL_PROFILER_UTILS_XPLANE_SCHEMA_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/bfc_memory_map.pb.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/bfc_memory_map.pb.h new file mode 100644 index 0000000000000000000000000000000000000000..983b739307d615be03bb0bd2df4884f085e46c90 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/bfc_memory_map.pb.h @@ -0,0 +1,1846 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tsl/protobuf/bfc_memory_map.proto + +#ifndef GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2fbfc_5fmemory_5fmap_2eproto +#define GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2fbfc_5fmemory_5fmap_2eproto + +#include +#include + +#include +#if PROTOBUF_VERSION < 3021000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. +#endif +#if 3021009 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#include +// @@protoc_insertion_point(includes) +#include +#define PROTOBUF_INTERNAL_EXPORT_tsl_2fprotobuf_2fbfc_5fmemory_5fmap_2eproto +PROTOBUF_NAMESPACE_OPEN +namespace internal { +class AnyMetadata; +} // namespace internal +PROTOBUF_NAMESPACE_CLOSE + +// Internal implementation detail -- do not use these members. 
+struct TableStruct_tsl_2fprotobuf_2fbfc_5fmemory_5fmap_2eproto { + static const uint32_t offsets[]; +}; +extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_tsl_2fprotobuf_2fbfc_5fmemory_5fmap_2eproto; +namespace tensorflow { +class BinSummary; +struct BinSummaryDefaultTypeInternal; +extern BinSummaryDefaultTypeInternal _BinSummary_default_instance_; +class MemAllocatorStats; +struct MemAllocatorStatsDefaultTypeInternal; +extern MemAllocatorStatsDefaultTypeInternal _MemAllocatorStats_default_instance_; +class MemChunk; +struct MemChunkDefaultTypeInternal; +extern MemChunkDefaultTypeInternal _MemChunk_default_instance_; +class MemoryDump; +struct MemoryDumpDefaultTypeInternal; +extern MemoryDumpDefaultTypeInternal _MemoryDump_default_instance_; +class SnapShot; +struct SnapShotDefaultTypeInternal; +extern SnapShotDefaultTypeInternal _SnapShot_default_instance_; +} // namespace tensorflow +PROTOBUF_NAMESPACE_OPEN +template<> ::tensorflow::BinSummary* Arena::CreateMaybeMessage<::tensorflow::BinSummary>(Arena*); +template<> ::tensorflow::MemAllocatorStats* Arena::CreateMaybeMessage<::tensorflow::MemAllocatorStats>(Arena*); +template<> ::tensorflow::MemChunk* Arena::CreateMaybeMessage<::tensorflow::MemChunk>(Arena*); +template<> ::tensorflow::MemoryDump* Arena::CreateMaybeMessage<::tensorflow::MemoryDump>(Arena*); +template<> ::tensorflow::SnapShot* Arena::CreateMaybeMessage<::tensorflow::SnapShot>(Arena*); +PROTOBUF_NAMESPACE_CLOSE +namespace tensorflow { + +// =================================================================== + +class MemAllocatorStats final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.MemAllocatorStats) */ { + public: + inline MemAllocatorStats() : MemAllocatorStats(nullptr) {} + ~MemAllocatorStats() override; + explicit PROTOBUF_CONSTEXPR MemAllocatorStats(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + MemAllocatorStats(const MemAllocatorStats& from); 
+ MemAllocatorStats(MemAllocatorStats&& from) noexcept + : MemAllocatorStats() { + *this = ::std::move(from); + } + + inline MemAllocatorStats& operator=(const MemAllocatorStats& from) { + CopyFrom(from); + return *this; + } + inline MemAllocatorStats& operator=(MemAllocatorStats&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const MemAllocatorStats& default_instance() { + return *internal_default_instance(); + } + static inline const MemAllocatorStats* internal_default_instance() { + return reinterpret_cast( + &_MemAllocatorStats_default_instance_); + } + static constexpr int kIndexInFileMessages = + 0; + + friend void swap(MemAllocatorStats& a, MemAllocatorStats& b) { + a.Swap(&b); + } + inline void Swap(MemAllocatorStats* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(MemAllocatorStats* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + 
MemAllocatorStats* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const MemAllocatorStats& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const MemAllocatorStats& from) { + MemAllocatorStats::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(MemAllocatorStats* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.MemAllocatorStats"; + } + protected: + explicit MemAllocatorStats(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kNumAllocsFieldNumber = 1, + kBytesInUseFieldNumber = 2, + kPeakBytesInUseFieldNumber = 3, + kLargestAllocSizeFieldNumber = 4, + kFragmentationMetricFieldNumber = 5, + }; + // int64 
num_allocs = 1; + void clear_num_allocs(); + int64_t num_allocs() const; + void set_num_allocs(int64_t value); + private: + int64_t _internal_num_allocs() const; + void _internal_set_num_allocs(int64_t value); + public: + + // int64 bytes_in_use = 2; + void clear_bytes_in_use(); + int64_t bytes_in_use() const; + void set_bytes_in_use(int64_t value); + private: + int64_t _internal_bytes_in_use() const; + void _internal_set_bytes_in_use(int64_t value); + public: + + // int64 peak_bytes_in_use = 3; + void clear_peak_bytes_in_use(); + int64_t peak_bytes_in_use() const; + void set_peak_bytes_in_use(int64_t value); + private: + int64_t _internal_peak_bytes_in_use() const; + void _internal_set_peak_bytes_in_use(int64_t value); + public: + + // int64 largest_alloc_size = 4; + void clear_largest_alloc_size(); + int64_t largest_alloc_size() const; + void set_largest_alloc_size(int64_t value); + private: + int64_t _internal_largest_alloc_size() const; + void _internal_set_largest_alloc_size(int64_t value); + public: + + // float fragmentation_metric = 5; + void clear_fragmentation_metric(); + float fragmentation_metric() const; + void set_fragmentation_metric(float value); + private: + float _internal_fragmentation_metric() const; + void _internal_set_fragmentation_metric(float value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.MemAllocatorStats) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + int64_t num_allocs_; + int64_t bytes_in_use_; + int64_t peak_bytes_in_use_; + int64_t largest_alloc_size_; + float fragmentation_metric_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fbfc_5fmemory_5fmap_2eproto; +}; +// ------------------------------------------------------------------- + +class MemChunk final 
: + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.MemChunk) */ { + public: + inline MemChunk() : MemChunk(nullptr) {} + ~MemChunk() override; + explicit PROTOBUF_CONSTEXPR MemChunk(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + MemChunk(const MemChunk& from); + MemChunk(MemChunk&& from) noexcept + : MemChunk() { + *this = ::std::move(from); + } + + inline MemChunk& operator=(const MemChunk& from) { + CopyFrom(from); + return *this; + } + inline MemChunk& operator=(MemChunk&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const MemChunk& default_instance() { + return *internal_default_instance(); + } + static inline const MemChunk* internal_default_instance() { + return reinterpret_cast( + &_MemChunk_default_instance_); + } + static constexpr int kIndexInFileMessages = + 1; + + friend void swap(MemChunk& a, MemChunk& b) { + a.Swap(&b); + } + inline void Swap(MemChunk* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(MemChunk* other) { + if 
(other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + MemChunk* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const MemChunk& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const MemChunk& from) { + MemChunk::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(MemChunk* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.MemChunk"; + } + protected: + explicit MemChunk(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kOpNameFieldNumber = 5, + kAddressFieldNumber = 1, + 
kSizeFieldNumber = 2, + kRequestedSizeFieldNumber = 3, + kFreedAtCountFieldNumber = 6, + kBinFieldNumber = 4, + kInUseFieldNumber = 8, + kActionCountFieldNumber = 7, + kStepIdFieldNumber = 9, + }; + // string op_name = 5; + void clear_op_name(); + const std::string& op_name() const; + template + void set_op_name(ArgT0&& arg0, ArgT... args); + std::string* mutable_op_name(); + PROTOBUF_NODISCARD std::string* release_op_name(); + void set_allocated_op_name(std::string* op_name); + private: + const std::string& _internal_op_name() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_op_name(const std::string& value); + std::string* _internal_mutable_op_name(); + public: + + // uint64 address = 1; + void clear_address(); + uint64_t address() const; + void set_address(uint64_t value); + private: + uint64_t _internal_address() const; + void _internal_set_address(uint64_t value); + public: + + // int64 size = 2; + void clear_size(); + int64_t size() const; + void set_size(int64_t value); + private: + int64_t _internal_size() const; + void _internal_set_size(int64_t value); + public: + + // int64 requested_size = 3; + void clear_requested_size(); + int64_t requested_size() const; + void set_requested_size(int64_t value); + private: + int64_t _internal_requested_size() const; + void _internal_set_requested_size(int64_t value); + public: + + // uint64 freed_at_count = 6; + void clear_freed_at_count(); + uint64_t freed_at_count() const; + void set_freed_at_count(uint64_t value); + private: + uint64_t _internal_freed_at_count() const; + void _internal_set_freed_at_count(uint64_t value); + public: + + // int32 bin = 4; + void clear_bin(); + int32_t bin() const; + void set_bin(int32_t value); + private: + int32_t _internal_bin() const; + void _internal_set_bin(int32_t value); + public: + + // bool in_use = 8; + void clear_in_use(); + bool in_use() const; + void set_in_use(bool value); + private: + bool _internal_in_use() const; + void _internal_set_in_use(bool value); + 
public: + + // uint64 action_count = 7; + void clear_action_count(); + uint64_t action_count() const; + void set_action_count(uint64_t value); + private: + uint64_t _internal_action_count() const; + void _internal_set_action_count(uint64_t value); + public: + + // uint64 step_id = 9; + void clear_step_id(); + uint64_t step_id() const; + void set_step_id(uint64_t value); + private: + uint64_t _internal_step_id() const; + void _internal_set_step_id(uint64_t value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.MemChunk) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr op_name_; + uint64_t address_; + int64_t size_; + int64_t requested_size_; + uint64_t freed_at_count_; + int32_t bin_; + bool in_use_; + uint64_t action_count_; + uint64_t step_id_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fbfc_5fmemory_5fmap_2eproto; +}; +// ------------------------------------------------------------------- + +class BinSummary final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.BinSummary) */ { + public: + inline BinSummary() : BinSummary(nullptr) {} + ~BinSummary() override; + explicit PROTOBUF_CONSTEXPR BinSummary(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + BinSummary(const BinSummary& from); + BinSummary(BinSummary&& from) noexcept + : BinSummary() { + *this = ::std::move(from); + } + + inline BinSummary& operator=(const BinSummary& from) { + CopyFrom(from); + return *this; + } + inline BinSummary& operator=(BinSummary&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != 
nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const BinSummary& default_instance() { + return *internal_default_instance(); + } + static inline const BinSummary* internal_default_instance() { + return reinterpret_cast( + &_BinSummary_default_instance_); + } + static constexpr int kIndexInFileMessages = + 2; + + friend void swap(BinSummary& a, BinSummary& b) { + a.Swap(&b); + } + inline void Swap(BinSummary* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(BinSummary* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + BinSummary* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const BinSummary& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const BinSummary& from) { + BinSummary::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + 
PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(BinSummary* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.BinSummary"; + } + protected: + explicit BinSummary(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kTotalBytesInUseFieldNumber = 2, + kTotalBytesInBinFieldNumber = 3, + kTotalChunksInUseFieldNumber = 4, + kTotalChunksInBinFieldNumber = 5, + kBinFieldNumber = 1, + }; + // int64 total_bytes_in_use = 2; + void clear_total_bytes_in_use(); + int64_t total_bytes_in_use() const; + void set_total_bytes_in_use(int64_t value); + private: + int64_t _internal_total_bytes_in_use() const; + void _internal_set_total_bytes_in_use(int64_t value); + public: + + // int64 total_bytes_in_bin = 3; + void clear_total_bytes_in_bin(); + int64_t total_bytes_in_bin() const; + void set_total_bytes_in_bin(int64_t value); + private: + int64_t _internal_total_bytes_in_bin() const; + void _internal_set_total_bytes_in_bin(int64_t value); + 
public: + + // int64 total_chunks_in_use = 4; + void clear_total_chunks_in_use(); + int64_t total_chunks_in_use() const; + void set_total_chunks_in_use(int64_t value); + private: + int64_t _internal_total_chunks_in_use() const; + void _internal_set_total_chunks_in_use(int64_t value); + public: + + // int64 total_chunks_in_bin = 5; + void clear_total_chunks_in_bin(); + int64_t total_chunks_in_bin() const; + void set_total_chunks_in_bin(int64_t value); + private: + int64_t _internal_total_chunks_in_bin() const; + void _internal_set_total_chunks_in_bin(int64_t value); + public: + + // int32 bin = 1; + void clear_bin(); + int32_t bin() const; + void set_bin(int32_t value); + private: + int32_t _internal_bin() const; + void _internal_set_bin(int32_t value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.BinSummary) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + int64_t total_bytes_in_use_; + int64_t total_bytes_in_bin_; + int64_t total_chunks_in_use_; + int64_t total_chunks_in_bin_; + int32_t bin_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fbfc_5fmemory_5fmap_2eproto; +}; +// ------------------------------------------------------------------- + +class SnapShot final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.SnapShot) */ { + public: + inline SnapShot() : SnapShot(nullptr) {} + ~SnapShot() override; + explicit PROTOBUF_CONSTEXPR SnapShot(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + SnapShot(const SnapShot& from); + SnapShot(SnapShot&& from) noexcept + : SnapShot() { + *this = ::std::move(from); + } + + inline SnapShot& operator=(const SnapShot& from) { + CopyFrom(from); + return *this; + } + inline SnapShot& 
operator=(SnapShot&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const SnapShot& default_instance() { + return *internal_default_instance(); + } + static inline const SnapShot* internal_default_instance() { + return reinterpret_cast( + &_SnapShot_default_instance_); + } + static constexpr int kIndexInFileMessages = + 3; + + friend void swap(SnapShot& a, SnapShot& b) { + a.Swap(&b); + } + inline void Swap(SnapShot* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(SnapShot* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + SnapShot* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const SnapShot& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const SnapShot& from) { + SnapShot::MergeImpl(*this, 
from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(SnapShot* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.SnapShot"; + } + protected: + explicit SnapShot(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kActionCountFieldNumber = 1, + kSizeFieldNumber = 2, + }; + // uint64 action_count = 1; + void clear_action_count(); + uint64_t action_count() const; + void set_action_count(uint64_t value); + private: + uint64_t _internal_action_count() const; + void _internal_set_action_count(uint64_t value); + public: + + // int64 size = 2; + void clear_size(); + int64_t size() const; + void set_size(int64_t value); + private: + int64_t _internal_size() const; + void _internal_set_size(int64_t value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.SnapShot) + 
private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + uint64_t action_count_; + int64_t size_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fbfc_5fmemory_5fmap_2eproto; +}; +// ------------------------------------------------------------------- + +class MemoryDump final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.MemoryDump) */ { + public: + inline MemoryDump() : MemoryDump(nullptr) {} + ~MemoryDump() override; + explicit PROTOBUF_CONSTEXPR MemoryDump(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + MemoryDump(const MemoryDump& from); + MemoryDump(MemoryDump&& from) noexcept + : MemoryDump() { + *this = ::std::move(from); + } + + inline MemoryDump& operator=(const MemoryDump& from) { + CopyFrom(from); + return *this; + } + inline MemoryDump& operator=(MemoryDump&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const MemoryDump& default_instance() { + return *internal_default_instance(); + } + static inline const MemoryDump* internal_default_instance() { + return reinterpret_cast( + &_MemoryDump_default_instance_); + } + static constexpr int 
kIndexInFileMessages = + 4; + + friend void swap(MemoryDump& a, MemoryDump& b) { + a.Swap(&b); + } + inline void Swap(MemoryDump* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(MemoryDump* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + MemoryDump* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const MemoryDump& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const MemoryDump& from) { + MemoryDump::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(MemoryDump* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static 
::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.MemoryDump"; + } + protected: + explicit MemoryDump(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kBinSummaryFieldNumber = 2, + kChunkFieldNumber = 3, + kSnapShotFieldNumber = 4, + kAllocatorNameFieldNumber = 1, + kStatsFieldNumber = 5, + }; + // repeated .tensorflow.BinSummary bin_summary = 2; + int bin_summary_size() const; + private: + int _internal_bin_summary_size() const; + public: + void clear_bin_summary(); + ::tensorflow::BinSummary* mutable_bin_summary(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::BinSummary >* + mutable_bin_summary(); + private: + const ::tensorflow::BinSummary& _internal_bin_summary(int index) const; + ::tensorflow::BinSummary* _internal_add_bin_summary(); + public: + const ::tensorflow::BinSummary& bin_summary(int index) const; + ::tensorflow::BinSummary* add_bin_summary(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::BinSummary >& + bin_summary() const; + + // repeated .tensorflow.MemChunk chunk = 3; + int chunk_size() const; + private: + int _internal_chunk_size() const; + public: + void clear_chunk(); + ::tensorflow::MemChunk* mutable_chunk(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::MemChunk >* + mutable_chunk(); + private: + const ::tensorflow::MemChunk& _internal_chunk(int index) const; + ::tensorflow::MemChunk* _internal_add_chunk(); + public: + const ::tensorflow::MemChunk& chunk(int index) const; + ::tensorflow::MemChunk* add_chunk(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< 
::tensorflow::MemChunk >& + chunk() const; + + // repeated .tensorflow.SnapShot snap_shot = 4; + int snap_shot_size() const; + private: + int _internal_snap_shot_size() const; + public: + void clear_snap_shot(); + ::tensorflow::SnapShot* mutable_snap_shot(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::SnapShot >* + mutable_snap_shot(); + private: + const ::tensorflow::SnapShot& _internal_snap_shot(int index) const; + ::tensorflow::SnapShot* _internal_add_snap_shot(); + public: + const ::tensorflow::SnapShot& snap_shot(int index) const; + ::tensorflow::SnapShot* add_snap_shot(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::SnapShot >& + snap_shot() const; + + // string allocator_name = 1; + void clear_allocator_name(); + const std::string& allocator_name() const; + template + void set_allocator_name(ArgT0&& arg0, ArgT... args); + std::string* mutable_allocator_name(); + PROTOBUF_NODISCARD std::string* release_allocator_name(); + void set_allocated_allocator_name(std::string* allocator_name); + private: + const std::string& _internal_allocator_name() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_allocator_name(const std::string& value); + std::string* _internal_mutable_allocator_name(); + public: + + // .tensorflow.MemAllocatorStats stats = 5; + bool has_stats() const; + private: + bool _internal_has_stats() const; + public: + void clear_stats(); + const ::tensorflow::MemAllocatorStats& stats() const; + PROTOBUF_NODISCARD ::tensorflow::MemAllocatorStats* release_stats(); + ::tensorflow::MemAllocatorStats* mutable_stats(); + void set_allocated_stats(::tensorflow::MemAllocatorStats* stats); + private: + const ::tensorflow::MemAllocatorStats& _internal_stats() const; + ::tensorflow::MemAllocatorStats* _internal_mutable_stats(); + public: + void unsafe_arena_set_allocated_stats( + ::tensorflow::MemAllocatorStats* stats); + ::tensorflow::MemAllocatorStats* unsafe_arena_release_stats(); + + // 
@@protoc_insertion_point(class_scope:tensorflow.MemoryDump) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::BinSummary > bin_summary_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::MemChunk > chunk_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::SnapShot > snap_shot_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr allocator_name_; + ::tensorflow::MemAllocatorStats* stats_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fbfc_5fmemory_5fmap_2eproto; +}; +// =================================================================== + + +// =================================================================== + +#ifdef __GNUC__ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif // __GNUC__ +// MemAllocatorStats + +// int64 num_allocs = 1; +inline void MemAllocatorStats::clear_num_allocs() { + _impl_.num_allocs_ = int64_t{0}; +} +inline int64_t MemAllocatorStats::_internal_num_allocs() const { + return _impl_.num_allocs_; +} +inline int64_t MemAllocatorStats::num_allocs() const { + // @@protoc_insertion_point(field_get:tensorflow.MemAllocatorStats.num_allocs) + return _internal_num_allocs(); +} +inline void MemAllocatorStats::_internal_set_num_allocs(int64_t value) { + + _impl_.num_allocs_ = value; +} +inline void MemAllocatorStats::set_num_allocs(int64_t value) { + _internal_set_num_allocs(value); + // @@protoc_insertion_point(field_set:tensorflow.MemAllocatorStats.num_allocs) +} + +// int64 bytes_in_use = 2; +inline void MemAllocatorStats::clear_bytes_in_use() { + _impl_.bytes_in_use_ = int64_t{0}; +} +inline int64_t MemAllocatorStats::_internal_bytes_in_use() const { + return _impl_.bytes_in_use_; 
+} +inline int64_t MemAllocatorStats::bytes_in_use() const { + // @@protoc_insertion_point(field_get:tensorflow.MemAllocatorStats.bytes_in_use) + return _internal_bytes_in_use(); +} +inline void MemAllocatorStats::_internal_set_bytes_in_use(int64_t value) { + + _impl_.bytes_in_use_ = value; +} +inline void MemAllocatorStats::set_bytes_in_use(int64_t value) { + _internal_set_bytes_in_use(value); + // @@protoc_insertion_point(field_set:tensorflow.MemAllocatorStats.bytes_in_use) +} + +// int64 peak_bytes_in_use = 3; +inline void MemAllocatorStats::clear_peak_bytes_in_use() { + _impl_.peak_bytes_in_use_ = int64_t{0}; +} +inline int64_t MemAllocatorStats::_internal_peak_bytes_in_use() const { + return _impl_.peak_bytes_in_use_; +} +inline int64_t MemAllocatorStats::peak_bytes_in_use() const { + // @@protoc_insertion_point(field_get:tensorflow.MemAllocatorStats.peak_bytes_in_use) + return _internal_peak_bytes_in_use(); +} +inline void MemAllocatorStats::_internal_set_peak_bytes_in_use(int64_t value) { + + _impl_.peak_bytes_in_use_ = value; +} +inline void MemAllocatorStats::set_peak_bytes_in_use(int64_t value) { + _internal_set_peak_bytes_in_use(value); + // @@protoc_insertion_point(field_set:tensorflow.MemAllocatorStats.peak_bytes_in_use) +} + +// int64 largest_alloc_size = 4; +inline void MemAllocatorStats::clear_largest_alloc_size() { + _impl_.largest_alloc_size_ = int64_t{0}; +} +inline int64_t MemAllocatorStats::_internal_largest_alloc_size() const { + return _impl_.largest_alloc_size_; +} +inline int64_t MemAllocatorStats::largest_alloc_size() const { + // @@protoc_insertion_point(field_get:tensorflow.MemAllocatorStats.largest_alloc_size) + return _internal_largest_alloc_size(); +} +inline void MemAllocatorStats::_internal_set_largest_alloc_size(int64_t value) { + + _impl_.largest_alloc_size_ = value; +} +inline void MemAllocatorStats::set_largest_alloc_size(int64_t value) { + _internal_set_largest_alloc_size(value); + // 
@@protoc_insertion_point(field_set:tensorflow.MemAllocatorStats.largest_alloc_size) +} + +// float fragmentation_metric = 5; +inline void MemAllocatorStats::clear_fragmentation_metric() { + _impl_.fragmentation_metric_ = 0; +} +inline float MemAllocatorStats::_internal_fragmentation_metric() const { + return _impl_.fragmentation_metric_; +} +inline float MemAllocatorStats::fragmentation_metric() const { + // @@protoc_insertion_point(field_get:tensorflow.MemAllocatorStats.fragmentation_metric) + return _internal_fragmentation_metric(); +} +inline void MemAllocatorStats::_internal_set_fragmentation_metric(float value) { + + _impl_.fragmentation_metric_ = value; +} +inline void MemAllocatorStats::set_fragmentation_metric(float value) { + _internal_set_fragmentation_metric(value); + // @@protoc_insertion_point(field_set:tensorflow.MemAllocatorStats.fragmentation_metric) +} + +// ------------------------------------------------------------------- + +// MemChunk + +// uint64 address = 1; +inline void MemChunk::clear_address() { + _impl_.address_ = uint64_t{0u}; +} +inline uint64_t MemChunk::_internal_address() const { + return _impl_.address_; +} +inline uint64_t MemChunk::address() const { + // @@protoc_insertion_point(field_get:tensorflow.MemChunk.address) + return _internal_address(); +} +inline void MemChunk::_internal_set_address(uint64_t value) { + + _impl_.address_ = value; +} +inline void MemChunk::set_address(uint64_t value) { + _internal_set_address(value); + // @@protoc_insertion_point(field_set:tensorflow.MemChunk.address) +} + +// int64 size = 2; +inline void MemChunk::clear_size() { + _impl_.size_ = int64_t{0}; +} +inline int64_t MemChunk::_internal_size() const { + return _impl_.size_; +} +inline int64_t MemChunk::size() const { + // @@protoc_insertion_point(field_get:tensorflow.MemChunk.size) + return _internal_size(); +} +inline void MemChunk::_internal_set_size(int64_t value) { + + _impl_.size_ = value; +} +inline void MemChunk::set_size(int64_t value) 
{ + _internal_set_size(value); + // @@protoc_insertion_point(field_set:tensorflow.MemChunk.size) +} + +// int64 requested_size = 3; +inline void MemChunk::clear_requested_size() { + _impl_.requested_size_ = int64_t{0}; +} +inline int64_t MemChunk::_internal_requested_size() const { + return _impl_.requested_size_; +} +inline int64_t MemChunk::requested_size() const { + // @@protoc_insertion_point(field_get:tensorflow.MemChunk.requested_size) + return _internal_requested_size(); +} +inline void MemChunk::_internal_set_requested_size(int64_t value) { + + _impl_.requested_size_ = value; +} +inline void MemChunk::set_requested_size(int64_t value) { + _internal_set_requested_size(value); + // @@protoc_insertion_point(field_set:tensorflow.MemChunk.requested_size) +} + +// int32 bin = 4; +inline void MemChunk::clear_bin() { + _impl_.bin_ = 0; +} +inline int32_t MemChunk::_internal_bin() const { + return _impl_.bin_; +} +inline int32_t MemChunk::bin() const { + // @@protoc_insertion_point(field_get:tensorflow.MemChunk.bin) + return _internal_bin(); +} +inline void MemChunk::_internal_set_bin(int32_t value) { + + _impl_.bin_ = value; +} +inline void MemChunk::set_bin(int32_t value) { + _internal_set_bin(value); + // @@protoc_insertion_point(field_set:tensorflow.MemChunk.bin) +} + +// string op_name = 5; +inline void MemChunk::clear_op_name() { + _impl_.op_name_.ClearToEmpty(); +} +inline const std::string& MemChunk::op_name() const { + // @@protoc_insertion_point(field_get:tensorflow.MemChunk.op_name) + return _internal_op_name(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void MemChunk::set_op_name(ArgT0&& arg0, ArgT... 
args) { + + _impl_.op_name_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.MemChunk.op_name) +} +inline std::string* MemChunk::mutable_op_name() { + std::string* _s = _internal_mutable_op_name(); + // @@protoc_insertion_point(field_mutable:tensorflow.MemChunk.op_name) + return _s; +} +inline const std::string& MemChunk::_internal_op_name() const { + return _impl_.op_name_.Get(); +} +inline void MemChunk::_internal_set_op_name(const std::string& value) { + + _impl_.op_name_.Set(value, GetArenaForAllocation()); +} +inline std::string* MemChunk::_internal_mutable_op_name() { + + return _impl_.op_name_.Mutable(GetArenaForAllocation()); +} +inline std::string* MemChunk::release_op_name() { + // @@protoc_insertion_point(field_release:tensorflow.MemChunk.op_name) + return _impl_.op_name_.Release(); +} +inline void MemChunk::set_allocated_op_name(std::string* op_name) { + if (op_name != nullptr) { + + } else { + + } + _impl_.op_name_.SetAllocated(op_name, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.op_name_.IsDefault()) { + _impl_.op_name_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.MemChunk.op_name) +} + +// uint64 freed_at_count = 6; +inline void MemChunk::clear_freed_at_count() { + _impl_.freed_at_count_ = uint64_t{0u}; +} +inline uint64_t MemChunk::_internal_freed_at_count() const { + return _impl_.freed_at_count_; +} +inline uint64_t MemChunk::freed_at_count() const { + // @@protoc_insertion_point(field_get:tensorflow.MemChunk.freed_at_count) + return _internal_freed_at_count(); +} +inline void MemChunk::_internal_set_freed_at_count(uint64_t value) { + + _impl_.freed_at_count_ = value; +} +inline void MemChunk::set_freed_at_count(uint64_t value) { + _internal_set_freed_at_count(value); + // @@protoc_insertion_point(field_set:tensorflow.MemChunk.freed_at_count) +} + +// 
uint64 action_count = 7; +inline void MemChunk::clear_action_count() { + _impl_.action_count_ = uint64_t{0u}; +} +inline uint64_t MemChunk::_internal_action_count() const { + return _impl_.action_count_; +} +inline uint64_t MemChunk::action_count() const { + // @@protoc_insertion_point(field_get:tensorflow.MemChunk.action_count) + return _internal_action_count(); +} +inline void MemChunk::_internal_set_action_count(uint64_t value) { + + _impl_.action_count_ = value; +} +inline void MemChunk::set_action_count(uint64_t value) { + _internal_set_action_count(value); + // @@protoc_insertion_point(field_set:tensorflow.MemChunk.action_count) +} + +// bool in_use = 8; +inline void MemChunk::clear_in_use() { + _impl_.in_use_ = false; +} +inline bool MemChunk::_internal_in_use() const { + return _impl_.in_use_; +} +inline bool MemChunk::in_use() const { + // @@protoc_insertion_point(field_get:tensorflow.MemChunk.in_use) + return _internal_in_use(); +} +inline void MemChunk::_internal_set_in_use(bool value) { + + _impl_.in_use_ = value; +} +inline void MemChunk::set_in_use(bool value) { + _internal_set_in_use(value); + // @@protoc_insertion_point(field_set:tensorflow.MemChunk.in_use) +} + +// uint64 step_id = 9; +inline void MemChunk::clear_step_id() { + _impl_.step_id_ = uint64_t{0u}; +} +inline uint64_t MemChunk::_internal_step_id() const { + return _impl_.step_id_; +} +inline uint64_t MemChunk::step_id() const { + // @@protoc_insertion_point(field_get:tensorflow.MemChunk.step_id) + return _internal_step_id(); +} +inline void MemChunk::_internal_set_step_id(uint64_t value) { + + _impl_.step_id_ = value; +} +inline void MemChunk::set_step_id(uint64_t value) { + _internal_set_step_id(value); + // @@protoc_insertion_point(field_set:tensorflow.MemChunk.step_id) +} + +// ------------------------------------------------------------------- + +// BinSummary + +// int32 bin = 1; +inline void BinSummary::clear_bin() { + _impl_.bin_ = 0; +} +inline int32_t BinSummary::_internal_bin() 
const { + return _impl_.bin_; +} +inline int32_t BinSummary::bin() const { + // @@protoc_insertion_point(field_get:tensorflow.BinSummary.bin) + return _internal_bin(); +} +inline void BinSummary::_internal_set_bin(int32_t value) { + + _impl_.bin_ = value; +} +inline void BinSummary::set_bin(int32_t value) { + _internal_set_bin(value); + // @@protoc_insertion_point(field_set:tensorflow.BinSummary.bin) +} + +// int64 total_bytes_in_use = 2; +inline void BinSummary::clear_total_bytes_in_use() { + _impl_.total_bytes_in_use_ = int64_t{0}; +} +inline int64_t BinSummary::_internal_total_bytes_in_use() const { + return _impl_.total_bytes_in_use_; +} +inline int64_t BinSummary::total_bytes_in_use() const { + // @@protoc_insertion_point(field_get:tensorflow.BinSummary.total_bytes_in_use) + return _internal_total_bytes_in_use(); +} +inline void BinSummary::_internal_set_total_bytes_in_use(int64_t value) { + + _impl_.total_bytes_in_use_ = value; +} +inline void BinSummary::set_total_bytes_in_use(int64_t value) { + _internal_set_total_bytes_in_use(value); + // @@protoc_insertion_point(field_set:tensorflow.BinSummary.total_bytes_in_use) +} + +// int64 total_bytes_in_bin = 3; +inline void BinSummary::clear_total_bytes_in_bin() { + _impl_.total_bytes_in_bin_ = int64_t{0}; +} +inline int64_t BinSummary::_internal_total_bytes_in_bin() const { + return _impl_.total_bytes_in_bin_; +} +inline int64_t BinSummary::total_bytes_in_bin() const { + // @@protoc_insertion_point(field_get:tensorflow.BinSummary.total_bytes_in_bin) + return _internal_total_bytes_in_bin(); +} +inline void BinSummary::_internal_set_total_bytes_in_bin(int64_t value) { + + _impl_.total_bytes_in_bin_ = value; +} +inline void BinSummary::set_total_bytes_in_bin(int64_t value) { + _internal_set_total_bytes_in_bin(value); + // @@protoc_insertion_point(field_set:tensorflow.BinSummary.total_bytes_in_bin) +} + +// int64 total_chunks_in_use = 4; +inline void BinSummary::clear_total_chunks_in_use() { + 
_impl_.total_chunks_in_use_ = int64_t{0}; +} +inline int64_t BinSummary::_internal_total_chunks_in_use() const { + return _impl_.total_chunks_in_use_; +} +inline int64_t BinSummary::total_chunks_in_use() const { + // @@protoc_insertion_point(field_get:tensorflow.BinSummary.total_chunks_in_use) + return _internal_total_chunks_in_use(); +} +inline void BinSummary::_internal_set_total_chunks_in_use(int64_t value) { + + _impl_.total_chunks_in_use_ = value; +} +inline void BinSummary::set_total_chunks_in_use(int64_t value) { + _internal_set_total_chunks_in_use(value); + // @@protoc_insertion_point(field_set:tensorflow.BinSummary.total_chunks_in_use) +} + +// int64 total_chunks_in_bin = 5; +inline void BinSummary::clear_total_chunks_in_bin() { + _impl_.total_chunks_in_bin_ = int64_t{0}; +} +inline int64_t BinSummary::_internal_total_chunks_in_bin() const { + return _impl_.total_chunks_in_bin_; +} +inline int64_t BinSummary::total_chunks_in_bin() const { + // @@protoc_insertion_point(field_get:tensorflow.BinSummary.total_chunks_in_bin) + return _internal_total_chunks_in_bin(); +} +inline void BinSummary::_internal_set_total_chunks_in_bin(int64_t value) { + + _impl_.total_chunks_in_bin_ = value; +} +inline void BinSummary::set_total_chunks_in_bin(int64_t value) { + _internal_set_total_chunks_in_bin(value); + // @@protoc_insertion_point(field_set:tensorflow.BinSummary.total_chunks_in_bin) +} + +// ------------------------------------------------------------------- + +// SnapShot + +// uint64 action_count = 1; +inline void SnapShot::clear_action_count() { + _impl_.action_count_ = uint64_t{0u}; +} +inline uint64_t SnapShot::_internal_action_count() const { + return _impl_.action_count_; +} +inline uint64_t SnapShot::action_count() const { + // @@protoc_insertion_point(field_get:tensorflow.SnapShot.action_count) + return _internal_action_count(); +} +inline void SnapShot::_internal_set_action_count(uint64_t value) { + + _impl_.action_count_ = value; +} +inline void 
SnapShot::set_action_count(uint64_t value) { + _internal_set_action_count(value); + // @@protoc_insertion_point(field_set:tensorflow.SnapShot.action_count) +} + +// int64 size = 2; +inline void SnapShot::clear_size() { + _impl_.size_ = int64_t{0}; +} +inline int64_t SnapShot::_internal_size() const { + return _impl_.size_; +} +inline int64_t SnapShot::size() const { + // @@protoc_insertion_point(field_get:tensorflow.SnapShot.size) + return _internal_size(); +} +inline void SnapShot::_internal_set_size(int64_t value) { + + _impl_.size_ = value; +} +inline void SnapShot::set_size(int64_t value) { + _internal_set_size(value); + // @@protoc_insertion_point(field_set:tensorflow.SnapShot.size) +} + +// ------------------------------------------------------------------- + +// MemoryDump + +// string allocator_name = 1; +inline void MemoryDump::clear_allocator_name() { + _impl_.allocator_name_.ClearToEmpty(); +} +inline const std::string& MemoryDump::allocator_name() const { + // @@protoc_insertion_point(field_get:tensorflow.MemoryDump.allocator_name) + return _internal_allocator_name(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void MemoryDump::set_allocator_name(ArgT0&& arg0, ArgT... 
args) { + + _impl_.allocator_name_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.MemoryDump.allocator_name) +} +inline std::string* MemoryDump::mutable_allocator_name() { + std::string* _s = _internal_mutable_allocator_name(); + // @@protoc_insertion_point(field_mutable:tensorflow.MemoryDump.allocator_name) + return _s; +} +inline const std::string& MemoryDump::_internal_allocator_name() const { + return _impl_.allocator_name_.Get(); +} +inline void MemoryDump::_internal_set_allocator_name(const std::string& value) { + + _impl_.allocator_name_.Set(value, GetArenaForAllocation()); +} +inline std::string* MemoryDump::_internal_mutable_allocator_name() { + + return _impl_.allocator_name_.Mutable(GetArenaForAllocation()); +} +inline std::string* MemoryDump::release_allocator_name() { + // @@protoc_insertion_point(field_release:tensorflow.MemoryDump.allocator_name) + return _impl_.allocator_name_.Release(); +} +inline void MemoryDump::set_allocated_allocator_name(std::string* allocator_name) { + if (allocator_name != nullptr) { + + } else { + + } + _impl_.allocator_name_.SetAllocated(allocator_name, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.allocator_name_.IsDefault()) { + _impl_.allocator_name_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.MemoryDump.allocator_name) +} + +// repeated .tensorflow.BinSummary bin_summary = 2; +inline int MemoryDump::_internal_bin_summary_size() const { + return _impl_.bin_summary_.size(); +} +inline int MemoryDump::bin_summary_size() const { + return _internal_bin_summary_size(); +} +inline void MemoryDump::clear_bin_summary() { + _impl_.bin_summary_.Clear(); +} +inline ::tensorflow::BinSummary* MemoryDump::mutable_bin_summary(int index) { + // @@protoc_insertion_point(field_mutable:tensorflow.MemoryDump.bin_summary) + return 
_impl_.bin_summary_.Mutable(index); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::BinSummary >* +MemoryDump::mutable_bin_summary() { + // @@protoc_insertion_point(field_mutable_list:tensorflow.MemoryDump.bin_summary) + return &_impl_.bin_summary_; +} +inline const ::tensorflow::BinSummary& MemoryDump::_internal_bin_summary(int index) const { + return _impl_.bin_summary_.Get(index); +} +inline const ::tensorflow::BinSummary& MemoryDump::bin_summary(int index) const { + // @@protoc_insertion_point(field_get:tensorflow.MemoryDump.bin_summary) + return _internal_bin_summary(index); +} +inline ::tensorflow::BinSummary* MemoryDump::_internal_add_bin_summary() { + return _impl_.bin_summary_.Add(); +} +inline ::tensorflow::BinSummary* MemoryDump::add_bin_summary() { + ::tensorflow::BinSummary* _add = _internal_add_bin_summary(); + // @@protoc_insertion_point(field_add:tensorflow.MemoryDump.bin_summary) + return _add; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::BinSummary >& +MemoryDump::bin_summary() const { + // @@protoc_insertion_point(field_list:tensorflow.MemoryDump.bin_summary) + return _impl_.bin_summary_; +} + +// repeated .tensorflow.MemChunk chunk = 3; +inline int MemoryDump::_internal_chunk_size() const { + return _impl_.chunk_.size(); +} +inline int MemoryDump::chunk_size() const { + return _internal_chunk_size(); +} +inline void MemoryDump::clear_chunk() { + _impl_.chunk_.Clear(); +} +inline ::tensorflow::MemChunk* MemoryDump::mutable_chunk(int index) { + // @@protoc_insertion_point(field_mutable:tensorflow.MemoryDump.chunk) + return _impl_.chunk_.Mutable(index); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::MemChunk >* +MemoryDump::mutable_chunk() { + // @@protoc_insertion_point(field_mutable_list:tensorflow.MemoryDump.chunk) + return &_impl_.chunk_; +} +inline const ::tensorflow::MemChunk& MemoryDump::_internal_chunk(int index) const { + return _impl_.chunk_.Get(index); +} +inline const 
::tensorflow::MemChunk& MemoryDump::chunk(int index) const { + // @@protoc_insertion_point(field_get:tensorflow.MemoryDump.chunk) + return _internal_chunk(index); +} +inline ::tensorflow::MemChunk* MemoryDump::_internal_add_chunk() { + return _impl_.chunk_.Add(); +} +inline ::tensorflow::MemChunk* MemoryDump::add_chunk() { + ::tensorflow::MemChunk* _add = _internal_add_chunk(); + // @@protoc_insertion_point(field_add:tensorflow.MemoryDump.chunk) + return _add; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::MemChunk >& +MemoryDump::chunk() const { + // @@protoc_insertion_point(field_list:tensorflow.MemoryDump.chunk) + return _impl_.chunk_; +} + +// repeated .tensorflow.SnapShot snap_shot = 4; +inline int MemoryDump::_internal_snap_shot_size() const { + return _impl_.snap_shot_.size(); +} +inline int MemoryDump::snap_shot_size() const { + return _internal_snap_shot_size(); +} +inline void MemoryDump::clear_snap_shot() { + _impl_.snap_shot_.Clear(); +} +inline ::tensorflow::SnapShot* MemoryDump::mutable_snap_shot(int index) { + // @@protoc_insertion_point(field_mutable:tensorflow.MemoryDump.snap_shot) + return _impl_.snap_shot_.Mutable(index); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::SnapShot >* +MemoryDump::mutable_snap_shot() { + // @@protoc_insertion_point(field_mutable_list:tensorflow.MemoryDump.snap_shot) + return &_impl_.snap_shot_; +} +inline const ::tensorflow::SnapShot& MemoryDump::_internal_snap_shot(int index) const { + return _impl_.snap_shot_.Get(index); +} +inline const ::tensorflow::SnapShot& MemoryDump::snap_shot(int index) const { + // @@protoc_insertion_point(field_get:tensorflow.MemoryDump.snap_shot) + return _internal_snap_shot(index); +} +inline ::tensorflow::SnapShot* MemoryDump::_internal_add_snap_shot() { + return _impl_.snap_shot_.Add(); +} +inline ::tensorflow::SnapShot* MemoryDump::add_snap_shot() { + ::tensorflow::SnapShot* _add = _internal_add_snap_shot(); + // 
@@protoc_insertion_point(field_add:tensorflow.MemoryDump.snap_shot) + return _add; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::SnapShot >& +MemoryDump::snap_shot() const { + // @@protoc_insertion_point(field_list:tensorflow.MemoryDump.snap_shot) + return _impl_.snap_shot_; +} + +// .tensorflow.MemAllocatorStats stats = 5; +inline bool MemoryDump::_internal_has_stats() const { + return this != internal_default_instance() && _impl_.stats_ != nullptr; +} +inline bool MemoryDump::has_stats() const { + return _internal_has_stats(); +} +inline void MemoryDump::clear_stats() { + if (GetArenaForAllocation() == nullptr && _impl_.stats_ != nullptr) { + delete _impl_.stats_; + } + _impl_.stats_ = nullptr; +} +inline const ::tensorflow::MemAllocatorStats& MemoryDump::_internal_stats() const { + const ::tensorflow::MemAllocatorStats* p = _impl_.stats_; + return p != nullptr ? *p : reinterpret_cast( + ::tensorflow::_MemAllocatorStats_default_instance_); +} +inline const ::tensorflow::MemAllocatorStats& MemoryDump::stats() const { + // @@protoc_insertion_point(field_get:tensorflow.MemoryDump.stats) + return _internal_stats(); +} +inline void MemoryDump::unsafe_arena_set_allocated_stats( + ::tensorflow::MemAllocatorStats* stats) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.stats_); + } + _impl_.stats_ = stats; + if (stats) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.MemoryDump.stats) +} +inline ::tensorflow::MemAllocatorStats* MemoryDump::release_stats() { + + ::tensorflow::MemAllocatorStats* temp = _impl_.stats_; + _impl_.stats_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if 
(GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::MemAllocatorStats* MemoryDump::unsafe_arena_release_stats() { + // @@protoc_insertion_point(field_release:tensorflow.MemoryDump.stats) + + ::tensorflow::MemAllocatorStats* temp = _impl_.stats_; + _impl_.stats_ = nullptr; + return temp; +} +inline ::tensorflow::MemAllocatorStats* MemoryDump::_internal_mutable_stats() { + + if (_impl_.stats_ == nullptr) { + auto* p = CreateMaybeMessage<::tensorflow::MemAllocatorStats>(GetArenaForAllocation()); + _impl_.stats_ = p; + } + return _impl_.stats_; +} +inline ::tensorflow::MemAllocatorStats* MemoryDump::mutable_stats() { + ::tensorflow::MemAllocatorStats* _msg = _internal_mutable_stats(); + // @@protoc_insertion_point(field_mutable:tensorflow.MemoryDump.stats) + return _msg; +} +inline void MemoryDump::set_allocated_stats(::tensorflow::MemAllocatorStats* stats) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.stats_; + } + if (stats) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(stats); + if (message_arena != submessage_arena) { + stats = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, stats, submessage_arena); + } + + } else { + + } + _impl_.stats_ = stats; + // @@protoc_insertion_point(field_set_allocated:tensorflow.MemoryDump.stats) +} + +#ifdef __GNUC__ + #pragma GCC diagnostic pop +#endif // __GNUC__ +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + + +// @@protoc_insertion_point(namespace_scope) + +} // namespace 
tensorflow + +// @@protoc_insertion_point(global_scope) + +#include +#endif // GOOGLE_PROTOBUF_INCLUDED_GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2fbfc_5fmemory_5fmap_2eproto diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/coordination_config.pb.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/coordination_config.pb.h new file mode 100644 index 0000000000000000000000000000000000000000..f9986fd4edc4204029a59980ffe824094621be06 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/coordination_config.pb.h @@ -0,0 +1,970 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tsl/protobuf/coordination_config.proto + +#ifndef GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2fcoordination_5fconfig_2eproto +#define GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2fcoordination_5fconfig_2eproto + +#include +#include + +#include +#if PROTOBUF_VERSION < 3021000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. +#endif +#if 3021009 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#include +// @@protoc_insertion_point(includes) +#include +#define PROTOBUF_INTERNAL_EXPORT_tsl_2fprotobuf_2fcoordination_5fconfig_2eproto +PROTOBUF_NAMESPACE_OPEN +namespace internal { +class AnyMetadata; +} // namespace internal +PROTOBUF_NAMESPACE_CLOSE + +// Internal implementation detail -- do not use these members. 
+struct TableStruct_tsl_2fprotobuf_2fcoordination_5fconfig_2eproto { + static const uint32_t offsets[]; +}; +extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_tsl_2fprotobuf_2fcoordination_5fconfig_2eproto; +namespace tensorflow { +class CoordinatedJob; +struct CoordinatedJobDefaultTypeInternal; +extern CoordinatedJobDefaultTypeInternal _CoordinatedJob_default_instance_; +class CoordinationServiceConfig; +struct CoordinationServiceConfigDefaultTypeInternal; +extern CoordinationServiceConfigDefaultTypeInternal _CoordinationServiceConfig_default_instance_; +} // namespace tensorflow +PROTOBUF_NAMESPACE_OPEN +template<> ::tensorflow::CoordinatedJob* Arena::CreateMaybeMessage<::tensorflow::CoordinatedJob>(Arena*); +template<> ::tensorflow::CoordinationServiceConfig* Arena::CreateMaybeMessage<::tensorflow::CoordinationServiceConfig>(Arena*); +PROTOBUF_NAMESPACE_CLOSE +namespace tensorflow { + +// =================================================================== + +class CoordinatedJob final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.CoordinatedJob) */ { + public: + inline CoordinatedJob() : CoordinatedJob(nullptr) {} + ~CoordinatedJob() override; + explicit PROTOBUF_CONSTEXPR CoordinatedJob(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + CoordinatedJob(const CoordinatedJob& from); + CoordinatedJob(CoordinatedJob&& from) noexcept + : CoordinatedJob() { + *this = ::std::move(from); + } + + inline CoordinatedJob& operator=(const CoordinatedJob& from) { + CopyFrom(from); + return *this; + } + inline CoordinatedJob& operator=(CoordinatedJob&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const 
::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const CoordinatedJob& default_instance() { + return *internal_default_instance(); + } + static inline const CoordinatedJob* internal_default_instance() { + return reinterpret_cast( + &_CoordinatedJob_default_instance_); + } + static constexpr int kIndexInFileMessages = + 0; + + friend void swap(CoordinatedJob& a, CoordinatedJob& b) { + a.Swap(&b); + } + inline void Swap(CoordinatedJob* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(CoordinatedJob* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + CoordinatedJob* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const CoordinatedJob& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const CoordinatedJob& from) { + CoordinatedJob::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() 
const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(CoordinatedJob* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.CoordinatedJob"; + } + protected: + explicit CoordinatedJob(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kNameFieldNumber = 1, + kNumTasksFieldNumber = 2, + }; + // string name = 1; + void clear_name(); + const std::string& name() const; + template + void set_name(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_name(); + PROTOBUF_NODISCARD std::string* release_name(); + void set_allocated_name(std::string* name); + private: + const std::string& _internal_name() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_name(const std::string& value); + std::string* _internal_mutable_name(); + public: + + // int32 num_tasks = 2; + void clear_num_tasks(); + int32_t num_tasks() const; + void set_num_tasks(int32_t value); + private: + int32_t _internal_num_tasks() const; + void _internal_set_num_tasks(int32_t value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.CoordinatedJob) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr name_; + int32_t num_tasks_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fconfig_2eproto; +}; +// ------------------------------------------------------------------- + +class CoordinationServiceConfig final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.CoordinationServiceConfig) */ { + public: + inline CoordinationServiceConfig() : CoordinationServiceConfig(nullptr) {} + ~CoordinationServiceConfig() override; + explicit PROTOBUF_CONSTEXPR CoordinationServiceConfig(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + CoordinationServiceConfig(const CoordinationServiceConfig& from); + CoordinationServiceConfig(CoordinationServiceConfig&& from) noexcept + : CoordinationServiceConfig() { + *this = ::std::move(from); + } + + inline CoordinationServiceConfig& operator=(const CoordinationServiceConfig& from) { + CopyFrom(from); + return *this; + } + inline CoordinationServiceConfig& operator=(CoordinationServiceConfig&& from) noexcept 
{ + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const CoordinationServiceConfig& default_instance() { + return *internal_default_instance(); + } + static inline const CoordinationServiceConfig* internal_default_instance() { + return reinterpret_cast( + &_CoordinationServiceConfig_default_instance_); + } + static constexpr int kIndexInFileMessages = + 1; + + friend void swap(CoordinationServiceConfig& a, CoordinationServiceConfig& b) { + a.Swap(&b); + } + inline void Swap(CoordinationServiceConfig* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(CoordinationServiceConfig* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + CoordinationServiceConfig* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const CoordinationServiceConfig& from); + using 
::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const CoordinationServiceConfig& from) { + CoordinationServiceConfig::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(CoordinationServiceConfig* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.CoordinationServiceConfig"; + } + protected: + explicit CoordinationServiceConfig(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kRecoverableJobsFieldNumber = 9, + kCoordinatedJobListFieldNumber = 10, + kServiceTypeFieldNumber = 1, + kServiceLeaderFieldNumber = 2, + kClusterRegisterTimeoutInMsFieldNumber = 4, + kHeartbeatTimeoutInMsFieldNumber = 5, + kShutdownBarrierTimeoutInMsFieldNumber = 7, + kEnableHealthCheckFieldNumber = 3, + kAgentDestructionWithoutShutdownFieldNumber = 8, + 
kAllowNewIncarnationToReconnectFieldNumber = 11, + kForceDisableFieldNumber = 12, + }; + // repeated string recoverable_jobs = 9; + int recoverable_jobs_size() const; + private: + int _internal_recoverable_jobs_size() const; + public: + void clear_recoverable_jobs(); + const std::string& recoverable_jobs(int index) const; + std::string* mutable_recoverable_jobs(int index); + void set_recoverable_jobs(int index, const std::string& value); + void set_recoverable_jobs(int index, std::string&& value); + void set_recoverable_jobs(int index, const char* value); + void set_recoverable_jobs(int index, const char* value, size_t size); + std::string* add_recoverable_jobs(); + void add_recoverable_jobs(const std::string& value); + void add_recoverable_jobs(std::string&& value); + void add_recoverable_jobs(const char* value); + void add_recoverable_jobs(const char* value, size_t size); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& recoverable_jobs() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* mutable_recoverable_jobs(); + private: + const std::string& _internal_recoverable_jobs(int index) const; + std::string* _internal_add_recoverable_jobs(); + public: + + // repeated .tensorflow.CoordinatedJob coordinated_job_list = 10; + int coordinated_job_list_size() const; + private: + int _internal_coordinated_job_list_size() const; + public: + void clear_coordinated_job_list(); + ::tensorflow::CoordinatedJob* mutable_coordinated_job_list(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::CoordinatedJob >* + mutable_coordinated_job_list(); + private: + const ::tensorflow::CoordinatedJob& _internal_coordinated_job_list(int index) const; + ::tensorflow::CoordinatedJob* _internal_add_coordinated_job_list(); + public: + const ::tensorflow::CoordinatedJob& coordinated_job_list(int index) const; + ::tensorflow::CoordinatedJob* add_coordinated_job_list(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::CoordinatedJob >& + coordinated_job_list() 
const; + + // string service_type = 1; + void clear_service_type(); + const std::string& service_type() const; + template + void set_service_type(ArgT0&& arg0, ArgT... args); + std::string* mutable_service_type(); + PROTOBUF_NODISCARD std::string* release_service_type(); + void set_allocated_service_type(std::string* service_type); + private: + const std::string& _internal_service_type() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_service_type(const std::string& value); + std::string* _internal_mutable_service_type(); + public: + + // string service_leader = 2; + void clear_service_leader(); + const std::string& service_leader() const; + template + void set_service_leader(ArgT0&& arg0, ArgT... args); + std::string* mutable_service_leader(); + PROTOBUF_NODISCARD std::string* release_service_leader(); + void set_allocated_service_leader(std::string* service_leader); + private: + const std::string& _internal_service_leader() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_service_leader(const std::string& value); + std::string* _internal_mutable_service_leader(); + public: + + // int64 cluster_register_timeout_in_ms = 4; + void clear_cluster_register_timeout_in_ms(); + int64_t cluster_register_timeout_in_ms() const; + void set_cluster_register_timeout_in_ms(int64_t value); + private: + int64_t _internal_cluster_register_timeout_in_ms() const; + void _internal_set_cluster_register_timeout_in_ms(int64_t value); + public: + + // int64 heartbeat_timeout_in_ms = 5; + void clear_heartbeat_timeout_in_ms(); + int64_t heartbeat_timeout_in_ms() const; + void set_heartbeat_timeout_in_ms(int64_t value); + private: + int64_t _internal_heartbeat_timeout_in_ms() const; + void _internal_set_heartbeat_timeout_in_ms(int64_t value); + public: + + // int64 shutdown_barrier_timeout_in_ms = 7; + void clear_shutdown_barrier_timeout_in_ms(); + int64_t shutdown_barrier_timeout_in_ms() const; + void set_shutdown_barrier_timeout_in_ms(int64_t value); + private: + int64_t 
_internal_shutdown_barrier_timeout_in_ms() const; + void _internal_set_shutdown_barrier_timeout_in_ms(int64_t value); + public: + + // bool enable_health_check = 3; + void clear_enable_health_check(); + bool enable_health_check() const; + void set_enable_health_check(bool value); + private: + bool _internal_enable_health_check() const; + void _internal_set_enable_health_check(bool value); + public: + + // bool agent_destruction_without_shutdown = 8; + void clear_agent_destruction_without_shutdown(); + bool agent_destruction_without_shutdown() const; + void set_agent_destruction_without_shutdown(bool value); + private: + bool _internal_agent_destruction_without_shutdown() const; + void _internal_set_agent_destruction_without_shutdown(bool value); + public: + + // bool allow_new_incarnation_to_reconnect = 11; + void clear_allow_new_incarnation_to_reconnect(); + bool allow_new_incarnation_to_reconnect() const; + void set_allow_new_incarnation_to_reconnect(bool value); + private: + bool _internal_allow_new_incarnation_to_reconnect() const; + void _internal_set_allow_new_incarnation_to_reconnect(bool value); + public: + + // bool force_disable = 12; + void clear_force_disable(); + bool force_disable() const; + void set_force_disable(bool value); + private: + bool _internal_force_disable() const; + void _internal_set_force_disable(bool value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.CoordinationServiceConfig) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField recoverable_jobs_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::CoordinatedJob > coordinated_job_list_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr service_type_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr service_leader_; + int64_t cluster_register_timeout_in_ms_; + 
int64_t heartbeat_timeout_in_ms_; + int64_t shutdown_barrier_timeout_in_ms_; + bool enable_health_check_; + bool agent_destruction_without_shutdown_; + bool allow_new_incarnation_to_reconnect_; + bool force_disable_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fconfig_2eproto; +}; +// =================================================================== + + +// =================================================================== + +#ifdef __GNUC__ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif // __GNUC__ +// CoordinatedJob + +// string name = 1; +inline void CoordinatedJob::clear_name() { + _impl_.name_.ClearToEmpty(); +} +inline const std::string& CoordinatedJob::name() const { + // @@protoc_insertion_point(field_get:tensorflow.CoordinatedJob.name) + return _internal_name(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void CoordinatedJob::set_name(ArgT0&& arg0, ArgT... 
args) { + + _impl_.name_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.CoordinatedJob.name) +} +inline std::string* CoordinatedJob::mutable_name() { + std::string* _s = _internal_mutable_name(); + // @@protoc_insertion_point(field_mutable:tensorflow.CoordinatedJob.name) + return _s; +} +inline const std::string& CoordinatedJob::_internal_name() const { + return _impl_.name_.Get(); +} +inline void CoordinatedJob::_internal_set_name(const std::string& value) { + + _impl_.name_.Set(value, GetArenaForAllocation()); +} +inline std::string* CoordinatedJob::_internal_mutable_name() { + + return _impl_.name_.Mutable(GetArenaForAllocation()); +} +inline std::string* CoordinatedJob::release_name() { + // @@protoc_insertion_point(field_release:tensorflow.CoordinatedJob.name) + return _impl_.name_.Release(); +} +inline void CoordinatedJob::set_allocated_name(std::string* name) { + if (name != nullptr) { + + } else { + + } + _impl_.name_.SetAllocated(name, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.name_.IsDefault()) { + _impl_.name_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.CoordinatedJob.name) +} + +// int32 num_tasks = 2; +inline void CoordinatedJob::clear_num_tasks() { + _impl_.num_tasks_ = 0; +} +inline int32_t CoordinatedJob::_internal_num_tasks() const { + return _impl_.num_tasks_; +} +inline int32_t CoordinatedJob::num_tasks() const { + // @@protoc_insertion_point(field_get:tensorflow.CoordinatedJob.num_tasks) + return _internal_num_tasks(); +} +inline void CoordinatedJob::_internal_set_num_tasks(int32_t value) { + + _impl_.num_tasks_ = value; +} +inline void CoordinatedJob::set_num_tasks(int32_t value) { + _internal_set_num_tasks(value); + // @@protoc_insertion_point(field_set:tensorflow.CoordinatedJob.num_tasks) +} + +// 
------------------------------------------------------------------- + +// CoordinationServiceConfig + +// string service_type = 1; +inline void CoordinationServiceConfig::clear_service_type() { + _impl_.service_type_.ClearToEmpty(); +} +inline const std::string& CoordinationServiceConfig::service_type() const { + // @@protoc_insertion_point(field_get:tensorflow.CoordinationServiceConfig.service_type) + return _internal_service_type(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void CoordinationServiceConfig::set_service_type(ArgT0&& arg0, ArgT... args) { + + _impl_.service_type_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.CoordinationServiceConfig.service_type) +} +inline std::string* CoordinationServiceConfig::mutable_service_type() { + std::string* _s = _internal_mutable_service_type(); + // @@protoc_insertion_point(field_mutable:tensorflow.CoordinationServiceConfig.service_type) + return _s; +} +inline const std::string& CoordinationServiceConfig::_internal_service_type() const { + return _impl_.service_type_.Get(); +} +inline void CoordinationServiceConfig::_internal_set_service_type(const std::string& value) { + + _impl_.service_type_.Set(value, GetArenaForAllocation()); +} +inline std::string* CoordinationServiceConfig::_internal_mutable_service_type() { + + return _impl_.service_type_.Mutable(GetArenaForAllocation()); +} +inline std::string* CoordinationServiceConfig::release_service_type() { + // @@protoc_insertion_point(field_release:tensorflow.CoordinationServiceConfig.service_type) + return _impl_.service_type_.Release(); +} +inline void CoordinationServiceConfig::set_allocated_service_type(std::string* service_type) { + if (service_type != nullptr) { + + } else { + + } + _impl_.service_type_.SetAllocated(service_type, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.service_type_.IsDefault()) { + _impl_.service_type_.Set("", GetArenaForAllocation()); + } 
+#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.CoordinationServiceConfig.service_type) +} + +// string service_leader = 2; +inline void CoordinationServiceConfig::clear_service_leader() { + _impl_.service_leader_.ClearToEmpty(); +} +inline const std::string& CoordinationServiceConfig::service_leader() const { + // @@protoc_insertion_point(field_get:tensorflow.CoordinationServiceConfig.service_leader) + return _internal_service_leader(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void CoordinationServiceConfig::set_service_leader(ArgT0&& arg0, ArgT... args) { + + _impl_.service_leader_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.CoordinationServiceConfig.service_leader) +} +inline std::string* CoordinationServiceConfig::mutable_service_leader() { + std::string* _s = _internal_mutable_service_leader(); + // @@protoc_insertion_point(field_mutable:tensorflow.CoordinationServiceConfig.service_leader) + return _s; +} +inline const std::string& CoordinationServiceConfig::_internal_service_leader() const { + return _impl_.service_leader_.Get(); +} +inline void CoordinationServiceConfig::_internal_set_service_leader(const std::string& value) { + + _impl_.service_leader_.Set(value, GetArenaForAllocation()); +} +inline std::string* CoordinationServiceConfig::_internal_mutable_service_leader() { + + return _impl_.service_leader_.Mutable(GetArenaForAllocation()); +} +inline std::string* CoordinationServiceConfig::release_service_leader() { + // @@protoc_insertion_point(field_release:tensorflow.CoordinationServiceConfig.service_leader) + return _impl_.service_leader_.Release(); +} +inline void CoordinationServiceConfig::set_allocated_service_leader(std::string* service_leader) { + if (service_leader != nullptr) { + + } else { + + } + _impl_.service_leader_.SetAllocated(service_leader, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + 
if (_impl_.service_leader_.IsDefault()) { + _impl_.service_leader_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.CoordinationServiceConfig.service_leader) +} + +// bool enable_health_check = 3; +inline void CoordinationServiceConfig::clear_enable_health_check() { + _impl_.enable_health_check_ = false; +} +inline bool CoordinationServiceConfig::_internal_enable_health_check() const { + return _impl_.enable_health_check_; +} +inline bool CoordinationServiceConfig::enable_health_check() const { + // @@protoc_insertion_point(field_get:tensorflow.CoordinationServiceConfig.enable_health_check) + return _internal_enable_health_check(); +} +inline void CoordinationServiceConfig::_internal_set_enable_health_check(bool value) { + + _impl_.enable_health_check_ = value; +} +inline void CoordinationServiceConfig::set_enable_health_check(bool value) { + _internal_set_enable_health_check(value); + // @@protoc_insertion_point(field_set:tensorflow.CoordinationServiceConfig.enable_health_check) +} + +// int64 cluster_register_timeout_in_ms = 4; +inline void CoordinationServiceConfig::clear_cluster_register_timeout_in_ms() { + _impl_.cluster_register_timeout_in_ms_ = int64_t{0}; +} +inline int64_t CoordinationServiceConfig::_internal_cluster_register_timeout_in_ms() const { + return _impl_.cluster_register_timeout_in_ms_; +} +inline int64_t CoordinationServiceConfig::cluster_register_timeout_in_ms() const { + // @@protoc_insertion_point(field_get:tensorflow.CoordinationServiceConfig.cluster_register_timeout_in_ms) + return _internal_cluster_register_timeout_in_ms(); +} +inline void CoordinationServiceConfig::_internal_set_cluster_register_timeout_in_ms(int64_t value) { + + _impl_.cluster_register_timeout_in_ms_ = value; +} +inline void CoordinationServiceConfig::set_cluster_register_timeout_in_ms(int64_t value) { + _internal_set_cluster_register_timeout_in_ms(value); + // 
@@protoc_insertion_point(field_set:tensorflow.CoordinationServiceConfig.cluster_register_timeout_in_ms) +} + +// int64 heartbeat_timeout_in_ms = 5; +inline void CoordinationServiceConfig::clear_heartbeat_timeout_in_ms() { + _impl_.heartbeat_timeout_in_ms_ = int64_t{0}; +} +inline int64_t CoordinationServiceConfig::_internal_heartbeat_timeout_in_ms() const { + return _impl_.heartbeat_timeout_in_ms_; +} +inline int64_t CoordinationServiceConfig::heartbeat_timeout_in_ms() const { + // @@protoc_insertion_point(field_get:tensorflow.CoordinationServiceConfig.heartbeat_timeout_in_ms) + return _internal_heartbeat_timeout_in_ms(); +} +inline void CoordinationServiceConfig::_internal_set_heartbeat_timeout_in_ms(int64_t value) { + + _impl_.heartbeat_timeout_in_ms_ = value; +} +inline void CoordinationServiceConfig::set_heartbeat_timeout_in_ms(int64_t value) { + _internal_set_heartbeat_timeout_in_ms(value); + // @@protoc_insertion_point(field_set:tensorflow.CoordinationServiceConfig.heartbeat_timeout_in_ms) +} + +// repeated .tensorflow.CoordinatedJob coordinated_job_list = 10; +inline int CoordinationServiceConfig::_internal_coordinated_job_list_size() const { + return _impl_.coordinated_job_list_.size(); +} +inline int CoordinationServiceConfig::coordinated_job_list_size() const { + return _internal_coordinated_job_list_size(); +} +inline void CoordinationServiceConfig::clear_coordinated_job_list() { + _impl_.coordinated_job_list_.Clear(); +} +inline ::tensorflow::CoordinatedJob* CoordinationServiceConfig::mutable_coordinated_job_list(int index) { + // @@protoc_insertion_point(field_mutable:tensorflow.CoordinationServiceConfig.coordinated_job_list) + return _impl_.coordinated_job_list_.Mutable(index); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::CoordinatedJob >* +CoordinationServiceConfig::mutable_coordinated_job_list() { + // @@protoc_insertion_point(field_mutable_list:tensorflow.CoordinationServiceConfig.coordinated_job_list) + return 
&_impl_.coordinated_job_list_; +} +inline const ::tensorflow::CoordinatedJob& CoordinationServiceConfig::_internal_coordinated_job_list(int index) const { + return _impl_.coordinated_job_list_.Get(index); +} +inline const ::tensorflow::CoordinatedJob& CoordinationServiceConfig::coordinated_job_list(int index) const { + // @@protoc_insertion_point(field_get:tensorflow.CoordinationServiceConfig.coordinated_job_list) + return _internal_coordinated_job_list(index); +} +inline ::tensorflow::CoordinatedJob* CoordinationServiceConfig::_internal_add_coordinated_job_list() { + return _impl_.coordinated_job_list_.Add(); +} +inline ::tensorflow::CoordinatedJob* CoordinationServiceConfig::add_coordinated_job_list() { + ::tensorflow::CoordinatedJob* _add = _internal_add_coordinated_job_list(); + // @@protoc_insertion_point(field_add:tensorflow.CoordinationServiceConfig.coordinated_job_list) + return _add; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::CoordinatedJob >& +CoordinationServiceConfig::coordinated_job_list() const { + // @@protoc_insertion_point(field_list:tensorflow.CoordinationServiceConfig.coordinated_job_list) + return _impl_.coordinated_job_list_; +} + +// int64 shutdown_barrier_timeout_in_ms = 7; +inline void CoordinationServiceConfig::clear_shutdown_barrier_timeout_in_ms() { + _impl_.shutdown_barrier_timeout_in_ms_ = int64_t{0}; +} +inline int64_t CoordinationServiceConfig::_internal_shutdown_barrier_timeout_in_ms() const { + return _impl_.shutdown_barrier_timeout_in_ms_; +} +inline int64_t CoordinationServiceConfig::shutdown_barrier_timeout_in_ms() const { + // @@protoc_insertion_point(field_get:tensorflow.CoordinationServiceConfig.shutdown_barrier_timeout_in_ms) + return _internal_shutdown_barrier_timeout_in_ms(); +} +inline void CoordinationServiceConfig::_internal_set_shutdown_barrier_timeout_in_ms(int64_t value) { + + _impl_.shutdown_barrier_timeout_in_ms_ = value; +} +inline void 
CoordinationServiceConfig::set_shutdown_barrier_timeout_in_ms(int64_t value) { + _internal_set_shutdown_barrier_timeout_in_ms(value); + // @@protoc_insertion_point(field_set:tensorflow.CoordinationServiceConfig.shutdown_barrier_timeout_in_ms) +} + +// bool agent_destruction_without_shutdown = 8; +inline void CoordinationServiceConfig::clear_agent_destruction_without_shutdown() { + _impl_.agent_destruction_without_shutdown_ = false; +} +inline bool CoordinationServiceConfig::_internal_agent_destruction_without_shutdown() const { + return _impl_.agent_destruction_without_shutdown_; +} +inline bool CoordinationServiceConfig::agent_destruction_without_shutdown() const { + // @@protoc_insertion_point(field_get:tensorflow.CoordinationServiceConfig.agent_destruction_without_shutdown) + return _internal_agent_destruction_without_shutdown(); +} +inline void CoordinationServiceConfig::_internal_set_agent_destruction_without_shutdown(bool value) { + + _impl_.agent_destruction_without_shutdown_ = value; +} +inline void CoordinationServiceConfig::set_agent_destruction_without_shutdown(bool value) { + _internal_set_agent_destruction_without_shutdown(value); + // @@protoc_insertion_point(field_set:tensorflow.CoordinationServiceConfig.agent_destruction_without_shutdown) +} + +// repeated string recoverable_jobs = 9; +inline int CoordinationServiceConfig::_internal_recoverable_jobs_size() const { + return _impl_.recoverable_jobs_.size(); +} +inline int CoordinationServiceConfig::recoverable_jobs_size() const { + return _internal_recoverable_jobs_size(); +} +inline void CoordinationServiceConfig::clear_recoverable_jobs() { + _impl_.recoverable_jobs_.Clear(); +} +inline std::string* CoordinationServiceConfig::add_recoverable_jobs() { + std::string* _s = _internal_add_recoverable_jobs(); + // @@protoc_insertion_point(field_add_mutable:tensorflow.CoordinationServiceConfig.recoverable_jobs) + return _s; +} +inline const std::string& 
CoordinationServiceConfig::_internal_recoverable_jobs(int index) const { + return _impl_.recoverable_jobs_.Get(index); +} +inline const std::string& CoordinationServiceConfig::recoverable_jobs(int index) const { + // @@protoc_insertion_point(field_get:tensorflow.CoordinationServiceConfig.recoverable_jobs) + return _internal_recoverable_jobs(index); +} +inline std::string* CoordinationServiceConfig::mutable_recoverable_jobs(int index) { + // @@protoc_insertion_point(field_mutable:tensorflow.CoordinationServiceConfig.recoverable_jobs) + return _impl_.recoverable_jobs_.Mutable(index); +} +inline void CoordinationServiceConfig::set_recoverable_jobs(int index, const std::string& value) { + _impl_.recoverable_jobs_.Mutable(index)->assign(value); + // @@protoc_insertion_point(field_set:tensorflow.CoordinationServiceConfig.recoverable_jobs) +} +inline void CoordinationServiceConfig::set_recoverable_jobs(int index, std::string&& value) { + _impl_.recoverable_jobs_.Mutable(index)->assign(std::move(value)); + // @@protoc_insertion_point(field_set:tensorflow.CoordinationServiceConfig.recoverable_jobs) +} +inline void CoordinationServiceConfig::set_recoverable_jobs(int index, const char* value) { + GOOGLE_DCHECK(value != nullptr); + _impl_.recoverable_jobs_.Mutable(index)->assign(value); + // @@protoc_insertion_point(field_set_char:tensorflow.CoordinationServiceConfig.recoverable_jobs) +} +inline void CoordinationServiceConfig::set_recoverable_jobs(int index, const char* value, size_t size) { + _impl_.recoverable_jobs_.Mutable(index)->assign( + reinterpret_cast(value), size); + // @@protoc_insertion_point(field_set_pointer:tensorflow.CoordinationServiceConfig.recoverable_jobs) +} +inline std::string* CoordinationServiceConfig::_internal_add_recoverable_jobs() { + return _impl_.recoverable_jobs_.Add(); +} +inline void CoordinationServiceConfig::add_recoverable_jobs(const std::string& value) { + _impl_.recoverable_jobs_.Add()->assign(value); + // 
@@protoc_insertion_point(field_add:tensorflow.CoordinationServiceConfig.recoverable_jobs) +} +inline void CoordinationServiceConfig::add_recoverable_jobs(std::string&& value) { + _impl_.recoverable_jobs_.Add(std::move(value)); + // @@protoc_insertion_point(field_add:tensorflow.CoordinationServiceConfig.recoverable_jobs) +} +inline void CoordinationServiceConfig::add_recoverable_jobs(const char* value) { + GOOGLE_DCHECK(value != nullptr); + _impl_.recoverable_jobs_.Add()->assign(value); + // @@protoc_insertion_point(field_add_char:tensorflow.CoordinationServiceConfig.recoverable_jobs) +} +inline void CoordinationServiceConfig::add_recoverable_jobs(const char* value, size_t size) { + _impl_.recoverable_jobs_.Add()->assign(reinterpret_cast(value), size); + // @@protoc_insertion_point(field_add_pointer:tensorflow.CoordinationServiceConfig.recoverable_jobs) +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& +CoordinationServiceConfig::recoverable_jobs() const { + // @@protoc_insertion_point(field_list:tensorflow.CoordinationServiceConfig.recoverable_jobs) + return _impl_.recoverable_jobs_; +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* +CoordinationServiceConfig::mutable_recoverable_jobs() { + // @@protoc_insertion_point(field_mutable_list:tensorflow.CoordinationServiceConfig.recoverable_jobs) + return &_impl_.recoverable_jobs_; +} + +// bool allow_new_incarnation_to_reconnect = 11; +inline void CoordinationServiceConfig::clear_allow_new_incarnation_to_reconnect() { + _impl_.allow_new_incarnation_to_reconnect_ = false; +} +inline bool CoordinationServiceConfig::_internal_allow_new_incarnation_to_reconnect() const { + return _impl_.allow_new_incarnation_to_reconnect_; +} +inline bool CoordinationServiceConfig::allow_new_incarnation_to_reconnect() const { + // @@protoc_insertion_point(field_get:tensorflow.CoordinationServiceConfig.allow_new_incarnation_to_reconnect) + return _internal_allow_new_incarnation_to_reconnect(); +} +inline void 
CoordinationServiceConfig::_internal_set_allow_new_incarnation_to_reconnect(bool value) { + + _impl_.allow_new_incarnation_to_reconnect_ = value; +} +inline void CoordinationServiceConfig::set_allow_new_incarnation_to_reconnect(bool value) { + _internal_set_allow_new_incarnation_to_reconnect(value); + // @@protoc_insertion_point(field_set:tensorflow.CoordinationServiceConfig.allow_new_incarnation_to_reconnect) +} + +// bool force_disable = 12; +inline void CoordinationServiceConfig::clear_force_disable() { + _impl_.force_disable_ = false; +} +inline bool CoordinationServiceConfig::_internal_force_disable() const { + return _impl_.force_disable_; +} +inline bool CoordinationServiceConfig::force_disable() const { + // @@protoc_insertion_point(field_get:tensorflow.CoordinationServiceConfig.force_disable) + return _internal_force_disable(); +} +inline void CoordinationServiceConfig::_internal_set_force_disable(bool value) { + + _impl_.force_disable_ = value; +} +inline void CoordinationServiceConfig::set_force_disable(bool value) { + _internal_set_force_disable(value); + // @@protoc_insertion_point(field_set:tensorflow.CoordinationServiceConfig.force_disable) +} + +#ifdef __GNUC__ + #pragma GCC diagnostic pop +#endif // __GNUC__ +// ------------------------------------------------------------------- + + +// @@protoc_insertion_point(namespace_scope) + +} // namespace tensorflow + +// @@protoc_insertion_point(global_scope) + +#include +#endif // GOOGLE_PROTOBUF_INCLUDED_GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2fcoordination_5fconfig_2eproto diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/coordination_config.proto b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/coordination_config.proto new file mode 100644 index 0000000000000000000000000000000000000000..035a49e6f20e9c142bfd2b6276aca9323ece4153 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/coordination_config.proto 
@@ -0,0 +1,70 @@ +syntax = "proto3"; + +package tensorflow; + +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto"; + +// Represents a job type and the number of tasks under this job. +// For example, ("worker", 20) implies that there will be 20 worker tasks. +message CoordinatedJob { + string name = 1; + int32 num_tasks = 2; +} + +// Coordination service configuration parameters. +// The system picks appropriate values for fields that are not set. +message CoordinationServiceConfig { + // Type of coordination service implementation to enable. + // For example, setting the service type as "standalone" starts a service + // instance on the leader task to provide the coordination services such as + // heartbeats and consistent key-value store. + string service_type = 1; + + // Address where the coordination service instance is hosted. + string service_leader = 2; + + // Whether to enable the health check mechanism. + bool enable_health_check = 3; + + // Maximum wait time for all members in the cluster to be registered. + int64 cluster_register_timeout_in_ms = 4; + + // Heartbeat timeout, if a task does not record heartbeat in this time + // window, it will be considered disconnected. + // Note: This is also used as a grace period to accept any heartbeats after + // the agent has disconnected, to account for the lag time between the service + // recording the state change and the agent stopping heartbeats. + int64 heartbeat_timeout_in_ms = 5; + + // The list of `CoordinatedJob`s that will register in coordination service. + reserved 6; + repeated CoordinatedJob coordinated_job_list = 10; + + // Denotes how long to wait for all coordination agents to reach the barriers + // (after the first shutdown request) before disconnecting together. If + // set to 0, no barrier is imposed upon shutdown and each worker can + // disconnect individually. 
+ int64 shutdown_barrier_timeout_in_ms = 7; + + // If set, agents do not make an explicit Shutdown() call. Service will only + // find out about the disconnecte agent via stale heartbeats. Used for + // testing. + bool agent_destruction_without_shutdown = 8; + + // The list of jobs which are recoverable. If a task in this list fails, + // it will not propagate error to other tasks. + // If empty, no jobs will be recoverable and every task failure will cause + // error propagation to other tasks. + repeated string recoverable_jobs = 9; + + // If a task restarts with a new incarnation, we may allow it to reconnect + // silently. This is useful when we know that a task can immediately resume + // work upon re-connecting to the service. + bool allow_new_incarnation_to_reconnect = 11; + + // Disables coordination service. + // Some libraries enable coordination service by default even if the user did + // not specify any config. This field allows users to explicitly disable + // coordination service under all situations. + bool force_disable = 12; +} diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/coordination_service.grpc.pb.cc b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/coordination_service.grpc.pb.cc new file mode 100644 index 0000000000000000000000000000000000000000..9a26d7cdaf05ddb09b3bde7608853847610e4856 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/coordination_service.grpc.pb.cc @@ -0,0 +1,674 @@ +// Generated by the gRPC C++ plugin. +// If you make any local change, they will be lost. 
+// source: tsl/protobuf/coordination_service.proto + +#include "tsl/protobuf/coordination_service.pb.h" +#include "tsl/protobuf/coordination_service.grpc.pb.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +namespace tensorflow { + +static const char* grpcCoordinationService_method_names[] = { + "/tensorflow.CoordinationService/RegisterTask", + "/tensorflow.CoordinationService/Heartbeat", + "/tensorflow.CoordinationService/WaitForAllTasks", + "/tensorflow.CoordinationService/ShutdownTask", + "/tensorflow.CoordinationService/ResetTask", + "/tensorflow.CoordinationService/ReportErrorToTask", + "/tensorflow.CoordinationService/ReportErrorToService", + "/tensorflow.CoordinationService/GetTaskState", + "/tensorflow.CoordinationService/InsertKeyValue", + "/tensorflow.CoordinationService/GetKeyValue", + "/tensorflow.CoordinationService/TryGetKeyValue", + "/tensorflow.CoordinationService/GetKeyValueDir", + "/tensorflow.CoordinationService/DeleteKeyValue", + "/tensorflow.CoordinationService/Barrier", + "/tensorflow.CoordinationService/CancelBarrier", +}; + +std::unique_ptr< grpc::CoordinationService::Stub> grpc::CoordinationService::NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options) { + (void)options; + std::unique_ptr< grpc::CoordinationService::Stub> stub(new grpc::CoordinationService::Stub(channel)); + return stub; +} + +grpc::CoordinationService::Stub::Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel) + : channel_(channel), rpcmethod_RegisterTask_(grpcCoordinationService_method_names[0], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_Heartbeat_(grpcCoordinationService_method_names[1], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_WaitForAllTasks_(grpcCoordinationService_method_names[2], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , 
rpcmethod_ShutdownTask_(grpcCoordinationService_method_names[3], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_ResetTask_(grpcCoordinationService_method_names[4], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_ReportErrorToTask_(grpcCoordinationService_method_names[5], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_ReportErrorToService_(grpcCoordinationService_method_names[6], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_GetTaskState_(grpcCoordinationService_method_names[7], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_InsertKeyValue_(grpcCoordinationService_method_names[8], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_GetKeyValue_(grpcCoordinationService_method_names[9], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_TryGetKeyValue_(grpcCoordinationService_method_names[10], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_GetKeyValueDir_(grpcCoordinationService_method_names[11], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_DeleteKeyValue_(grpcCoordinationService_method_names[12], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_Barrier_(grpcCoordinationService_method_names[13], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_CancelBarrier_(grpcCoordinationService_method_names[14], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + {} + +::grpc::Status grpc::CoordinationService::Stub::RegisterTask(::grpc::ClientContext* context, const ::tensorflow::RegisterTaskRequest& request, ::tensorflow::RegisterTaskResponse* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_RegisterTask_, context, request, response); +} + +void grpc::CoordinationService::Stub::experimental_async::RegisterTask(::grpc::ClientContext* context, const ::tensorflow::RegisterTaskRequest* request, ::tensorflow::RegisterTaskResponse* response, std::function f) { + 
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_RegisterTask_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::RegisterTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::RegisterTaskResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_RegisterTask_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::RegisterTask(::grpc::ClientContext* context, const ::tensorflow::RegisterTaskRequest* request, ::tensorflow::RegisterTaskResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_RegisterTask_, context, request, response, reactor); +} + +void grpc::CoordinationService::Stub::experimental_async::RegisterTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::RegisterTaskResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_RegisterTask_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::RegisterTaskResponse>* grpc::CoordinationService::Stub::AsyncRegisterTaskRaw(::grpc::ClientContext* context, const ::tensorflow::RegisterTaskRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::RegisterTaskResponse>::Create(channel_.get(), cq, rpcmethod_RegisterTask_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::RegisterTaskResponse>* grpc::CoordinationService::Stub::PrepareAsyncRegisterTaskRaw(::grpc::ClientContext* context, const ::tensorflow::RegisterTaskRequest& request, ::grpc::CompletionQueue* cq) { + return 
::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::RegisterTaskResponse>::Create(channel_.get(), cq, rpcmethod_RegisterTask_, context, request, false); +} + +::grpc::Status grpc::CoordinationService::Stub::Heartbeat(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest& request, ::tensorflow::HeartbeatResponse* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Heartbeat_, context, request, response); +} + +void grpc::CoordinationService::Stub::experimental_async::Heartbeat(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest* request, ::tensorflow::HeartbeatResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Heartbeat_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::Heartbeat(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::HeartbeatResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Heartbeat_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::Heartbeat(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest* request, ::tensorflow::HeartbeatResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Heartbeat_, context, request, response, reactor); +} + +void grpc::CoordinationService::Stub::experimental_async::Heartbeat(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::HeartbeatResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Heartbeat_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< 
::tensorflow::HeartbeatResponse>* grpc::CoordinationService::Stub::AsyncHeartbeatRaw(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::HeartbeatResponse>::Create(channel_.get(), cq, rpcmethod_Heartbeat_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::HeartbeatResponse>* grpc::CoordinationService::Stub::PrepareAsyncHeartbeatRaw(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::HeartbeatResponse>::Create(channel_.get(), cq, rpcmethod_Heartbeat_, context, request, false); +} + +::grpc::Status grpc::CoordinationService::Stub::WaitForAllTasks(::grpc::ClientContext* context, const ::tensorflow::WaitForAllTasksRequest& request, ::tensorflow::WaitForAllTasksResponse* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_WaitForAllTasks_, context, request, response); +} + +void grpc::CoordinationService::Stub::experimental_async::WaitForAllTasks(::grpc::ClientContext* context, const ::tensorflow::WaitForAllTasksRequest* request, ::tensorflow::WaitForAllTasksResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_WaitForAllTasks_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::WaitForAllTasks(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::WaitForAllTasksResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_WaitForAllTasks_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::WaitForAllTasks(::grpc::ClientContext* context, const 
::tensorflow::WaitForAllTasksRequest* request, ::tensorflow::WaitForAllTasksResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_WaitForAllTasks_, context, request, response, reactor); +} + +void grpc::CoordinationService::Stub::experimental_async::WaitForAllTasks(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::WaitForAllTasksResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_WaitForAllTasks_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::WaitForAllTasksResponse>* grpc::CoordinationService::Stub::AsyncWaitForAllTasksRaw(::grpc::ClientContext* context, const ::tensorflow::WaitForAllTasksRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::WaitForAllTasksResponse>::Create(channel_.get(), cq, rpcmethod_WaitForAllTasks_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::WaitForAllTasksResponse>* grpc::CoordinationService::Stub::PrepareAsyncWaitForAllTasksRaw(::grpc::ClientContext* context, const ::tensorflow::WaitForAllTasksRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::WaitForAllTasksResponse>::Create(channel_.get(), cq, rpcmethod_WaitForAllTasks_, context, request, false); +} + +::grpc::Status grpc::CoordinationService::Stub::ShutdownTask(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest& request, ::tensorflow::ShutdownTaskResponse* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_ShutdownTask_, context, request, response); +} + +void 
grpc::CoordinationService::Stub::experimental_async::ShutdownTask(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest* request, ::tensorflow::ShutdownTaskResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_ShutdownTask_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::ShutdownTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ShutdownTaskResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_ShutdownTask_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::ShutdownTask(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest* request, ::tensorflow::ShutdownTaskResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_ShutdownTask_, context, request, response, reactor); +} + +void grpc::CoordinationService::Stub::experimental_async::ShutdownTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ShutdownTaskResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_ShutdownTask_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::ShutdownTaskResponse>* grpc::CoordinationService::Stub::AsyncShutdownTaskRaw(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::ShutdownTaskResponse>::Create(channel_.get(), cq, rpcmethod_ShutdownTask_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< 
::tensorflow::ShutdownTaskResponse>* grpc::CoordinationService::Stub::PrepareAsyncShutdownTaskRaw(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::ShutdownTaskResponse>::Create(channel_.get(), cq, rpcmethod_ShutdownTask_, context, request, false); +} + +::grpc::Status grpc::CoordinationService::Stub::ResetTask(::grpc::ClientContext* context, const ::tensorflow::ResetTaskRequest& request, ::tensorflow::ResetTaskResponse* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_ResetTask_, context, request, response); +} + +void grpc::CoordinationService::Stub::experimental_async::ResetTask(::grpc::ClientContext* context, const ::tensorflow::ResetTaskRequest* request, ::tensorflow::ResetTaskResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_ResetTask_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::ResetTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ResetTaskResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_ResetTask_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::ResetTask(::grpc::ClientContext* context, const ::tensorflow::ResetTaskRequest* request, ::tensorflow::ResetTaskResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_ResetTask_, context, request, response, reactor); +} + +void grpc::CoordinationService::Stub::experimental_async::ResetTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ResetTaskResponse* response, 
::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_ResetTask_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::ResetTaskResponse>* grpc::CoordinationService::Stub::AsyncResetTaskRaw(::grpc::ClientContext* context, const ::tensorflow::ResetTaskRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::ResetTaskResponse>::Create(channel_.get(), cq, rpcmethod_ResetTask_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::ResetTaskResponse>* grpc::CoordinationService::Stub::PrepareAsyncResetTaskRaw(::grpc::ClientContext* context, const ::tensorflow::ResetTaskRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::ResetTaskResponse>::Create(channel_.get(), cq, rpcmethod_ResetTask_, context, request, false); +} + +::grpc::Status grpc::CoordinationService::Stub::ReportErrorToTask(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToTaskRequest& request, ::tensorflow::ReportErrorToTaskResponse* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_ReportErrorToTask_, context, request, response); +} + +void grpc::CoordinationService::Stub::experimental_async::ReportErrorToTask(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToTaskRequest* request, ::tensorflow::ReportErrorToTaskResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_ReportErrorToTask_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::ReportErrorToTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ReportErrorToTaskResponse* response, std::function f) { + 
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_ReportErrorToTask_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::ReportErrorToTask(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToTaskRequest* request, ::tensorflow::ReportErrorToTaskResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_ReportErrorToTask_, context, request, response, reactor); +} + +void grpc::CoordinationService::Stub::experimental_async::ReportErrorToTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ReportErrorToTaskResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_ReportErrorToTask_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::ReportErrorToTaskResponse>* grpc::CoordinationService::Stub::AsyncReportErrorToTaskRaw(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToTaskRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::ReportErrorToTaskResponse>::Create(channel_.get(), cq, rpcmethod_ReportErrorToTask_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::ReportErrorToTaskResponse>* grpc::CoordinationService::Stub::PrepareAsyncReportErrorToTaskRaw(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToTaskRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::ReportErrorToTaskResponse>::Create(channel_.get(), cq, rpcmethod_ReportErrorToTask_, context, request, false); +} + +::grpc::Status grpc::CoordinationService::Stub::ReportErrorToService(::grpc::ClientContext* 
context, const ::tensorflow::ReportErrorToServiceRequest& request, ::tensorflow::ReportErrorToServiceResponse* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_ReportErrorToService_, context, request, response); +} + +void grpc::CoordinationService::Stub::experimental_async::ReportErrorToService(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToServiceRequest* request, ::tensorflow::ReportErrorToServiceResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_ReportErrorToService_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::ReportErrorToService(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ReportErrorToServiceResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_ReportErrorToService_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::ReportErrorToService(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToServiceRequest* request, ::tensorflow::ReportErrorToServiceResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_ReportErrorToService_, context, request, response, reactor); +} + +void grpc::CoordinationService::Stub::experimental_async::ReportErrorToService(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ReportErrorToServiceResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_ReportErrorToService_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::ReportErrorToServiceResponse>* 
grpc::CoordinationService::Stub::AsyncReportErrorToServiceRaw(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToServiceRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::ReportErrorToServiceResponse>::Create(channel_.get(), cq, rpcmethod_ReportErrorToService_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::ReportErrorToServiceResponse>* grpc::CoordinationService::Stub::PrepareAsyncReportErrorToServiceRaw(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToServiceRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::ReportErrorToServiceResponse>::Create(channel_.get(), cq, rpcmethod_ReportErrorToService_, context, request, false); +} + +::grpc::Status grpc::CoordinationService::Stub::GetTaskState(::grpc::ClientContext* context, const ::tensorflow::GetTaskStateRequest& request, ::tensorflow::GetTaskStateResponse* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_GetTaskState_, context, request, response); +} + +void grpc::CoordinationService::Stub::experimental_async::GetTaskState(::grpc::ClientContext* context, const ::tensorflow::GetTaskStateRequest* request, ::tensorflow::GetTaskStateResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_GetTaskState_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::GetTaskState(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetTaskStateResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_GetTaskState_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::GetTaskState(::grpc::ClientContext* context, const 
::tensorflow::GetTaskStateRequest* request, ::tensorflow::GetTaskStateResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_GetTaskState_, context, request, response, reactor); +} + +void grpc::CoordinationService::Stub::experimental_async::GetTaskState(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetTaskStateResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_GetTaskState_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::GetTaskStateResponse>* grpc::CoordinationService::Stub::AsyncGetTaskStateRaw(::grpc::ClientContext* context, const ::tensorflow::GetTaskStateRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::GetTaskStateResponse>::Create(channel_.get(), cq, rpcmethod_GetTaskState_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::GetTaskStateResponse>* grpc::CoordinationService::Stub::PrepareAsyncGetTaskStateRaw(::grpc::ClientContext* context, const ::tensorflow::GetTaskStateRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::GetTaskStateResponse>::Create(channel_.get(), cq, rpcmethod_GetTaskState_, context, request, false); +} + +::grpc::Status grpc::CoordinationService::Stub::InsertKeyValue(::grpc::ClientContext* context, const ::tensorflow::InsertKeyValueRequest& request, ::tensorflow::InsertKeyValueResponse* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_InsertKeyValue_, context, request, response); +} + +void grpc::CoordinationService::Stub::experimental_async::InsertKeyValue(::grpc::ClientContext* context, const 
::tensorflow::InsertKeyValueRequest* request, ::tensorflow::InsertKeyValueResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_InsertKeyValue_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::InsertKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::InsertKeyValueResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_InsertKeyValue_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::InsertKeyValue(::grpc::ClientContext* context, const ::tensorflow::InsertKeyValueRequest* request, ::tensorflow::InsertKeyValueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_InsertKeyValue_, context, request, response, reactor); +} + +void grpc::CoordinationService::Stub::experimental_async::InsertKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::InsertKeyValueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_InsertKeyValue_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::InsertKeyValueResponse>* grpc::CoordinationService::Stub::AsyncInsertKeyValueRaw(::grpc::ClientContext* context, const ::tensorflow::InsertKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::InsertKeyValueResponse>::Create(channel_.get(), cq, rpcmethod_InsertKeyValue_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::InsertKeyValueResponse>* 
grpc::CoordinationService::Stub::PrepareAsyncInsertKeyValueRaw(::grpc::ClientContext* context, const ::tensorflow::InsertKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::InsertKeyValueResponse>::Create(channel_.get(), cq, rpcmethod_InsertKeyValue_, context, request, false); +} + +::grpc::Status grpc::CoordinationService::Stub::GetKeyValue(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest& request, ::tensorflow::GetKeyValueResponse* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_GetKeyValue_, context, request, response); +} + +void grpc::CoordinationService::Stub::experimental_async::GetKeyValue(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest* request, ::tensorflow::GetKeyValueResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_GetKeyValue_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::GetKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetKeyValueResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_GetKeyValue_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::GetKeyValue(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest* request, ::tensorflow::GetKeyValueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_GetKeyValue_, context, request, response, reactor); +} + +void grpc::CoordinationService::Stub::experimental_async::GetKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetKeyValueResponse* response, 
::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_GetKeyValue_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::GetKeyValueResponse>* grpc::CoordinationService::Stub::AsyncGetKeyValueRaw(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::GetKeyValueResponse>::Create(channel_.get(), cq, rpcmethod_GetKeyValue_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::GetKeyValueResponse>* grpc::CoordinationService::Stub::PrepareAsyncGetKeyValueRaw(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::GetKeyValueResponse>::Create(channel_.get(), cq, rpcmethod_GetKeyValue_, context, request, false); +} + +::grpc::Status grpc::CoordinationService::Stub::TryGetKeyValue(::grpc::ClientContext* context, const ::tensorflow::TryGetKeyValueRequest& request, ::tensorflow::TryGetKeyValueResponse* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_TryGetKeyValue_, context, request, response); +} + +void grpc::CoordinationService::Stub::experimental_async::TryGetKeyValue(::grpc::ClientContext* context, const ::tensorflow::TryGetKeyValueRequest* request, ::tensorflow::TryGetKeyValueResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_TryGetKeyValue_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::TryGetKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::TryGetKeyValueResponse* response, std::function f) { + 
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_TryGetKeyValue_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::TryGetKeyValue(::grpc::ClientContext* context, const ::tensorflow::TryGetKeyValueRequest* request, ::tensorflow::TryGetKeyValueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_TryGetKeyValue_, context, request, response, reactor); +} + +void grpc::CoordinationService::Stub::experimental_async::TryGetKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::TryGetKeyValueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_TryGetKeyValue_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::TryGetKeyValueResponse>* grpc::CoordinationService::Stub::AsyncTryGetKeyValueRaw(::grpc::ClientContext* context, const ::tensorflow::TryGetKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::TryGetKeyValueResponse>::Create(channel_.get(), cq, rpcmethod_TryGetKeyValue_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::TryGetKeyValueResponse>* grpc::CoordinationService::Stub::PrepareAsyncTryGetKeyValueRaw(::grpc::ClientContext* context, const ::tensorflow::TryGetKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::TryGetKeyValueResponse>::Create(channel_.get(), cq, rpcmethod_TryGetKeyValue_, context, request, false); +} + +::grpc::Status grpc::CoordinationService::Stub::GetKeyValueDir(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest& request, 
::tensorflow::GetKeyValueDirResponse* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_GetKeyValueDir_, context, request, response); +} + +void grpc::CoordinationService::Stub::experimental_async::GetKeyValueDir(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest* request, ::tensorflow::GetKeyValueDirResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_GetKeyValueDir_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::GetKeyValueDir(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetKeyValueDirResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_GetKeyValueDir_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::GetKeyValueDir(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest* request, ::tensorflow::GetKeyValueDirResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_GetKeyValueDir_, context, request, response, reactor); +} + +void grpc::CoordinationService::Stub::experimental_async::GetKeyValueDir(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetKeyValueDirResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_GetKeyValueDir_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::GetKeyValueDirResponse>* grpc::CoordinationService::Stub::AsyncGetKeyValueDirRaw(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest& request, ::grpc::CompletionQueue* cq) { + return 
::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::GetKeyValueDirResponse>::Create(channel_.get(), cq, rpcmethod_GetKeyValueDir_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::GetKeyValueDirResponse>* grpc::CoordinationService::Stub::PrepareAsyncGetKeyValueDirRaw(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::GetKeyValueDirResponse>::Create(channel_.get(), cq, rpcmethod_GetKeyValueDir_, context, request, false); +} + +::grpc::Status grpc::CoordinationService::Stub::DeleteKeyValue(::grpc::ClientContext* context, const ::tensorflow::DeleteKeyValueRequest& request, ::tensorflow::DeleteKeyValueResponse* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DeleteKeyValue_, context, request, response); +} + +void grpc::CoordinationService::Stub::experimental_async::DeleteKeyValue(::grpc::ClientContext* context, const ::tensorflow::DeleteKeyValueRequest* request, ::tensorflow::DeleteKeyValueResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DeleteKeyValue_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::DeleteKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::DeleteKeyValueResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DeleteKeyValue_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::DeleteKeyValue(::grpc::ClientContext* context, const ::tensorflow::DeleteKeyValueRequest* request, ::tensorflow::DeleteKeyValueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + 
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DeleteKeyValue_, context, request, response, reactor); +} + +void grpc::CoordinationService::Stub::experimental_async::DeleteKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::DeleteKeyValueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DeleteKeyValue_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::DeleteKeyValueResponse>* grpc::CoordinationService::Stub::AsyncDeleteKeyValueRaw(::grpc::ClientContext* context, const ::tensorflow::DeleteKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::DeleteKeyValueResponse>::Create(channel_.get(), cq, rpcmethod_DeleteKeyValue_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::DeleteKeyValueResponse>* grpc::CoordinationService::Stub::PrepareAsyncDeleteKeyValueRaw(::grpc::ClientContext* context, const ::tensorflow::DeleteKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::DeleteKeyValueResponse>::Create(channel_.get(), cq, rpcmethod_DeleteKeyValue_, context, request, false); +} + +::grpc::Status grpc::CoordinationService::Stub::Barrier(::grpc::ClientContext* context, const ::tensorflow::BarrierRequest& request, ::tensorflow::BarrierResponse* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Barrier_, context, request, response); +} + +void grpc::CoordinationService::Stub::experimental_async::Barrier(::grpc::ClientContext* context, const ::tensorflow::BarrierRequest* request, ::tensorflow::BarrierResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), 
stub_->rpcmethod_Barrier_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::Barrier(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::BarrierResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Barrier_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::Barrier(::grpc::ClientContext* context, const ::tensorflow::BarrierRequest* request, ::tensorflow::BarrierResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Barrier_, context, request, response, reactor); +} + +void grpc::CoordinationService::Stub::experimental_async::Barrier(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::BarrierResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Barrier_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::BarrierResponse>* grpc::CoordinationService::Stub::AsyncBarrierRaw(::grpc::ClientContext* context, const ::tensorflow::BarrierRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::BarrierResponse>::Create(channel_.get(), cq, rpcmethod_Barrier_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::BarrierResponse>* grpc::CoordinationService::Stub::PrepareAsyncBarrierRaw(::grpc::ClientContext* context, const ::tensorflow::BarrierRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::BarrierResponse>::Create(channel_.get(), cq, rpcmethod_Barrier_, context, request, false); +} + 
+::grpc::Status grpc::CoordinationService::Stub::CancelBarrier(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest& request, ::tensorflow::CancelBarrierResponse* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_CancelBarrier_, context, request, response); +} + +void grpc::CoordinationService::Stub::experimental_async::CancelBarrier(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest* request, ::tensorflow::CancelBarrierResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CancelBarrier_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::CancelBarrier(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::CancelBarrierResponse* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CancelBarrier_, context, request, response, std::move(f)); +} + +void grpc::CoordinationService::Stub::experimental_async::CancelBarrier(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest* request, ::tensorflow::CancelBarrierResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CancelBarrier_, context, request, response, reactor); +} + +void grpc::CoordinationService::Stub::experimental_async::CancelBarrier(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::CancelBarrierResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CancelBarrier_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::CancelBarrierResponse>* 
grpc::CoordinationService::Stub::AsyncCancelBarrierRaw(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::CancelBarrierResponse>::Create(channel_.get(), cq, rpcmethod_CancelBarrier_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::tensorflow::CancelBarrierResponse>* grpc::CoordinationService::Stub::PrepareAsyncCancelBarrierRaw(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::tensorflow::CancelBarrierResponse>::Create(channel_.get(), cq, rpcmethod_CancelBarrier_, context, request, false); +} + +grpc::CoordinationService::Service::Service() { + AddMethod(new ::grpc::internal::RpcServiceMethod( + grpcCoordinationService_method_names[0], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< grpc::CoordinationService::Service, ::tensorflow::RegisterTaskRequest, ::tensorflow::RegisterTaskResponse>( + std::mem_fn(&grpc::CoordinationService::Service::RegisterTask), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + grpcCoordinationService_method_names[1], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< grpc::CoordinationService::Service, ::tensorflow::HeartbeatRequest, ::tensorflow::HeartbeatResponse>( + std::mem_fn(&grpc::CoordinationService::Service::Heartbeat), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + grpcCoordinationService_method_names[2], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< grpc::CoordinationService::Service, ::tensorflow::WaitForAllTasksRequest, ::tensorflow::WaitForAllTasksResponse>( + std::mem_fn(&grpc::CoordinationService::Service::WaitForAllTasks), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + 
grpcCoordinationService_method_names[3], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< grpc::CoordinationService::Service, ::tensorflow::ShutdownTaskRequest, ::tensorflow::ShutdownTaskResponse>( + std::mem_fn(&grpc::CoordinationService::Service::ShutdownTask), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + grpcCoordinationService_method_names[4], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< grpc::CoordinationService::Service, ::tensorflow::ResetTaskRequest, ::tensorflow::ResetTaskResponse>( + std::mem_fn(&grpc::CoordinationService::Service::ResetTask), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + grpcCoordinationService_method_names[5], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< grpc::CoordinationService::Service, ::tensorflow::ReportErrorToTaskRequest, ::tensorflow::ReportErrorToTaskResponse>( + std::mem_fn(&grpc::CoordinationService::Service::ReportErrorToTask), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + grpcCoordinationService_method_names[6], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< grpc::CoordinationService::Service, ::tensorflow::ReportErrorToServiceRequest, ::tensorflow::ReportErrorToServiceResponse>( + std::mem_fn(&grpc::CoordinationService::Service::ReportErrorToService), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + grpcCoordinationService_method_names[7], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< grpc::CoordinationService::Service, ::tensorflow::GetTaskStateRequest, ::tensorflow::GetTaskStateResponse>( + std::mem_fn(&grpc::CoordinationService::Service::GetTaskState), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + grpcCoordinationService_method_names[8], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< 
grpc::CoordinationService::Service, ::tensorflow::InsertKeyValueRequest, ::tensorflow::InsertKeyValueResponse>( + std::mem_fn(&grpc::CoordinationService::Service::InsertKeyValue), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + grpcCoordinationService_method_names[9], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< grpc::CoordinationService::Service, ::tensorflow::GetKeyValueRequest, ::tensorflow::GetKeyValueResponse>( + std::mem_fn(&grpc::CoordinationService::Service::GetKeyValue), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + grpcCoordinationService_method_names[10], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< grpc::CoordinationService::Service, ::tensorflow::TryGetKeyValueRequest, ::tensorflow::TryGetKeyValueResponse>( + std::mem_fn(&grpc::CoordinationService::Service::TryGetKeyValue), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + grpcCoordinationService_method_names[11], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< grpc::CoordinationService::Service, ::tensorflow::GetKeyValueDirRequest, ::tensorflow::GetKeyValueDirResponse>( + std::mem_fn(&grpc::CoordinationService::Service::GetKeyValueDir), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + grpcCoordinationService_method_names[12], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< grpc::CoordinationService::Service, ::tensorflow::DeleteKeyValueRequest, ::tensorflow::DeleteKeyValueResponse>( + std::mem_fn(&grpc::CoordinationService::Service::DeleteKeyValue), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + grpcCoordinationService_method_names[13], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< grpc::CoordinationService::Service, ::tensorflow::BarrierRequest, ::tensorflow::BarrierResponse>( + std::mem_fn(&grpc::CoordinationService::Service::Barrier), 
this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + grpcCoordinationService_method_names[14], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< grpc::CoordinationService::Service, ::tensorflow::CancelBarrierRequest, ::tensorflow::CancelBarrierResponse>( + std::mem_fn(&grpc::CoordinationService::Service::CancelBarrier), this))); +} + +grpc::CoordinationService::Service::~Service() { +} + +::grpc::Status grpc::CoordinationService::Service::RegisterTask(::grpc::ServerContext* context, const ::tensorflow::RegisterTaskRequest* request, ::tensorflow::RegisterTaskResponse* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status grpc::CoordinationService::Service::Heartbeat(::grpc::ServerContext* context, const ::tensorflow::HeartbeatRequest* request, ::tensorflow::HeartbeatResponse* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status grpc::CoordinationService::Service::WaitForAllTasks(::grpc::ServerContext* context, const ::tensorflow::WaitForAllTasksRequest* request, ::tensorflow::WaitForAllTasksResponse* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status grpc::CoordinationService::Service::ShutdownTask(::grpc::ServerContext* context, const ::tensorflow::ShutdownTaskRequest* request, ::tensorflow::ShutdownTaskResponse* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status grpc::CoordinationService::Service::ResetTask(::grpc::ServerContext* context, const ::tensorflow::ResetTaskRequest* request, ::tensorflow::ResetTaskResponse* response) { + (void) context; + (void) request; + (void) response; + return 
::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status grpc::CoordinationService::Service::ReportErrorToTask(::grpc::ServerContext* context, const ::tensorflow::ReportErrorToTaskRequest* request, ::tensorflow::ReportErrorToTaskResponse* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status grpc::CoordinationService::Service::ReportErrorToService(::grpc::ServerContext* context, const ::tensorflow::ReportErrorToServiceRequest* request, ::tensorflow::ReportErrorToServiceResponse* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status grpc::CoordinationService::Service::GetTaskState(::grpc::ServerContext* context, const ::tensorflow::GetTaskStateRequest* request, ::tensorflow::GetTaskStateResponse* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status grpc::CoordinationService::Service::InsertKeyValue(::grpc::ServerContext* context, const ::tensorflow::InsertKeyValueRequest* request, ::tensorflow::InsertKeyValueResponse* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status grpc::CoordinationService::Service::GetKeyValue(::grpc::ServerContext* context, const ::tensorflow::GetKeyValueRequest* request, ::tensorflow::GetKeyValueResponse* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status grpc::CoordinationService::Service::TryGetKeyValue(::grpc::ServerContext* context, const ::tensorflow::TryGetKeyValueRequest* request, ::tensorflow::TryGetKeyValueResponse* response) { + (void) context; + (void) request; + (void) response; + return 
::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status grpc::CoordinationService::Service::GetKeyValueDir(::grpc::ServerContext* context, const ::tensorflow::GetKeyValueDirRequest* request, ::tensorflow::GetKeyValueDirResponse* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status grpc::CoordinationService::Service::DeleteKeyValue(::grpc::ServerContext* context, const ::tensorflow::DeleteKeyValueRequest* request, ::tensorflow::DeleteKeyValueResponse* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status grpc::CoordinationService::Service::Barrier(::grpc::ServerContext* context, const ::tensorflow::BarrierRequest* request, ::tensorflow::BarrierResponse* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status grpc::CoordinationService::Service::CancelBarrier(::grpc::ServerContext* context, const ::tensorflow::CancelBarrierRequest* request, ::tensorflow::CancelBarrierResponse* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + + +} // namespace tensorflow + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/coordination_service.pb.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/coordination_service.pb.h new file mode 100644 index 0000000000000000000000000000000000000000..ad8c50891636f167d27bda264e3e64f718d5944b --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/coordination_service.pb.h @@ -0,0 +1,8531 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: tsl/protobuf/coordination_service.proto + +#ifndef GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2fcoordination_5fservice_2eproto +#define GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2fcoordination_5fservice_2eproto + +#include +#include + +#include +#if PROTOBUF_VERSION < 3021000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. +#endif +#if 3021009 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#include +#include +#include +// @@protoc_insertion_point(includes) +#include +#define PROTOBUF_INTERNAL_EXPORT_tsl_2fprotobuf_2fcoordination_5fservice_2eproto +PROTOBUF_NAMESPACE_OPEN +namespace internal { +class AnyMetadata; +} // namespace internal +PROTOBUF_NAMESPACE_CLOSE + +// Internal implementation detail -- do not use these members. 
+struct TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto { + static const uint32_t offsets[]; +}; +extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +namespace tensorflow { +class BarrierRequest; +struct BarrierRequestDefaultTypeInternal; +extern BarrierRequestDefaultTypeInternal _BarrierRequest_default_instance_; +class BarrierResponse; +struct BarrierResponseDefaultTypeInternal; +extern BarrierResponseDefaultTypeInternal _BarrierResponse_default_instance_; +class CancelBarrierRequest; +struct CancelBarrierRequestDefaultTypeInternal; +extern CancelBarrierRequestDefaultTypeInternal _CancelBarrierRequest_default_instance_; +class CancelBarrierResponse; +struct CancelBarrierResponseDefaultTypeInternal; +extern CancelBarrierResponseDefaultTypeInternal _CancelBarrierResponse_default_instance_; +class CoordinatedTask; +struct CoordinatedTaskDefaultTypeInternal; +extern CoordinatedTaskDefaultTypeInternal _CoordinatedTask_default_instance_; +class CoordinatedTaskStateInfo; +struct CoordinatedTaskStateInfoDefaultTypeInternal; +extern CoordinatedTaskStateInfoDefaultTypeInternal _CoordinatedTaskStateInfo_default_instance_; +class CoordinationServiceError; +struct CoordinationServiceErrorDefaultTypeInternal; +extern CoordinationServiceErrorDefaultTypeInternal _CoordinationServiceError_default_instance_; +class DeleteKeyValueRequest; +struct DeleteKeyValueRequestDefaultTypeInternal; +extern DeleteKeyValueRequestDefaultTypeInternal _DeleteKeyValueRequest_default_instance_; +class DeleteKeyValueResponse; +struct DeleteKeyValueResponseDefaultTypeInternal; +extern DeleteKeyValueResponseDefaultTypeInternal _DeleteKeyValueResponse_default_instance_; +class DeviceInfo; +struct DeviceInfoDefaultTypeInternal; +extern DeviceInfoDefaultTypeInternal _DeviceInfo_default_instance_; +class GetKeyValueDirRequest; +struct GetKeyValueDirRequestDefaultTypeInternal; +extern 
GetKeyValueDirRequestDefaultTypeInternal _GetKeyValueDirRequest_default_instance_; +class GetKeyValueDirResponse; +struct GetKeyValueDirResponseDefaultTypeInternal; +extern GetKeyValueDirResponseDefaultTypeInternal _GetKeyValueDirResponse_default_instance_; +class GetKeyValueRequest; +struct GetKeyValueRequestDefaultTypeInternal; +extern GetKeyValueRequestDefaultTypeInternal _GetKeyValueRequest_default_instance_; +class GetKeyValueResponse; +struct GetKeyValueResponseDefaultTypeInternal; +extern GetKeyValueResponseDefaultTypeInternal _GetKeyValueResponse_default_instance_; +class GetTaskStateRequest; +struct GetTaskStateRequestDefaultTypeInternal; +extern GetTaskStateRequestDefaultTypeInternal _GetTaskStateRequest_default_instance_; +class GetTaskStateResponse; +struct GetTaskStateResponseDefaultTypeInternal; +extern GetTaskStateResponseDefaultTypeInternal _GetTaskStateResponse_default_instance_; +class HeartbeatRequest; +struct HeartbeatRequestDefaultTypeInternal; +extern HeartbeatRequestDefaultTypeInternal _HeartbeatRequest_default_instance_; +class HeartbeatResponse; +struct HeartbeatResponseDefaultTypeInternal; +extern HeartbeatResponseDefaultTypeInternal _HeartbeatResponse_default_instance_; +class InsertKeyValueRequest; +struct InsertKeyValueRequestDefaultTypeInternal; +extern InsertKeyValueRequestDefaultTypeInternal _InsertKeyValueRequest_default_instance_; +class InsertKeyValueResponse; +struct InsertKeyValueResponseDefaultTypeInternal; +extern InsertKeyValueResponseDefaultTypeInternal _InsertKeyValueResponse_default_instance_; +class KeyValueEntry; +struct KeyValueEntryDefaultTypeInternal; +extern KeyValueEntryDefaultTypeInternal _KeyValueEntry_default_instance_; +class RegisterTaskRequest; +struct RegisterTaskRequestDefaultTypeInternal; +extern RegisterTaskRequestDefaultTypeInternal _RegisterTaskRequest_default_instance_; +class RegisterTaskResponse; +struct RegisterTaskResponseDefaultTypeInternal; +extern RegisterTaskResponseDefaultTypeInternal 
_RegisterTaskResponse_default_instance_; +class ReportErrorToServiceRequest; +struct ReportErrorToServiceRequestDefaultTypeInternal; +extern ReportErrorToServiceRequestDefaultTypeInternal _ReportErrorToServiceRequest_default_instance_; +class ReportErrorToServiceResponse; +struct ReportErrorToServiceResponseDefaultTypeInternal; +extern ReportErrorToServiceResponseDefaultTypeInternal _ReportErrorToServiceResponse_default_instance_; +class ReportErrorToTaskRequest; +struct ReportErrorToTaskRequestDefaultTypeInternal; +extern ReportErrorToTaskRequestDefaultTypeInternal _ReportErrorToTaskRequest_default_instance_; +class ReportErrorToTaskResponse; +struct ReportErrorToTaskResponseDefaultTypeInternal; +extern ReportErrorToTaskResponseDefaultTypeInternal _ReportErrorToTaskResponse_default_instance_; +class ResetTaskRequest; +struct ResetTaskRequestDefaultTypeInternal; +extern ResetTaskRequestDefaultTypeInternal _ResetTaskRequest_default_instance_; +class ResetTaskResponse; +struct ResetTaskResponseDefaultTypeInternal; +extern ResetTaskResponseDefaultTypeInternal _ResetTaskResponse_default_instance_; +class ShutdownTaskRequest; +struct ShutdownTaskRequestDefaultTypeInternal; +extern ShutdownTaskRequestDefaultTypeInternal _ShutdownTaskRequest_default_instance_; +class ShutdownTaskResponse; +struct ShutdownTaskResponseDefaultTypeInternal; +extern ShutdownTaskResponseDefaultTypeInternal _ShutdownTaskResponse_default_instance_; +class TryGetKeyValueRequest; +struct TryGetKeyValueRequestDefaultTypeInternal; +extern TryGetKeyValueRequestDefaultTypeInternal _TryGetKeyValueRequest_default_instance_; +class TryGetKeyValueResponse; +struct TryGetKeyValueResponseDefaultTypeInternal; +extern TryGetKeyValueResponseDefaultTypeInternal _TryGetKeyValueResponse_default_instance_; +class WaitForAllTasksRequest; +struct WaitForAllTasksRequestDefaultTypeInternal; +extern WaitForAllTasksRequestDefaultTypeInternal _WaitForAllTasksRequest_default_instance_; +class WaitForAllTasksResponse; 
+struct WaitForAllTasksResponseDefaultTypeInternal; +extern WaitForAllTasksResponseDefaultTypeInternal _WaitForAllTasksResponse_default_instance_; +} // namespace tensorflow +PROTOBUF_NAMESPACE_OPEN +template<> ::tensorflow::BarrierRequest* Arena::CreateMaybeMessage<::tensorflow::BarrierRequest>(Arena*); +template<> ::tensorflow::BarrierResponse* Arena::CreateMaybeMessage<::tensorflow::BarrierResponse>(Arena*); +template<> ::tensorflow::CancelBarrierRequest* Arena::CreateMaybeMessage<::tensorflow::CancelBarrierRequest>(Arena*); +template<> ::tensorflow::CancelBarrierResponse* Arena::CreateMaybeMessage<::tensorflow::CancelBarrierResponse>(Arena*); +template<> ::tensorflow::CoordinatedTask* Arena::CreateMaybeMessage<::tensorflow::CoordinatedTask>(Arena*); +template<> ::tensorflow::CoordinatedTaskStateInfo* Arena::CreateMaybeMessage<::tensorflow::CoordinatedTaskStateInfo>(Arena*); +template<> ::tensorflow::CoordinationServiceError* Arena::CreateMaybeMessage<::tensorflow::CoordinationServiceError>(Arena*); +template<> ::tensorflow::DeleteKeyValueRequest* Arena::CreateMaybeMessage<::tensorflow::DeleteKeyValueRequest>(Arena*); +template<> ::tensorflow::DeleteKeyValueResponse* Arena::CreateMaybeMessage<::tensorflow::DeleteKeyValueResponse>(Arena*); +template<> ::tensorflow::DeviceInfo* Arena::CreateMaybeMessage<::tensorflow::DeviceInfo>(Arena*); +template<> ::tensorflow::GetKeyValueDirRequest* Arena::CreateMaybeMessage<::tensorflow::GetKeyValueDirRequest>(Arena*); +template<> ::tensorflow::GetKeyValueDirResponse* Arena::CreateMaybeMessage<::tensorflow::GetKeyValueDirResponse>(Arena*); +template<> ::tensorflow::GetKeyValueRequest* Arena::CreateMaybeMessage<::tensorflow::GetKeyValueRequest>(Arena*); +template<> ::tensorflow::GetKeyValueResponse* Arena::CreateMaybeMessage<::tensorflow::GetKeyValueResponse>(Arena*); +template<> ::tensorflow::GetTaskStateRequest* Arena::CreateMaybeMessage<::tensorflow::GetTaskStateRequest>(Arena*); +template<> 
::tensorflow::GetTaskStateResponse* Arena::CreateMaybeMessage<::tensorflow::GetTaskStateResponse>(Arena*); +template<> ::tensorflow::HeartbeatRequest* Arena::CreateMaybeMessage<::tensorflow::HeartbeatRequest>(Arena*); +template<> ::tensorflow::HeartbeatResponse* Arena::CreateMaybeMessage<::tensorflow::HeartbeatResponse>(Arena*); +template<> ::tensorflow::InsertKeyValueRequest* Arena::CreateMaybeMessage<::tensorflow::InsertKeyValueRequest>(Arena*); +template<> ::tensorflow::InsertKeyValueResponse* Arena::CreateMaybeMessage<::tensorflow::InsertKeyValueResponse>(Arena*); +template<> ::tensorflow::KeyValueEntry* Arena::CreateMaybeMessage<::tensorflow::KeyValueEntry>(Arena*); +template<> ::tensorflow::RegisterTaskRequest* Arena::CreateMaybeMessage<::tensorflow::RegisterTaskRequest>(Arena*); +template<> ::tensorflow::RegisterTaskResponse* Arena::CreateMaybeMessage<::tensorflow::RegisterTaskResponse>(Arena*); +template<> ::tensorflow::ReportErrorToServiceRequest* Arena::CreateMaybeMessage<::tensorflow::ReportErrorToServiceRequest>(Arena*); +template<> ::tensorflow::ReportErrorToServiceResponse* Arena::CreateMaybeMessage<::tensorflow::ReportErrorToServiceResponse>(Arena*); +template<> ::tensorflow::ReportErrorToTaskRequest* Arena::CreateMaybeMessage<::tensorflow::ReportErrorToTaskRequest>(Arena*); +template<> ::tensorflow::ReportErrorToTaskResponse* Arena::CreateMaybeMessage<::tensorflow::ReportErrorToTaskResponse>(Arena*); +template<> ::tensorflow::ResetTaskRequest* Arena::CreateMaybeMessage<::tensorflow::ResetTaskRequest>(Arena*); +template<> ::tensorflow::ResetTaskResponse* Arena::CreateMaybeMessage<::tensorflow::ResetTaskResponse>(Arena*); +template<> ::tensorflow::ShutdownTaskRequest* Arena::CreateMaybeMessage<::tensorflow::ShutdownTaskRequest>(Arena*); +template<> ::tensorflow::ShutdownTaskResponse* Arena::CreateMaybeMessage<::tensorflow::ShutdownTaskResponse>(Arena*); +template<> ::tensorflow::TryGetKeyValueRequest* 
Arena::CreateMaybeMessage<::tensorflow::TryGetKeyValueRequest>(Arena*); +template<> ::tensorflow::TryGetKeyValueResponse* Arena::CreateMaybeMessage<::tensorflow::TryGetKeyValueResponse>(Arena*); +template<> ::tensorflow::WaitForAllTasksRequest* Arena::CreateMaybeMessage<::tensorflow::WaitForAllTasksRequest>(Arena*); +template<> ::tensorflow::WaitForAllTasksResponse* Arena::CreateMaybeMessage<::tensorflow::WaitForAllTasksResponse>(Arena*); +PROTOBUF_NAMESPACE_CLOSE +namespace tensorflow { + +enum CoordinatedTaskState : int { + TASKSTATE_UNSPECIFIED = 0, + TASKSTATE_UNINITIALIZED = 1, + TASKSTATE_DISCONNECTED = 2, + TASKSTATE_CONNECTED = 3, + TASKSTATE_ERROR = 4, + CoordinatedTaskState_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + CoordinatedTaskState_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() +}; +bool CoordinatedTaskState_IsValid(int value); +constexpr CoordinatedTaskState CoordinatedTaskState_MIN = TASKSTATE_UNSPECIFIED; +constexpr CoordinatedTaskState CoordinatedTaskState_MAX = TASKSTATE_ERROR; +constexpr int CoordinatedTaskState_ARRAYSIZE = CoordinatedTaskState_MAX + 1; + +const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* CoordinatedTaskState_descriptor(); +template +inline const std::string& CoordinatedTaskState_Name(T enum_t_value) { + static_assert(::std::is_same::value || + ::std::is_integral::value, + "Incorrect type passed to function CoordinatedTaskState_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + CoordinatedTaskState_descriptor(), enum_t_value); +} +inline bool CoordinatedTaskState_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, CoordinatedTaskState* value) { + return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( + CoordinatedTaskState_descriptor(), name, value); +} +// =================================================================== + +class CoordinatedTask final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.CoordinatedTask) */ { + 
public: + inline CoordinatedTask() : CoordinatedTask(nullptr) {} + ~CoordinatedTask() override; + explicit PROTOBUF_CONSTEXPR CoordinatedTask(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + CoordinatedTask(const CoordinatedTask& from); + CoordinatedTask(CoordinatedTask&& from) noexcept + : CoordinatedTask() { + *this = ::std::move(from); + } + + inline CoordinatedTask& operator=(const CoordinatedTask& from) { + CopyFrom(from); + return *this; + } + inline CoordinatedTask& operator=(CoordinatedTask&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const CoordinatedTask& default_instance() { + return *internal_default_instance(); + } + static inline const CoordinatedTask* internal_default_instance() { + return reinterpret_cast( + &_CoordinatedTask_default_instance_); + } + static constexpr int kIndexInFileMessages = + 0; + + friend void swap(CoordinatedTask& a, CoordinatedTask& b) { + a.Swap(&b); + } + inline void Swap(CoordinatedTask* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void 
UnsafeArenaSwap(CoordinatedTask* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + CoordinatedTask* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const CoordinatedTask& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const CoordinatedTask& from) { + CoordinatedTask::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(CoordinatedTask* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.CoordinatedTask"; + } + protected: + explicit CoordinatedTask(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors 
------------------------------------------------------- + + enum : int { + kJobNameFieldNumber = 1, + kTaskIdFieldNumber = 2, + }; + // string job_name = 1; + void clear_job_name(); + const std::string& job_name() const; + template + void set_job_name(ArgT0&& arg0, ArgT... args); + std::string* mutable_job_name(); + PROTOBUF_NODISCARD std::string* release_job_name(); + void set_allocated_job_name(std::string* job_name); + private: + const std::string& _internal_job_name() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_job_name(const std::string& value); + std::string* _internal_mutable_job_name(); + public: + + // int32 task_id = 2; + void clear_task_id(); + int32_t task_id() const; + void set_task_id(int32_t value); + private: + int32_t _internal_task_id() const; + void _internal_set_task_id(int32_t value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.CoordinatedTask) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr job_name_; + int32_t task_id_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class CoordinationServiceError final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.CoordinationServiceError) */ { + public: + inline CoordinationServiceError() : CoordinationServiceError(nullptr) {} + ~CoordinationServiceError() override; + explicit PROTOBUF_CONSTEXPR CoordinationServiceError(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + CoordinationServiceError(const CoordinationServiceError& from); + CoordinationServiceError(CoordinationServiceError&& from) 
noexcept + : CoordinationServiceError() { + *this = ::std::move(from); + } + + inline CoordinationServiceError& operator=(const CoordinationServiceError& from) { + CopyFrom(from); + return *this; + } + inline CoordinationServiceError& operator=(CoordinationServiceError&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const CoordinationServiceError& default_instance() { + return *internal_default_instance(); + } + static inline const CoordinationServiceError* internal_default_instance() { + return reinterpret_cast( + &_CoordinationServiceError_default_instance_); + } + static constexpr int kIndexInFileMessages = + 1; + + friend void swap(CoordinationServiceError& a, CoordinationServiceError& b) { + a.Swap(&b); + } + inline void Swap(CoordinationServiceError* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(CoordinationServiceError* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message 
---------------------------------------------- + + CoordinationServiceError* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const CoordinationServiceError& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const CoordinationServiceError& from) { + CoordinationServiceError::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(CoordinationServiceError* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.CoordinationServiceError"; + } + protected: + explicit CoordinationServiceError(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kSourceTaskFieldNumber = 4, + kIsReportedErrorFieldNumber = 3, + }; + // 
.tensorflow.CoordinatedTask source_task = 4; + bool has_source_task() const; + private: + bool _internal_has_source_task() const; + public: + void clear_source_task(); + const ::tensorflow::CoordinatedTask& source_task() const; + PROTOBUF_NODISCARD ::tensorflow::CoordinatedTask* release_source_task(); + ::tensorflow::CoordinatedTask* mutable_source_task(); + void set_allocated_source_task(::tensorflow::CoordinatedTask* source_task); + private: + const ::tensorflow::CoordinatedTask& _internal_source_task() const; + ::tensorflow::CoordinatedTask* _internal_mutable_source_task(); + public: + void unsafe_arena_set_allocated_source_task( + ::tensorflow::CoordinatedTask* source_task); + ::tensorflow::CoordinatedTask* unsafe_arena_release_source_task(); + + // bool is_reported_error = 3; + void clear_is_reported_error(); + bool is_reported_error() const; + void set_is_reported_error(bool value); + private: + bool _internal_is_reported_error() const; + void _internal_set_is_reported_error(bool value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.CoordinationServiceError) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::tensorflow::CoordinatedTask* source_task_; + bool is_reported_error_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class CoordinatedTaskStateInfo final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.CoordinatedTaskStateInfo) */ { + public: + inline CoordinatedTaskStateInfo() : CoordinatedTaskStateInfo(nullptr) {} + ~CoordinatedTaskStateInfo() override; + explicit PROTOBUF_CONSTEXPR 
CoordinatedTaskStateInfo(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + CoordinatedTaskStateInfo(const CoordinatedTaskStateInfo& from); + CoordinatedTaskStateInfo(CoordinatedTaskStateInfo&& from) noexcept + : CoordinatedTaskStateInfo() { + *this = ::std::move(from); + } + + inline CoordinatedTaskStateInfo& operator=(const CoordinatedTaskStateInfo& from) { + CopyFrom(from); + return *this; + } + inline CoordinatedTaskStateInfo& operator=(CoordinatedTaskStateInfo&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const CoordinatedTaskStateInfo& default_instance() { + return *internal_default_instance(); + } + static inline const CoordinatedTaskStateInfo* internal_default_instance() { + return reinterpret_cast( + &_CoordinatedTaskStateInfo_default_instance_); + } + static constexpr int kIndexInFileMessages = + 2; + + friend void swap(CoordinatedTaskStateInfo& a, CoordinatedTaskStateInfo& b) { + a.Swap(&b); + } + inline void Swap(CoordinatedTaskStateInfo* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void 
UnsafeArenaSwap(CoordinatedTaskStateInfo* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + CoordinatedTaskStateInfo* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const CoordinatedTaskStateInfo& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const CoordinatedTaskStateInfo& from) { + CoordinatedTaskStateInfo::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(CoordinatedTaskStateInfo* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.CoordinatedTaskStateInfo"; + } + protected: + explicit CoordinatedTaskStateInfo(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types 
---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kErrorMessageFieldNumber = 4, + kTaskFieldNumber = 1, + kErrorPayloadFieldNumber = 5, + kStateFieldNumber = 2, + kErrorCodeFieldNumber = 3, + }; + // string error_message = 4; + void clear_error_message(); + const std::string& error_message() const; + template + void set_error_message(ArgT0&& arg0, ArgT... args); + std::string* mutable_error_message(); + PROTOBUF_NODISCARD std::string* release_error_message(); + void set_allocated_error_message(std::string* error_message); + private: + const std::string& _internal_error_message() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_error_message(const std::string& value); + std::string* _internal_mutable_error_message(); + public: + + // .tensorflow.CoordinatedTask task = 1; + bool has_task() const; + private: + bool _internal_has_task() const; + public: + void clear_task(); + const ::tensorflow::CoordinatedTask& task() const; + PROTOBUF_NODISCARD ::tensorflow::CoordinatedTask* release_task(); + ::tensorflow::CoordinatedTask* mutable_task(); + void set_allocated_task(::tensorflow::CoordinatedTask* task); + private: + const ::tensorflow::CoordinatedTask& _internal_task() const; + ::tensorflow::CoordinatedTask* _internal_mutable_task(); + public: + void unsafe_arena_set_allocated_task( + ::tensorflow::CoordinatedTask* task); + ::tensorflow::CoordinatedTask* unsafe_arena_release_task(); + + // .tensorflow.CoordinationServiceError error_payload = 5; + bool has_error_payload() const; + private: + bool _internal_has_error_payload() const; + public: + void clear_error_payload(); + const ::tensorflow::CoordinationServiceError& error_payload() const; + PROTOBUF_NODISCARD ::tensorflow::CoordinationServiceError* release_error_payload(); + ::tensorflow::CoordinationServiceError* mutable_error_payload(); + void set_allocated_error_payload(::tensorflow::CoordinationServiceError* 
error_payload); + private: + const ::tensorflow::CoordinationServiceError& _internal_error_payload() const; + ::tensorflow::CoordinationServiceError* _internal_mutable_error_payload(); + public: + void unsafe_arena_set_allocated_error_payload( + ::tensorflow::CoordinationServiceError* error_payload); + ::tensorflow::CoordinationServiceError* unsafe_arena_release_error_payload(); + + // .tensorflow.CoordinatedTaskState state = 2; + void clear_state(); + ::tensorflow::CoordinatedTaskState state() const; + void set_state(::tensorflow::CoordinatedTaskState value); + private: + ::tensorflow::CoordinatedTaskState _internal_state() const; + void _internal_set_state(::tensorflow::CoordinatedTaskState value); + public: + + // int32 error_code = 3; + void clear_error_code(); + int32_t error_code() const; + void set_error_code(int32_t value); + private: + int32_t _internal_error_code() const; + void _internal_set_error_code(int32_t value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.CoordinatedTaskStateInfo) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr error_message_; + ::tensorflow::CoordinatedTask* task_; + ::tensorflow::CoordinationServiceError* error_payload_; + int state_; + int32_t error_code_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class DeviceInfo final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.DeviceInfo) */ { + public: + inline DeviceInfo() : DeviceInfo(nullptr) {} + ~DeviceInfo() override; + explicit PROTOBUF_CONSTEXPR 
DeviceInfo(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + DeviceInfo(const DeviceInfo& from); + DeviceInfo(DeviceInfo&& from) noexcept + : DeviceInfo() { + *this = ::std::move(from); + } + + inline DeviceInfo& operator=(const DeviceInfo& from) { + CopyFrom(from); + return *this; + } + inline DeviceInfo& operator=(DeviceInfo&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const DeviceInfo& default_instance() { + return *internal_default_instance(); + } + static inline const DeviceInfo* internal_default_instance() { + return reinterpret_cast( + &_DeviceInfo_default_instance_); + } + static constexpr int kIndexInFileMessages = + 3; + + friend void swap(DeviceInfo& a, DeviceInfo& b) { + a.Swap(&b); + } + inline void Swap(DeviceInfo* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(DeviceInfo* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + 
+ DeviceInfo* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const DeviceInfo& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const DeviceInfo& from) { + DeviceInfo::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(DeviceInfo* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.DeviceInfo"; + } + protected: + explicit DeviceInfo(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kDeviceFieldNumber = 1, + }; + // repeated .google.protobuf.Any device = 1; + int device_size() const; + private: + int _internal_device_size() const; + public: + void clear_device(); + ::PROTOBUF_NAMESPACE_ID::Any* 
mutable_device(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::PROTOBUF_NAMESPACE_ID::Any >* + mutable_device(); + private: + const ::PROTOBUF_NAMESPACE_ID::Any& _internal_device(int index) const; + ::PROTOBUF_NAMESPACE_ID::Any* _internal_add_device(); + public: + const ::PROTOBUF_NAMESPACE_ID::Any& device(int index) const; + ::PROTOBUF_NAMESPACE_ID::Any* add_device(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::PROTOBUF_NAMESPACE_ID::Any >& + device() const; + + // @@protoc_insertion_point(class_scope:tensorflow.DeviceInfo) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::PROTOBUF_NAMESPACE_ID::Any > device_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class RegisterTaskRequest final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.RegisterTaskRequest) */ { + public: + inline RegisterTaskRequest() : RegisterTaskRequest(nullptr) {} + ~RegisterTaskRequest() override; + explicit PROTOBUF_CONSTEXPR RegisterTaskRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + RegisterTaskRequest(const RegisterTaskRequest& from); + RegisterTaskRequest(RegisterTaskRequest&& from) noexcept + : RegisterTaskRequest() { + *this = ::std::move(from); + } + + inline RegisterTaskRequest& operator=(const RegisterTaskRequest& from) { + CopyFrom(from); + return *this; + } + inline RegisterTaskRequest& operator=(RegisterTaskRequest&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr 
+ #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const RegisterTaskRequest& default_instance() { + return *internal_default_instance(); + } + static inline const RegisterTaskRequest* internal_default_instance() { + return reinterpret_cast( + &_RegisterTaskRequest_default_instance_); + } + static constexpr int kIndexInFileMessages = + 4; + + friend void swap(RegisterTaskRequest& a, RegisterTaskRequest& b) { + a.Swap(&b); + } + inline void Swap(RegisterTaskRequest* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(RegisterTaskRequest* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + RegisterTaskRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const RegisterTaskRequest& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const RegisterTaskRequest& from) { + RegisterTaskRequest::MergeImpl(*this, from); + } + private: + static void 
MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(RegisterTaskRequest* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.RegisterTaskRequest"; + } + protected: + explicit RegisterTaskRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kSourceTaskFieldNumber = 5, + kIncarnationFieldNumber = 3, + }; + // .tensorflow.CoordinatedTask source_task = 5; + bool has_source_task() const; + private: + bool _internal_has_source_task() const; + public: + void clear_source_task(); + const ::tensorflow::CoordinatedTask& source_task() const; + PROTOBUF_NODISCARD ::tensorflow::CoordinatedTask* release_source_task(); + ::tensorflow::CoordinatedTask* mutable_source_task(); + void set_allocated_source_task(::tensorflow::CoordinatedTask* source_task); + private: + const ::tensorflow::CoordinatedTask& _internal_source_task() 
const; + ::tensorflow::CoordinatedTask* _internal_mutable_source_task(); + public: + void unsafe_arena_set_allocated_source_task( + ::tensorflow::CoordinatedTask* source_task); + ::tensorflow::CoordinatedTask* unsafe_arena_release_source_task(); + + // fixed64 incarnation = 3; + void clear_incarnation(); + uint64_t incarnation() const; + void set_incarnation(uint64_t value); + private: + uint64_t _internal_incarnation() const; + void _internal_set_incarnation(uint64_t value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.RegisterTaskRequest) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::tensorflow::CoordinatedTask* source_task_; + uint64_t incarnation_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class RegisterTaskResponse final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.RegisterTaskResponse) */ { + public: + inline RegisterTaskResponse() : RegisterTaskResponse(nullptr) {} + ~RegisterTaskResponse() override; + explicit PROTOBUF_CONSTEXPR RegisterTaskResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + RegisterTaskResponse(const RegisterTaskResponse& from); + RegisterTaskResponse(RegisterTaskResponse&& from) noexcept + : RegisterTaskResponse() { + *this = ::std::move(from); + } + + inline RegisterTaskResponse& operator=(const RegisterTaskResponse& from) { + CopyFrom(from); + return *this; + } + inline RegisterTaskResponse& operator=(RegisterTaskResponse&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && 
GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const RegisterTaskResponse& default_instance() { + return *internal_default_instance(); + } + static inline const RegisterTaskResponse* internal_default_instance() { + return reinterpret_cast( + &_RegisterTaskResponse_default_instance_); + } + static constexpr int kIndexInFileMessages = + 5; + + friend void swap(RegisterTaskResponse& a, RegisterTaskResponse& b) { + a.Swap(&b); + } + inline void Swap(RegisterTaskResponse* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(RegisterTaskResponse* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + RegisterTaskResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const RegisterTaskResponse& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const RegisterTaskResponse& from) { + RegisterTaskResponse::MergeImpl(*this, from); + } + private: + static 
void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(RegisterTaskResponse* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.RegisterTaskResponse"; + } + protected: + explicit RegisterTaskResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kLeaderIncarnationFieldNumber = 1, + }; + // fixed64 leader_incarnation = 1; + void clear_leader_incarnation(); + uint64_t leader_incarnation() const; + void set_leader_incarnation(uint64_t value); + private: + uint64_t _internal_leader_incarnation() const; + void _internal_set_leader_incarnation(uint64_t value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.RegisterTaskResponse) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void 
DestructorSkippable_; + struct Impl_ { + uint64_t leader_incarnation_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class HeartbeatRequest final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.HeartbeatRequest) */ { + public: + inline HeartbeatRequest() : HeartbeatRequest(nullptr) {} + ~HeartbeatRequest() override; + explicit PROTOBUF_CONSTEXPR HeartbeatRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + HeartbeatRequest(const HeartbeatRequest& from); + HeartbeatRequest(HeartbeatRequest&& from) noexcept + : HeartbeatRequest() { + *this = ::std::move(from); + } + + inline HeartbeatRequest& operator=(const HeartbeatRequest& from) { + CopyFrom(from); + return *this; + } + inline HeartbeatRequest& operator=(HeartbeatRequest&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const HeartbeatRequest& default_instance() { + return *internal_default_instance(); + } + static inline const HeartbeatRequest* internal_default_instance() { + return reinterpret_cast( + &_HeartbeatRequest_default_instance_); + } + static constexpr int kIndexInFileMessages = + 6; + + friend void swap(HeartbeatRequest& a, 
HeartbeatRequest& b) { + a.Swap(&b); + } + inline void Swap(HeartbeatRequest* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(HeartbeatRequest* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + HeartbeatRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const HeartbeatRequest& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const HeartbeatRequest& from) { + HeartbeatRequest::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(HeartbeatRequest* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece 
FullMessageName() { + return "tensorflow.HeartbeatRequest"; + } + protected: + explicit HeartbeatRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kSourceTaskFieldNumber = 4, + kIncarnationFieldNumber = 3, + }; + // .tensorflow.CoordinatedTask source_task = 4; + bool has_source_task() const; + private: + bool _internal_has_source_task() const; + public: + void clear_source_task(); + const ::tensorflow::CoordinatedTask& source_task() const; + PROTOBUF_NODISCARD ::tensorflow::CoordinatedTask* release_source_task(); + ::tensorflow::CoordinatedTask* mutable_source_task(); + void set_allocated_source_task(::tensorflow::CoordinatedTask* source_task); + private: + const ::tensorflow::CoordinatedTask& _internal_source_task() const; + ::tensorflow::CoordinatedTask* _internal_mutable_source_task(); + public: + void unsafe_arena_set_allocated_source_task( + ::tensorflow::CoordinatedTask* source_task); + ::tensorflow::CoordinatedTask* unsafe_arena_release_source_task(); + + // fixed64 incarnation = 3; + void clear_incarnation(); + uint64_t incarnation() const; + void set_incarnation(uint64_t value); + private: + uint64_t _internal_incarnation() const; + void _internal_set_incarnation(uint64_t value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.HeartbeatRequest) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::tensorflow::CoordinatedTask* source_task_; + uint64_t incarnation_; + mutable 
::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class HeartbeatResponse final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.HeartbeatResponse) */ { + public: + inline HeartbeatResponse() : HeartbeatResponse(nullptr) {} + ~HeartbeatResponse() override; + explicit PROTOBUF_CONSTEXPR HeartbeatResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + HeartbeatResponse(const HeartbeatResponse& from); + HeartbeatResponse(HeartbeatResponse&& from) noexcept + : HeartbeatResponse() { + *this = ::std::move(from); + } + + inline HeartbeatResponse& operator=(const HeartbeatResponse& from) { + CopyFrom(from); + return *this; + } + inline HeartbeatResponse& operator=(HeartbeatResponse&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const HeartbeatResponse& default_instance() { + return *internal_default_instance(); + } + static inline const HeartbeatResponse* internal_default_instance() { + return reinterpret_cast( + &_HeartbeatResponse_default_instance_); + } + static constexpr int kIndexInFileMessages = + 7; + + friend void swap(HeartbeatResponse& a, HeartbeatResponse& b) { + a.Swap(&b); + } + inline void 
Swap(HeartbeatResponse* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(HeartbeatResponse* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + HeartbeatResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const HeartbeatResponse& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const HeartbeatResponse& from) { + HeartbeatResponse::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(HeartbeatResponse* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return 
"tensorflow.HeartbeatResponse"; + } + protected: + explicit HeartbeatResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kLeaderIncarnationFieldNumber = 1, + }; + // fixed64 leader_incarnation = 1; + void clear_leader_incarnation(); + uint64_t leader_incarnation() const; + void set_leader_incarnation(uint64_t value); + private: + uint64_t _internal_leader_incarnation() const; + void _internal_set_leader_incarnation(uint64_t value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.HeartbeatResponse) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + uint64_t leader_incarnation_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class WaitForAllTasksRequest final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.WaitForAllTasksRequest) */ { + public: + inline WaitForAllTasksRequest() : WaitForAllTasksRequest(nullptr) {} + ~WaitForAllTasksRequest() override; + explicit PROTOBUF_CONSTEXPR WaitForAllTasksRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + WaitForAllTasksRequest(const WaitForAllTasksRequest& from); + WaitForAllTasksRequest(WaitForAllTasksRequest&& from) noexcept + : WaitForAllTasksRequest() { + *this = ::std::move(from); + } + + 
inline WaitForAllTasksRequest& operator=(const WaitForAllTasksRequest& from) { + CopyFrom(from); + return *this; + } + inline WaitForAllTasksRequest& operator=(WaitForAllTasksRequest&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const WaitForAllTasksRequest& default_instance() { + return *internal_default_instance(); + } + static inline const WaitForAllTasksRequest* internal_default_instance() { + return reinterpret_cast( + &_WaitForAllTasksRequest_default_instance_); + } + static constexpr int kIndexInFileMessages = + 8; + + friend void swap(WaitForAllTasksRequest& a, WaitForAllTasksRequest& b) { + a.Swap(&b); + } + inline void Swap(WaitForAllTasksRequest* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(WaitForAllTasksRequest* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + WaitForAllTasksRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = 
nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const WaitForAllTasksRequest& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const WaitForAllTasksRequest& from) { + WaitForAllTasksRequest::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(WaitForAllTasksRequest* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.WaitForAllTasksRequest"; + } + protected: + explicit WaitForAllTasksRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kSourceTaskFieldNumber = 5, + kDeviceInfoFieldNumber = 6, + }; + // .tensorflow.CoordinatedTask source_task = 5; + bool has_source_task() const; + private: + bool _internal_has_source_task() const; + public: + void 
clear_source_task(); + const ::tensorflow::CoordinatedTask& source_task() const; + PROTOBUF_NODISCARD ::tensorflow::CoordinatedTask* release_source_task(); + ::tensorflow::CoordinatedTask* mutable_source_task(); + void set_allocated_source_task(::tensorflow::CoordinatedTask* source_task); + private: + const ::tensorflow::CoordinatedTask& _internal_source_task() const; + ::tensorflow::CoordinatedTask* _internal_mutable_source_task(); + public: + void unsafe_arena_set_allocated_source_task( + ::tensorflow::CoordinatedTask* source_task); + ::tensorflow::CoordinatedTask* unsafe_arena_release_source_task(); + + // .tensorflow.DeviceInfo device_info = 6; + bool has_device_info() const; + private: + bool _internal_has_device_info() const; + public: + void clear_device_info(); + const ::tensorflow::DeviceInfo& device_info() const; + PROTOBUF_NODISCARD ::tensorflow::DeviceInfo* release_device_info(); + ::tensorflow::DeviceInfo* mutable_device_info(); + void set_allocated_device_info(::tensorflow::DeviceInfo* device_info); + private: + const ::tensorflow::DeviceInfo& _internal_device_info() const; + ::tensorflow::DeviceInfo* _internal_mutable_device_info(); + public: + void unsafe_arena_set_allocated_device_info( + ::tensorflow::DeviceInfo* device_info); + ::tensorflow::DeviceInfo* unsafe_arena_release_device_info(); + + // @@protoc_insertion_point(class_scope:tensorflow.WaitForAllTasksRequest) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::tensorflow::CoordinatedTask* source_task_; + ::tensorflow::DeviceInfo* device_info_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class WaitForAllTasksResponse final : + 
public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.WaitForAllTasksResponse) */ { + public: + inline WaitForAllTasksResponse() : WaitForAllTasksResponse(nullptr) {} + ~WaitForAllTasksResponse() override; + explicit PROTOBUF_CONSTEXPR WaitForAllTasksResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + WaitForAllTasksResponse(const WaitForAllTasksResponse& from); + WaitForAllTasksResponse(WaitForAllTasksResponse&& from) noexcept + : WaitForAllTasksResponse() { + *this = ::std::move(from); + } + + inline WaitForAllTasksResponse& operator=(const WaitForAllTasksResponse& from) { + CopyFrom(from); + return *this; + } + inline WaitForAllTasksResponse& operator=(WaitForAllTasksResponse&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const WaitForAllTasksResponse& default_instance() { + return *internal_default_instance(); + } + static inline const WaitForAllTasksResponse* internal_default_instance() { + return reinterpret_cast( + &_WaitForAllTasksResponse_default_instance_); + } + static constexpr int kIndexInFileMessages = + 9; + + friend void swap(WaitForAllTasksResponse& a, WaitForAllTasksResponse& b) { + a.Swap(&b); + } + inline void Swap(WaitForAllTasksResponse* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == 
other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(WaitForAllTasksResponse* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + WaitForAllTasksResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const WaitForAllTasksResponse& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const WaitForAllTasksResponse& from) { + WaitForAllTasksResponse::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(WaitForAllTasksResponse* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.WaitForAllTasksResponse"; + } + protected: + explicit WaitForAllTasksResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool 
is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kDeviceInfoFieldNumber = 4, + kLeaderIncarnationFieldNumber = 1, + }; + // .tensorflow.DeviceInfo device_info = 4; + bool has_device_info() const; + private: + bool _internal_has_device_info() const; + public: + void clear_device_info(); + const ::tensorflow::DeviceInfo& device_info() const; + PROTOBUF_NODISCARD ::tensorflow::DeviceInfo* release_device_info(); + ::tensorflow::DeviceInfo* mutable_device_info(); + void set_allocated_device_info(::tensorflow::DeviceInfo* device_info); + private: + const ::tensorflow::DeviceInfo& _internal_device_info() const; + ::tensorflow::DeviceInfo* _internal_mutable_device_info(); + public: + void unsafe_arena_set_allocated_device_info( + ::tensorflow::DeviceInfo* device_info); + ::tensorflow::DeviceInfo* unsafe_arena_release_device_info(); + + // fixed64 leader_incarnation = 1; + void clear_leader_incarnation(); + uint64_t leader_incarnation() const; + void set_leader_incarnation(uint64_t value); + private: + uint64_t _internal_leader_incarnation() const; + void _internal_set_leader_incarnation(uint64_t value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.WaitForAllTasksResponse) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::tensorflow::DeviceInfo* device_info_; + uint64_t leader_incarnation_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; 
+}; +// ------------------------------------------------------------------- + +class ShutdownTaskRequest final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.ShutdownTaskRequest) */ { + public: + inline ShutdownTaskRequest() : ShutdownTaskRequest(nullptr) {} + ~ShutdownTaskRequest() override; + explicit PROTOBUF_CONSTEXPR ShutdownTaskRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + ShutdownTaskRequest(const ShutdownTaskRequest& from); + ShutdownTaskRequest(ShutdownTaskRequest&& from) noexcept + : ShutdownTaskRequest() { + *this = ::std::move(from); + } + + inline ShutdownTaskRequest& operator=(const ShutdownTaskRequest& from) { + CopyFrom(from); + return *this; + } + inline ShutdownTaskRequest& operator=(ShutdownTaskRequest&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const ShutdownTaskRequest& default_instance() { + return *internal_default_instance(); + } + static inline const ShutdownTaskRequest* internal_default_instance() { + return reinterpret_cast( + &_ShutdownTaskRequest_default_instance_); + } + static constexpr int kIndexInFileMessages = + 10; + + friend void swap(ShutdownTaskRequest& a, ShutdownTaskRequest& b) { + a.Swap(&b); + } + inline void Swap(ShutdownTaskRequest* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + 
GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(ShutdownTaskRequest* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + ShutdownTaskRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const ShutdownTaskRequest& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const ShutdownTaskRequest& from) { + ShutdownTaskRequest::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(ShutdownTaskRequest* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.ShutdownTaskRequest"; + } + protected: + explicit ShutdownTaskRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = 
false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kSourceTaskFieldNumber = 1, + }; + // .tensorflow.CoordinatedTask source_task = 1; + bool has_source_task() const; + private: + bool _internal_has_source_task() const; + public: + void clear_source_task(); + const ::tensorflow::CoordinatedTask& source_task() const; + PROTOBUF_NODISCARD ::tensorflow::CoordinatedTask* release_source_task(); + ::tensorflow::CoordinatedTask* mutable_source_task(); + void set_allocated_source_task(::tensorflow::CoordinatedTask* source_task); + private: + const ::tensorflow::CoordinatedTask& _internal_source_task() const; + ::tensorflow::CoordinatedTask* _internal_mutable_source_task(); + public: + void unsafe_arena_set_allocated_source_task( + ::tensorflow::CoordinatedTask* source_task); + ::tensorflow::CoordinatedTask* unsafe_arena_release_source_task(); + + // @@protoc_insertion_point(class_scope:tensorflow.ShutdownTaskRequest) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::tensorflow::CoordinatedTask* source_task_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class ShutdownTaskResponse final : + public ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase /* @@protoc_insertion_point(class_definition:tensorflow.ShutdownTaskResponse) */ { + public: + inline ShutdownTaskResponse() : 
ShutdownTaskResponse(nullptr) {} + explicit PROTOBUF_CONSTEXPR ShutdownTaskResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + ShutdownTaskResponse(const ShutdownTaskResponse& from); + ShutdownTaskResponse(ShutdownTaskResponse&& from) noexcept + : ShutdownTaskResponse() { + *this = ::std::move(from); + } + + inline ShutdownTaskResponse& operator=(const ShutdownTaskResponse& from) { + CopyFrom(from); + return *this; + } + inline ShutdownTaskResponse& operator=(ShutdownTaskResponse&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const ShutdownTaskResponse& default_instance() { + return *internal_default_instance(); + } + static inline const ShutdownTaskResponse* internal_default_instance() { + return reinterpret_cast( + &_ShutdownTaskResponse_default_instance_); + } + static constexpr int kIndexInFileMessages = + 11; + + friend void swap(ShutdownTaskResponse& a, ShutdownTaskResponse& b) { + a.Swap(&b); + } + inline void Swap(ShutdownTaskResponse* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void 
UnsafeArenaSwap(ShutdownTaskResponse* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + ShutdownTaskResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyFrom; + inline void CopyFrom(const ShutdownTaskResponse& from) { + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyImpl(*this, from); + } + using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeFrom; + void MergeFrom(const ShutdownTaskResponse& from) { + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeImpl(*this, from); + } + public: + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.ShutdownTaskResponse"; + } + protected: + explicit ShutdownTaskResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // @@protoc_insertion_point(class_scope:tensorflow.ShutdownTaskResponse) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class ResetTaskRequest final : + public ::PROTOBUF_NAMESPACE_ID::Message /* 
@@protoc_insertion_point(class_definition:tensorflow.ResetTaskRequest) */ { + public: + inline ResetTaskRequest() : ResetTaskRequest(nullptr) {} + ~ResetTaskRequest() override; + explicit PROTOBUF_CONSTEXPR ResetTaskRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + ResetTaskRequest(const ResetTaskRequest& from); + ResetTaskRequest(ResetTaskRequest&& from) noexcept + : ResetTaskRequest() { + *this = ::std::move(from); + } + + inline ResetTaskRequest& operator=(const ResetTaskRequest& from) { + CopyFrom(from); + return *this; + } + inline ResetTaskRequest& operator=(ResetTaskRequest&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const ResetTaskRequest& default_instance() { + return *internal_default_instance(); + } + static inline const ResetTaskRequest* internal_default_instance() { + return reinterpret_cast( + &_ResetTaskRequest_default_instance_); + } + static constexpr int kIndexInFileMessages = + 12; + + friend void swap(ResetTaskRequest& a, ResetTaskRequest& b) { + a.Swap(&b); + } + inline void Swap(ResetTaskRequest* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + 
::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(ResetTaskRequest* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + ResetTaskRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const ResetTaskRequest& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const ResetTaskRequest& from) { + ResetTaskRequest::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(ResetTaskRequest* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.ResetTaskRequest"; + } + protected: + explicit ResetTaskRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types 
---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kSourceTaskFieldNumber = 1, + }; + // .tensorflow.CoordinatedTask source_task = 1; + bool has_source_task() const; + private: + bool _internal_has_source_task() const; + public: + void clear_source_task(); + const ::tensorflow::CoordinatedTask& source_task() const; + PROTOBUF_NODISCARD ::tensorflow::CoordinatedTask* release_source_task(); + ::tensorflow::CoordinatedTask* mutable_source_task(); + void set_allocated_source_task(::tensorflow::CoordinatedTask* source_task); + private: + const ::tensorflow::CoordinatedTask& _internal_source_task() const; + ::tensorflow::CoordinatedTask* _internal_mutable_source_task(); + public: + void unsafe_arena_set_allocated_source_task( + ::tensorflow::CoordinatedTask* source_task); + ::tensorflow::CoordinatedTask* unsafe_arena_release_source_task(); + + // @@protoc_insertion_point(class_scope:tensorflow.ResetTaskRequest) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::tensorflow::CoordinatedTask* source_task_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class ResetTaskResponse final : + public ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase /* @@protoc_insertion_point(class_definition:tensorflow.ResetTaskResponse) */ { + public: + inline ResetTaskResponse() : ResetTaskResponse(nullptr) {} + explicit PROTOBUF_CONSTEXPR ResetTaskResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + ResetTaskResponse(const ResetTaskResponse& from); + ResetTaskResponse(ResetTaskResponse&& from) noexcept + : 
ResetTaskResponse() { + *this = ::std::move(from); + } + + inline ResetTaskResponse& operator=(const ResetTaskResponse& from) { + CopyFrom(from); + return *this; + } + inline ResetTaskResponse& operator=(ResetTaskResponse&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const ResetTaskResponse& default_instance() { + return *internal_default_instance(); + } + static inline const ResetTaskResponse* internal_default_instance() { + return reinterpret_cast( + &_ResetTaskResponse_default_instance_); + } + static constexpr int kIndexInFileMessages = + 13; + + friend void swap(ResetTaskResponse& a, ResetTaskResponse& b) { + a.Swap(&b); + } + inline void Swap(ResetTaskResponse* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(ResetTaskResponse* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + ResetTaskResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = 
nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyFrom; + inline void CopyFrom(const ResetTaskResponse& from) { + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyImpl(*this, from); + } + using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeFrom; + void MergeFrom(const ResetTaskResponse& from) { + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeImpl(*this, from); + } + public: + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.ResetTaskResponse"; + } + protected: + explicit ResetTaskResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // @@protoc_insertion_point(class_scope:tensorflow.ResetTaskResponse) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class ReportErrorToTaskRequest final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.ReportErrorToTaskRequest) */ { + public: + inline ReportErrorToTaskRequest() : ReportErrorToTaskRequest(nullptr) {} + ~ReportErrorToTaskRequest() override; + explicit PROTOBUF_CONSTEXPR ReportErrorToTaskRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + 
ReportErrorToTaskRequest(const ReportErrorToTaskRequest& from); + ReportErrorToTaskRequest(ReportErrorToTaskRequest&& from) noexcept + : ReportErrorToTaskRequest() { + *this = ::std::move(from); + } + + inline ReportErrorToTaskRequest& operator=(const ReportErrorToTaskRequest& from) { + CopyFrom(from); + return *this; + } + inline ReportErrorToTaskRequest& operator=(ReportErrorToTaskRequest&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const ReportErrorToTaskRequest& default_instance() { + return *internal_default_instance(); + } + static inline const ReportErrorToTaskRequest* internal_default_instance() { + return reinterpret_cast( + &_ReportErrorToTaskRequest_default_instance_); + } + static constexpr int kIndexInFileMessages = + 14; + + friend void swap(ReportErrorToTaskRequest& a, ReportErrorToTaskRequest& b) { + a.Swap(&b); + } + inline void Swap(ReportErrorToTaskRequest* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(ReportErrorToTaskRequest* other) { + if (other == this) return; + 
GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + ReportErrorToTaskRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const ReportErrorToTaskRequest& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const ReportErrorToTaskRequest& from) { + ReportErrorToTaskRequest::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(ReportErrorToTaskRequest* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.ReportErrorToTaskRequest"; + } + protected: + explicit ReportErrorToTaskRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors 
------------------------------------------------------- + + enum : int { + kErrorMessageFieldNumber = 2, + kErrorPayloadFieldNumber = 5, + kErrorCodeFieldNumber = 1, + }; + // string error_message = 2; + void clear_error_message(); + const std::string& error_message() const; + template + void set_error_message(ArgT0&& arg0, ArgT... args); + std::string* mutable_error_message(); + PROTOBUF_NODISCARD std::string* release_error_message(); + void set_allocated_error_message(std::string* error_message); + private: + const std::string& _internal_error_message() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_error_message(const std::string& value); + std::string* _internal_mutable_error_message(); + public: + + // .tensorflow.CoordinationServiceError error_payload = 5; + bool has_error_payload() const; + private: + bool _internal_has_error_payload() const; + public: + void clear_error_payload(); + const ::tensorflow::CoordinationServiceError& error_payload() const; + PROTOBUF_NODISCARD ::tensorflow::CoordinationServiceError* release_error_payload(); + ::tensorflow::CoordinationServiceError* mutable_error_payload(); + void set_allocated_error_payload(::tensorflow::CoordinationServiceError* error_payload); + private: + const ::tensorflow::CoordinationServiceError& _internal_error_payload() const; + ::tensorflow::CoordinationServiceError* _internal_mutable_error_payload(); + public: + void unsafe_arena_set_allocated_error_payload( + ::tensorflow::CoordinationServiceError* error_payload); + ::tensorflow::CoordinationServiceError* unsafe_arena_release_error_payload(); + + // int32 error_code = 1; + void clear_error_code(); + int32_t error_code() const; + void set_error_code(int32_t value); + private: + int32_t _internal_error_code() const; + void _internal_set_error_code(int32_t value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.ReportErrorToTaskRequest) + private: + class _Internal; + + template friend class 
::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr error_message_; + ::tensorflow::CoordinationServiceError* error_payload_; + int32_t error_code_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class ReportErrorToTaskResponse final : + public ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase /* @@protoc_insertion_point(class_definition:tensorflow.ReportErrorToTaskResponse) */ { + public: + inline ReportErrorToTaskResponse() : ReportErrorToTaskResponse(nullptr) {} + explicit PROTOBUF_CONSTEXPR ReportErrorToTaskResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + ReportErrorToTaskResponse(const ReportErrorToTaskResponse& from); + ReportErrorToTaskResponse(ReportErrorToTaskResponse&& from) noexcept + : ReportErrorToTaskResponse() { + *this = ::std::move(from); + } + + inline ReportErrorToTaskResponse& operator=(const ReportErrorToTaskResponse& from) { + CopyFrom(from); + return *this; + } + inline ReportErrorToTaskResponse& operator=(ReportErrorToTaskResponse&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } 
+ static const ReportErrorToTaskResponse& default_instance() { + return *internal_default_instance(); + } + static inline const ReportErrorToTaskResponse* internal_default_instance() { + return reinterpret_cast( + &_ReportErrorToTaskResponse_default_instance_); + } + static constexpr int kIndexInFileMessages = + 15; + + friend void swap(ReportErrorToTaskResponse& a, ReportErrorToTaskResponse& b) { + a.Swap(&b); + } + inline void Swap(ReportErrorToTaskResponse* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(ReportErrorToTaskResponse* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + ReportErrorToTaskResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyFrom; + inline void CopyFrom(const ReportErrorToTaskResponse& from) { + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyImpl(*this, from); + } + using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeFrom; + void MergeFrom(const ReportErrorToTaskResponse& from) { + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeImpl(*this, from); + } + public: + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.ReportErrorToTaskResponse"; + } + protected: + explicit ReportErrorToTaskResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = 
false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // @@protoc_insertion_point(class_scope:tensorflow.ReportErrorToTaskResponse) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class ReportErrorToServiceRequest final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.ReportErrorToServiceRequest) */ { + public: + inline ReportErrorToServiceRequest() : ReportErrorToServiceRequest(nullptr) {} + ~ReportErrorToServiceRequest() override; + explicit PROTOBUF_CONSTEXPR ReportErrorToServiceRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + ReportErrorToServiceRequest(const ReportErrorToServiceRequest& from); + ReportErrorToServiceRequest(ReportErrorToServiceRequest&& from) noexcept + : ReportErrorToServiceRequest() { + *this = ::std::move(from); + } + + inline ReportErrorToServiceRequest& operator=(const ReportErrorToServiceRequest& from) { + CopyFrom(from); + return *this; + } + inline ReportErrorToServiceRequest& operator=(ReportErrorToServiceRequest&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* 
descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const ReportErrorToServiceRequest& default_instance() { + return *internal_default_instance(); + } + static inline const ReportErrorToServiceRequest* internal_default_instance() { + return reinterpret_cast( + &_ReportErrorToServiceRequest_default_instance_); + } + static constexpr int kIndexInFileMessages = + 16; + + friend void swap(ReportErrorToServiceRequest& a, ReportErrorToServiceRequest& b) { + a.Swap(&b); + } + inline void Swap(ReportErrorToServiceRequest* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(ReportErrorToServiceRequest* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + ReportErrorToServiceRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const ReportErrorToServiceRequest& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const ReportErrorToServiceRequest& from) { + ReportErrorToServiceRequest::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + 
PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(ReportErrorToServiceRequest* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.ReportErrorToServiceRequest"; + } + protected: + explicit ReportErrorToServiceRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kErrorMessageFieldNumber = 2, + kErrorOriginFieldNumber = 5, + kErrorCodeFieldNumber = 1, + }; + // string error_message = 2; + void clear_error_message(); + const std::string& error_message() const; + template + void set_error_message(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_error_message(); + PROTOBUF_NODISCARD std::string* release_error_message(); + void set_allocated_error_message(std::string* error_message); + private: + const std::string& _internal_error_message() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_error_message(const std::string& value); + std::string* _internal_mutable_error_message(); + public: + + // .tensorflow.CoordinatedTask error_origin = 5; + bool has_error_origin() const; + private: + bool _internal_has_error_origin() const; + public: + void clear_error_origin(); + const ::tensorflow::CoordinatedTask& error_origin() const; + PROTOBUF_NODISCARD ::tensorflow::CoordinatedTask* release_error_origin(); + ::tensorflow::CoordinatedTask* mutable_error_origin(); + void set_allocated_error_origin(::tensorflow::CoordinatedTask* error_origin); + private: + const ::tensorflow::CoordinatedTask& _internal_error_origin() const; + ::tensorflow::CoordinatedTask* _internal_mutable_error_origin(); + public: + void unsafe_arena_set_allocated_error_origin( + ::tensorflow::CoordinatedTask* error_origin); + ::tensorflow::CoordinatedTask* unsafe_arena_release_error_origin(); + + // int32 error_code = 1; + void clear_error_code(); + int32_t error_code() const; + void set_error_code(int32_t value); + private: + int32_t _internal_error_code() const; + void _internal_set_error_code(int32_t value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.ReportErrorToServiceRequest) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr error_message_; + ::tensorflow::CoordinatedTask* error_origin_; + int32_t error_code_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct 
::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class ReportErrorToServiceResponse final : + public ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase /* @@protoc_insertion_point(class_definition:tensorflow.ReportErrorToServiceResponse) */ { + public: + inline ReportErrorToServiceResponse() : ReportErrorToServiceResponse(nullptr) {} + explicit PROTOBUF_CONSTEXPR ReportErrorToServiceResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + ReportErrorToServiceResponse(const ReportErrorToServiceResponse& from); + ReportErrorToServiceResponse(ReportErrorToServiceResponse&& from) noexcept + : ReportErrorToServiceResponse() { + *this = ::std::move(from); + } + + inline ReportErrorToServiceResponse& operator=(const ReportErrorToServiceResponse& from) { + CopyFrom(from); + return *this; + } + inline ReportErrorToServiceResponse& operator=(ReportErrorToServiceResponse&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const ReportErrorToServiceResponse& default_instance() { + return *internal_default_instance(); + } + static inline const ReportErrorToServiceResponse* internal_default_instance() { + return reinterpret_cast( + &_ReportErrorToServiceResponse_default_instance_); + } + static constexpr int kIndexInFileMessages = + 17; + + friend void 
swap(ReportErrorToServiceResponse& a, ReportErrorToServiceResponse& b) { + a.Swap(&b); + } + inline void Swap(ReportErrorToServiceResponse* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(ReportErrorToServiceResponse* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + ReportErrorToServiceResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyFrom; + inline void CopyFrom(const ReportErrorToServiceResponse& from) { + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyImpl(*this, from); + } + using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeFrom; + void MergeFrom(const ReportErrorToServiceResponse& from) { + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeImpl(*this, from); + } + public: + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.ReportErrorToServiceResponse"; + } + protected: + explicit ReportErrorToServiceResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors 
------------------------------------------------------- + + // @@protoc_insertion_point(class_scope:tensorflow.ReportErrorToServiceResponse) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class GetTaskStateRequest final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.GetTaskStateRequest) */ { + public: + inline GetTaskStateRequest() : GetTaskStateRequest(nullptr) {} + ~GetTaskStateRequest() override; + explicit PROTOBUF_CONSTEXPR GetTaskStateRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + GetTaskStateRequest(const GetTaskStateRequest& from); + GetTaskStateRequest(GetTaskStateRequest&& from) noexcept + : GetTaskStateRequest() { + *this = ::std::move(from); + } + + inline GetTaskStateRequest& operator=(const GetTaskStateRequest& from) { + CopyFrom(from); + return *this; + } + inline GetTaskStateRequest& operator=(GetTaskStateRequest&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const GetTaskStateRequest& default_instance() { + return *internal_default_instance(); + } + static inline 
const GetTaskStateRequest* internal_default_instance() { + return reinterpret_cast( + &_GetTaskStateRequest_default_instance_); + } + static constexpr int kIndexInFileMessages = + 18; + + friend void swap(GetTaskStateRequest& a, GetTaskStateRequest& b) { + a.Swap(&b); + } + inline void Swap(GetTaskStateRequest* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(GetTaskStateRequest* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + GetTaskStateRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const GetTaskStateRequest& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const GetTaskStateRequest& from) { + GetTaskStateRequest::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool 
is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(GetTaskStateRequest* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.GetTaskStateRequest"; + } + protected: + explicit GetTaskStateRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kSourceTaskFieldNumber = 1, + }; + // repeated .tensorflow.CoordinatedTask source_task = 1; + int source_task_size() const; + private: + int _internal_source_task_size() const; + public: + void clear_source_task(); + ::tensorflow::CoordinatedTask* mutable_source_task(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::CoordinatedTask >* + mutable_source_task(); + private: + const ::tensorflow::CoordinatedTask& _internal_source_task(int index) const; + ::tensorflow::CoordinatedTask* _internal_add_source_task(); + public: + const ::tensorflow::CoordinatedTask& source_task(int index) const; + ::tensorflow::CoordinatedTask* add_source_task(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::CoordinatedTask >& + source_task() const; + + // @@protoc_insertion_point(class_scope:tensorflow.GetTaskStateRequest) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::CoordinatedTask > source_task_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize 
_cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class GetTaskStateResponse final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.GetTaskStateResponse) */ { + public: + inline GetTaskStateResponse() : GetTaskStateResponse(nullptr) {} + ~GetTaskStateResponse() override; + explicit PROTOBUF_CONSTEXPR GetTaskStateResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + GetTaskStateResponse(const GetTaskStateResponse& from); + GetTaskStateResponse(GetTaskStateResponse&& from) noexcept + : GetTaskStateResponse() { + *this = ::std::move(from); + } + + inline GetTaskStateResponse& operator=(const GetTaskStateResponse& from) { + CopyFrom(from); + return *this; + } + inline GetTaskStateResponse& operator=(GetTaskStateResponse&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const GetTaskStateResponse& default_instance() { + return *internal_default_instance(); + } + static inline const GetTaskStateResponse* internal_default_instance() { + return reinterpret_cast( + &_GetTaskStateResponse_default_instance_); + } + static constexpr int kIndexInFileMessages = + 19; + + friend void swap(GetTaskStateResponse& a, GetTaskStateResponse& b) { + a.Swap(&b); + } + 
inline void Swap(GetTaskStateResponse* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(GetTaskStateResponse* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + GetTaskStateResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const GetTaskStateResponse& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const GetTaskStateResponse& from) { + GetTaskStateResponse::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(GetTaskStateResponse* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + 
return "tensorflow.GetTaskStateResponse"; + } + protected: + explicit GetTaskStateResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kTaskStateFieldNumber = 1, + }; + // repeated .tensorflow.CoordinatedTaskStateInfo task_state = 1; + int task_state_size() const; + private: + int _internal_task_state_size() const; + public: + void clear_task_state(); + ::tensorflow::CoordinatedTaskStateInfo* mutable_task_state(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::CoordinatedTaskStateInfo >* + mutable_task_state(); + private: + const ::tensorflow::CoordinatedTaskStateInfo& _internal_task_state(int index) const; + ::tensorflow::CoordinatedTaskStateInfo* _internal_add_task_state(); + public: + const ::tensorflow::CoordinatedTaskStateInfo& task_state(int index) const; + ::tensorflow::CoordinatedTaskStateInfo* add_task_state(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::CoordinatedTaskStateInfo >& + task_state() const; + + // @@protoc_insertion_point(class_scope:tensorflow.GetTaskStateResponse) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::CoordinatedTaskStateInfo > task_state_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + 
+class KeyValueEntry final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.KeyValueEntry) */ { + public: + inline KeyValueEntry() : KeyValueEntry(nullptr) {} + ~KeyValueEntry() override; + explicit PROTOBUF_CONSTEXPR KeyValueEntry(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + KeyValueEntry(const KeyValueEntry& from); + KeyValueEntry(KeyValueEntry&& from) noexcept + : KeyValueEntry() { + *this = ::std::move(from); + } + + inline KeyValueEntry& operator=(const KeyValueEntry& from) { + CopyFrom(from); + return *this; + } + inline KeyValueEntry& operator=(KeyValueEntry&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const KeyValueEntry& default_instance() { + return *internal_default_instance(); + } + static inline const KeyValueEntry* internal_default_instance() { + return reinterpret_cast( + &_KeyValueEntry_default_instance_); + } + static constexpr int kIndexInFileMessages = + 20; + + friend void swap(KeyValueEntry& a, KeyValueEntry& b) { + a.Swap(&b); + } + inline void Swap(KeyValueEntry* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); 
+ } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(KeyValueEntry* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + KeyValueEntry* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const KeyValueEntry& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const KeyValueEntry& from) { + KeyValueEntry::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(KeyValueEntry* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.KeyValueEntry"; + } + protected: + explicit KeyValueEntry(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types 
---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kKeyFieldNumber = 1, + kValueFieldNumber = 2, + }; + // string key = 1; + void clear_key(); + const std::string& key() const; + template + void set_key(ArgT0&& arg0, ArgT... args); + std::string* mutable_key(); + PROTOBUF_NODISCARD std::string* release_key(); + void set_allocated_key(std::string* key); + private: + const std::string& _internal_key() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_key(const std::string& value); + std::string* _internal_mutable_key(); + public: + + // bytes value = 2; + void clear_value(); + const std::string& value() const; + template + void set_value(ArgT0&& arg0, ArgT... args); + std::string* mutable_value(); + PROTOBUF_NODISCARD std::string* release_value(); + void set_allocated_value(std::string* value); + private: + const std::string& _internal_value() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_value(const std::string& value); + std::string* _internal_mutable_value(); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.KeyValueEntry) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr key_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr value_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class InsertKeyValueRequest final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.InsertKeyValueRequest) */ { + public: + inline InsertKeyValueRequest() : InsertKeyValueRequest(nullptr) {} + 
~InsertKeyValueRequest() override; + explicit PROTOBUF_CONSTEXPR InsertKeyValueRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + InsertKeyValueRequest(const InsertKeyValueRequest& from); + InsertKeyValueRequest(InsertKeyValueRequest&& from) noexcept + : InsertKeyValueRequest() { + *this = ::std::move(from); + } + + inline InsertKeyValueRequest& operator=(const InsertKeyValueRequest& from) { + CopyFrom(from); + return *this; + } + inline InsertKeyValueRequest& operator=(InsertKeyValueRequest&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const InsertKeyValueRequest& default_instance() { + return *internal_default_instance(); + } + static inline const InsertKeyValueRequest* internal_default_instance() { + return reinterpret_cast( + &_InsertKeyValueRequest_default_instance_); + } + static constexpr int kIndexInFileMessages = + 21; + + friend void swap(InsertKeyValueRequest& a, InsertKeyValueRequest& b) { + a.Swap(&b); + } + inline void Swap(InsertKeyValueRequest* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + 
} + } + void UnsafeArenaSwap(InsertKeyValueRequest* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + InsertKeyValueRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const InsertKeyValueRequest& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const InsertKeyValueRequest& from) { + InsertKeyValueRequest::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(InsertKeyValueRequest* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.InsertKeyValueRequest"; + } + protected: + explicit InsertKeyValueRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types 
---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kKvFieldNumber = 1, + }; + // .tensorflow.KeyValueEntry kv = 1; + bool has_kv() const; + private: + bool _internal_has_kv() const; + public: + void clear_kv(); + const ::tensorflow::KeyValueEntry& kv() const; + PROTOBUF_NODISCARD ::tensorflow::KeyValueEntry* release_kv(); + ::tensorflow::KeyValueEntry* mutable_kv(); + void set_allocated_kv(::tensorflow::KeyValueEntry* kv); + private: + const ::tensorflow::KeyValueEntry& _internal_kv() const; + ::tensorflow::KeyValueEntry* _internal_mutable_kv(); + public: + void unsafe_arena_set_allocated_kv( + ::tensorflow::KeyValueEntry* kv); + ::tensorflow::KeyValueEntry* unsafe_arena_release_kv(); + + // @@protoc_insertion_point(class_scope:tensorflow.InsertKeyValueRequest) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::tensorflow::KeyValueEntry* kv_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class InsertKeyValueResponse final : + public ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase /* @@protoc_insertion_point(class_definition:tensorflow.InsertKeyValueResponse) */ { + public: + inline InsertKeyValueResponse() : InsertKeyValueResponse(nullptr) {} + explicit PROTOBUF_CONSTEXPR InsertKeyValueResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + InsertKeyValueResponse(const InsertKeyValueResponse& from); + InsertKeyValueResponse(InsertKeyValueResponse&& from) noexcept + : InsertKeyValueResponse() { + *this = ::std::move(from); + } + + inline InsertKeyValueResponse& operator=(const 
InsertKeyValueResponse& from) { + CopyFrom(from); + return *this; + } + inline InsertKeyValueResponse& operator=(InsertKeyValueResponse&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const InsertKeyValueResponse& default_instance() { + return *internal_default_instance(); + } + static inline const InsertKeyValueResponse* internal_default_instance() { + return reinterpret_cast( + &_InsertKeyValueResponse_default_instance_); + } + static constexpr int kIndexInFileMessages = + 22; + + friend void swap(InsertKeyValueResponse& a, InsertKeyValueResponse& b) { + a.Swap(&b); + } + inline void Swap(InsertKeyValueResponse* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(InsertKeyValueResponse* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + InsertKeyValueResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return 
CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyFrom; + inline void CopyFrom(const InsertKeyValueResponse& from) { + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyImpl(*this, from); + } + using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeFrom; + void MergeFrom(const InsertKeyValueResponse& from) { + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeImpl(*this, from); + } + public: + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.InsertKeyValueResponse"; + } + protected: + explicit InsertKeyValueResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // @@protoc_insertion_point(class_scope:tensorflow.InsertKeyValueResponse) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class GetKeyValueRequest final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.GetKeyValueRequest) */ { + public: + inline GetKeyValueRequest() : GetKeyValueRequest(nullptr) {} + ~GetKeyValueRequest() override; + explicit PROTOBUF_CONSTEXPR GetKeyValueRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + GetKeyValueRequest(const GetKeyValueRequest& from); + 
GetKeyValueRequest(GetKeyValueRequest&& from) noexcept + : GetKeyValueRequest() { + *this = ::std::move(from); + } + + inline GetKeyValueRequest& operator=(const GetKeyValueRequest& from) { + CopyFrom(from); + return *this; + } + inline GetKeyValueRequest& operator=(GetKeyValueRequest&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const GetKeyValueRequest& default_instance() { + return *internal_default_instance(); + } + static inline const GetKeyValueRequest* internal_default_instance() { + return reinterpret_cast( + &_GetKeyValueRequest_default_instance_); + } + static constexpr int kIndexInFileMessages = + 23; + + friend void swap(GetKeyValueRequest& a, GetKeyValueRequest& b) { + a.Swap(&b); + } + inline void Swap(GetKeyValueRequest* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(GetKeyValueRequest* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message 
---------------------------------------------- + + GetKeyValueRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const GetKeyValueRequest& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const GetKeyValueRequest& from) { + GetKeyValueRequest::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(GetKeyValueRequest* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.GetKeyValueRequest"; + } + protected: + explicit GetKeyValueRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kKeyFieldNumber = 1, + }; + // string key = 1; + void clear_key(); + const std::string& key() const; + template + void 
set_key(ArgT0&& arg0, ArgT... args); + std::string* mutable_key(); + PROTOBUF_NODISCARD std::string* release_key(); + void set_allocated_key(std::string* key); + private: + const std::string& _internal_key() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_key(const std::string& value); + std::string* _internal_mutable_key(); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.GetKeyValueRequest) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr key_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class GetKeyValueResponse final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.GetKeyValueResponse) */ { + public: + inline GetKeyValueResponse() : GetKeyValueResponse(nullptr) {} + ~GetKeyValueResponse() override; + explicit PROTOBUF_CONSTEXPR GetKeyValueResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + GetKeyValueResponse(const GetKeyValueResponse& from); + GetKeyValueResponse(GetKeyValueResponse&& from) noexcept + : GetKeyValueResponse() { + *this = ::std::move(from); + } + + inline GetKeyValueResponse& operator=(const GetKeyValueResponse& from) { + CopyFrom(from); + return *this; + } + inline GetKeyValueResponse& operator=(GetKeyValueResponse&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const 
::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const GetKeyValueResponse& default_instance() { + return *internal_default_instance(); + } + static inline const GetKeyValueResponse* internal_default_instance() { + return reinterpret_cast( + &_GetKeyValueResponse_default_instance_); + } + static constexpr int kIndexInFileMessages = + 24; + + friend void swap(GetKeyValueResponse& a, GetKeyValueResponse& b) { + a.Swap(&b); + } + inline void Swap(GetKeyValueResponse* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(GetKeyValueResponse* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + GetKeyValueResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const GetKeyValueResponse& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const GetKeyValueResponse& from) { + GetKeyValueResponse::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + 
bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(GetKeyValueResponse* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.GetKeyValueResponse"; + } + protected: + explicit GetKeyValueResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kKvFieldNumber = 1, + }; + // .tensorflow.KeyValueEntry kv = 1; + bool has_kv() const; + private: + bool _internal_has_kv() const; + public: + void clear_kv(); + const ::tensorflow::KeyValueEntry& kv() const; + PROTOBUF_NODISCARD ::tensorflow::KeyValueEntry* release_kv(); + ::tensorflow::KeyValueEntry* mutable_kv(); + void set_allocated_kv(::tensorflow::KeyValueEntry* kv); + private: + const ::tensorflow::KeyValueEntry& _internal_kv() const; + ::tensorflow::KeyValueEntry* _internal_mutable_kv(); + public: + void unsafe_arena_set_allocated_kv( + ::tensorflow::KeyValueEntry* kv); + ::tensorflow::KeyValueEntry* unsafe_arena_release_kv(); + + // @@protoc_insertion_point(class_scope:tensorflow.GetKeyValueResponse) + private: + class _Internal; + 
+ template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::tensorflow::KeyValueEntry* kv_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class TryGetKeyValueRequest final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.TryGetKeyValueRequest) */ { + public: + inline TryGetKeyValueRequest() : TryGetKeyValueRequest(nullptr) {} + ~TryGetKeyValueRequest() override; + explicit PROTOBUF_CONSTEXPR TryGetKeyValueRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + TryGetKeyValueRequest(const TryGetKeyValueRequest& from); + TryGetKeyValueRequest(TryGetKeyValueRequest&& from) noexcept + : TryGetKeyValueRequest() { + *this = ::std::move(from); + } + + inline TryGetKeyValueRequest& operator=(const TryGetKeyValueRequest& from) { + CopyFrom(from); + return *this; + } + inline TryGetKeyValueRequest& operator=(TryGetKeyValueRequest&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const TryGetKeyValueRequest& default_instance() { + return *internal_default_instance(); + } + static inline const 
TryGetKeyValueRequest* internal_default_instance() { + return reinterpret_cast( + &_TryGetKeyValueRequest_default_instance_); + } + static constexpr int kIndexInFileMessages = + 25; + + friend void swap(TryGetKeyValueRequest& a, TryGetKeyValueRequest& b) { + a.Swap(&b); + } + inline void Swap(TryGetKeyValueRequest* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(TryGetKeyValueRequest* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + TryGetKeyValueRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const TryGetKeyValueRequest& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const TryGetKeyValueRequest& from) { + TryGetKeyValueRequest::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, 
bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(TryGetKeyValueRequest* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.TryGetKeyValueRequest"; + } + protected: + explicit TryGetKeyValueRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kKeyFieldNumber = 1, + }; + // string key = 1; + void clear_key(); + const std::string& key() const; + template + void set_key(ArgT0&& arg0, ArgT... args); + std::string* mutable_key(); + PROTOBUF_NODISCARD std::string* release_key(); + void set_allocated_key(std::string* key); + private: + const std::string& _internal_key() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_key(const std::string& value); + std::string* _internal_mutable_key(); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.TryGetKeyValueRequest) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr key_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class TryGetKeyValueResponse final : + public ::PROTOBUF_NAMESPACE_ID::Message /* 
@@protoc_insertion_point(class_definition:tensorflow.TryGetKeyValueResponse) */ { + public: + inline TryGetKeyValueResponse() : TryGetKeyValueResponse(nullptr) {} + ~TryGetKeyValueResponse() override; + explicit PROTOBUF_CONSTEXPR TryGetKeyValueResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + TryGetKeyValueResponse(const TryGetKeyValueResponse& from); + TryGetKeyValueResponse(TryGetKeyValueResponse&& from) noexcept + : TryGetKeyValueResponse() { + *this = ::std::move(from); + } + + inline TryGetKeyValueResponse& operator=(const TryGetKeyValueResponse& from) { + CopyFrom(from); + return *this; + } + inline TryGetKeyValueResponse& operator=(TryGetKeyValueResponse&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const TryGetKeyValueResponse& default_instance() { + return *internal_default_instance(); + } + static inline const TryGetKeyValueResponse* internal_default_instance() { + return reinterpret_cast( + &_TryGetKeyValueResponse_default_instance_); + } + static constexpr int kIndexInFileMessages = + 26; + + friend void swap(TryGetKeyValueResponse& a, TryGetKeyValueResponse& b) { + a.Swap(&b); + } + inline void Swap(TryGetKeyValueResponse* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if 
(GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(TryGetKeyValueResponse* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + TryGetKeyValueResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const TryGetKeyValueResponse& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const TryGetKeyValueResponse& from) { + TryGetKeyValueResponse::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(TryGetKeyValueResponse* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.TryGetKeyValueResponse"; + } + protected: + explicit TryGetKeyValueResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const 
::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kKvFieldNumber = 1, + }; + // .tensorflow.KeyValueEntry kv = 1; + bool has_kv() const; + private: + bool _internal_has_kv() const; + public: + void clear_kv(); + const ::tensorflow::KeyValueEntry& kv() const; + PROTOBUF_NODISCARD ::tensorflow::KeyValueEntry* release_kv(); + ::tensorflow::KeyValueEntry* mutable_kv(); + void set_allocated_kv(::tensorflow::KeyValueEntry* kv); + private: + const ::tensorflow::KeyValueEntry& _internal_kv() const; + ::tensorflow::KeyValueEntry* _internal_mutable_kv(); + public: + void unsafe_arena_set_allocated_kv( + ::tensorflow::KeyValueEntry* kv); + ::tensorflow::KeyValueEntry* unsafe_arena_release_kv(); + + // @@protoc_insertion_point(class_scope:tensorflow.TryGetKeyValueResponse) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::tensorflow::KeyValueEntry* kv_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class GetKeyValueDirRequest final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.GetKeyValueDirRequest) */ { + public: + inline GetKeyValueDirRequest() : GetKeyValueDirRequest(nullptr) {} + ~GetKeyValueDirRequest() override; + explicit PROTOBUF_CONSTEXPR GetKeyValueDirRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + GetKeyValueDirRequest(const GetKeyValueDirRequest& from); + 
GetKeyValueDirRequest(GetKeyValueDirRequest&& from) noexcept + : GetKeyValueDirRequest() { + *this = ::std::move(from); + } + + inline GetKeyValueDirRequest& operator=(const GetKeyValueDirRequest& from) { + CopyFrom(from); + return *this; + } + inline GetKeyValueDirRequest& operator=(GetKeyValueDirRequest&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const GetKeyValueDirRequest& default_instance() { + return *internal_default_instance(); + } + static inline const GetKeyValueDirRequest* internal_default_instance() { + return reinterpret_cast( + &_GetKeyValueDirRequest_default_instance_); + } + static constexpr int kIndexInFileMessages = + 27; + + friend void swap(GetKeyValueDirRequest& a, GetKeyValueDirRequest& b) { + a.Swap(&b); + } + inline void Swap(GetKeyValueDirRequest* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(GetKeyValueDirRequest* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message 
---------------------------------------------- + + GetKeyValueDirRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const GetKeyValueDirRequest& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const GetKeyValueDirRequest& from) { + GetKeyValueDirRequest::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(GetKeyValueDirRequest* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.GetKeyValueDirRequest"; + } + protected: + explicit GetKeyValueDirRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kDirectoryKeyFieldNumber = 1, + }; + // string directory_key = 1; + void clear_directory_key(); + const 
std::string& directory_key() const; + template + void set_directory_key(ArgT0&& arg0, ArgT... args); + std::string* mutable_directory_key(); + PROTOBUF_NODISCARD std::string* release_directory_key(); + void set_allocated_directory_key(std::string* directory_key); + private: + const std::string& _internal_directory_key() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_directory_key(const std::string& value); + std::string* _internal_mutable_directory_key(); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.GetKeyValueDirRequest) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr directory_key_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class GetKeyValueDirResponse final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.GetKeyValueDirResponse) */ { + public: + inline GetKeyValueDirResponse() : GetKeyValueDirResponse(nullptr) {} + ~GetKeyValueDirResponse() override; + explicit PROTOBUF_CONSTEXPR GetKeyValueDirResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + GetKeyValueDirResponse(const GetKeyValueDirResponse& from); + GetKeyValueDirResponse(GetKeyValueDirResponse&& from) noexcept + : GetKeyValueDirResponse() { + *this = ::std::move(from); + } + + inline GetKeyValueDirResponse& operator=(const GetKeyValueDirResponse& from) { + CopyFrom(from); + return *this; + } + inline GetKeyValueDirResponse& operator=(GetKeyValueDirResponse&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef 
PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const GetKeyValueDirResponse& default_instance() { + return *internal_default_instance(); + } + static inline const GetKeyValueDirResponse* internal_default_instance() { + return reinterpret_cast( + &_GetKeyValueDirResponse_default_instance_); + } + static constexpr int kIndexInFileMessages = + 28; + + friend void swap(GetKeyValueDirResponse& a, GetKeyValueDirResponse& b) { + a.Swap(&b); + } + inline void Swap(GetKeyValueDirResponse* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(GetKeyValueDirResponse* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + GetKeyValueDirResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const GetKeyValueDirResponse& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const GetKeyValueDirResponse& from) { + 
GetKeyValueDirResponse::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(GetKeyValueDirResponse* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.GetKeyValueDirResponse"; + } + protected: + explicit GetKeyValueDirResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kKvFieldNumber = 2, + kDirectoryKeyFieldNumber = 1, + }; + // repeated .tensorflow.KeyValueEntry kv = 2; + int kv_size() const; + private: + int _internal_kv_size() const; + public: + void clear_kv(); + ::tensorflow::KeyValueEntry* mutable_kv(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::KeyValueEntry >* + mutable_kv(); + private: + const ::tensorflow::KeyValueEntry& _internal_kv(int index) const; + ::tensorflow::KeyValueEntry* _internal_add_kv(); + public: + const 
::tensorflow::KeyValueEntry& kv(int index) const; + ::tensorflow::KeyValueEntry* add_kv(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::KeyValueEntry >& + kv() const; + + // string directory_key = 1; + void clear_directory_key(); + const std::string& directory_key() const; + template + void set_directory_key(ArgT0&& arg0, ArgT... args); + std::string* mutable_directory_key(); + PROTOBUF_NODISCARD std::string* release_directory_key(); + void set_allocated_directory_key(std::string* directory_key); + private: + const std::string& _internal_directory_key() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_directory_key(const std::string& value); + std::string* _internal_mutable_directory_key(); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.GetKeyValueDirResponse) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::KeyValueEntry > kv_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr directory_key_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class DeleteKeyValueRequest final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.DeleteKeyValueRequest) */ { + public: + inline DeleteKeyValueRequest() : DeleteKeyValueRequest(nullptr) {} + ~DeleteKeyValueRequest() override; + explicit PROTOBUF_CONSTEXPR DeleteKeyValueRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + DeleteKeyValueRequest(const DeleteKeyValueRequest& from); + DeleteKeyValueRequest(DeleteKeyValueRequest&& from) noexcept + : DeleteKeyValueRequest() { + *this = ::std::move(from); 
+ } + + inline DeleteKeyValueRequest& operator=(const DeleteKeyValueRequest& from) { + CopyFrom(from); + return *this; + } + inline DeleteKeyValueRequest& operator=(DeleteKeyValueRequest&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const DeleteKeyValueRequest& default_instance() { + return *internal_default_instance(); + } + static inline const DeleteKeyValueRequest* internal_default_instance() { + return reinterpret_cast( + &_DeleteKeyValueRequest_default_instance_); + } + static constexpr int kIndexInFileMessages = + 29; + + friend void swap(DeleteKeyValueRequest& a, DeleteKeyValueRequest& b) { + a.Swap(&b); + } + inline void Swap(DeleteKeyValueRequest* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(DeleteKeyValueRequest* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + DeleteKeyValueRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = 
nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const DeleteKeyValueRequest& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const DeleteKeyValueRequest& from) { + DeleteKeyValueRequest::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(DeleteKeyValueRequest* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.DeleteKeyValueRequest"; + } + protected: + explicit DeleteKeyValueRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kKeyFieldNumber = 1, + kIsDirectoryFieldNumber = 2, + }; + // string key = 1; + void clear_key(); + const std::string& key() const; + template + void set_key(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_key(); + PROTOBUF_NODISCARD std::string* release_key(); + void set_allocated_key(std::string* key); + private: + const std::string& _internal_key() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_key(const std::string& value); + std::string* _internal_mutable_key(); + public: + + // bool is_directory = 2; + void clear_is_directory(); + bool is_directory() const; + void set_is_directory(bool value); + private: + bool _internal_is_directory() const; + void _internal_set_is_directory(bool value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.DeleteKeyValueRequest) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr key_; + bool is_directory_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class DeleteKeyValueResponse final : + public ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase /* @@protoc_insertion_point(class_definition:tensorflow.DeleteKeyValueResponse) */ { + public: + inline DeleteKeyValueResponse() : DeleteKeyValueResponse(nullptr) {} + explicit PROTOBUF_CONSTEXPR DeleteKeyValueResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + DeleteKeyValueResponse(const DeleteKeyValueResponse& from); + DeleteKeyValueResponse(DeleteKeyValueResponse&& from) noexcept + : DeleteKeyValueResponse() { + *this = ::std::move(from); + } + + inline DeleteKeyValueResponse& operator=(const DeleteKeyValueResponse& from) { + CopyFrom(from); + return *this; + } + inline DeleteKeyValueResponse& operator=(DeleteKeyValueResponse&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() 
== from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const DeleteKeyValueResponse& default_instance() { + return *internal_default_instance(); + } + static inline const DeleteKeyValueResponse* internal_default_instance() { + return reinterpret_cast( + &_DeleteKeyValueResponse_default_instance_); + } + static constexpr int kIndexInFileMessages = + 30; + + friend void swap(DeleteKeyValueResponse& a, DeleteKeyValueResponse& b) { + a.Swap(&b); + } + inline void Swap(DeleteKeyValueResponse* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(DeleteKeyValueResponse* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + DeleteKeyValueResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyFrom; + inline void CopyFrom(const DeleteKeyValueResponse& from) { + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyImpl(*this, 
from); + } + using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeFrom; + void MergeFrom(const DeleteKeyValueResponse& from) { + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeImpl(*this, from); + } + public: + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.DeleteKeyValueResponse"; + } + protected: + explicit DeleteKeyValueResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // @@protoc_insertion_point(class_scope:tensorflow.DeleteKeyValueResponse) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class BarrierRequest final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.BarrierRequest) */ { + public: + inline BarrierRequest() : BarrierRequest(nullptr) {} + ~BarrierRequest() override; + explicit PROTOBUF_CONSTEXPR BarrierRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + BarrierRequest(const BarrierRequest& from); + BarrierRequest(BarrierRequest&& from) noexcept + : BarrierRequest() { + *this = ::std::move(from); + } + + inline BarrierRequest& operator=(const BarrierRequest& from) { + CopyFrom(from); + return *this; + } + inline BarrierRequest& operator=(BarrierRequest&& from) noexcept { + 
if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const BarrierRequest& default_instance() { + return *internal_default_instance(); + } + static inline const BarrierRequest* internal_default_instance() { + return reinterpret_cast( + &_BarrierRequest_default_instance_); + } + static constexpr int kIndexInFileMessages = + 31; + + friend void swap(BarrierRequest& a, BarrierRequest& b) { + a.Swap(&b); + } + inline void Swap(BarrierRequest* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(BarrierRequest* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + BarrierRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const BarrierRequest& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const BarrierRequest& from) { + 
BarrierRequest::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(BarrierRequest* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.BarrierRequest"; + } + protected: + explicit BarrierRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kTasksFieldNumber = 3, + kBarrierIdFieldNumber = 1, + kSourceTaskFieldNumber = 4, + kBarrierTimeoutInMsFieldNumber = 2, + }; + // repeated .tensorflow.CoordinatedTask tasks = 3; + int tasks_size() const; + private: + int _internal_tasks_size() const; + public: + void clear_tasks(); + ::tensorflow::CoordinatedTask* mutable_tasks(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::CoordinatedTask >* + mutable_tasks(); + private: + const ::tensorflow::CoordinatedTask& _internal_tasks(int index) const; + 
::tensorflow::CoordinatedTask* _internal_add_tasks(); + public: + const ::tensorflow::CoordinatedTask& tasks(int index) const; + ::tensorflow::CoordinatedTask* add_tasks(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::CoordinatedTask >& + tasks() const; + + // string barrier_id = 1; + void clear_barrier_id(); + const std::string& barrier_id() const; + template + void set_barrier_id(ArgT0&& arg0, ArgT... args); + std::string* mutable_barrier_id(); + PROTOBUF_NODISCARD std::string* release_barrier_id(); + void set_allocated_barrier_id(std::string* barrier_id); + private: + const std::string& _internal_barrier_id() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_barrier_id(const std::string& value); + std::string* _internal_mutable_barrier_id(); + public: + + // .tensorflow.CoordinatedTask source_task = 4; + bool has_source_task() const; + private: + bool _internal_has_source_task() const; + public: + void clear_source_task(); + const ::tensorflow::CoordinatedTask& source_task() const; + PROTOBUF_NODISCARD ::tensorflow::CoordinatedTask* release_source_task(); + ::tensorflow::CoordinatedTask* mutable_source_task(); + void set_allocated_source_task(::tensorflow::CoordinatedTask* source_task); + private: + const ::tensorflow::CoordinatedTask& _internal_source_task() const; + ::tensorflow::CoordinatedTask* _internal_mutable_source_task(); + public: + void unsafe_arena_set_allocated_source_task( + ::tensorflow::CoordinatedTask* source_task); + ::tensorflow::CoordinatedTask* unsafe_arena_release_source_task(); + + // int64 barrier_timeout_in_ms = 2; + void clear_barrier_timeout_in_ms(); + int64_t barrier_timeout_in_ms() const; + void set_barrier_timeout_in_ms(int64_t value); + private: + int64_t _internal_barrier_timeout_in_ms() const; + void _internal_set_barrier_timeout_in_ms(int64_t value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.BarrierRequest) + private: + class _Internal; + + template friend class 
::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::CoordinatedTask > tasks_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr barrier_id_; + ::tensorflow::CoordinatedTask* source_task_; + int64_t barrier_timeout_in_ms_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class BarrierResponse final : + public ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase /* @@protoc_insertion_point(class_definition:tensorflow.BarrierResponse) */ { + public: + inline BarrierResponse() : BarrierResponse(nullptr) {} + explicit PROTOBUF_CONSTEXPR BarrierResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + BarrierResponse(const BarrierResponse& from); + BarrierResponse(BarrierResponse&& from) noexcept + : BarrierResponse() { + *this = ::std::move(from); + } + + inline BarrierResponse& operator=(const BarrierResponse& from) { + CopyFrom(from); + return *this; + } + inline BarrierResponse& operator=(BarrierResponse&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const BarrierResponse& default_instance() { + 
return *internal_default_instance(); + } + static inline const BarrierResponse* internal_default_instance() { + return reinterpret_cast( + &_BarrierResponse_default_instance_); + } + static constexpr int kIndexInFileMessages = + 32; + + friend void swap(BarrierResponse& a, BarrierResponse& b) { + a.Swap(&b); + } + inline void Swap(BarrierResponse* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(BarrierResponse* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + BarrierResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyFrom; + inline void CopyFrom(const BarrierResponse& from) { + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyImpl(*this, from); + } + using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeFrom; + void MergeFrom(const BarrierResponse& from) { + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeImpl(*this, from); + } + public: + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.BarrierResponse"; + } + protected: + explicit BarrierResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata 
GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // @@protoc_insertion_point(class_scope:tensorflow.BarrierResponse) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class CancelBarrierRequest final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.CancelBarrierRequest) */ { + public: + inline CancelBarrierRequest() : CancelBarrierRequest(nullptr) {} + ~CancelBarrierRequest() override; + explicit PROTOBUF_CONSTEXPR CancelBarrierRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + CancelBarrierRequest(const CancelBarrierRequest& from); + CancelBarrierRequest(CancelBarrierRequest&& from) noexcept + : CancelBarrierRequest() { + *this = ::std::move(from); + } + + inline CancelBarrierRequest& operator=(const CancelBarrierRequest& from) { + CopyFrom(from); + return *this; + } + inline CancelBarrierRequest& operator=(CancelBarrierRequest&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; 
+ } + static const CancelBarrierRequest& default_instance() { + return *internal_default_instance(); + } + static inline const CancelBarrierRequest* internal_default_instance() { + return reinterpret_cast( + &_CancelBarrierRequest_default_instance_); + } + static constexpr int kIndexInFileMessages = + 33; + + friend void swap(CancelBarrierRequest& a, CancelBarrierRequest& b) { + a.Swap(&b); + } + inline void Swap(CancelBarrierRequest* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(CancelBarrierRequest* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + CancelBarrierRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const CancelBarrierRequest& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const CancelBarrierRequest& from) { + CancelBarrierRequest::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() 
const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(CancelBarrierRequest* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.CancelBarrierRequest"; + } + protected: + explicit CancelBarrierRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kBarrierIdFieldNumber = 1, + kSourceTaskFieldNumber = 2, + }; + // string barrier_id = 1; + void clear_barrier_id(); + const std::string& barrier_id() const; + template + void set_barrier_id(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_barrier_id(); + PROTOBUF_NODISCARD std::string* release_barrier_id(); + void set_allocated_barrier_id(std::string* barrier_id); + private: + const std::string& _internal_barrier_id() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_barrier_id(const std::string& value); + std::string* _internal_mutable_barrier_id(); + public: + + // .tensorflow.CoordinatedTask source_task = 2; + bool has_source_task() const; + private: + bool _internal_has_source_task() const; + public: + void clear_source_task(); + const ::tensorflow::CoordinatedTask& source_task() const; + PROTOBUF_NODISCARD ::tensorflow::CoordinatedTask* release_source_task(); + ::tensorflow::CoordinatedTask* mutable_source_task(); + void set_allocated_source_task(::tensorflow::CoordinatedTask* source_task); + private: + const ::tensorflow::CoordinatedTask& _internal_source_task() const; + ::tensorflow::CoordinatedTask* _internal_mutable_source_task(); + public: + void unsafe_arena_set_allocated_source_task( + ::tensorflow::CoordinatedTask* source_task); + ::tensorflow::CoordinatedTask* unsafe_arena_release_source_task(); + + // @@protoc_insertion_point(class_scope:tensorflow.CancelBarrierRequest) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr barrier_id_; + ::tensorflow::CoordinatedTask* source_task_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// ------------------------------------------------------------------- + +class CancelBarrierResponse final : + public ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase /* @@protoc_insertion_point(class_definition:tensorflow.CancelBarrierResponse) */ { + public: + inline 
CancelBarrierResponse() : CancelBarrierResponse(nullptr) {} + explicit PROTOBUF_CONSTEXPR CancelBarrierResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + CancelBarrierResponse(const CancelBarrierResponse& from); + CancelBarrierResponse(CancelBarrierResponse&& from) noexcept + : CancelBarrierResponse() { + *this = ::std::move(from); + } + + inline CancelBarrierResponse& operator=(const CancelBarrierResponse& from) { + CopyFrom(from); + return *this; + } + inline CancelBarrierResponse& operator=(CancelBarrierResponse&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const CancelBarrierResponse& default_instance() { + return *internal_default_instance(); + } + static inline const CancelBarrierResponse* internal_default_instance() { + return reinterpret_cast( + &_CancelBarrierResponse_default_instance_); + } + static constexpr int kIndexInFileMessages = + 34; + + friend void swap(CancelBarrierResponse& a, CancelBarrierResponse& b) { + a.Swap(&b); + } + inline void Swap(CancelBarrierResponse* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + 
::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(CancelBarrierResponse* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + CancelBarrierResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyFrom; + inline void CopyFrom(const CancelBarrierResponse& from) { + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyImpl(*this, from); + } + using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeFrom; + void MergeFrom(const CancelBarrierResponse& from) { + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeImpl(*this, from); + } + public: + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.CancelBarrierResponse"; + } + protected: + explicit CancelBarrierResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // @@protoc_insertion_point(class_scope:tensorflow.CancelBarrierResponse) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + }; + friend struct ::TableStruct_tsl_2fprotobuf_2fcoordination_5fservice_2eproto; +}; +// =================================================================== + + +// 
=================================================================== + +#ifdef __GNUC__ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif // __GNUC__ +// CoordinatedTask + +// string job_name = 1; +inline void CoordinatedTask::clear_job_name() { + _impl_.job_name_.ClearToEmpty(); +} +inline const std::string& CoordinatedTask::job_name() const { + // @@protoc_insertion_point(field_get:tensorflow.CoordinatedTask.job_name) + return _internal_job_name(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void CoordinatedTask::set_job_name(ArgT0&& arg0, ArgT... args) { + + _impl_.job_name_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.CoordinatedTask.job_name) +} +inline std::string* CoordinatedTask::mutable_job_name() { + std::string* _s = _internal_mutable_job_name(); + // @@protoc_insertion_point(field_mutable:tensorflow.CoordinatedTask.job_name) + return _s; +} +inline const std::string& CoordinatedTask::_internal_job_name() const { + return _impl_.job_name_.Get(); +} +inline void CoordinatedTask::_internal_set_job_name(const std::string& value) { + + _impl_.job_name_.Set(value, GetArenaForAllocation()); +} +inline std::string* CoordinatedTask::_internal_mutable_job_name() { + + return _impl_.job_name_.Mutable(GetArenaForAllocation()); +} +inline std::string* CoordinatedTask::release_job_name() { + // @@protoc_insertion_point(field_release:tensorflow.CoordinatedTask.job_name) + return _impl_.job_name_.Release(); +} +inline void CoordinatedTask::set_allocated_job_name(std::string* job_name) { + if (job_name != nullptr) { + + } else { + + } + _impl_.job_name_.SetAllocated(job_name, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.job_name_.IsDefault()) { + _impl_.job_name_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.CoordinatedTask.job_name) 
+} + +// int32 task_id = 2; +inline void CoordinatedTask::clear_task_id() { + _impl_.task_id_ = 0; +} +inline int32_t CoordinatedTask::_internal_task_id() const { + return _impl_.task_id_; +} +inline int32_t CoordinatedTask::task_id() const { + // @@protoc_insertion_point(field_get:tensorflow.CoordinatedTask.task_id) + return _internal_task_id(); +} +inline void CoordinatedTask::_internal_set_task_id(int32_t value) { + + _impl_.task_id_ = value; +} +inline void CoordinatedTask::set_task_id(int32_t value) { + _internal_set_task_id(value); + // @@protoc_insertion_point(field_set:tensorflow.CoordinatedTask.task_id) +} + +// ------------------------------------------------------------------- + +// CoordinationServiceError + +// bool is_reported_error = 3; +inline void CoordinationServiceError::clear_is_reported_error() { + _impl_.is_reported_error_ = false; +} +inline bool CoordinationServiceError::_internal_is_reported_error() const { + return _impl_.is_reported_error_; +} +inline bool CoordinationServiceError::is_reported_error() const { + // @@protoc_insertion_point(field_get:tensorflow.CoordinationServiceError.is_reported_error) + return _internal_is_reported_error(); +} +inline void CoordinationServiceError::_internal_set_is_reported_error(bool value) { + + _impl_.is_reported_error_ = value; +} +inline void CoordinationServiceError::set_is_reported_error(bool value) { + _internal_set_is_reported_error(value); + // @@protoc_insertion_point(field_set:tensorflow.CoordinationServiceError.is_reported_error) +} + +// .tensorflow.CoordinatedTask source_task = 4; +inline bool CoordinationServiceError::_internal_has_source_task() const { + return this != internal_default_instance() && _impl_.source_task_ != nullptr; +} +inline bool CoordinationServiceError::has_source_task() const { + return _internal_has_source_task(); +} +inline void CoordinationServiceError::clear_source_task() { + if (GetArenaForAllocation() == nullptr && _impl_.source_task_ != nullptr) { + delete 
_impl_.source_task_; + } + _impl_.source_task_ = nullptr; +} +inline const ::tensorflow::CoordinatedTask& CoordinationServiceError::_internal_source_task() const { + const ::tensorflow::CoordinatedTask* p = _impl_.source_task_; + return p != nullptr ? *p : reinterpret_cast( + ::tensorflow::_CoordinatedTask_default_instance_); +} +inline const ::tensorflow::CoordinatedTask& CoordinationServiceError::source_task() const { + // @@protoc_insertion_point(field_get:tensorflow.CoordinationServiceError.source_task) + return _internal_source_task(); +} +inline void CoordinationServiceError::unsafe_arena_set_allocated_source_task( + ::tensorflow::CoordinatedTask* source_task) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.source_task_); + } + _impl_.source_task_ = source_task; + if (source_task) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.CoordinationServiceError.source_task) +} +inline ::tensorflow::CoordinatedTask* CoordinationServiceError::release_source_task() { + + ::tensorflow::CoordinatedTask* temp = _impl_.source_task_; + _impl_.source_task_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::CoordinatedTask* CoordinationServiceError::unsafe_arena_release_source_task() { + // @@protoc_insertion_point(field_release:tensorflow.CoordinationServiceError.source_task) + + ::tensorflow::CoordinatedTask* temp = _impl_.source_task_; + _impl_.source_task_ = nullptr; + return temp; +} +inline 
::tensorflow::CoordinatedTask* CoordinationServiceError::_internal_mutable_source_task() { + + if (_impl_.source_task_ == nullptr) { + auto* p = CreateMaybeMessage<::tensorflow::CoordinatedTask>(GetArenaForAllocation()); + _impl_.source_task_ = p; + } + return _impl_.source_task_; +} +inline ::tensorflow::CoordinatedTask* CoordinationServiceError::mutable_source_task() { + ::tensorflow::CoordinatedTask* _msg = _internal_mutable_source_task(); + // @@protoc_insertion_point(field_mutable:tensorflow.CoordinationServiceError.source_task) + return _msg; +} +inline void CoordinationServiceError::set_allocated_source_task(::tensorflow::CoordinatedTask* source_task) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.source_task_; + } + if (source_task) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(source_task); + if (message_arena != submessage_arena) { + source_task = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, source_task, submessage_arena); + } + + } else { + + } + _impl_.source_task_ = source_task; + // @@protoc_insertion_point(field_set_allocated:tensorflow.CoordinationServiceError.source_task) +} + +// ------------------------------------------------------------------- + +// CoordinatedTaskStateInfo + +// .tensorflow.CoordinatedTask task = 1; +inline bool CoordinatedTaskStateInfo::_internal_has_task() const { + return this != internal_default_instance() && _impl_.task_ != nullptr; +} +inline bool CoordinatedTaskStateInfo::has_task() const { + return _internal_has_task(); +} +inline void CoordinatedTaskStateInfo::clear_task() { + if (GetArenaForAllocation() == nullptr && _impl_.task_ != nullptr) { + delete _impl_.task_; + } + _impl_.task_ = nullptr; +} +inline const ::tensorflow::CoordinatedTask& CoordinatedTaskStateInfo::_internal_task() const { + const ::tensorflow::CoordinatedTask* p = _impl_.task_; + 
return p != nullptr ? *p : reinterpret_cast( + ::tensorflow::_CoordinatedTask_default_instance_); +} +inline const ::tensorflow::CoordinatedTask& CoordinatedTaskStateInfo::task() const { + // @@protoc_insertion_point(field_get:tensorflow.CoordinatedTaskStateInfo.task) + return _internal_task(); +} +inline void CoordinatedTaskStateInfo::unsafe_arena_set_allocated_task( + ::tensorflow::CoordinatedTask* task) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.task_); + } + _impl_.task_ = task; + if (task) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.CoordinatedTaskStateInfo.task) +} +inline ::tensorflow::CoordinatedTask* CoordinatedTaskStateInfo::release_task() { + + ::tensorflow::CoordinatedTask* temp = _impl_.task_; + _impl_.task_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::CoordinatedTask* CoordinatedTaskStateInfo::unsafe_arena_release_task() { + // @@protoc_insertion_point(field_release:tensorflow.CoordinatedTaskStateInfo.task) + + ::tensorflow::CoordinatedTask* temp = _impl_.task_; + _impl_.task_ = nullptr; + return temp; +} +inline ::tensorflow::CoordinatedTask* CoordinatedTaskStateInfo::_internal_mutable_task() { + + if (_impl_.task_ == nullptr) { + auto* p = CreateMaybeMessage<::tensorflow::CoordinatedTask>(GetArenaForAllocation()); + _impl_.task_ = p; + } + return _impl_.task_; +} +inline ::tensorflow::CoordinatedTask* CoordinatedTaskStateInfo::mutable_task() { + ::tensorflow::CoordinatedTask* _msg = 
_internal_mutable_task(); + // @@protoc_insertion_point(field_mutable:tensorflow.CoordinatedTaskStateInfo.task) + return _msg; +} +inline void CoordinatedTaskStateInfo::set_allocated_task(::tensorflow::CoordinatedTask* task) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.task_; + } + if (task) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(task); + if (message_arena != submessage_arena) { + task = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, task, submessage_arena); + } + + } else { + + } + _impl_.task_ = task; + // @@protoc_insertion_point(field_set_allocated:tensorflow.CoordinatedTaskStateInfo.task) +} + +// .tensorflow.CoordinatedTaskState state = 2; +inline void CoordinatedTaskStateInfo::clear_state() { + _impl_.state_ = 0; +} +inline ::tensorflow::CoordinatedTaskState CoordinatedTaskStateInfo::_internal_state() const { + return static_cast< ::tensorflow::CoordinatedTaskState >(_impl_.state_); +} +inline ::tensorflow::CoordinatedTaskState CoordinatedTaskStateInfo::state() const { + // @@protoc_insertion_point(field_get:tensorflow.CoordinatedTaskStateInfo.state) + return _internal_state(); +} +inline void CoordinatedTaskStateInfo::_internal_set_state(::tensorflow::CoordinatedTaskState value) { + + _impl_.state_ = value; +} +inline void CoordinatedTaskStateInfo::set_state(::tensorflow::CoordinatedTaskState value) { + _internal_set_state(value); + // @@protoc_insertion_point(field_set:tensorflow.CoordinatedTaskStateInfo.state) +} + +// int32 error_code = 3; +inline void CoordinatedTaskStateInfo::clear_error_code() { + _impl_.error_code_ = 0; +} +inline int32_t CoordinatedTaskStateInfo::_internal_error_code() const { + return _impl_.error_code_; +} +inline int32_t CoordinatedTaskStateInfo::error_code() const { + // @@protoc_insertion_point(field_get:tensorflow.CoordinatedTaskStateInfo.error_code) + 
return _internal_error_code(); +} +inline void CoordinatedTaskStateInfo::_internal_set_error_code(int32_t value) { + + _impl_.error_code_ = value; +} +inline void CoordinatedTaskStateInfo::set_error_code(int32_t value) { + _internal_set_error_code(value); + // @@protoc_insertion_point(field_set:tensorflow.CoordinatedTaskStateInfo.error_code) +} + +// string error_message = 4; +inline void CoordinatedTaskStateInfo::clear_error_message() { + _impl_.error_message_.ClearToEmpty(); +} +inline const std::string& CoordinatedTaskStateInfo::error_message() const { + // @@protoc_insertion_point(field_get:tensorflow.CoordinatedTaskStateInfo.error_message) + return _internal_error_message(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void CoordinatedTaskStateInfo::set_error_message(ArgT0&& arg0, ArgT... args) { + + _impl_.error_message_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.CoordinatedTaskStateInfo.error_message) +} +inline std::string* CoordinatedTaskStateInfo::mutable_error_message() { + std::string* _s = _internal_mutable_error_message(); + // @@protoc_insertion_point(field_mutable:tensorflow.CoordinatedTaskStateInfo.error_message) + return _s; +} +inline const std::string& CoordinatedTaskStateInfo::_internal_error_message() const { + return _impl_.error_message_.Get(); +} +inline void CoordinatedTaskStateInfo::_internal_set_error_message(const std::string& value) { + + _impl_.error_message_.Set(value, GetArenaForAllocation()); +} +inline std::string* CoordinatedTaskStateInfo::_internal_mutable_error_message() { + + return _impl_.error_message_.Mutable(GetArenaForAllocation()); +} +inline std::string* CoordinatedTaskStateInfo::release_error_message() { + // @@protoc_insertion_point(field_release:tensorflow.CoordinatedTaskStateInfo.error_message) + return _impl_.error_message_.Release(); +} +inline void CoordinatedTaskStateInfo::set_allocated_error_message(std::string* error_message) { + if 
(error_message != nullptr) { + + } else { + + } + _impl_.error_message_.SetAllocated(error_message, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.error_message_.IsDefault()) { + _impl_.error_message_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.CoordinatedTaskStateInfo.error_message) +} + +// .tensorflow.CoordinationServiceError error_payload = 5; +inline bool CoordinatedTaskStateInfo::_internal_has_error_payload() const { + return this != internal_default_instance() && _impl_.error_payload_ != nullptr; +} +inline bool CoordinatedTaskStateInfo::has_error_payload() const { + return _internal_has_error_payload(); +} +inline void CoordinatedTaskStateInfo::clear_error_payload() { + if (GetArenaForAllocation() == nullptr && _impl_.error_payload_ != nullptr) { + delete _impl_.error_payload_; + } + _impl_.error_payload_ = nullptr; +} +inline const ::tensorflow::CoordinationServiceError& CoordinatedTaskStateInfo::_internal_error_payload() const { + const ::tensorflow::CoordinationServiceError* p = _impl_.error_payload_; + return p != nullptr ? 
*p : reinterpret_cast( + ::tensorflow::_CoordinationServiceError_default_instance_); +} +inline const ::tensorflow::CoordinationServiceError& CoordinatedTaskStateInfo::error_payload() const { + // @@protoc_insertion_point(field_get:tensorflow.CoordinatedTaskStateInfo.error_payload) + return _internal_error_payload(); +} +inline void CoordinatedTaskStateInfo::unsafe_arena_set_allocated_error_payload( + ::tensorflow::CoordinationServiceError* error_payload) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.error_payload_); + } + _impl_.error_payload_ = error_payload; + if (error_payload) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.CoordinatedTaskStateInfo.error_payload) +} +inline ::tensorflow::CoordinationServiceError* CoordinatedTaskStateInfo::release_error_payload() { + + ::tensorflow::CoordinationServiceError* temp = _impl_.error_payload_; + _impl_.error_payload_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::CoordinationServiceError* CoordinatedTaskStateInfo::unsafe_arena_release_error_payload() { + // @@protoc_insertion_point(field_release:tensorflow.CoordinatedTaskStateInfo.error_payload) + + ::tensorflow::CoordinationServiceError* temp = _impl_.error_payload_; + _impl_.error_payload_ = nullptr; + return temp; +} +inline ::tensorflow::CoordinationServiceError* CoordinatedTaskStateInfo::_internal_mutable_error_payload() { + + if (_impl_.error_payload_ == nullptr) { + auto* p = 
CreateMaybeMessage<::tensorflow::CoordinationServiceError>(GetArenaForAllocation()); + _impl_.error_payload_ = p; + } + return _impl_.error_payload_; +} +inline ::tensorflow::CoordinationServiceError* CoordinatedTaskStateInfo::mutable_error_payload() { + ::tensorflow::CoordinationServiceError* _msg = _internal_mutable_error_payload(); + // @@protoc_insertion_point(field_mutable:tensorflow.CoordinatedTaskStateInfo.error_payload) + return _msg; +} +inline void CoordinatedTaskStateInfo::set_allocated_error_payload(::tensorflow::CoordinationServiceError* error_payload) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.error_payload_; + } + if (error_payload) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(error_payload); + if (message_arena != submessage_arena) { + error_payload = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, error_payload, submessage_arena); + } + + } else { + + } + _impl_.error_payload_ = error_payload; + // @@protoc_insertion_point(field_set_allocated:tensorflow.CoordinatedTaskStateInfo.error_payload) +} + +// ------------------------------------------------------------------- + +// DeviceInfo + +// repeated .google.protobuf.Any device = 1; +inline int DeviceInfo::_internal_device_size() const { + return _impl_.device_.size(); +} +inline int DeviceInfo::device_size() const { + return _internal_device_size(); +} +inline ::PROTOBUF_NAMESPACE_ID::Any* DeviceInfo::mutable_device(int index) { + // @@protoc_insertion_point(field_mutable:tensorflow.DeviceInfo.device) + return _impl_.device_.Mutable(index); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::PROTOBUF_NAMESPACE_ID::Any >* +DeviceInfo::mutable_device() { + // @@protoc_insertion_point(field_mutable_list:tensorflow.DeviceInfo.device) + return &_impl_.device_; +} +inline const ::PROTOBUF_NAMESPACE_ID::Any& 
DeviceInfo::_internal_device(int index) const { + return _impl_.device_.Get(index); +} +inline const ::PROTOBUF_NAMESPACE_ID::Any& DeviceInfo::device(int index) const { + // @@protoc_insertion_point(field_get:tensorflow.DeviceInfo.device) + return _internal_device(index); +} +inline ::PROTOBUF_NAMESPACE_ID::Any* DeviceInfo::_internal_add_device() { + return _impl_.device_.Add(); +} +inline ::PROTOBUF_NAMESPACE_ID::Any* DeviceInfo::add_device() { + ::PROTOBUF_NAMESPACE_ID::Any* _add = _internal_add_device(); + // @@protoc_insertion_point(field_add:tensorflow.DeviceInfo.device) + return _add; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::PROTOBUF_NAMESPACE_ID::Any >& +DeviceInfo::device() const { + // @@protoc_insertion_point(field_list:tensorflow.DeviceInfo.device) + return _impl_.device_; +} + +// ------------------------------------------------------------------- + +// RegisterTaskRequest + +// fixed64 incarnation = 3; +inline void RegisterTaskRequest::clear_incarnation() { + _impl_.incarnation_ = uint64_t{0u}; +} +inline uint64_t RegisterTaskRequest::_internal_incarnation() const { + return _impl_.incarnation_; +} +inline uint64_t RegisterTaskRequest::incarnation() const { + // @@protoc_insertion_point(field_get:tensorflow.RegisterTaskRequest.incarnation) + return _internal_incarnation(); +} +inline void RegisterTaskRequest::_internal_set_incarnation(uint64_t value) { + + _impl_.incarnation_ = value; +} +inline void RegisterTaskRequest::set_incarnation(uint64_t value) { + _internal_set_incarnation(value); + // @@protoc_insertion_point(field_set:tensorflow.RegisterTaskRequest.incarnation) +} + +// .tensorflow.CoordinatedTask source_task = 5; +inline bool RegisterTaskRequest::_internal_has_source_task() const { + return this != internal_default_instance() && _impl_.source_task_ != nullptr; +} +inline bool RegisterTaskRequest::has_source_task() const { + return _internal_has_source_task(); +} +inline void RegisterTaskRequest::clear_source_task() { + 
if (GetArenaForAllocation() == nullptr && _impl_.source_task_ != nullptr) { + delete _impl_.source_task_; + } + _impl_.source_task_ = nullptr; +} +inline const ::tensorflow::CoordinatedTask& RegisterTaskRequest::_internal_source_task() const { + const ::tensorflow::CoordinatedTask* p = _impl_.source_task_; + return p != nullptr ? *p : reinterpret_cast( + ::tensorflow::_CoordinatedTask_default_instance_); +} +inline const ::tensorflow::CoordinatedTask& RegisterTaskRequest::source_task() const { + // @@protoc_insertion_point(field_get:tensorflow.RegisterTaskRequest.source_task) + return _internal_source_task(); +} +inline void RegisterTaskRequest::unsafe_arena_set_allocated_source_task( + ::tensorflow::CoordinatedTask* source_task) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.source_task_); + } + _impl_.source_task_ = source_task; + if (source_task) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.RegisterTaskRequest.source_task) +} +inline ::tensorflow::CoordinatedTask* RegisterTaskRequest::release_source_task() { + + ::tensorflow::CoordinatedTask* temp = _impl_.source_task_; + _impl_.source_task_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::CoordinatedTask* RegisterTaskRequest::unsafe_arena_release_source_task() { + // @@protoc_insertion_point(field_release:tensorflow.RegisterTaskRequest.source_task) + + ::tensorflow::CoordinatedTask* temp = _impl_.source_task_; + _impl_.source_task_ = nullptr; + return 
temp; +} +inline ::tensorflow::CoordinatedTask* RegisterTaskRequest::_internal_mutable_source_task() { + + if (_impl_.source_task_ == nullptr) { + auto* p = CreateMaybeMessage<::tensorflow::CoordinatedTask>(GetArenaForAllocation()); + _impl_.source_task_ = p; + } + return _impl_.source_task_; +} +inline ::tensorflow::CoordinatedTask* RegisterTaskRequest::mutable_source_task() { + ::tensorflow::CoordinatedTask* _msg = _internal_mutable_source_task(); + // @@protoc_insertion_point(field_mutable:tensorflow.RegisterTaskRequest.source_task) + return _msg; +} +inline void RegisterTaskRequest::set_allocated_source_task(::tensorflow::CoordinatedTask* source_task) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.source_task_; + } + if (source_task) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(source_task); + if (message_arena != submessage_arena) { + source_task = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, source_task, submessage_arena); + } + + } else { + + } + _impl_.source_task_ = source_task; + // @@protoc_insertion_point(field_set_allocated:tensorflow.RegisterTaskRequest.source_task) +} + +// ------------------------------------------------------------------- + +// RegisterTaskResponse + +// fixed64 leader_incarnation = 1; +inline void RegisterTaskResponse::clear_leader_incarnation() { + _impl_.leader_incarnation_ = uint64_t{0u}; +} +inline uint64_t RegisterTaskResponse::_internal_leader_incarnation() const { + return _impl_.leader_incarnation_; +} +inline uint64_t RegisterTaskResponse::leader_incarnation() const { + // @@protoc_insertion_point(field_get:tensorflow.RegisterTaskResponse.leader_incarnation) + return _internal_leader_incarnation(); +} +inline void RegisterTaskResponse::_internal_set_leader_incarnation(uint64_t value) { + + _impl_.leader_incarnation_ = value; +} +inline void 
RegisterTaskResponse::set_leader_incarnation(uint64_t value) { + _internal_set_leader_incarnation(value); + // @@protoc_insertion_point(field_set:tensorflow.RegisterTaskResponse.leader_incarnation) +} + +// ------------------------------------------------------------------- + +// HeartbeatRequest + +// fixed64 incarnation = 3; +inline void HeartbeatRequest::clear_incarnation() { + _impl_.incarnation_ = uint64_t{0u}; +} +inline uint64_t HeartbeatRequest::_internal_incarnation() const { + return _impl_.incarnation_; +} +inline uint64_t HeartbeatRequest::incarnation() const { + // @@protoc_insertion_point(field_get:tensorflow.HeartbeatRequest.incarnation) + return _internal_incarnation(); +} +inline void HeartbeatRequest::_internal_set_incarnation(uint64_t value) { + + _impl_.incarnation_ = value; +} +inline void HeartbeatRequest::set_incarnation(uint64_t value) { + _internal_set_incarnation(value); + // @@protoc_insertion_point(field_set:tensorflow.HeartbeatRequest.incarnation) +} + +// .tensorflow.CoordinatedTask source_task = 4; +inline bool HeartbeatRequest::_internal_has_source_task() const { + return this != internal_default_instance() && _impl_.source_task_ != nullptr; +} +inline bool HeartbeatRequest::has_source_task() const { + return _internal_has_source_task(); +} +inline void HeartbeatRequest::clear_source_task() { + if (GetArenaForAllocation() == nullptr && _impl_.source_task_ != nullptr) { + delete _impl_.source_task_; + } + _impl_.source_task_ = nullptr; +} +inline const ::tensorflow::CoordinatedTask& HeartbeatRequest::_internal_source_task() const { + const ::tensorflow::CoordinatedTask* p = _impl_.source_task_; + return p != nullptr ? 
*p : reinterpret_cast( + ::tensorflow::_CoordinatedTask_default_instance_); +} +inline const ::tensorflow::CoordinatedTask& HeartbeatRequest::source_task() const { + // @@protoc_insertion_point(field_get:tensorflow.HeartbeatRequest.source_task) + return _internal_source_task(); +} +inline void HeartbeatRequest::unsafe_arena_set_allocated_source_task( + ::tensorflow::CoordinatedTask* source_task) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.source_task_); + } + _impl_.source_task_ = source_task; + if (source_task) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.HeartbeatRequest.source_task) +} +inline ::tensorflow::CoordinatedTask* HeartbeatRequest::release_source_task() { + + ::tensorflow::CoordinatedTask* temp = _impl_.source_task_; + _impl_.source_task_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::CoordinatedTask* HeartbeatRequest::unsafe_arena_release_source_task() { + // @@protoc_insertion_point(field_release:tensorflow.HeartbeatRequest.source_task) + + ::tensorflow::CoordinatedTask* temp = _impl_.source_task_; + _impl_.source_task_ = nullptr; + return temp; +} +inline ::tensorflow::CoordinatedTask* HeartbeatRequest::_internal_mutable_source_task() { + + if (_impl_.source_task_ == nullptr) { + auto* p = CreateMaybeMessage<::tensorflow::CoordinatedTask>(GetArenaForAllocation()); + _impl_.source_task_ = p; + } + return _impl_.source_task_; +} +inline ::tensorflow::CoordinatedTask* 
HeartbeatRequest::mutable_source_task() { + ::tensorflow::CoordinatedTask* _msg = _internal_mutable_source_task(); + // @@protoc_insertion_point(field_mutable:tensorflow.HeartbeatRequest.source_task) + return _msg; +} +inline void HeartbeatRequest::set_allocated_source_task(::tensorflow::CoordinatedTask* source_task) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.source_task_; + } + if (source_task) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(source_task); + if (message_arena != submessage_arena) { + source_task = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, source_task, submessage_arena); + } + + } else { + + } + _impl_.source_task_ = source_task; + // @@protoc_insertion_point(field_set_allocated:tensorflow.HeartbeatRequest.source_task) +} + +// ------------------------------------------------------------------- + +// HeartbeatResponse + +// fixed64 leader_incarnation = 1; +inline void HeartbeatResponse::clear_leader_incarnation() { + _impl_.leader_incarnation_ = uint64_t{0u}; +} +inline uint64_t HeartbeatResponse::_internal_leader_incarnation() const { + return _impl_.leader_incarnation_; +} +inline uint64_t HeartbeatResponse::leader_incarnation() const { + // @@protoc_insertion_point(field_get:tensorflow.HeartbeatResponse.leader_incarnation) + return _internal_leader_incarnation(); +} +inline void HeartbeatResponse::_internal_set_leader_incarnation(uint64_t value) { + + _impl_.leader_incarnation_ = value; +} +inline void HeartbeatResponse::set_leader_incarnation(uint64_t value) { + _internal_set_leader_incarnation(value); + // @@protoc_insertion_point(field_set:tensorflow.HeartbeatResponse.leader_incarnation) +} + +// ------------------------------------------------------------------- + +// WaitForAllTasksRequest + +// .tensorflow.CoordinatedTask source_task = 5; +inline bool 
WaitForAllTasksRequest::_internal_has_source_task() const { + return this != internal_default_instance() && _impl_.source_task_ != nullptr; +} +inline bool WaitForAllTasksRequest::has_source_task() const { + return _internal_has_source_task(); +} +inline void WaitForAllTasksRequest::clear_source_task() { + if (GetArenaForAllocation() == nullptr && _impl_.source_task_ != nullptr) { + delete _impl_.source_task_; + } + _impl_.source_task_ = nullptr; +} +inline const ::tensorflow::CoordinatedTask& WaitForAllTasksRequest::_internal_source_task() const { + const ::tensorflow::CoordinatedTask* p = _impl_.source_task_; + return p != nullptr ? *p : reinterpret_cast( + ::tensorflow::_CoordinatedTask_default_instance_); +} +inline const ::tensorflow::CoordinatedTask& WaitForAllTasksRequest::source_task() const { + // @@protoc_insertion_point(field_get:tensorflow.WaitForAllTasksRequest.source_task) + return _internal_source_task(); +} +inline void WaitForAllTasksRequest::unsafe_arena_set_allocated_source_task( + ::tensorflow::CoordinatedTask* source_task) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.source_task_); + } + _impl_.source_task_ = source_task; + if (source_task) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.WaitForAllTasksRequest.source_task) +} +inline ::tensorflow::CoordinatedTask* WaitForAllTasksRequest::release_source_task() { + + ::tensorflow::CoordinatedTask* temp = _impl_.source_task_; + _impl_.source_task_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // 
!PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::CoordinatedTask* WaitForAllTasksRequest::unsafe_arena_release_source_task() { + // @@protoc_insertion_point(field_release:tensorflow.WaitForAllTasksRequest.source_task) + + ::tensorflow::CoordinatedTask* temp = _impl_.source_task_; + _impl_.source_task_ = nullptr; + return temp; +} +inline ::tensorflow::CoordinatedTask* WaitForAllTasksRequest::_internal_mutable_source_task() { + + if (_impl_.source_task_ == nullptr) { + auto* p = CreateMaybeMessage<::tensorflow::CoordinatedTask>(GetArenaForAllocation()); + _impl_.source_task_ = p; + } + return _impl_.source_task_; +} +inline ::tensorflow::CoordinatedTask* WaitForAllTasksRequest::mutable_source_task() { + ::tensorflow::CoordinatedTask* _msg = _internal_mutable_source_task(); + // @@protoc_insertion_point(field_mutable:tensorflow.WaitForAllTasksRequest.source_task) + return _msg; +} +inline void WaitForAllTasksRequest::set_allocated_source_task(::tensorflow::CoordinatedTask* source_task) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.source_task_; + } + if (source_task) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(source_task); + if (message_arena != submessage_arena) { + source_task = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, source_task, submessage_arena); + } + + } else { + + } + _impl_.source_task_ = source_task; + // @@protoc_insertion_point(field_set_allocated:tensorflow.WaitForAllTasksRequest.source_task) +} + +// .tensorflow.DeviceInfo device_info = 6; +inline bool WaitForAllTasksRequest::_internal_has_device_info() const { + return this != internal_default_instance() && _impl_.device_info_ != nullptr; +} +inline bool WaitForAllTasksRequest::has_device_info() const { + return _internal_has_device_info(); +} +inline void WaitForAllTasksRequest::clear_device_info() { + 
if (GetArenaForAllocation() == nullptr && _impl_.device_info_ != nullptr) { + delete _impl_.device_info_; + } + _impl_.device_info_ = nullptr; +} +inline const ::tensorflow::DeviceInfo& WaitForAllTasksRequest::_internal_device_info() const { + const ::tensorflow::DeviceInfo* p = _impl_.device_info_; + return p != nullptr ? *p : reinterpret_cast( + ::tensorflow::_DeviceInfo_default_instance_); +} +inline const ::tensorflow::DeviceInfo& WaitForAllTasksRequest::device_info() const { + // @@protoc_insertion_point(field_get:tensorflow.WaitForAllTasksRequest.device_info) + return _internal_device_info(); +} +inline void WaitForAllTasksRequest::unsafe_arena_set_allocated_device_info( + ::tensorflow::DeviceInfo* device_info) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.device_info_); + } + _impl_.device_info_ = device_info; + if (device_info) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.WaitForAllTasksRequest.device_info) +} +inline ::tensorflow::DeviceInfo* WaitForAllTasksRequest::release_device_info() { + + ::tensorflow::DeviceInfo* temp = _impl_.device_info_; + _impl_.device_info_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::DeviceInfo* WaitForAllTasksRequest::unsafe_arena_release_device_info() { + // @@protoc_insertion_point(field_release:tensorflow.WaitForAllTasksRequest.device_info) + + ::tensorflow::DeviceInfo* temp = _impl_.device_info_; + _impl_.device_info_ = nullptr; + return temp; +} +inline 
::tensorflow::DeviceInfo* WaitForAllTasksRequest::_internal_mutable_device_info() { + + if (_impl_.device_info_ == nullptr) { + auto* p = CreateMaybeMessage<::tensorflow::DeviceInfo>(GetArenaForAllocation()); + _impl_.device_info_ = p; + } + return _impl_.device_info_; +} +inline ::tensorflow::DeviceInfo* WaitForAllTasksRequest::mutable_device_info() { + ::tensorflow::DeviceInfo* _msg = _internal_mutable_device_info(); + // @@protoc_insertion_point(field_mutable:tensorflow.WaitForAllTasksRequest.device_info) + return _msg; +} +inline void WaitForAllTasksRequest::set_allocated_device_info(::tensorflow::DeviceInfo* device_info) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.device_info_; + } + if (device_info) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(device_info); + if (message_arena != submessage_arena) { + device_info = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, device_info, submessage_arena); + } + + } else { + + } + _impl_.device_info_ = device_info; + // @@protoc_insertion_point(field_set_allocated:tensorflow.WaitForAllTasksRequest.device_info) +} + +// ------------------------------------------------------------------- + +// WaitForAllTasksResponse + +// fixed64 leader_incarnation = 1; +inline void WaitForAllTasksResponse::clear_leader_incarnation() { + _impl_.leader_incarnation_ = uint64_t{0u}; +} +inline uint64_t WaitForAllTasksResponse::_internal_leader_incarnation() const { + return _impl_.leader_incarnation_; +} +inline uint64_t WaitForAllTasksResponse::leader_incarnation() const { + // @@protoc_insertion_point(field_get:tensorflow.WaitForAllTasksResponse.leader_incarnation) + return _internal_leader_incarnation(); +} +inline void WaitForAllTasksResponse::_internal_set_leader_incarnation(uint64_t value) { + + _impl_.leader_incarnation_ = value; +} +inline void 
WaitForAllTasksResponse::set_leader_incarnation(uint64_t value) { + _internal_set_leader_incarnation(value); + // @@protoc_insertion_point(field_set:tensorflow.WaitForAllTasksResponse.leader_incarnation) +} + +// .tensorflow.DeviceInfo device_info = 4; +inline bool WaitForAllTasksResponse::_internal_has_device_info() const { + return this != internal_default_instance() && _impl_.device_info_ != nullptr; +} +inline bool WaitForAllTasksResponse::has_device_info() const { + return _internal_has_device_info(); +} +inline void WaitForAllTasksResponse::clear_device_info() { + if (GetArenaForAllocation() == nullptr && _impl_.device_info_ != nullptr) { + delete _impl_.device_info_; + } + _impl_.device_info_ = nullptr; +} +inline const ::tensorflow::DeviceInfo& WaitForAllTasksResponse::_internal_device_info() const { + const ::tensorflow::DeviceInfo* p = _impl_.device_info_; + return p != nullptr ? *p : reinterpret_cast( + ::tensorflow::_DeviceInfo_default_instance_); +} +inline const ::tensorflow::DeviceInfo& WaitForAllTasksResponse::device_info() const { + // @@protoc_insertion_point(field_get:tensorflow.WaitForAllTasksResponse.device_info) + return _internal_device_info(); +} +inline void WaitForAllTasksResponse::unsafe_arena_set_allocated_device_info( + ::tensorflow::DeviceInfo* device_info) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.device_info_); + } + _impl_.device_info_ = device_info; + if (device_info) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.WaitForAllTasksResponse.device_info) +} +inline ::tensorflow::DeviceInfo* WaitForAllTasksResponse::release_device_info() { + + ::tensorflow::DeviceInfo* temp = _impl_.device_info_; + _impl_.device_info_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + 
if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::DeviceInfo* WaitForAllTasksResponse::unsafe_arena_release_device_info() { + // @@protoc_insertion_point(field_release:tensorflow.WaitForAllTasksResponse.device_info) + + ::tensorflow::DeviceInfo* temp = _impl_.device_info_; + _impl_.device_info_ = nullptr; + return temp; +} +inline ::tensorflow::DeviceInfo* WaitForAllTasksResponse::_internal_mutable_device_info() { + + if (_impl_.device_info_ == nullptr) { + auto* p = CreateMaybeMessage<::tensorflow::DeviceInfo>(GetArenaForAllocation()); + _impl_.device_info_ = p; + } + return _impl_.device_info_; +} +inline ::tensorflow::DeviceInfo* WaitForAllTasksResponse::mutable_device_info() { + ::tensorflow::DeviceInfo* _msg = _internal_mutable_device_info(); + // @@protoc_insertion_point(field_mutable:tensorflow.WaitForAllTasksResponse.device_info) + return _msg; +} +inline void WaitForAllTasksResponse::set_allocated_device_info(::tensorflow::DeviceInfo* device_info) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.device_info_; + } + if (device_info) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(device_info); + if (message_arena != submessage_arena) { + device_info = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, device_info, submessage_arena); + } + + } else { + + } + _impl_.device_info_ = device_info; + // @@protoc_insertion_point(field_set_allocated:tensorflow.WaitForAllTasksResponse.device_info) +} + +// ------------------------------------------------------------------- + +// ShutdownTaskRequest + +// .tensorflow.CoordinatedTask source_task = 1; +inline bool 
ShutdownTaskRequest::_internal_has_source_task() const { + return this != internal_default_instance() && _impl_.source_task_ != nullptr; +} +inline bool ShutdownTaskRequest::has_source_task() const { + return _internal_has_source_task(); +} +inline void ShutdownTaskRequest::clear_source_task() { + if (GetArenaForAllocation() == nullptr && _impl_.source_task_ != nullptr) { + delete _impl_.source_task_; + } + _impl_.source_task_ = nullptr; +} +inline const ::tensorflow::CoordinatedTask& ShutdownTaskRequest::_internal_source_task() const { + const ::tensorflow::CoordinatedTask* p = _impl_.source_task_; + return p != nullptr ? *p : reinterpret_cast( + ::tensorflow::_CoordinatedTask_default_instance_); +} +inline const ::tensorflow::CoordinatedTask& ShutdownTaskRequest::source_task() const { + // @@protoc_insertion_point(field_get:tensorflow.ShutdownTaskRequest.source_task) + return _internal_source_task(); +} +inline void ShutdownTaskRequest::unsafe_arena_set_allocated_source_task( + ::tensorflow::CoordinatedTask* source_task) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.source_task_); + } + _impl_.source_task_ = source_task; + if (source_task) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.ShutdownTaskRequest.source_task) +} +inline ::tensorflow::CoordinatedTask* ShutdownTaskRequest::release_source_task() { + + ::tensorflow::CoordinatedTask* temp = _impl_.source_task_; + _impl_.source_task_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return 
temp; +} +inline ::tensorflow::CoordinatedTask* ShutdownTaskRequest::unsafe_arena_release_source_task() { + // @@protoc_insertion_point(field_release:tensorflow.ShutdownTaskRequest.source_task) + + ::tensorflow::CoordinatedTask* temp = _impl_.source_task_; + _impl_.source_task_ = nullptr; + return temp; +} +inline ::tensorflow::CoordinatedTask* ShutdownTaskRequest::_internal_mutable_source_task() { + + if (_impl_.source_task_ == nullptr) { + auto* p = CreateMaybeMessage<::tensorflow::CoordinatedTask>(GetArenaForAllocation()); + _impl_.source_task_ = p; + } + return _impl_.source_task_; +} +inline ::tensorflow::CoordinatedTask* ShutdownTaskRequest::mutable_source_task() { + ::tensorflow::CoordinatedTask* _msg = _internal_mutable_source_task(); + // @@protoc_insertion_point(field_mutable:tensorflow.ShutdownTaskRequest.source_task) + return _msg; +} +inline void ShutdownTaskRequest::set_allocated_source_task(::tensorflow::CoordinatedTask* source_task) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.source_task_; + } + if (source_task) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(source_task); + if (message_arena != submessage_arena) { + source_task = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, source_task, submessage_arena); + } + + } else { + + } + _impl_.source_task_ = source_task; + // @@protoc_insertion_point(field_set_allocated:tensorflow.ShutdownTaskRequest.source_task) +} + +// ------------------------------------------------------------------- + +// ShutdownTaskResponse + +// ------------------------------------------------------------------- + +// ResetTaskRequest + +// .tensorflow.CoordinatedTask source_task = 1; +inline bool ResetTaskRequest::_internal_has_source_task() const { + return this != internal_default_instance() && _impl_.source_task_ != nullptr; +} +inline bool 
ResetTaskRequest::has_source_task() const { + return _internal_has_source_task(); +} +inline void ResetTaskRequest::clear_source_task() { + if (GetArenaForAllocation() == nullptr && _impl_.source_task_ != nullptr) { + delete _impl_.source_task_; + } + _impl_.source_task_ = nullptr; +} +inline const ::tensorflow::CoordinatedTask& ResetTaskRequest::_internal_source_task() const { + const ::tensorflow::CoordinatedTask* p = _impl_.source_task_; + return p != nullptr ? *p : reinterpret_cast( + ::tensorflow::_CoordinatedTask_default_instance_); +} +inline const ::tensorflow::CoordinatedTask& ResetTaskRequest::source_task() const { + // @@protoc_insertion_point(field_get:tensorflow.ResetTaskRequest.source_task) + return _internal_source_task(); +} +inline void ResetTaskRequest::unsafe_arena_set_allocated_source_task( + ::tensorflow::CoordinatedTask* source_task) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.source_task_); + } + _impl_.source_task_ = source_task; + if (source_task) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.ResetTaskRequest.source_task) +} +inline ::tensorflow::CoordinatedTask* ResetTaskRequest::release_source_task() { + + ::tensorflow::CoordinatedTask* temp = _impl_.source_task_; + _impl_.source_task_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::CoordinatedTask* ResetTaskRequest::unsafe_arena_release_source_task() { + // 
@@protoc_insertion_point(field_release:tensorflow.ResetTaskRequest.source_task) + + ::tensorflow::CoordinatedTask* temp = _impl_.source_task_; + _impl_.source_task_ = nullptr; + return temp; +} +inline ::tensorflow::CoordinatedTask* ResetTaskRequest::_internal_mutable_source_task() { + + if (_impl_.source_task_ == nullptr) { + auto* p = CreateMaybeMessage<::tensorflow::CoordinatedTask>(GetArenaForAllocation()); + _impl_.source_task_ = p; + } + return _impl_.source_task_; +} +inline ::tensorflow::CoordinatedTask* ResetTaskRequest::mutable_source_task() { + ::tensorflow::CoordinatedTask* _msg = _internal_mutable_source_task(); + // @@protoc_insertion_point(field_mutable:tensorflow.ResetTaskRequest.source_task) + return _msg; +} +inline void ResetTaskRequest::set_allocated_source_task(::tensorflow::CoordinatedTask* source_task) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.source_task_; + } + if (source_task) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(source_task); + if (message_arena != submessage_arena) { + source_task = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, source_task, submessage_arena); + } + + } else { + + } + _impl_.source_task_ = source_task; + // @@protoc_insertion_point(field_set_allocated:tensorflow.ResetTaskRequest.source_task) +} + +// ------------------------------------------------------------------- + +// ResetTaskResponse + +// ------------------------------------------------------------------- + +// ReportErrorToTaskRequest + +// int32 error_code = 1; +inline void ReportErrorToTaskRequest::clear_error_code() { + _impl_.error_code_ = 0; +} +inline int32_t ReportErrorToTaskRequest::_internal_error_code() const { + return _impl_.error_code_; +} +inline int32_t ReportErrorToTaskRequest::error_code() const { + // 
@@protoc_insertion_point(field_get:tensorflow.ReportErrorToTaskRequest.error_code) + return _internal_error_code(); +} +inline void ReportErrorToTaskRequest::_internal_set_error_code(int32_t value) { + + _impl_.error_code_ = value; +} +inline void ReportErrorToTaskRequest::set_error_code(int32_t value) { + _internal_set_error_code(value); + // @@protoc_insertion_point(field_set:tensorflow.ReportErrorToTaskRequest.error_code) +} + +// string error_message = 2; +inline void ReportErrorToTaskRequest::clear_error_message() { + _impl_.error_message_.ClearToEmpty(); +} +inline const std::string& ReportErrorToTaskRequest::error_message() const { + // @@protoc_insertion_point(field_get:tensorflow.ReportErrorToTaskRequest.error_message) + return _internal_error_message(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void ReportErrorToTaskRequest::set_error_message(ArgT0&& arg0, ArgT... args) { + + _impl_.error_message_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.ReportErrorToTaskRequest.error_message) +} +inline std::string* ReportErrorToTaskRequest::mutable_error_message() { + std::string* _s = _internal_mutable_error_message(); + // @@protoc_insertion_point(field_mutable:tensorflow.ReportErrorToTaskRequest.error_message) + return _s; +} +inline const std::string& ReportErrorToTaskRequest::_internal_error_message() const { + return _impl_.error_message_.Get(); +} +inline void ReportErrorToTaskRequest::_internal_set_error_message(const std::string& value) { + + _impl_.error_message_.Set(value, GetArenaForAllocation()); +} +inline std::string* ReportErrorToTaskRequest::_internal_mutable_error_message() { + + return _impl_.error_message_.Mutable(GetArenaForAllocation()); +} +inline std::string* ReportErrorToTaskRequest::release_error_message() { + // @@protoc_insertion_point(field_release:tensorflow.ReportErrorToTaskRequest.error_message) + return _impl_.error_message_.Release(); +} +inline void 
ReportErrorToTaskRequest::set_allocated_error_message(std::string* error_message) { + if (error_message != nullptr) { + + } else { + + } + _impl_.error_message_.SetAllocated(error_message, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.error_message_.IsDefault()) { + _impl_.error_message_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.ReportErrorToTaskRequest.error_message) +} + +// .tensorflow.CoordinationServiceError error_payload = 5; +inline bool ReportErrorToTaskRequest::_internal_has_error_payload() const { + return this != internal_default_instance() && _impl_.error_payload_ != nullptr; +} +inline bool ReportErrorToTaskRequest::has_error_payload() const { + return _internal_has_error_payload(); +} +inline void ReportErrorToTaskRequest::clear_error_payload() { + if (GetArenaForAllocation() == nullptr && _impl_.error_payload_ != nullptr) { + delete _impl_.error_payload_; + } + _impl_.error_payload_ = nullptr; +} +inline const ::tensorflow::CoordinationServiceError& ReportErrorToTaskRequest::_internal_error_payload() const { + const ::tensorflow::CoordinationServiceError* p = _impl_.error_payload_; + return p != nullptr ? 
*p : reinterpret_cast( + ::tensorflow::_CoordinationServiceError_default_instance_); +} +inline const ::tensorflow::CoordinationServiceError& ReportErrorToTaskRequest::error_payload() const { + // @@protoc_insertion_point(field_get:tensorflow.ReportErrorToTaskRequest.error_payload) + return _internal_error_payload(); +} +inline void ReportErrorToTaskRequest::unsafe_arena_set_allocated_error_payload( + ::tensorflow::CoordinationServiceError* error_payload) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.error_payload_); + } + _impl_.error_payload_ = error_payload; + if (error_payload) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.ReportErrorToTaskRequest.error_payload) +} +inline ::tensorflow::CoordinationServiceError* ReportErrorToTaskRequest::release_error_payload() { + + ::tensorflow::CoordinationServiceError* temp = _impl_.error_payload_; + _impl_.error_payload_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::CoordinationServiceError* ReportErrorToTaskRequest::unsafe_arena_release_error_payload() { + // @@protoc_insertion_point(field_release:tensorflow.ReportErrorToTaskRequest.error_payload) + + ::tensorflow::CoordinationServiceError* temp = _impl_.error_payload_; + _impl_.error_payload_ = nullptr; + return temp; +} +inline ::tensorflow::CoordinationServiceError* ReportErrorToTaskRequest::_internal_mutable_error_payload() { + + if (_impl_.error_payload_ == nullptr) { + auto* p = 
CreateMaybeMessage<::tensorflow::CoordinationServiceError>(GetArenaForAllocation()); + _impl_.error_payload_ = p; + } + return _impl_.error_payload_; +} +inline ::tensorflow::CoordinationServiceError* ReportErrorToTaskRequest::mutable_error_payload() { + ::tensorflow::CoordinationServiceError* _msg = _internal_mutable_error_payload(); + // @@protoc_insertion_point(field_mutable:tensorflow.ReportErrorToTaskRequest.error_payload) + return _msg; +} +inline void ReportErrorToTaskRequest::set_allocated_error_payload(::tensorflow::CoordinationServiceError* error_payload) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.error_payload_; + } + if (error_payload) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(error_payload); + if (message_arena != submessage_arena) { + error_payload = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, error_payload, submessage_arena); + } + + } else { + + } + _impl_.error_payload_ = error_payload; + // @@protoc_insertion_point(field_set_allocated:tensorflow.ReportErrorToTaskRequest.error_payload) +} + +// ------------------------------------------------------------------- + +// ReportErrorToTaskResponse + +// ------------------------------------------------------------------- + +// ReportErrorToServiceRequest + +// int32 error_code = 1; +inline void ReportErrorToServiceRequest::clear_error_code() { + _impl_.error_code_ = 0; +} +inline int32_t ReportErrorToServiceRequest::_internal_error_code() const { + return _impl_.error_code_; +} +inline int32_t ReportErrorToServiceRequest::error_code() const { + // @@protoc_insertion_point(field_get:tensorflow.ReportErrorToServiceRequest.error_code) + return _internal_error_code(); +} +inline void ReportErrorToServiceRequest::_internal_set_error_code(int32_t value) { + + _impl_.error_code_ = value; +} +inline void 
ReportErrorToServiceRequest::set_error_code(int32_t value) { + _internal_set_error_code(value); + // @@protoc_insertion_point(field_set:tensorflow.ReportErrorToServiceRequest.error_code) +} + +// string error_message = 2; +inline void ReportErrorToServiceRequest::clear_error_message() { + _impl_.error_message_.ClearToEmpty(); +} +inline const std::string& ReportErrorToServiceRequest::error_message() const { + // @@protoc_insertion_point(field_get:tensorflow.ReportErrorToServiceRequest.error_message) + return _internal_error_message(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void ReportErrorToServiceRequest::set_error_message(ArgT0&& arg0, ArgT... args) { + + _impl_.error_message_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.ReportErrorToServiceRequest.error_message) +} +inline std::string* ReportErrorToServiceRequest::mutable_error_message() { + std::string* _s = _internal_mutable_error_message(); + // @@protoc_insertion_point(field_mutable:tensorflow.ReportErrorToServiceRequest.error_message) + return _s; +} +inline const std::string& ReportErrorToServiceRequest::_internal_error_message() const { + return _impl_.error_message_.Get(); +} +inline void ReportErrorToServiceRequest::_internal_set_error_message(const std::string& value) { + + _impl_.error_message_.Set(value, GetArenaForAllocation()); +} +inline std::string* ReportErrorToServiceRequest::_internal_mutable_error_message() { + + return _impl_.error_message_.Mutable(GetArenaForAllocation()); +} +inline std::string* ReportErrorToServiceRequest::release_error_message() { + // @@protoc_insertion_point(field_release:tensorflow.ReportErrorToServiceRequest.error_message) + return _impl_.error_message_.Release(); +} +inline void ReportErrorToServiceRequest::set_allocated_error_message(std::string* error_message) { + if (error_message != nullptr) { + + } else { + + } + _impl_.error_message_.SetAllocated(error_message, GetArenaForAllocation()); 
+#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.error_message_.IsDefault()) { + _impl_.error_message_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.ReportErrorToServiceRequest.error_message) +} + +// .tensorflow.CoordinatedTask error_origin = 5; +inline bool ReportErrorToServiceRequest::_internal_has_error_origin() const { + return this != internal_default_instance() && _impl_.error_origin_ != nullptr; +} +inline bool ReportErrorToServiceRequest::has_error_origin() const { + return _internal_has_error_origin(); +} +inline void ReportErrorToServiceRequest::clear_error_origin() { + if (GetArenaForAllocation() == nullptr && _impl_.error_origin_ != nullptr) { + delete _impl_.error_origin_; + } + _impl_.error_origin_ = nullptr; +} +inline const ::tensorflow::CoordinatedTask& ReportErrorToServiceRequest::_internal_error_origin() const { + const ::tensorflow::CoordinatedTask* p = _impl_.error_origin_; + return p != nullptr ? 
*p : reinterpret_cast( + ::tensorflow::_CoordinatedTask_default_instance_); +} +inline const ::tensorflow::CoordinatedTask& ReportErrorToServiceRequest::error_origin() const { + // @@protoc_insertion_point(field_get:tensorflow.ReportErrorToServiceRequest.error_origin) + return _internal_error_origin(); +} +inline void ReportErrorToServiceRequest::unsafe_arena_set_allocated_error_origin( + ::tensorflow::CoordinatedTask* error_origin) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.error_origin_); + } + _impl_.error_origin_ = error_origin; + if (error_origin) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.ReportErrorToServiceRequest.error_origin) +} +inline ::tensorflow::CoordinatedTask* ReportErrorToServiceRequest::release_error_origin() { + + ::tensorflow::CoordinatedTask* temp = _impl_.error_origin_; + _impl_.error_origin_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::CoordinatedTask* ReportErrorToServiceRequest::unsafe_arena_release_error_origin() { + // @@protoc_insertion_point(field_release:tensorflow.ReportErrorToServiceRequest.error_origin) + + ::tensorflow::CoordinatedTask* temp = _impl_.error_origin_; + _impl_.error_origin_ = nullptr; + return temp; +} +inline ::tensorflow::CoordinatedTask* ReportErrorToServiceRequest::_internal_mutable_error_origin() { + + if (_impl_.error_origin_ == nullptr) { + auto* p = CreateMaybeMessage<::tensorflow::CoordinatedTask>(GetArenaForAllocation()); + 
_impl_.error_origin_ = p; + } + return _impl_.error_origin_; +} +inline ::tensorflow::CoordinatedTask* ReportErrorToServiceRequest::mutable_error_origin() { + ::tensorflow::CoordinatedTask* _msg = _internal_mutable_error_origin(); + // @@protoc_insertion_point(field_mutable:tensorflow.ReportErrorToServiceRequest.error_origin) + return _msg; +} +inline void ReportErrorToServiceRequest::set_allocated_error_origin(::tensorflow::CoordinatedTask* error_origin) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.error_origin_; + } + if (error_origin) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(error_origin); + if (message_arena != submessage_arena) { + error_origin = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, error_origin, submessage_arena); + } + + } else { + + } + _impl_.error_origin_ = error_origin; + // @@protoc_insertion_point(field_set_allocated:tensorflow.ReportErrorToServiceRequest.error_origin) +} + +// ------------------------------------------------------------------- + +// ReportErrorToServiceResponse + +// ------------------------------------------------------------------- + +// GetTaskStateRequest + +// repeated .tensorflow.CoordinatedTask source_task = 1; +inline int GetTaskStateRequest::_internal_source_task_size() const { + return _impl_.source_task_.size(); +} +inline int GetTaskStateRequest::source_task_size() const { + return _internal_source_task_size(); +} +inline void GetTaskStateRequest::clear_source_task() { + _impl_.source_task_.Clear(); +} +inline ::tensorflow::CoordinatedTask* GetTaskStateRequest::mutable_source_task(int index) { + // @@protoc_insertion_point(field_mutable:tensorflow.GetTaskStateRequest.source_task) + return _impl_.source_task_.Mutable(index); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::CoordinatedTask >* 
+GetTaskStateRequest::mutable_source_task() { + // @@protoc_insertion_point(field_mutable_list:tensorflow.GetTaskStateRequest.source_task) + return &_impl_.source_task_; +} +inline const ::tensorflow::CoordinatedTask& GetTaskStateRequest::_internal_source_task(int index) const { + return _impl_.source_task_.Get(index); +} +inline const ::tensorflow::CoordinatedTask& GetTaskStateRequest::source_task(int index) const { + // @@protoc_insertion_point(field_get:tensorflow.GetTaskStateRequest.source_task) + return _internal_source_task(index); +} +inline ::tensorflow::CoordinatedTask* GetTaskStateRequest::_internal_add_source_task() { + return _impl_.source_task_.Add(); +} +inline ::tensorflow::CoordinatedTask* GetTaskStateRequest::add_source_task() { + ::tensorflow::CoordinatedTask* _add = _internal_add_source_task(); + // @@protoc_insertion_point(field_add:tensorflow.GetTaskStateRequest.source_task) + return _add; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::CoordinatedTask >& +GetTaskStateRequest::source_task() const { + // @@protoc_insertion_point(field_list:tensorflow.GetTaskStateRequest.source_task) + return _impl_.source_task_; +} + +// ------------------------------------------------------------------- + +// GetTaskStateResponse + +// repeated .tensorflow.CoordinatedTaskStateInfo task_state = 1; +inline int GetTaskStateResponse::_internal_task_state_size() const { + return _impl_.task_state_.size(); +} +inline int GetTaskStateResponse::task_state_size() const { + return _internal_task_state_size(); +} +inline void GetTaskStateResponse::clear_task_state() { + _impl_.task_state_.Clear(); +} +inline ::tensorflow::CoordinatedTaskStateInfo* GetTaskStateResponse::mutable_task_state(int index) { + // @@protoc_insertion_point(field_mutable:tensorflow.GetTaskStateResponse.task_state) + return _impl_.task_state_.Mutable(index); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::CoordinatedTaskStateInfo >* 
+GetTaskStateResponse::mutable_task_state() { + // @@protoc_insertion_point(field_mutable_list:tensorflow.GetTaskStateResponse.task_state) + return &_impl_.task_state_; +} +inline const ::tensorflow::CoordinatedTaskStateInfo& GetTaskStateResponse::_internal_task_state(int index) const { + return _impl_.task_state_.Get(index); +} +inline const ::tensorflow::CoordinatedTaskStateInfo& GetTaskStateResponse::task_state(int index) const { + // @@protoc_insertion_point(field_get:tensorflow.GetTaskStateResponse.task_state) + return _internal_task_state(index); +} +inline ::tensorflow::CoordinatedTaskStateInfo* GetTaskStateResponse::_internal_add_task_state() { + return _impl_.task_state_.Add(); +} +inline ::tensorflow::CoordinatedTaskStateInfo* GetTaskStateResponse::add_task_state() { + ::tensorflow::CoordinatedTaskStateInfo* _add = _internal_add_task_state(); + // @@protoc_insertion_point(field_add:tensorflow.GetTaskStateResponse.task_state) + return _add; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::CoordinatedTaskStateInfo >& +GetTaskStateResponse::task_state() const { + // @@protoc_insertion_point(field_list:tensorflow.GetTaskStateResponse.task_state) + return _impl_.task_state_; +} + +// ------------------------------------------------------------------- + +// KeyValueEntry + +// string key = 1; +inline void KeyValueEntry::clear_key() { + _impl_.key_.ClearToEmpty(); +} +inline const std::string& KeyValueEntry::key() const { + // @@protoc_insertion_point(field_get:tensorflow.KeyValueEntry.key) + return _internal_key(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void KeyValueEntry::set_key(ArgT0&& arg0, ArgT... 
args) { + + _impl_.key_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.KeyValueEntry.key) +} +inline std::string* KeyValueEntry::mutable_key() { + std::string* _s = _internal_mutable_key(); + // @@protoc_insertion_point(field_mutable:tensorflow.KeyValueEntry.key) + return _s; +} +inline const std::string& KeyValueEntry::_internal_key() const { + return _impl_.key_.Get(); +} +inline void KeyValueEntry::_internal_set_key(const std::string& value) { + + _impl_.key_.Set(value, GetArenaForAllocation()); +} +inline std::string* KeyValueEntry::_internal_mutable_key() { + + return _impl_.key_.Mutable(GetArenaForAllocation()); +} +inline std::string* KeyValueEntry::release_key() { + // @@protoc_insertion_point(field_release:tensorflow.KeyValueEntry.key) + return _impl_.key_.Release(); +} +inline void KeyValueEntry::set_allocated_key(std::string* key) { + if (key != nullptr) { + + } else { + + } + _impl_.key_.SetAllocated(key, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.key_.IsDefault()) { + _impl_.key_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.KeyValueEntry.key) +} + +// bytes value = 2; +inline void KeyValueEntry::clear_value() { + _impl_.value_.ClearToEmpty(); +} +inline const std::string& KeyValueEntry::value() const { + // @@protoc_insertion_point(field_get:tensorflow.KeyValueEntry.value) + return _internal_value(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void KeyValueEntry::set_value(ArgT0&& arg0, ArgT... 
args) { + + _impl_.value_.SetBytes(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.KeyValueEntry.value) +} +inline std::string* KeyValueEntry::mutable_value() { + std::string* _s = _internal_mutable_value(); + // @@protoc_insertion_point(field_mutable:tensorflow.KeyValueEntry.value) + return _s; +} +inline const std::string& KeyValueEntry::_internal_value() const { + return _impl_.value_.Get(); +} +inline void KeyValueEntry::_internal_set_value(const std::string& value) { + + _impl_.value_.Set(value, GetArenaForAllocation()); +} +inline std::string* KeyValueEntry::_internal_mutable_value() { + + return _impl_.value_.Mutable(GetArenaForAllocation()); +} +inline std::string* KeyValueEntry::release_value() { + // @@protoc_insertion_point(field_release:tensorflow.KeyValueEntry.value) + return _impl_.value_.Release(); +} +inline void KeyValueEntry::set_allocated_value(std::string* value) { + if (value != nullptr) { + + } else { + + } + _impl_.value_.SetAllocated(value, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.value_.IsDefault()) { + _impl_.value_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.KeyValueEntry.value) +} + +// ------------------------------------------------------------------- + +// InsertKeyValueRequest + +// .tensorflow.KeyValueEntry kv = 1; +inline bool InsertKeyValueRequest::_internal_has_kv() const { + return this != internal_default_instance() && _impl_.kv_ != nullptr; +} +inline bool InsertKeyValueRequest::has_kv() const { + return _internal_has_kv(); +} +inline void InsertKeyValueRequest::clear_kv() { + if (GetArenaForAllocation() == nullptr && _impl_.kv_ != nullptr) { + delete _impl_.kv_; + } + _impl_.kv_ = nullptr; +} +inline const ::tensorflow::KeyValueEntry& InsertKeyValueRequest::_internal_kv() const { + const ::tensorflow::KeyValueEntry* p = 
_impl_.kv_; + return p != nullptr ? *p : reinterpret_cast( + ::tensorflow::_KeyValueEntry_default_instance_); +} +inline const ::tensorflow::KeyValueEntry& InsertKeyValueRequest::kv() const { + // @@protoc_insertion_point(field_get:tensorflow.InsertKeyValueRequest.kv) + return _internal_kv(); +} +inline void InsertKeyValueRequest::unsafe_arena_set_allocated_kv( + ::tensorflow::KeyValueEntry* kv) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.kv_); + } + _impl_.kv_ = kv; + if (kv) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.InsertKeyValueRequest.kv) +} +inline ::tensorflow::KeyValueEntry* InsertKeyValueRequest::release_kv() { + + ::tensorflow::KeyValueEntry* temp = _impl_.kv_; + _impl_.kv_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::KeyValueEntry* InsertKeyValueRequest::unsafe_arena_release_kv() { + // @@protoc_insertion_point(field_release:tensorflow.InsertKeyValueRequest.kv) + + ::tensorflow::KeyValueEntry* temp = _impl_.kv_; + _impl_.kv_ = nullptr; + return temp; +} +inline ::tensorflow::KeyValueEntry* InsertKeyValueRequest::_internal_mutable_kv() { + + if (_impl_.kv_ == nullptr) { + auto* p = CreateMaybeMessage<::tensorflow::KeyValueEntry>(GetArenaForAllocation()); + _impl_.kv_ = p; + } + return _impl_.kv_; +} +inline ::tensorflow::KeyValueEntry* InsertKeyValueRequest::mutable_kv() { + ::tensorflow::KeyValueEntry* _msg = _internal_mutable_kv(); + // 
@@protoc_insertion_point(field_mutable:tensorflow.InsertKeyValueRequest.kv) + return _msg; +} +inline void InsertKeyValueRequest::set_allocated_kv(::tensorflow::KeyValueEntry* kv) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.kv_; + } + if (kv) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(kv); + if (message_arena != submessage_arena) { + kv = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, kv, submessage_arena); + } + + } else { + + } + _impl_.kv_ = kv; + // @@protoc_insertion_point(field_set_allocated:tensorflow.InsertKeyValueRequest.kv) +} + +// ------------------------------------------------------------------- + +// InsertKeyValueResponse + +// ------------------------------------------------------------------- + +// GetKeyValueRequest + +// string key = 1; +inline void GetKeyValueRequest::clear_key() { + _impl_.key_.ClearToEmpty(); +} +inline const std::string& GetKeyValueRequest::key() const { + // @@protoc_insertion_point(field_get:tensorflow.GetKeyValueRequest.key) + return _internal_key(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void GetKeyValueRequest::set_key(ArgT0&& arg0, ArgT... 
args) { + + _impl_.key_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.GetKeyValueRequest.key) +} +inline std::string* GetKeyValueRequest::mutable_key() { + std::string* _s = _internal_mutable_key(); + // @@protoc_insertion_point(field_mutable:tensorflow.GetKeyValueRequest.key) + return _s; +} +inline const std::string& GetKeyValueRequest::_internal_key() const { + return _impl_.key_.Get(); +} +inline void GetKeyValueRequest::_internal_set_key(const std::string& value) { + + _impl_.key_.Set(value, GetArenaForAllocation()); +} +inline std::string* GetKeyValueRequest::_internal_mutable_key() { + + return _impl_.key_.Mutable(GetArenaForAllocation()); +} +inline std::string* GetKeyValueRequest::release_key() { + // @@protoc_insertion_point(field_release:tensorflow.GetKeyValueRequest.key) + return _impl_.key_.Release(); +} +inline void GetKeyValueRequest::set_allocated_key(std::string* key) { + if (key != nullptr) { + + } else { + + } + _impl_.key_.SetAllocated(key, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.key_.IsDefault()) { + _impl_.key_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.GetKeyValueRequest.key) +} + +// ------------------------------------------------------------------- + +// GetKeyValueResponse + +// .tensorflow.KeyValueEntry kv = 1; +inline bool GetKeyValueResponse::_internal_has_kv() const { + return this != internal_default_instance() && _impl_.kv_ != nullptr; +} +inline bool GetKeyValueResponse::has_kv() const { + return _internal_has_kv(); +} +inline void GetKeyValueResponse::clear_kv() { + if (GetArenaForAllocation() == nullptr && _impl_.kv_ != nullptr) { + delete _impl_.kv_; + } + _impl_.kv_ = nullptr; +} +inline const ::tensorflow::KeyValueEntry& GetKeyValueResponse::_internal_kv() const { + const ::tensorflow::KeyValueEntry* p = _impl_.kv_; + 
return p != nullptr ? *p : reinterpret_cast( + ::tensorflow::_KeyValueEntry_default_instance_); +} +inline const ::tensorflow::KeyValueEntry& GetKeyValueResponse::kv() const { + // @@protoc_insertion_point(field_get:tensorflow.GetKeyValueResponse.kv) + return _internal_kv(); +} +inline void GetKeyValueResponse::unsafe_arena_set_allocated_kv( + ::tensorflow::KeyValueEntry* kv) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.kv_); + } + _impl_.kv_ = kv; + if (kv) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.GetKeyValueResponse.kv) +} +inline ::tensorflow::KeyValueEntry* GetKeyValueResponse::release_kv() { + + ::tensorflow::KeyValueEntry* temp = _impl_.kv_; + _impl_.kv_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::KeyValueEntry* GetKeyValueResponse::unsafe_arena_release_kv() { + // @@protoc_insertion_point(field_release:tensorflow.GetKeyValueResponse.kv) + + ::tensorflow::KeyValueEntry* temp = _impl_.kv_; + _impl_.kv_ = nullptr; + return temp; +} +inline ::tensorflow::KeyValueEntry* GetKeyValueResponse::_internal_mutable_kv() { + + if (_impl_.kv_ == nullptr) { + auto* p = CreateMaybeMessage<::tensorflow::KeyValueEntry>(GetArenaForAllocation()); + _impl_.kv_ = p; + } + return _impl_.kv_; +} +inline ::tensorflow::KeyValueEntry* GetKeyValueResponse::mutable_kv() { + ::tensorflow::KeyValueEntry* _msg = _internal_mutable_kv(); + // @@protoc_insertion_point(field_mutable:tensorflow.GetKeyValueResponse.kv) + 
return _msg; +} +inline void GetKeyValueResponse::set_allocated_kv(::tensorflow::KeyValueEntry* kv) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.kv_; + } + if (kv) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(kv); + if (message_arena != submessage_arena) { + kv = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, kv, submessage_arena); + } + + } else { + + } + _impl_.kv_ = kv; + // @@protoc_insertion_point(field_set_allocated:tensorflow.GetKeyValueResponse.kv) +} + +// ------------------------------------------------------------------- + +// TryGetKeyValueRequest + +// string key = 1; +inline void TryGetKeyValueRequest::clear_key() { + _impl_.key_.ClearToEmpty(); +} +inline const std::string& TryGetKeyValueRequest::key() const { + // @@protoc_insertion_point(field_get:tensorflow.TryGetKeyValueRequest.key) + return _internal_key(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void TryGetKeyValueRequest::set_key(ArgT0&& arg0, ArgT... 
args) { + + _impl_.key_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.TryGetKeyValueRequest.key) +} +inline std::string* TryGetKeyValueRequest::mutable_key() { + std::string* _s = _internal_mutable_key(); + // @@protoc_insertion_point(field_mutable:tensorflow.TryGetKeyValueRequest.key) + return _s; +} +inline const std::string& TryGetKeyValueRequest::_internal_key() const { + return _impl_.key_.Get(); +} +inline void TryGetKeyValueRequest::_internal_set_key(const std::string& value) { + + _impl_.key_.Set(value, GetArenaForAllocation()); +} +inline std::string* TryGetKeyValueRequest::_internal_mutable_key() { + + return _impl_.key_.Mutable(GetArenaForAllocation()); +} +inline std::string* TryGetKeyValueRequest::release_key() { + // @@protoc_insertion_point(field_release:tensorflow.TryGetKeyValueRequest.key) + return _impl_.key_.Release(); +} +inline void TryGetKeyValueRequest::set_allocated_key(std::string* key) { + if (key != nullptr) { + + } else { + + } + _impl_.key_.SetAllocated(key, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.key_.IsDefault()) { + _impl_.key_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.TryGetKeyValueRequest.key) +} + +// ------------------------------------------------------------------- + +// TryGetKeyValueResponse + +// .tensorflow.KeyValueEntry kv = 1; +inline bool TryGetKeyValueResponse::_internal_has_kv() const { + return this != internal_default_instance() && _impl_.kv_ != nullptr; +} +inline bool TryGetKeyValueResponse::has_kv() const { + return _internal_has_kv(); +} +inline void TryGetKeyValueResponse::clear_kv() { + if (GetArenaForAllocation() == nullptr && _impl_.kv_ != nullptr) { + delete _impl_.kv_; + } + _impl_.kv_ = nullptr; +} +inline const ::tensorflow::KeyValueEntry& TryGetKeyValueResponse::_internal_kv() const { + const 
::tensorflow::KeyValueEntry* p = _impl_.kv_; + return p != nullptr ? *p : reinterpret_cast( + ::tensorflow::_KeyValueEntry_default_instance_); +} +inline const ::tensorflow::KeyValueEntry& TryGetKeyValueResponse::kv() const { + // @@protoc_insertion_point(field_get:tensorflow.TryGetKeyValueResponse.kv) + return _internal_kv(); +} +inline void TryGetKeyValueResponse::unsafe_arena_set_allocated_kv( + ::tensorflow::KeyValueEntry* kv) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.kv_); + } + _impl_.kv_ = kv; + if (kv) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.TryGetKeyValueResponse.kv) +} +inline ::tensorflow::KeyValueEntry* TryGetKeyValueResponse::release_kv() { + + ::tensorflow::KeyValueEntry* temp = _impl_.kv_; + _impl_.kv_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::KeyValueEntry* TryGetKeyValueResponse::unsafe_arena_release_kv() { + // @@protoc_insertion_point(field_release:tensorflow.TryGetKeyValueResponse.kv) + + ::tensorflow::KeyValueEntry* temp = _impl_.kv_; + _impl_.kv_ = nullptr; + return temp; +} +inline ::tensorflow::KeyValueEntry* TryGetKeyValueResponse::_internal_mutable_kv() { + + if (_impl_.kv_ == nullptr) { + auto* p = CreateMaybeMessage<::tensorflow::KeyValueEntry>(GetArenaForAllocation()); + _impl_.kv_ = p; + } + return _impl_.kv_; +} +inline ::tensorflow::KeyValueEntry* TryGetKeyValueResponse::mutable_kv() { + ::tensorflow::KeyValueEntry* _msg = _internal_mutable_kv(); + // 
@@protoc_insertion_point(field_mutable:tensorflow.TryGetKeyValueResponse.kv) + return _msg; +} +inline void TryGetKeyValueResponse::set_allocated_kv(::tensorflow::KeyValueEntry* kv) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.kv_; + } + if (kv) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(kv); + if (message_arena != submessage_arena) { + kv = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, kv, submessage_arena); + } + + } else { + + } + _impl_.kv_ = kv; + // @@protoc_insertion_point(field_set_allocated:tensorflow.TryGetKeyValueResponse.kv) +} + +// ------------------------------------------------------------------- + +// GetKeyValueDirRequest + +// string directory_key = 1; +inline void GetKeyValueDirRequest::clear_directory_key() { + _impl_.directory_key_.ClearToEmpty(); +} +inline const std::string& GetKeyValueDirRequest::directory_key() const { + // @@protoc_insertion_point(field_get:tensorflow.GetKeyValueDirRequest.directory_key) + return _internal_directory_key(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void GetKeyValueDirRequest::set_directory_key(ArgT0&& arg0, ArgT... 
args) { + + _impl_.directory_key_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.GetKeyValueDirRequest.directory_key) +} +inline std::string* GetKeyValueDirRequest::mutable_directory_key() { + std::string* _s = _internal_mutable_directory_key(); + // @@protoc_insertion_point(field_mutable:tensorflow.GetKeyValueDirRequest.directory_key) + return _s; +} +inline const std::string& GetKeyValueDirRequest::_internal_directory_key() const { + return _impl_.directory_key_.Get(); +} +inline void GetKeyValueDirRequest::_internal_set_directory_key(const std::string& value) { + + _impl_.directory_key_.Set(value, GetArenaForAllocation()); +} +inline std::string* GetKeyValueDirRequest::_internal_mutable_directory_key() { + + return _impl_.directory_key_.Mutable(GetArenaForAllocation()); +} +inline std::string* GetKeyValueDirRequest::release_directory_key() { + // @@protoc_insertion_point(field_release:tensorflow.GetKeyValueDirRequest.directory_key) + return _impl_.directory_key_.Release(); +} +inline void GetKeyValueDirRequest::set_allocated_directory_key(std::string* directory_key) { + if (directory_key != nullptr) { + + } else { + + } + _impl_.directory_key_.SetAllocated(directory_key, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.directory_key_.IsDefault()) { + _impl_.directory_key_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.GetKeyValueDirRequest.directory_key) +} + +// ------------------------------------------------------------------- + +// GetKeyValueDirResponse + +// string directory_key = 1; +inline void GetKeyValueDirResponse::clear_directory_key() { + _impl_.directory_key_.ClearToEmpty(); +} +inline const std::string& GetKeyValueDirResponse::directory_key() const { + // @@protoc_insertion_point(field_get:tensorflow.GetKeyValueDirResponse.directory_key) + return 
_internal_directory_key(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void GetKeyValueDirResponse::set_directory_key(ArgT0&& arg0, ArgT... args) { + + _impl_.directory_key_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.GetKeyValueDirResponse.directory_key) +} +inline std::string* GetKeyValueDirResponse::mutable_directory_key() { + std::string* _s = _internal_mutable_directory_key(); + // @@protoc_insertion_point(field_mutable:tensorflow.GetKeyValueDirResponse.directory_key) + return _s; +} +inline const std::string& GetKeyValueDirResponse::_internal_directory_key() const { + return _impl_.directory_key_.Get(); +} +inline void GetKeyValueDirResponse::_internal_set_directory_key(const std::string& value) { + + _impl_.directory_key_.Set(value, GetArenaForAllocation()); +} +inline std::string* GetKeyValueDirResponse::_internal_mutable_directory_key() { + + return _impl_.directory_key_.Mutable(GetArenaForAllocation()); +} +inline std::string* GetKeyValueDirResponse::release_directory_key() { + // @@protoc_insertion_point(field_release:tensorflow.GetKeyValueDirResponse.directory_key) + return _impl_.directory_key_.Release(); +} +inline void GetKeyValueDirResponse::set_allocated_directory_key(std::string* directory_key) { + if (directory_key != nullptr) { + + } else { + + } + _impl_.directory_key_.SetAllocated(directory_key, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.directory_key_.IsDefault()) { + _impl_.directory_key_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.GetKeyValueDirResponse.directory_key) +} + +// repeated .tensorflow.KeyValueEntry kv = 2; +inline int GetKeyValueDirResponse::_internal_kv_size() const { + return _impl_.kv_.size(); +} +inline int GetKeyValueDirResponse::kv_size() const { + return _internal_kv_size(); +} +inline void 
GetKeyValueDirResponse::clear_kv() { + _impl_.kv_.Clear(); +} +inline ::tensorflow::KeyValueEntry* GetKeyValueDirResponse::mutable_kv(int index) { + // @@protoc_insertion_point(field_mutable:tensorflow.GetKeyValueDirResponse.kv) + return _impl_.kv_.Mutable(index); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::KeyValueEntry >* +GetKeyValueDirResponse::mutable_kv() { + // @@protoc_insertion_point(field_mutable_list:tensorflow.GetKeyValueDirResponse.kv) + return &_impl_.kv_; +} +inline const ::tensorflow::KeyValueEntry& GetKeyValueDirResponse::_internal_kv(int index) const { + return _impl_.kv_.Get(index); +} +inline const ::tensorflow::KeyValueEntry& GetKeyValueDirResponse::kv(int index) const { + // @@protoc_insertion_point(field_get:tensorflow.GetKeyValueDirResponse.kv) + return _internal_kv(index); +} +inline ::tensorflow::KeyValueEntry* GetKeyValueDirResponse::_internal_add_kv() { + return _impl_.kv_.Add(); +} +inline ::tensorflow::KeyValueEntry* GetKeyValueDirResponse::add_kv() { + ::tensorflow::KeyValueEntry* _add = _internal_add_kv(); + // @@protoc_insertion_point(field_add:tensorflow.GetKeyValueDirResponse.kv) + return _add; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::KeyValueEntry >& +GetKeyValueDirResponse::kv() const { + // @@protoc_insertion_point(field_list:tensorflow.GetKeyValueDirResponse.kv) + return _impl_.kv_; +} + +// ------------------------------------------------------------------- + +// DeleteKeyValueRequest + +// string key = 1; +inline void DeleteKeyValueRequest::clear_key() { + _impl_.key_.ClearToEmpty(); +} +inline const std::string& DeleteKeyValueRequest::key() const { + // @@protoc_insertion_point(field_get:tensorflow.DeleteKeyValueRequest.key) + return _internal_key(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void DeleteKeyValueRequest::set_key(ArgT0&& arg0, ArgT... 
args) { + + _impl_.key_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.DeleteKeyValueRequest.key) +} +inline std::string* DeleteKeyValueRequest::mutable_key() { + std::string* _s = _internal_mutable_key(); + // @@protoc_insertion_point(field_mutable:tensorflow.DeleteKeyValueRequest.key) + return _s; +} +inline const std::string& DeleteKeyValueRequest::_internal_key() const { + return _impl_.key_.Get(); +} +inline void DeleteKeyValueRequest::_internal_set_key(const std::string& value) { + + _impl_.key_.Set(value, GetArenaForAllocation()); +} +inline std::string* DeleteKeyValueRequest::_internal_mutable_key() { + + return _impl_.key_.Mutable(GetArenaForAllocation()); +} +inline std::string* DeleteKeyValueRequest::release_key() { + // @@protoc_insertion_point(field_release:tensorflow.DeleteKeyValueRequest.key) + return _impl_.key_.Release(); +} +inline void DeleteKeyValueRequest::set_allocated_key(std::string* key) { + if (key != nullptr) { + + } else { + + } + _impl_.key_.SetAllocated(key, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.key_.IsDefault()) { + _impl_.key_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.DeleteKeyValueRequest.key) +} + +// bool is_directory = 2; +inline void DeleteKeyValueRequest::clear_is_directory() { + _impl_.is_directory_ = false; +} +inline bool DeleteKeyValueRequest::_internal_is_directory() const { + return _impl_.is_directory_; +} +inline bool DeleteKeyValueRequest::is_directory() const { + // @@protoc_insertion_point(field_get:tensorflow.DeleteKeyValueRequest.is_directory) + return _internal_is_directory(); +} +inline void DeleteKeyValueRequest::_internal_set_is_directory(bool value) { + + _impl_.is_directory_ = value; +} +inline void DeleteKeyValueRequest::set_is_directory(bool value) { + _internal_set_is_directory(value); + // 
@@protoc_insertion_point(field_set:tensorflow.DeleteKeyValueRequest.is_directory) +} + +// ------------------------------------------------------------------- + +// DeleteKeyValueResponse + +// ------------------------------------------------------------------- + +// BarrierRequest + +// string barrier_id = 1; +inline void BarrierRequest::clear_barrier_id() { + _impl_.barrier_id_.ClearToEmpty(); +} +inline const std::string& BarrierRequest::barrier_id() const { + // @@protoc_insertion_point(field_get:tensorflow.BarrierRequest.barrier_id) + return _internal_barrier_id(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void BarrierRequest::set_barrier_id(ArgT0&& arg0, ArgT... args) { + + _impl_.barrier_id_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.BarrierRequest.barrier_id) +} +inline std::string* BarrierRequest::mutable_barrier_id() { + std::string* _s = _internal_mutable_barrier_id(); + // @@protoc_insertion_point(field_mutable:tensorflow.BarrierRequest.barrier_id) + return _s; +} +inline const std::string& BarrierRequest::_internal_barrier_id() const { + return _impl_.barrier_id_.Get(); +} +inline void BarrierRequest::_internal_set_barrier_id(const std::string& value) { + + _impl_.barrier_id_.Set(value, GetArenaForAllocation()); +} +inline std::string* BarrierRequest::_internal_mutable_barrier_id() { + + return _impl_.barrier_id_.Mutable(GetArenaForAllocation()); +} +inline std::string* BarrierRequest::release_barrier_id() { + // @@protoc_insertion_point(field_release:tensorflow.BarrierRequest.barrier_id) + return _impl_.barrier_id_.Release(); +} +inline void BarrierRequest::set_allocated_barrier_id(std::string* barrier_id) { + if (barrier_id != nullptr) { + + } else { + + } + _impl_.barrier_id_.SetAllocated(barrier_id, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.barrier_id_.IsDefault()) { + _impl_.barrier_id_.Set("", GetArenaForAllocation()); + } +#endif // 
PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.BarrierRequest.barrier_id) +} + +// int64 barrier_timeout_in_ms = 2; +inline void BarrierRequest::clear_barrier_timeout_in_ms() { + _impl_.barrier_timeout_in_ms_ = int64_t{0}; +} +inline int64_t BarrierRequest::_internal_barrier_timeout_in_ms() const { + return _impl_.barrier_timeout_in_ms_; +} +inline int64_t BarrierRequest::barrier_timeout_in_ms() const { + // @@protoc_insertion_point(field_get:tensorflow.BarrierRequest.barrier_timeout_in_ms) + return _internal_barrier_timeout_in_ms(); +} +inline void BarrierRequest::_internal_set_barrier_timeout_in_ms(int64_t value) { + + _impl_.barrier_timeout_in_ms_ = value; +} +inline void BarrierRequest::set_barrier_timeout_in_ms(int64_t value) { + _internal_set_barrier_timeout_in_ms(value); + // @@protoc_insertion_point(field_set:tensorflow.BarrierRequest.barrier_timeout_in_ms) +} + +// repeated .tensorflow.CoordinatedTask tasks = 3; +inline int BarrierRequest::_internal_tasks_size() const { + return _impl_.tasks_.size(); +} +inline int BarrierRequest::tasks_size() const { + return _internal_tasks_size(); +} +inline void BarrierRequest::clear_tasks() { + _impl_.tasks_.Clear(); +} +inline ::tensorflow::CoordinatedTask* BarrierRequest::mutable_tasks(int index) { + // @@protoc_insertion_point(field_mutable:tensorflow.BarrierRequest.tasks) + return _impl_.tasks_.Mutable(index); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::CoordinatedTask >* +BarrierRequest::mutable_tasks() { + // @@protoc_insertion_point(field_mutable_list:tensorflow.BarrierRequest.tasks) + return &_impl_.tasks_; +} +inline const ::tensorflow::CoordinatedTask& BarrierRequest::_internal_tasks(int index) const { + return _impl_.tasks_.Get(index); +} +inline const ::tensorflow::CoordinatedTask& BarrierRequest::tasks(int index) const { + // @@protoc_insertion_point(field_get:tensorflow.BarrierRequest.tasks) + return _internal_tasks(index); +} 
+inline ::tensorflow::CoordinatedTask* BarrierRequest::_internal_add_tasks() { + return _impl_.tasks_.Add(); +} +inline ::tensorflow::CoordinatedTask* BarrierRequest::add_tasks() { + ::tensorflow::CoordinatedTask* _add = _internal_add_tasks(); + // @@protoc_insertion_point(field_add:tensorflow.BarrierRequest.tasks) + return _add; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::CoordinatedTask >& +BarrierRequest::tasks() const { + // @@protoc_insertion_point(field_list:tensorflow.BarrierRequest.tasks) + return _impl_.tasks_; +} + +// .tensorflow.CoordinatedTask source_task = 4; +inline bool BarrierRequest::_internal_has_source_task() const { + return this != internal_default_instance() && _impl_.source_task_ != nullptr; +} +inline bool BarrierRequest::has_source_task() const { + return _internal_has_source_task(); +} +inline void BarrierRequest::clear_source_task() { + if (GetArenaForAllocation() == nullptr && _impl_.source_task_ != nullptr) { + delete _impl_.source_task_; + } + _impl_.source_task_ = nullptr; +} +inline const ::tensorflow::CoordinatedTask& BarrierRequest::_internal_source_task() const { + const ::tensorflow::CoordinatedTask* p = _impl_.source_task_; + return p != nullptr ? 
*p : reinterpret_cast( + ::tensorflow::_CoordinatedTask_default_instance_); +} +inline const ::tensorflow::CoordinatedTask& BarrierRequest::source_task() const { + // @@protoc_insertion_point(field_get:tensorflow.BarrierRequest.source_task) + return _internal_source_task(); +} +inline void BarrierRequest::unsafe_arena_set_allocated_source_task( + ::tensorflow::CoordinatedTask* source_task) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.source_task_); + } + _impl_.source_task_ = source_task; + if (source_task) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.BarrierRequest.source_task) +} +inline ::tensorflow::CoordinatedTask* BarrierRequest::release_source_task() { + + ::tensorflow::CoordinatedTask* temp = _impl_.source_task_; + _impl_.source_task_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::CoordinatedTask* BarrierRequest::unsafe_arena_release_source_task() { + // @@protoc_insertion_point(field_release:tensorflow.BarrierRequest.source_task) + + ::tensorflow::CoordinatedTask* temp = _impl_.source_task_; + _impl_.source_task_ = nullptr; + return temp; +} +inline ::tensorflow::CoordinatedTask* BarrierRequest::_internal_mutable_source_task() { + + if (_impl_.source_task_ == nullptr) { + auto* p = CreateMaybeMessage<::tensorflow::CoordinatedTask>(GetArenaForAllocation()); + _impl_.source_task_ = p; + } + return _impl_.source_task_; +} +inline ::tensorflow::CoordinatedTask* BarrierRequest::mutable_source_task() 
{ + ::tensorflow::CoordinatedTask* _msg = _internal_mutable_source_task(); + // @@protoc_insertion_point(field_mutable:tensorflow.BarrierRequest.source_task) + return _msg; +} +inline void BarrierRequest::set_allocated_source_task(::tensorflow::CoordinatedTask* source_task) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.source_task_; + } + if (source_task) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(source_task); + if (message_arena != submessage_arena) { + source_task = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, source_task, submessage_arena); + } + + } else { + + } + _impl_.source_task_ = source_task; + // @@protoc_insertion_point(field_set_allocated:tensorflow.BarrierRequest.source_task) +} + +// ------------------------------------------------------------------- + +// BarrierResponse + +// ------------------------------------------------------------------- + +// CancelBarrierRequest + +// string barrier_id = 1; +inline void CancelBarrierRequest::clear_barrier_id() { + _impl_.barrier_id_.ClearToEmpty(); +} +inline const std::string& CancelBarrierRequest::barrier_id() const { + // @@protoc_insertion_point(field_get:tensorflow.CancelBarrierRequest.barrier_id) + return _internal_barrier_id(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void CancelBarrierRequest::set_barrier_id(ArgT0&& arg0, ArgT... 
args) { + + _impl_.barrier_id_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.CancelBarrierRequest.barrier_id) +} +inline std::string* CancelBarrierRequest::mutable_barrier_id() { + std::string* _s = _internal_mutable_barrier_id(); + // @@protoc_insertion_point(field_mutable:tensorflow.CancelBarrierRequest.barrier_id) + return _s; +} +inline const std::string& CancelBarrierRequest::_internal_barrier_id() const { + return _impl_.barrier_id_.Get(); +} +inline void CancelBarrierRequest::_internal_set_barrier_id(const std::string& value) { + + _impl_.barrier_id_.Set(value, GetArenaForAllocation()); +} +inline std::string* CancelBarrierRequest::_internal_mutable_barrier_id() { + + return _impl_.barrier_id_.Mutable(GetArenaForAllocation()); +} +inline std::string* CancelBarrierRequest::release_barrier_id() { + // @@protoc_insertion_point(field_release:tensorflow.CancelBarrierRequest.barrier_id) + return _impl_.barrier_id_.Release(); +} +inline void CancelBarrierRequest::set_allocated_barrier_id(std::string* barrier_id) { + if (barrier_id != nullptr) { + + } else { + + } + _impl_.barrier_id_.SetAllocated(barrier_id, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.barrier_id_.IsDefault()) { + _impl_.barrier_id_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.CancelBarrierRequest.barrier_id) +} + +// .tensorflow.CoordinatedTask source_task = 2; +inline bool CancelBarrierRequest::_internal_has_source_task() const { + return this != internal_default_instance() && _impl_.source_task_ != nullptr; +} +inline bool CancelBarrierRequest::has_source_task() const { + return _internal_has_source_task(); +} +inline void CancelBarrierRequest::clear_source_task() { + if (GetArenaForAllocation() == nullptr && _impl_.source_task_ != nullptr) { + delete _impl_.source_task_; + } + _impl_.source_task_ = 
nullptr; +} +inline const ::tensorflow::CoordinatedTask& CancelBarrierRequest::_internal_source_task() const { + const ::tensorflow::CoordinatedTask* p = _impl_.source_task_; + return p != nullptr ? *p : reinterpret_cast( + ::tensorflow::_CoordinatedTask_default_instance_); +} +inline const ::tensorflow::CoordinatedTask& CancelBarrierRequest::source_task() const { + // @@protoc_insertion_point(field_get:tensorflow.CancelBarrierRequest.source_task) + return _internal_source_task(); +} +inline void CancelBarrierRequest::unsafe_arena_set_allocated_source_task( + ::tensorflow::CoordinatedTask* source_task) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.source_task_); + } + _impl_.source_task_ = source_task; + if (source_task) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.CancelBarrierRequest.source_task) +} +inline ::tensorflow::CoordinatedTask* CancelBarrierRequest::release_source_task() { + + ::tensorflow::CoordinatedTask* temp = _impl_.source_task_; + _impl_.source_task_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::CoordinatedTask* CancelBarrierRequest::unsafe_arena_release_source_task() { + // @@protoc_insertion_point(field_release:tensorflow.CancelBarrierRequest.source_task) + + ::tensorflow::CoordinatedTask* temp = _impl_.source_task_; + _impl_.source_task_ = nullptr; + return temp; +} +inline ::tensorflow::CoordinatedTask* CancelBarrierRequest::_internal_mutable_source_task() { + + if 
(_impl_.source_task_ == nullptr) { + auto* p = CreateMaybeMessage<::tensorflow::CoordinatedTask>(GetArenaForAllocation()); + _impl_.source_task_ = p; + } + return _impl_.source_task_; +} +inline ::tensorflow::CoordinatedTask* CancelBarrierRequest::mutable_source_task() { + ::tensorflow::CoordinatedTask* _msg = _internal_mutable_source_task(); + // @@protoc_insertion_point(field_mutable:tensorflow.CancelBarrierRequest.source_task) + return _msg; +} +inline void CancelBarrierRequest::set_allocated_source_task(::tensorflow::CoordinatedTask* source_task) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.source_task_; + } + if (source_task) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(source_task); + if (message_arena != submessage_arena) { + source_task = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, source_task, submessage_arena); + } + + } else { + + } + _impl_.source_task_ = source_task; + // @@protoc_insertion_point(field_set_allocated:tensorflow.CancelBarrierRequest.source_task) +} + +// ------------------------------------------------------------------- + +// CancelBarrierResponse + +#ifdef __GNUC__ + #pragma GCC diagnostic pop +#endif // __GNUC__ +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- 
+ +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + + +// @@protoc_insertion_point(namespace_scope) + +} // namespace tensorflow + +PROTOBUF_NAMESPACE_OPEN + +template <> struct is_proto_enum< 
::tensorflow::CoordinatedTaskState> : ::std::true_type {}; +template <> +inline const EnumDescriptor* GetEnumDescriptor< ::tensorflow::CoordinatedTaskState>() { + return ::tensorflow::CoordinatedTaskState_descriptor(); +} + +PROTOBUF_NAMESPACE_CLOSE + +// @@protoc_insertion_point(global_scope) + +#include +#endif // GOOGLE_PROTOBUF_INCLUDED_GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2fcoordination_5fservice_2eproto diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/coordination_service_mock.grpc.pb.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/coordination_service_mock.grpc.pb.h new file mode 100644 index 0000000000000000000000000000000000000000..0bfc1d231a38a5bfb54472403eb96f5e6fc21ec2 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/coordination_service_mock.grpc.pb.h @@ -0,0 +1,68 @@ +// Generated by the gRPC C++ plugin. +// If you make any local change, they will be lost. +// source: tsl/protobuf/coordination_service.proto + +#include "tsl/protobuf/coordination_service.pb.h" +#include "tsl/protobuf/coordination_service.grpc.pb.h" + +#include +#include +#include +namespace tensorflow { + + +namespace grpc { + +class MockCoordinationServiceStub : public CoordinationService::StubInterface { + public: + MOCK_METHOD3(RegisterTask, ::grpc::Status(::grpc::ClientContext* context, const ::tensorflow::RegisterTaskRequest& request, ::tensorflow::RegisterTaskResponse* response)); + MOCK_METHOD3(AsyncRegisterTaskRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::RegisterTaskResponse>*(::grpc::ClientContext* context, const ::tensorflow::RegisterTaskRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(PrepareAsyncRegisterTaskRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::RegisterTaskResponse>*(::grpc::ClientContext* context, const ::tensorflow::RegisterTaskRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(Heartbeat, 
::grpc::Status(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest& request, ::tensorflow::HeartbeatResponse* response)); + MOCK_METHOD3(AsyncHeartbeatRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::HeartbeatResponse>*(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(PrepareAsyncHeartbeatRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::HeartbeatResponse>*(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(WaitForAllTasks, ::grpc::Status(::grpc::ClientContext* context, const ::tensorflow::WaitForAllTasksRequest& request, ::tensorflow::WaitForAllTasksResponse* response)); + MOCK_METHOD3(AsyncWaitForAllTasksRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::WaitForAllTasksResponse>*(::grpc::ClientContext* context, const ::tensorflow::WaitForAllTasksRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(PrepareAsyncWaitForAllTasksRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::WaitForAllTasksResponse>*(::grpc::ClientContext* context, const ::tensorflow::WaitForAllTasksRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(ShutdownTask, ::grpc::Status(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest& request, ::tensorflow::ShutdownTaskResponse* response)); + MOCK_METHOD3(AsyncShutdownTaskRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ShutdownTaskResponse>*(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(PrepareAsyncShutdownTaskRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ShutdownTaskResponse>*(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(ResetTask, ::grpc::Status(::grpc::ClientContext* context, const 
::tensorflow::ResetTaskRequest& request, ::tensorflow::ResetTaskResponse* response)); + MOCK_METHOD3(AsyncResetTaskRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ResetTaskResponse>*(::grpc::ClientContext* context, const ::tensorflow::ResetTaskRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(PrepareAsyncResetTaskRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ResetTaskResponse>*(::grpc::ClientContext* context, const ::tensorflow::ResetTaskRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(ReportErrorToTask, ::grpc::Status(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToTaskRequest& request, ::tensorflow::ReportErrorToTaskResponse* response)); + MOCK_METHOD3(AsyncReportErrorToTaskRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ReportErrorToTaskResponse>*(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToTaskRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(PrepareAsyncReportErrorToTaskRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ReportErrorToTaskResponse>*(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToTaskRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(ReportErrorToService, ::grpc::Status(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToServiceRequest& request, ::tensorflow::ReportErrorToServiceResponse* response)); + MOCK_METHOD3(AsyncReportErrorToServiceRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ReportErrorToServiceResponse>*(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToServiceRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(PrepareAsyncReportErrorToServiceRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ReportErrorToServiceResponse>*(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToServiceRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(GetTaskState, 
::grpc::Status(::grpc::ClientContext* context, const ::tensorflow::GetTaskStateRequest& request, ::tensorflow::GetTaskStateResponse* response)); + MOCK_METHOD3(AsyncGetTaskStateRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetTaskStateResponse>*(::grpc::ClientContext* context, const ::tensorflow::GetTaskStateRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(PrepareAsyncGetTaskStateRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetTaskStateResponse>*(::grpc::ClientContext* context, const ::tensorflow::GetTaskStateRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(InsertKeyValue, ::grpc::Status(::grpc::ClientContext* context, const ::tensorflow::InsertKeyValueRequest& request, ::tensorflow::InsertKeyValueResponse* response)); + MOCK_METHOD3(AsyncInsertKeyValueRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::InsertKeyValueResponse>*(::grpc::ClientContext* context, const ::tensorflow::InsertKeyValueRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(PrepareAsyncInsertKeyValueRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::InsertKeyValueResponse>*(::grpc::ClientContext* context, const ::tensorflow::InsertKeyValueRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(GetKeyValue, ::grpc::Status(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest& request, ::tensorflow::GetKeyValueResponse* response)); + MOCK_METHOD3(AsyncGetKeyValueRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetKeyValueResponse>*(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(PrepareAsyncGetKeyValueRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetKeyValueResponse>*(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(TryGetKeyValue, ::grpc::Status(::grpc::ClientContext* context, const 
::tensorflow::TryGetKeyValueRequest& request, ::tensorflow::TryGetKeyValueResponse* response)); + MOCK_METHOD3(AsyncTryGetKeyValueRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::TryGetKeyValueResponse>*(::grpc::ClientContext* context, const ::tensorflow::TryGetKeyValueRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(PrepareAsyncTryGetKeyValueRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::TryGetKeyValueResponse>*(::grpc::ClientContext* context, const ::tensorflow::TryGetKeyValueRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(GetKeyValueDir, ::grpc::Status(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest& request, ::tensorflow::GetKeyValueDirResponse* response)); + MOCK_METHOD3(AsyncGetKeyValueDirRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetKeyValueDirResponse>*(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(PrepareAsyncGetKeyValueDirRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetKeyValueDirResponse>*(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(DeleteKeyValue, ::grpc::Status(::grpc::ClientContext* context, const ::tensorflow::DeleteKeyValueRequest& request, ::tensorflow::DeleteKeyValueResponse* response)); + MOCK_METHOD3(AsyncDeleteKeyValueRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::DeleteKeyValueResponse>*(::grpc::ClientContext* context, const ::tensorflow::DeleteKeyValueRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(PrepareAsyncDeleteKeyValueRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::DeleteKeyValueResponse>*(::grpc::ClientContext* context, const ::tensorflow::DeleteKeyValueRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(Barrier, ::grpc::Status(::grpc::ClientContext* context, const 
::tensorflow::BarrierRequest& request, ::tensorflow::BarrierResponse* response)); + MOCK_METHOD3(AsyncBarrierRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::BarrierResponse>*(::grpc::ClientContext* context, const ::tensorflow::BarrierRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(PrepareAsyncBarrierRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::BarrierResponse>*(::grpc::ClientContext* context, const ::tensorflow::BarrierRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(CancelBarrier, ::grpc::Status(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest& request, ::tensorflow::CancelBarrierResponse* response)); + MOCK_METHOD3(AsyncCancelBarrierRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::CancelBarrierResponse>*(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest& request, ::grpc::CompletionQueue* cq)); + MOCK_METHOD3(PrepareAsyncCancelBarrierRaw, ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::CancelBarrierResponse>*(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest& request, ::grpc::CompletionQueue* cq)); +}; + +} // namespace grpc + +} // namespace tensorflow + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/distributed_runtime_payloads.pb.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/distributed_runtime_payloads.pb.h new file mode 100644 index 0000000000000000000000000000000000000000..1cf5d6b687f07023b1d2d53f887d7a88b9273924 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/distributed_runtime_payloads.pb.h @@ -0,0 +1,569 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: tsl/protobuf/distributed_runtime_payloads.proto + +#ifndef GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2fdistributed_5fruntime_5fpayloads_2eproto +#define GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2fdistributed_5fruntime_5fpayloads_2eproto + +#include +#include + +#include +#if PROTOBUF_VERSION < 3021000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. +#endif +#if 3021009 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#include // IWYU pragma: export +#include +#include +#include +// @@protoc_insertion_point(includes) +#include +#define PROTOBUF_INTERNAL_EXPORT_tsl_2fprotobuf_2fdistributed_5fruntime_5fpayloads_2eproto +PROTOBUF_NAMESPACE_OPEN +namespace internal { +class AnyMetadata; +} // namespace internal +PROTOBUF_NAMESPACE_CLOSE + +// Internal implementation detail -- do not use these members. 
+struct TableStruct_tsl_2fprotobuf_2fdistributed_5fruntime_5fpayloads_2eproto { + static const uint32_t offsets[]; +}; +extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_tsl_2fprotobuf_2fdistributed_5fruntime_5fpayloads_2eproto; +namespace tensorflow { +namespace distributed_runtime { +class GrpcPayloadContainer; +struct GrpcPayloadContainerDefaultTypeInternal; +extern GrpcPayloadContainerDefaultTypeInternal _GrpcPayloadContainer_default_instance_; +class GrpcPayloadContainer_PayloadsEntry_DoNotUse; +struct GrpcPayloadContainer_PayloadsEntry_DoNotUseDefaultTypeInternal; +extern GrpcPayloadContainer_PayloadsEntry_DoNotUseDefaultTypeInternal _GrpcPayloadContainer_PayloadsEntry_DoNotUse_default_instance_; +class GrpcPayloadsLost; +struct GrpcPayloadsLostDefaultTypeInternal; +extern GrpcPayloadsLostDefaultTypeInternal _GrpcPayloadsLost_default_instance_; +class WorkerPossiblyRestarted; +struct WorkerPossiblyRestartedDefaultTypeInternal; +extern WorkerPossiblyRestartedDefaultTypeInternal _WorkerPossiblyRestarted_default_instance_; +} // namespace distributed_runtime +} // namespace tensorflow +PROTOBUF_NAMESPACE_OPEN +template<> ::tensorflow::distributed_runtime::GrpcPayloadContainer* Arena::CreateMaybeMessage<::tensorflow::distributed_runtime::GrpcPayloadContainer>(Arena*); +template<> ::tensorflow::distributed_runtime::GrpcPayloadContainer_PayloadsEntry_DoNotUse* Arena::CreateMaybeMessage<::tensorflow::distributed_runtime::GrpcPayloadContainer_PayloadsEntry_DoNotUse>(Arena*); +template<> ::tensorflow::distributed_runtime::GrpcPayloadsLost* Arena::CreateMaybeMessage<::tensorflow::distributed_runtime::GrpcPayloadsLost>(Arena*); +template<> ::tensorflow::distributed_runtime::WorkerPossiblyRestarted* Arena::CreateMaybeMessage<::tensorflow::distributed_runtime::WorkerPossiblyRestarted>(Arena*); +PROTOBUF_NAMESPACE_CLOSE +namespace tensorflow { +namespace distributed_runtime { + +// 
=================================================================== + +class GrpcPayloadContainer_PayloadsEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntry { +public: + typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntry SuperType; + GrpcPayloadContainer_PayloadsEntry_DoNotUse(); + explicit PROTOBUF_CONSTEXPR GrpcPayloadContainer_PayloadsEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + explicit GrpcPayloadContainer_PayloadsEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void MergeFrom(const GrpcPayloadContainer_PayloadsEntry_DoNotUse& other); + static const GrpcPayloadContainer_PayloadsEntry_DoNotUse* internal_default_instance() { return reinterpret_cast(&_GrpcPayloadContainer_PayloadsEntry_DoNotUse_default_instance_); } + static bool ValidateKey(std::string* s) { + return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "tensorflow.distributed_runtime.GrpcPayloadContainer.PayloadsEntry.key"); + } + static bool ValidateValue(void*) { return true; } + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + friend struct ::TableStruct_tsl_2fprotobuf_2fdistributed_5fruntime_5fpayloads_2eproto; +}; + +// ------------------------------------------------------------------- + +class GrpcPayloadContainer final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.distributed_runtime.GrpcPayloadContainer) */ { + public: + inline GrpcPayloadContainer() : GrpcPayloadContainer(nullptr) {} + ~GrpcPayloadContainer() override; + explicit PROTOBUF_CONSTEXPR GrpcPayloadContainer(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + GrpcPayloadContainer(const GrpcPayloadContainer& from); + GrpcPayloadContainer(GrpcPayloadContainer&& from) noexcept + : GrpcPayloadContainer() { + *this = ::std::move(from); + } 
+ + inline GrpcPayloadContainer& operator=(const GrpcPayloadContainer& from) { + CopyFrom(from); + return *this; + } + inline GrpcPayloadContainer& operator=(GrpcPayloadContainer&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const GrpcPayloadContainer& default_instance() { + return *internal_default_instance(); + } + static inline const GrpcPayloadContainer* internal_default_instance() { + return reinterpret_cast( + &_GrpcPayloadContainer_default_instance_); + } + static constexpr int kIndexInFileMessages = + 1; + + friend void swap(GrpcPayloadContainer& a, GrpcPayloadContainer& b) { + a.Swap(&b); + } + inline void Swap(GrpcPayloadContainer* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(GrpcPayloadContainer* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + GrpcPayloadContainer* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { 
+ return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const GrpcPayloadContainer& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const GrpcPayloadContainer& from) { + GrpcPayloadContainer::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(GrpcPayloadContainer* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.distributed_runtime.GrpcPayloadContainer"; + } + protected: + explicit GrpcPayloadContainer(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + + // accessors ------------------------------------------------------- + + enum : int { + kPayloadsFieldNumber = 1, + }; + // map payloads = 1; + int payloads_size() const; + private: + int _internal_payloads_size() const; + public: + void clear_payloads(); + private: 
+ const ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >& + _internal_payloads() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >* + _internal_mutable_payloads(); + public: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >& + payloads() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >* + mutable_payloads(); + + // @@protoc_insertion_point(class_scope:tensorflow.distributed_runtime.GrpcPayloadContainer) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::MapField< + GrpcPayloadContainer_PayloadsEntry_DoNotUse, + std::string, std::string, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BYTES> payloads_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fdistributed_5fruntime_5fpayloads_2eproto; +}; +// ------------------------------------------------------------------- + +class GrpcPayloadsLost final : + public ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase /* @@protoc_insertion_point(class_definition:tensorflow.distributed_runtime.GrpcPayloadsLost) */ { + public: + inline GrpcPayloadsLost() : GrpcPayloadsLost(nullptr) {} + explicit PROTOBUF_CONSTEXPR GrpcPayloadsLost(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + GrpcPayloadsLost(const GrpcPayloadsLost& from); + GrpcPayloadsLost(GrpcPayloadsLost&& from) noexcept + : GrpcPayloadsLost() { + *this = ::std::move(from); + } + + inline GrpcPayloadsLost& operator=(const GrpcPayloadsLost& from) { + CopyFrom(from); + return *this; + } + inline GrpcPayloadsLost& operator=(GrpcPayloadsLost&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef 
PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const GrpcPayloadsLost& default_instance() { + return *internal_default_instance(); + } + static inline const GrpcPayloadsLost* internal_default_instance() { + return reinterpret_cast( + &_GrpcPayloadsLost_default_instance_); + } + static constexpr int kIndexInFileMessages = + 2; + + friend void swap(GrpcPayloadsLost& a, GrpcPayloadsLost& b) { + a.Swap(&b); + } + inline void Swap(GrpcPayloadsLost* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(GrpcPayloadsLost* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + GrpcPayloadsLost* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyFrom; + inline void CopyFrom(const GrpcPayloadsLost& from) { + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyImpl(*this, from); + } + using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeFrom; + void 
MergeFrom(const GrpcPayloadsLost& from) { + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeImpl(*this, from); + } + public: + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.distributed_runtime.GrpcPayloadsLost"; + } + protected: + explicit GrpcPayloadsLost(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // @@protoc_insertion_point(class_scope:tensorflow.distributed_runtime.GrpcPayloadsLost) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + }; + friend struct ::TableStruct_tsl_2fprotobuf_2fdistributed_5fruntime_5fpayloads_2eproto; +}; +// ------------------------------------------------------------------- + +class WorkerPossiblyRestarted final : + public ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase /* @@protoc_insertion_point(class_definition:tensorflow.distributed_runtime.WorkerPossiblyRestarted) */ { + public: + inline WorkerPossiblyRestarted() : WorkerPossiblyRestarted(nullptr) {} + explicit PROTOBUF_CONSTEXPR WorkerPossiblyRestarted(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + WorkerPossiblyRestarted(const WorkerPossiblyRestarted& from); + WorkerPossiblyRestarted(WorkerPossiblyRestarted&& from) noexcept + : WorkerPossiblyRestarted() { + *this = ::std::move(from); + } + + inline WorkerPossiblyRestarted& operator=(const WorkerPossiblyRestarted& from) { + CopyFrom(from); + return *this; + } + inline 
WorkerPossiblyRestarted& operator=(WorkerPossiblyRestarted&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const WorkerPossiblyRestarted& default_instance() { + return *internal_default_instance(); + } + static inline const WorkerPossiblyRestarted* internal_default_instance() { + return reinterpret_cast( + &_WorkerPossiblyRestarted_default_instance_); + } + static constexpr int kIndexInFileMessages = + 3; + + friend void swap(WorkerPossiblyRestarted& a, WorkerPossiblyRestarted& b) { + a.Swap(&b); + } + inline void Swap(WorkerPossiblyRestarted* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(WorkerPossiblyRestarted* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + WorkerPossiblyRestarted* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using 
::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyFrom; + inline void CopyFrom(const WorkerPossiblyRestarted& from) { + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyImpl(*this, from); + } + using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeFrom; + void MergeFrom(const WorkerPossiblyRestarted& from) { + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeImpl(*this, from); + } + public: + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.distributed_runtime.WorkerPossiblyRestarted"; + } + protected: + explicit WorkerPossiblyRestarted(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // @@protoc_insertion_point(class_scope:tensorflow.distributed_runtime.WorkerPossiblyRestarted) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + }; + friend struct ::TableStruct_tsl_2fprotobuf_2fdistributed_5fruntime_5fpayloads_2eproto; +}; +// =================================================================== + + +// =================================================================== + +#ifdef __GNUC__ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif // __GNUC__ +// ------------------------------------------------------------------- + +// GrpcPayloadContainer + +// map payloads = 1; +inline int GrpcPayloadContainer::_internal_payloads_size() const { + return _impl_.payloads_.size(); +} 
+inline int GrpcPayloadContainer::payloads_size() const { + return _internal_payloads_size(); +} +inline void GrpcPayloadContainer::clear_payloads() { + _impl_.payloads_.Clear(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >& +GrpcPayloadContainer::_internal_payloads() const { + return _impl_.payloads_.GetMap(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >& +GrpcPayloadContainer::payloads() const { + // @@protoc_insertion_point(field_map:tensorflow.distributed_runtime.GrpcPayloadContainer.payloads) + return _internal_payloads(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >* +GrpcPayloadContainer::_internal_mutable_payloads() { + return _impl_.payloads_.MutableMap(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >* +GrpcPayloadContainer::mutable_payloads() { + // @@protoc_insertion_point(field_mutable_map:tensorflow.distributed_runtime.GrpcPayloadContainer.payloads) + return _internal_mutable_payloads(); +} + +// ------------------------------------------------------------------- + +// GrpcPayloadsLost + +// ------------------------------------------------------------------- + +// WorkerPossiblyRestarted + +#ifdef __GNUC__ + #pragma GCC diagnostic pop +#endif // __GNUC__ +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + + +// @@protoc_insertion_point(namespace_scope) + +} // namespace distributed_runtime +} // namespace tensorflow + +// @@protoc_insertion_point(global_scope) + +#include +#endif // GOOGLE_PROTOBUF_INCLUDED_GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2fdistributed_5fruntime_5fpayloads_2eproto diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/distributed_runtime_payloads.proto 
b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/distributed_runtime_payloads.proto new file mode 100644 index 0000000000000000000000000000000000000000..3a2aecdd2b7d4f97c5ec72ef8c439471763b9be4 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/distributed_runtime_payloads.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package tensorflow.distributed_runtime; + +option cc_enable_arenas = true; +option go_package = "github.com/tsl/tsl/go/core/protobuf/for_core_protos_go_proto"; + +// Used to serialize and transmit tensorflow::Status payloads through +// grpc::Status `error_details` since grpc::Status lacks payload API. +// TODO(b/204231601): Use GRPC API once supported. +message GrpcPayloadContainer { + map payloads = 1; +} + +// If included as a payload, this message flags the Status to have lost payloads +// during the GRPC transmission. +// URI: "type.googleapis.com/tensorflow.distributed_runtime.GrpcPayloadsLost" +message GrpcPayloadsLost {} + +// If included as a payload, this message flags the Status to be a possible +// outcome of a worker restart. +// URI: +// "type.googleapis.com/tensorflow.distributed_runtime.WorkerPossiblyRestarted" +message WorkerPossiblyRestarted {} diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/dnn.pb.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/dnn.pb.h new file mode 100644 index 0000000000000000000000000000000000000000..af6114c6a4c9ceb75365ab5bdb3474dc2d6c2a18 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/dnn.pb.h @@ -0,0 +1,2186 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: tsl/protobuf/dnn.proto + +#ifndef GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2fdnn_2eproto +#define GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2fdnn_2eproto + +#include +#include + +#include +#if PROTOBUF_VERSION < 3021000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. +#endif +#if 3021009 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#include // IWYU pragma: export +#include +#include +#include +#include +#include +// @@protoc_insertion_point(includes) +#include +#define PROTOBUF_INTERNAL_EXPORT_tsl_2fprotobuf_2fdnn_2eproto +PROTOBUF_NAMESPACE_OPEN +namespace internal { +class AnyMetadata; +} // namespace internal +PROTOBUF_NAMESPACE_CLOSE + +// Internal implementation detail -- do not use these members. 
+struct TableStruct_tsl_2fprotobuf_2fdnn_2eproto { + static const uint32_t offsets[]; +}; +extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_tsl_2fprotobuf_2fdnn_2eproto; +namespace stream_executor { +namespace dnn { +class AlgorithmConfigProto; +struct AlgorithmConfigProtoDefaultTypeInternal; +extern AlgorithmConfigProtoDefaultTypeInternal _AlgorithmConfigProto_default_instance_; +class AlgorithmProto; +struct AlgorithmProtoDefaultTypeInternal; +extern AlgorithmProtoDefaultTypeInternal _AlgorithmProto_default_instance_; +class AlgorithmProto_TuningKnobsEntry_DoNotUse; +struct AlgorithmProto_TuningKnobsEntry_DoNotUseDefaultTypeInternal; +extern AlgorithmProto_TuningKnobsEntry_DoNotUseDefaultTypeInternal _AlgorithmProto_TuningKnobsEntry_DoNotUse_default_instance_; +class ConvolutionDescriptorProto; +struct ConvolutionDescriptorProtoDefaultTypeInternal; +extern ConvolutionDescriptorProtoDefaultTypeInternal _ConvolutionDescriptorProto_default_instance_; +class TensorDescriptorProto; +struct TensorDescriptorProtoDefaultTypeInternal; +extern TensorDescriptorProtoDefaultTypeInternal _TensorDescriptorProto_default_instance_; +} // namespace dnn +} // namespace stream_executor +PROTOBUF_NAMESPACE_OPEN +template<> ::stream_executor::dnn::AlgorithmConfigProto* Arena::CreateMaybeMessage<::stream_executor::dnn::AlgorithmConfigProto>(Arena*); +template<> ::stream_executor::dnn::AlgorithmProto* Arena::CreateMaybeMessage<::stream_executor::dnn::AlgorithmProto>(Arena*); +template<> ::stream_executor::dnn::AlgorithmProto_TuningKnobsEntry_DoNotUse* Arena::CreateMaybeMessage<::stream_executor::dnn::AlgorithmProto_TuningKnobsEntry_DoNotUse>(Arena*); +template<> ::stream_executor::dnn::ConvolutionDescriptorProto* Arena::CreateMaybeMessage<::stream_executor::dnn::ConvolutionDescriptorProto>(Arena*); +template<> ::stream_executor::dnn::TensorDescriptorProto* Arena::CreateMaybeMessage<::stream_executor::dnn::TensorDescriptorProto>(Arena*); 
+PROTOBUF_NAMESPACE_CLOSE +namespace stream_executor { +namespace dnn { + +enum AlgorithmProto_MathType : int { + AlgorithmProto_MathType_DEFAULT_MATH = 0, + AlgorithmProto_MathType_TENSOR_OP_MATH = 1, + AlgorithmProto_MathType_AlgorithmProto_MathType_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + AlgorithmProto_MathType_AlgorithmProto_MathType_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() +}; +bool AlgorithmProto_MathType_IsValid(int value); +constexpr AlgorithmProto_MathType AlgorithmProto_MathType_MathType_MIN = AlgorithmProto_MathType_DEFAULT_MATH; +constexpr AlgorithmProto_MathType AlgorithmProto_MathType_MathType_MAX = AlgorithmProto_MathType_TENSOR_OP_MATH; +constexpr int AlgorithmProto_MathType_MathType_ARRAYSIZE = AlgorithmProto_MathType_MathType_MAX + 1; + +const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* AlgorithmProto_MathType_descriptor(); +template +inline const std::string& AlgorithmProto_MathType_Name(T enum_t_value) { + static_assert(::std::is_same::value || + ::std::is_integral::value, + "Incorrect type passed to function AlgorithmProto_MathType_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + AlgorithmProto_MathType_descriptor(), enum_t_value); +} +inline bool AlgorithmProto_MathType_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, AlgorithmProto_MathType* value) { + return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( + AlgorithmProto_MathType_descriptor(), name, value); +} +enum DataType : int { + kFloat = 0, + kDouble = 1, + kHalf = 2, + kInt8 = 3, + kInt32 = 4, + kComplexFloat = 5, + kComplexDouble = 6, + kBF16 = 7, + kF8E5M2 = 8, + kF8E4M3FN = 9, + kF8E5M2FNUZ = 10, + kF8E4M3FNUZ = 11, + kInt64 = 12, + DataType_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + DataType_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() +}; +bool DataType_IsValid(int value); +constexpr DataType DataType_MIN = kFloat; +constexpr DataType DataType_MAX = kInt64; +constexpr int 
DataType_ARRAYSIZE = DataType_MAX + 1; + +const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* DataType_descriptor(); +template +inline const std::string& DataType_Name(T enum_t_value) { + static_assert(::std::is_same::value || + ::std::is_integral::value, + "Incorrect type passed to function DataType_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + DataType_descriptor(), enum_t_value); +} +inline bool DataType_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, DataType* value) { + return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( + DataType_descriptor(), name, value); +} +enum DataLayout : int { + kYXDepthBatch = 0, + kYXBatchDepth = 1, + kBatchYXDepth = 2, + kBatchDepthYX = 3, + kBatchDepthYX4 = 4, + kBatchDepthYX32 = 5, + DataLayout_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + DataLayout_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() +}; +bool DataLayout_IsValid(int value); +constexpr DataLayout DataLayout_MIN = kYXDepthBatch; +constexpr DataLayout DataLayout_MAX = kBatchDepthYX32; +constexpr int DataLayout_ARRAYSIZE = DataLayout_MAX + 1; + +const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* DataLayout_descriptor(); +template +inline const std::string& DataLayout_Name(T enum_t_value) { + static_assert(::std::is_same::value || + ::std::is_integral::value, + "Incorrect type passed to function DataLayout_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + DataLayout_descriptor(), enum_t_value); +} +inline bool DataLayout_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, DataLayout* value) { + return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( + DataLayout_descriptor(), name, value); +} +enum FilterLayout : int { + kOutputInputYX = 0, + kOutputYXInput = 1, + kOutputInputYX4 = 2, + kOutputInputYX32 = 5, + kOutputInputYX32_CudnnReordered = 6, + kInputYXOutput = 3, + kYXInputOutput = 4, + FilterLayout_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + 
FilterLayout_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() +}; +bool FilterLayout_IsValid(int value); +constexpr FilterLayout FilterLayout_MIN = kOutputInputYX; +constexpr FilterLayout FilterLayout_MAX = kOutputInputYX32_CudnnReordered; +constexpr int FilterLayout_ARRAYSIZE = FilterLayout_MAX + 1; + +const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* FilterLayout_descriptor(); +template +inline const std::string& FilterLayout_Name(T enum_t_value) { + static_assert(::std::is_same::value || + ::std::is_integral::value, + "Incorrect type passed to function FilterLayout_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + FilterLayout_descriptor(), enum_t_value); +} +inline bool FilterLayout_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, FilterLayout* value) { + return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( + FilterLayout_descriptor(), name, value); +} +enum ActivationMode : int { + kNone = 0, + kSigmoid = 1, + kRelu = 2, + kRelu6 = 3, + kReluX = 4, + kTanh = 5, + kBandPass = 6, + kElu = 7, + kLeakyRelu = 8, + kGeluExact = 9, + ActivationMode_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + ActivationMode_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() +}; +bool ActivationMode_IsValid(int value); +constexpr ActivationMode ActivationMode_MIN = kNone; +constexpr ActivationMode ActivationMode_MAX = kGeluExact; +constexpr int ActivationMode_ARRAYSIZE = ActivationMode_MAX + 1; + +const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* ActivationMode_descriptor(); +template +inline const std::string& ActivationMode_Name(T enum_t_value) { + static_assert(::std::is_same::value || + ::std::is_integral::value, + "Incorrect type passed to function ActivationMode_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + ActivationMode_descriptor(), enum_t_value); +} +inline bool ActivationMode_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, ActivationMode* value) { + return 
::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( + ActivationMode_descriptor(), name, value); +} +enum ConvolutionMode : int { + CROSS_CORRELATION = 0, + CONVOLUTION = 1, + ConvolutionMode_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + ConvolutionMode_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() +}; +bool ConvolutionMode_IsValid(int value); +constexpr ConvolutionMode ConvolutionMode_MIN = CROSS_CORRELATION; +constexpr ConvolutionMode ConvolutionMode_MAX = CONVOLUTION; +constexpr int ConvolutionMode_ARRAYSIZE = ConvolutionMode_MAX + 1; + +const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* ConvolutionMode_descriptor(); +template +inline const std::string& ConvolutionMode_Name(T enum_t_value) { + static_assert(::std::is_same::value || + ::std::is_integral::value, + "Incorrect type passed to function ConvolutionMode_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + ConvolutionMode_descriptor(), enum_t_value); +} +inline bool ConvolutionMode_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, ConvolutionMode* value) { + return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( + ConvolutionMode_descriptor(), name, value); +} +enum ConvolutionKind : int { + INVALID = 0, + FORWARD = 1, + BACKWARD_FILTER = 2, + BACKWARD_DATA = 3, + FORWARD_BIAS_ACTIVATION = 4, + FORWARD_GRAPH = 5, + ConvolutionKind_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + ConvolutionKind_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() +}; +bool ConvolutionKind_IsValid(int value); +constexpr ConvolutionKind ConvolutionKind_MIN = INVALID; +constexpr ConvolutionKind ConvolutionKind_MAX = FORWARD_GRAPH; +constexpr int ConvolutionKind_ARRAYSIZE = ConvolutionKind_MAX + 1; + +const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* ConvolutionKind_descriptor(); +template +inline const std::string& ConvolutionKind_Name(T enum_t_value) { + static_assert(::std::is_same::value || + ::std::is_integral::value, + "Incorrect type passed to function 
ConvolutionKind_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + ConvolutionKind_descriptor(), enum_t_value); +} +inline bool ConvolutionKind_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, ConvolutionKind* value) { + return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( + ConvolutionKind_descriptor(), name, value); +} +enum FusedMHAKind : int { + BMM1_OUTPUT_UNKNOWN = 0, + BMM1_OUTPUT_INPUT_TYPE = 1, + BMM1_OUTPUT_FLOAT = 2, + FusedMHAKind_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + FusedMHAKind_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() +}; +bool FusedMHAKind_IsValid(int value); +constexpr FusedMHAKind FusedMHAKind_MIN = BMM1_OUTPUT_UNKNOWN; +constexpr FusedMHAKind FusedMHAKind_MAX = BMM1_OUTPUT_FLOAT; +constexpr int FusedMHAKind_ARRAYSIZE = FusedMHAKind_MAX + 1; + +const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* FusedMHAKind_descriptor(); +template +inline const std::string& FusedMHAKind_Name(T enum_t_value) { + static_assert(::std::is_same::value || + ::std::is_integral::value, + "Incorrect type passed to function FusedMHAKind_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + FusedMHAKind_descriptor(), enum_t_value); +} +inline bool FusedMHAKind_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, FusedMHAKind* value) { + return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( + FusedMHAKind_descriptor(), name, value); +} +// =================================================================== + +class TensorDescriptorProto final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:stream_executor.dnn.TensorDescriptorProto) */ { + public: + inline TensorDescriptorProto() : TensorDescriptorProto(nullptr) {} + ~TensorDescriptorProto() override; + explicit PROTOBUF_CONSTEXPR TensorDescriptorProto(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + TensorDescriptorProto(const TensorDescriptorProto& from); + 
TensorDescriptorProto(TensorDescriptorProto&& from) noexcept + : TensorDescriptorProto() { + *this = ::std::move(from); + } + + inline TensorDescriptorProto& operator=(const TensorDescriptorProto& from) { + CopyFrom(from); + return *this; + } + inline TensorDescriptorProto& operator=(TensorDescriptorProto&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const TensorDescriptorProto& default_instance() { + return *internal_default_instance(); + } + enum LayoutOneofCase { + kDataLayout = 3, + kFilterLayout = 4, + LAYOUT_ONEOF_NOT_SET = 0, + }; + + static inline const TensorDescriptorProto* internal_default_instance() { + return reinterpret_cast( + &_TensorDescriptorProto_default_instance_); + } + static constexpr int kIndexInFileMessages = + 0; + + friend void swap(TensorDescriptorProto& a, TensorDescriptorProto& b) { + a.Swap(&b); + } + inline void Swap(TensorDescriptorProto* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(TensorDescriptorProto* other) { + if (other == this) return; + 
GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + TensorDescriptorProto* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const TensorDescriptorProto& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const TensorDescriptorProto& from) { + TensorDescriptorProto::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(TensorDescriptorProto* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "stream_executor.dnn.TensorDescriptorProto"; + } + protected: + explicit TensorDescriptorProto(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- 
+ + enum : int { + kDimensionsFieldNumber = 1, + kDataTypeFieldNumber = 2, + kDataLayoutFieldNumber = 3, + kFilterLayoutFieldNumber = 4, + }; + // repeated int64 dimensions = 1; + int dimensions_size() const; + private: + int _internal_dimensions_size() const; + public: + void clear_dimensions(); + private: + int64_t _internal_dimensions(int index) const; + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >& + _internal_dimensions() const; + void _internal_add_dimensions(int64_t value); + ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >* + _internal_mutable_dimensions(); + public: + int64_t dimensions(int index) const; + void set_dimensions(int index, int64_t value); + void add_dimensions(int64_t value); + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >& + dimensions() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >* + mutable_dimensions(); + + // .stream_executor.dnn.DataType data_type = 2; + void clear_data_type(); + ::stream_executor::dnn::DataType data_type() const; + void set_data_type(::stream_executor::dnn::DataType value); + private: + ::stream_executor::dnn::DataType _internal_data_type() const; + void _internal_set_data_type(::stream_executor::dnn::DataType value); + public: + + // .stream_executor.dnn.DataLayout data_layout = 3; + bool has_data_layout() const; + private: + bool _internal_has_data_layout() const; + public: + void clear_data_layout(); + ::stream_executor::dnn::DataLayout data_layout() const; + void set_data_layout(::stream_executor::dnn::DataLayout value); + private: + ::stream_executor::dnn::DataLayout _internal_data_layout() const; + void _internal_set_data_layout(::stream_executor::dnn::DataLayout value); + public: + + // .stream_executor.dnn.FilterLayout filter_layout = 4; + bool has_filter_layout() const; + private: + bool _internal_has_filter_layout() const; + public: + void clear_filter_layout(); + ::stream_executor::dnn::FilterLayout filter_layout() const; + void 
set_filter_layout(::stream_executor::dnn::FilterLayout value); + private: + ::stream_executor::dnn::FilterLayout _internal_filter_layout() const; + void _internal_set_filter_layout(::stream_executor::dnn::FilterLayout value); + public: + + void clear_layout_oneof(); + LayoutOneofCase layout_oneof_case() const; + // @@protoc_insertion_point(class_scope:stream_executor.dnn.TensorDescriptorProto) + private: + class _Internal; + void set_has_data_layout(); + void set_has_filter_layout(); + + inline bool has_layout_oneof() const; + inline void clear_has_layout_oneof(); + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t > dimensions_; + mutable std::atomic _dimensions_cached_byte_size_; + int data_type_; + union LayoutOneofUnion { + constexpr LayoutOneofUnion() : _constinit_{} {} + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_; + int data_layout_; + int filter_layout_; + } layout_oneof_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + uint32_t _oneof_case_[1]; + + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fdnn_2eproto; +}; +// ------------------------------------------------------------------- + +class AlgorithmProto_TuningKnobsEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntry { +public: + typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntry SuperType; + AlgorithmProto_TuningKnobsEntry_DoNotUse(); + explicit PROTOBUF_CONSTEXPR AlgorithmProto_TuningKnobsEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + explicit AlgorithmProto_TuningKnobsEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void MergeFrom(const AlgorithmProto_TuningKnobsEntry_DoNotUse& other); + static const AlgorithmProto_TuningKnobsEntry_DoNotUse* internal_default_instance() { return 
reinterpret_cast(&_AlgorithmProto_TuningKnobsEntry_DoNotUse_default_instance_); } + static bool ValidateKey(void*) { return true; } + static bool ValidateValue(void*) { return true; } + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + friend struct ::TableStruct_tsl_2fprotobuf_2fdnn_2eproto; +}; + +// ------------------------------------------------------------------- + +class AlgorithmProto final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:stream_executor.dnn.AlgorithmProto) */ { + public: + inline AlgorithmProto() : AlgorithmProto(nullptr) {} + ~AlgorithmProto() override; + explicit PROTOBUF_CONSTEXPR AlgorithmProto(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + AlgorithmProto(const AlgorithmProto& from); + AlgorithmProto(AlgorithmProto&& from) noexcept + : AlgorithmProto() { + *this = ::std::move(from); + } + + inline AlgorithmProto& operator=(const AlgorithmProto& from) { + CopyFrom(from); + return *this; + } + inline AlgorithmProto& operator=(AlgorithmProto&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const AlgorithmProto& default_instance() { + return *internal_default_instance(); + } + static inline const AlgorithmProto* internal_default_instance() { + return reinterpret_cast( + &_AlgorithmProto_default_instance_); + } + static 
constexpr int kIndexInFileMessages = + 2; + + friend void swap(AlgorithmProto& a, AlgorithmProto& b) { + a.Swap(&b); + } + inline void Swap(AlgorithmProto* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(AlgorithmProto* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + AlgorithmProto* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const AlgorithmProto& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const AlgorithmProto& from) { + AlgorithmProto::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(AlgorithmProto* other); + + private: + friend class 
::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "stream_executor.dnn.AlgorithmProto"; + } + protected: + explicit AlgorithmProto(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + + typedef AlgorithmProto_MathType MathType; + static constexpr MathType DEFAULT_MATH = + AlgorithmProto_MathType_DEFAULT_MATH; + static constexpr MathType TENSOR_OP_MATH = + AlgorithmProto_MathType_TENSOR_OP_MATH; + static inline bool MathType_IsValid(int value) { + return AlgorithmProto_MathType_IsValid(value); + } + static constexpr MathType MathType_MIN = + AlgorithmProto_MathType_MathType_MIN; + static constexpr MathType MathType_MAX = + AlgorithmProto_MathType_MathType_MAX; + static constexpr int MathType_ARRAYSIZE = + AlgorithmProto_MathType_MathType_ARRAYSIZE; + static inline const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* + MathType_descriptor() { + return AlgorithmProto_MathType_descriptor(); + } + template + static inline const std::string& MathType_Name(T enum_t_value) { + static_assert(::std::is_same::value || + ::std::is_integral::value, + "Incorrect type passed to function MathType_Name."); + return AlgorithmProto_MathType_Name(enum_t_value); + } + static inline bool MathType_Parse(::PROTOBUF_NAMESPACE_ID::ConstStringParam name, + MathType* value) { + return AlgorithmProto_MathType_Parse(name, value); + } + + // accessors ------------------------------------------------------- + + enum : int { + kTuningKnobsFieldNumber = 4, + kWorkspaceSizeFieldNumber = 6, + kAlgoIdFieldNumber = 1, + kMathTypeFieldNumber = 2, + kIsCudnnFrontendFieldNumber = 5, + }; + // map 
tuning_knobs = 4; + int tuning_knobs_size() const; + private: + int _internal_tuning_knobs_size() const; + public: + void clear_tuning_knobs(); + private: + const ::PROTOBUF_NAMESPACE_ID::Map< int64_t, int64_t >& + _internal_tuning_knobs() const; + ::PROTOBUF_NAMESPACE_ID::Map< int64_t, int64_t >* + _internal_mutable_tuning_knobs(); + public: + const ::PROTOBUF_NAMESPACE_ID::Map< int64_t, int64_t >& + tuning_knobs() const; + ::PROTOBUF_NAMESPACE_ID::Map< int64_t, int64_t >* + mutable_tuning_knobs(); + + // .google.protobuf.UInt64Value workspace_size = 6; + bool has_workspace_size() const; + private: + bool _internal_has_workspace_size() const; + public: + void clear_workspace_size(); + const ::PROTOBUF_NAMESPACE_ID::UInt64Value& workspace_size() const; + PROTOBUF_NODISCARD ::PROTOBUF_NAMESPACE_ID::UInt64Value* release_workspace_size(); + ::PROTOBUF_NAMESPACE_ID::UInt64Value* mutable_workspace_size(); + void set_allocated_workspace_size(::PROTOBUF_NAMESPACE_ID::UInt64Value* workspace_size); + private: + const ::PROTOBUF_NAMESPACE_ID::UInt64Value& _internal_workspace_size() const; + ::PROTOBUF_NAMESPACE_ID::UInt64Value* _internal_mutable_workspace_size(); + public: + void unsafe_arena_set_allocated_workspace_size( + ::PROTOBUF_NAMESPACE_ID::UInt64Value* workspace_size); + ::PROTOBUF_NAMESPACE_ID::UInt64Value* unsafe_arena_release_workspace_size(); + + // int64 algo_id = 1; + void clear_algo_id(); + int64_t algo_id() const; + void set_algo_id(int64_t value); + private: + int64_t _internal_algo_id() const; + void _internal_set_algo_id(int64_t value); + public: + + // .stream_executor.dnn.AlgorithmProto.MathType math_type = 2; + void clear_math_type(); + ::stream_executor::dnn::AlgorithmProto_MathType math_type() const; + void set_math_type(::stream_executor::dnn::AlgorithmProto_MathType value); + private: + ::stream_executor::dnn::AlgorithmProto_MathType _internal_math_type() const; + void _internal_set_math_type(::stream_executor::dnn::AlgorithmProto_MathType value); 
+ public: + + // bool is_cudnn_frontend = 5; + void clear_is_cudnn_frontend(); + bool is_cudnn_frontend() const; + void set_is_cudnn_frontend(bool value); + private: + bool _internal_is_cudnn_frontend() const; + void _internal_set_is_cudnn_frontend(bool value); + public: + + // @@protoc_insertion_point(class_scope:stream_executor.dnn.AlgorithmProto) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::MapField< + AlgorithmProto_TuningKnobsEntry_DoNotUse, + int64_t, int64_t, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64> tuning_knobs_; + ::PROTOBUF_NAMESPACE_ID::UInt64Value* workspace_size_; + int64_t algo_id_; + int math_type_; + bool is_cudnn_frontend_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fdnn_2eproto; +}; +// ------------------------------------------------------------------- + +class AlgorithmConfigProto final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:stream_executor.dnn.AlgorithmConfigProto) */ { + public: + inline AlgorithmConfigProto() : AlgorithmConfigProto(nullptr) {} + ~AlgorithmConfigProto() override; + explicit PROTOBUF_CONSTEXPR AlgorithmConfigProto(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + AlgorithmConfigProto(const AlgorithmConfigProto& from); + AlgorithmConfigProto(AlgorithmConfigProto&& from) noexcept + : AlgorithmConfigProto() { + *this = ::std::move(from); + } + + inline AlgorithmConfigProto& operator=(const AlgorithmConfigProto& from) { + CopyFrom(from); + return *this; + } + inline AlgorithmConfigProto& operator=(AlgorithmConfigProto&& from) noexcept { + if (this == &from) return *this; + if 
(GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const AlgorithmConfigProto& default_instance() { + return *internal_default_instance(); + } + enum OptionalAlgorithmCase { + kAlgorithm = 1, + OPTIONAL_ALGORITHM_NOT_SET = 0, + }; + + enum OptionalAlgorithmNoScratchCase { + kAlgorithmNoScratch = 2, + OPTIONAL_ALGORITHM_NO_SCRATCH_NOT_SET = 0, + }; + + enum OptionalScratchSizeCase { + kScratchSize = 3, + OPTIONAL_SCRATCH_SIZE_NOT_SET = 0, + }; + + static inline const AlgorithmConfigProto* internal_default_instance() { + return reinterpret_cast( + &_AlgorithmConfigProto_default_instance_); + } + static constexpr int kIndexInFileMessages = + 3; + + friend void swap(AlgorithmConfigProto& a, AlgorithmConfigProto& b) { + a.Swap(&b); + } + inline void Swap(AlgorithmConfigProto* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(AlgorithmConfigProto* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + 
AlgorithmConfigProto* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const AlgorithmConfigProto& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const AlgorithmConfigProto& from) { + AlgorithmConfigProto::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(AlgorithmConfigProto* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "stream_executor.dnn.AlgorithmConfigProto"; + } + protected: + explicit AlgorithmConfigProto(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kAlgorithmFieldNumber = 1, + kAlgorithmNoScratchFieldNumber = 2, + kScratchSizeFieldNumber = 3, + }; + // .stream_executor.dnn.AlgorithmProto algorithm 
= 1; + bool has_algorithm() const; + private: + bool _internal_has_algorithm() const; + public: + void clear_algorithm(); + const ::stream_executor::dnn::AlgorithmProto& algorithm() const; + PROTOBUF_NODISCARD ::stream_executor::dnn::AlgorithmProto* release_algorithm(); + ::stream_executor::dnn::AlgorithmProto* mutable_algorithm(); + void set_allocated_algorithm(::stream_executor::dnn::AlgorithmProto* algorithm); + private: + const ::stream_executor::dnn::AlgorithmProto& _internal_algorithm() const; + ::stream_executor::dnn::AlgorithmProto* _internal_mutable_algorithm(); + public: + void unsafe_arena_set_allocated_algorithm( + ::stream_executor::dnn::AlgorithmProto* algorithm); + ::stream_executor::dnn::AlgorithmProto* unsafe_arena_release_algorithm(); + + // .stream_executor.dnn.AlgorithmProto algorithm_no_scratch = 2; + bool has_algorithm_no_scratch() const; + private: + bool _internal_has_algorithm_no_scratch() const; + public: + void clear_algorithm_no_scratch(); + const ::stream_executor::dnn::AlgorithmProto& algorithm_no_scratch() const; + PROTOBUF_NODISCARD ::stream_executor::dnn::AlgorithmProto* release_algorithm_no_scratch(); + ::stream_executor::dnn::AlgorithmProto* mutable_algorithm_no_scratch(); + void set_allocated_algorithm_no_scratch(::stream_executor::dnn::AlgorithmProto* algorithm_no_scratch); + private: + const ::stream_executor::dnn::AlgorithmProto& _internal_algorithm_no_scratch() const; + ::stream_executor::dnn::AlgorithmProto* _internal_mutable_algorithm_no_scratch(); + public: + void unsafe_arena_set_allocated_algorithm_no_scratch( + ::stream_executor::dnn::AlgorithmProto* algorithm_no_scratch); + ::stream_executor::dnn::AlgorithmProto* unsafe_arena_release_algorithm_no_scratch(); + + // int64 scratch_size = 3; + bool has_scratch_size() const; + private: + bool _internal_has_scratch_size() const; + public: + void clear_scratch_size(); + int64_t scratch_size() const; + void set_scratch_size(int64_t value); + private: + int64_t 
_internal_scratch_size() const; + void _internal_set_scratch_size(int64_t value); + public: + + void clear_optional_algorithm(); + OptionalAlgorithmCase optional_algorithm_case() const; + void clear_optional_algorithm_no_scratch(); + OptionalAlgorithmNoScratchCase optional_algorithm_no_scratch_case() const; + void clear_optional_scratch_size(); + OptionalScratchSizeCase optional_scratch_size_case() const; + // @@protoc_insertion_point(class_scope:stream_executor.dnn.AlgorithmConfigProto) + private: + class _Internal; + void set_has_algorithm(); + void set_has_algorithm_no_scratch(); + void set_has_scratch_size(); + + inline bool has_optional_algorithm() const; + inline void clear_has_optional_algorithm(); + + inline bool has_optional_algorithm_no_scratch() const; + inline void clear_has_optional_algorithm_no_scratch(); + + inline bool has_optional_scratch_size() const; + inline void clear_has_optional_scratch_size(); + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + union OptionalAlgorithmUnion { + constexpr OptionalAlgorithmUnion() : _constinit_{} {} + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_; + ::stream_executor::dnn::AlgorithmProto* algorithm_; + } optional_algorithm_; + union OptionalAlgorithmNoScratchUnion { + constexpr OptionalAlgorithmNoScratchUnion() : _constinit_{} {} + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_; + ::stream_executor::dnn::AlgorithmProto* algorithm_no_scratch_; + } optional_algorithm_no_scratch_; + union OptionalScratchSizeUnion { + constexpr OptionalScratchSizeUnion() : _constinit_{} {} + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_; + int64_t scratch_size_; + } optional_scratch_size_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + uint32_t _oneof_case_[3]; + + }; + union { Impl_ _impl_; }; + friend struct 
::TableStruct_tsl_2fprotobuf_2fdnn_2eproto; +}; +// ------------------------------------------------------------------- + +class ConvolutionDescriptorProto final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:stream_executor.dnn.ConvolutionDescriptorProto) */ { + public: + inline ConvolutionDescriptorProto() : ConvolutionDescriptorProto(nullptr) {} + ~ConvolutionDescriptorProto() override; + explicit PROTOBUF_CONSTEXPR ConvolutionDescriptorProto(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + ConvolutionDescriptorProto(const ConvolutionDescriptorProto& from); + ConvolutionDescriptorProto(ConvolutionDescriptorProto&& from) noexcept + : ConvolutionDescriptorProto() { + *this = ::std::move(from); + } + + inline ConvolutionDescriptorProto& operator=(const ConvolutionDescriptorProto& from) { + CopyFrom(from); + return *this; + } + inline ConvolutionDescriptorProto& operator=(ConvolutionDescriptorProto&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const ConvolutionDescriptorProto& default_instance() { + return *internal_default_instance(); + } + static inline const ConvolutionDescriptorProto* internal_default_instance() { + return reinterpret_cast( + &_ConvolutionDescriptorProto_default_instance_); + } + static constexpr int kIndexInFileMessages = + 4; + + friend void swap(ConvolutionDescriptorProto& a, 
ConvolutionDescriptorProto& b) { + a.Swap(&b); + } + inline void Swap(ConvolutionDescriptorProto* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(ConvolutionDescriptorProto* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + ConvolutionDescriptorProto* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const ConvolutionDescriptorProto& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const ConvolutionDescriptorProto& from) { + ConvolutionDescriptorProto::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(ConvolutionDescriptorProto* other); + + private: + friend class 
::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "stream_executor.dnn.ConvolutionDescriptorProto"; + } + protected: + explicit ConvolutionDescriptorProto(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kPaddingsFieldNumber = 1, + kStridesFieldNumber = 2, + kDilationsFieldNumber = 3, + kNameFieldNumber = 7, + kComputeModeFieldNumber = 4, + kGroupCountFieldNumber = 5, + kConvolutionModeFieldNumber = 6, + }; + // repeated int64 paddings = 1; + int paddings_size() const; + private: + int _internal_paddings_size() const; + public: + void clear_paddings(); + private: + int64_t _internal_paddings(int index) const; + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >& + _internal_paddings() const; + void _internal_add_paddings(int64_t value); + ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >* + _internal_mutable_paddings(); + public: + int64_t paddings(int index) const; + void set_paddings(int index, int64_t value); + void add_paddings(int64_t value); + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >& + paddings() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >* + mutable_paddings(); + + // repeated int64 strides = 2; + int strides_size() const; + private: + int _internal_strides_size() const; + public: + void clear_strides(); + private: + int64_t _internal_strides(int index) const; + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >& + _internal_strides() const; + void _internal_add_strides(int64_t value); + ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >* + _internal_mutable_strides(); 
+ public: + int64_t strides(int index) const; + void set_strides(int index, int64_t value); + void add_strides(int64_t value); + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >& + strides() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >* + mutable_strides(); + + // repeated int64 dilations = 3; + int dilations_size() const; + private: + int _internal_dilations_size() const; + public: + void clear_dilations(); + private: + int64_t _internal_dilations(int index) const; + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >& + _internal_dilations() const; + void _internal_add_dilations(int64_t value); + ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >* + _internal_mutable_dilations(); + public: + int64_t dilations(int index) const; + void set_dilations(int index, int64_t value); + void add_dilations(int64_t value); + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >& + dilations() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >* + mutable_dilations(); + + // string name = 7; + void clear_name(); + const std::string& name() const; + template + void set_name(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_name(); + PROTOBUF_NODISCARD std::string* release_name(); + void set_allocated_name(std::string* name); + private: + const std::string& _internal_name() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_name(const std::string& value); + std::string* _internal_mutable_name(); + public: + + // .stream_executor.dnn.DataType compute_mode = 4; + void clear_compute_mode(); + ::stream_executor::dnn::DataType compute_mode() const; + void set_compute_mode(::stream_executor::dnn::DataType value); + private: + ::stream_executor::dnn::DataType _internal_compute_mode() const; + void _internal_set_compute_mode(::stream_executor::dnn::DataType value); + public: + + // int32 group_count = 5; + void clear_group_count(); + int32_t group_count() const; + void set_group_count(int32_t value); + private: + int32_t _internal_group_count() const; + void _internal_set_group_count(int32_t value); + public: + + // .stream_executor.dnn.ConvolutionMode convolution_mode = 6; + void clear_convolution_mode(); + ::stream_executor::dnn::ConvolutionMode convolution_mode() const; + void set_convolution_mode(::stream_executor::dnn::ConvolutionMode value); + private: + ::stream_executor::dnn::ConvolutionMode _internal_convolution_mode() const; + void _internal_set_convolution_mode(::stream_executor::dnn::ConvolutionMode value); + public: + + // @@protoc_insertion_point(class_scope:stream_executor.dnn.ConvolutionDescriptorProto) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t > paddings_; + mutable std::atomic _paddings_cached_byte_size_; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t > strides_; + mutable std::atomic _strides_cached_byte_size_; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t > dilations_; + mutable std::atomic _dilations_cached_byte_size_; + 
    // Remaining Impl_ members of ConvolutionDescriptorProto (struct begins
    // above this chunk). Enum-typed fields are stored as plain int and cast
    // to the enum type in their accessors below.
    ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr name_;
    int compute_mode_;
    int32_t group_count_;
    int convolution_mode_;
    mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
  };
  union { Impl_ _impl_; };
  friend struct ::TableStruct_tsl_2fprotobuf_2fdnn_2eproto;
};
// ===================================================================


// ===================================================================

#ifdef __GNUC__
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif  // __GNUC__
// TensorDescriptorProto
// Inline accessor definitions for the generated TensorDescriptorProto
// message (class declared above this chunk).

// repeated int64 dimensions = 1;
inline int TensorDescriptorProto::_internal_dimensions_size() const {
  return _impl_.dimensions_.size();
}
inline int TensorDescriptorProto::dimensions_size() const {
  return _internal_dimensions_size();
}
inline void TensorDescriptorProto::clear_dimensions() {
  _impl_.dimensions_.Clear();
}
inline int64_t TensorDescriptorProto::_internal_dimensions(int index) const {
  return _impl_.dimensions_.Get(index);
}
inline int64_t TensorDescriptorProto::dimensions(int index) const {
  // @@protoc_insertion_point(field_get:stream_executor.dnn.TensorDescriptorProto.dimensions)
  return _internal_dimensions(index);
}
inline void TensorDescriptorProto::set_dimensions(int index, int64_t value) {
  _impl_.dimensions_.Set(index, value);
  // @@protoc_insertion_point(field_set:stream_executor.dnn.TensorDescriptorProto.dimensions)
}
inline void TensorDescriptorProto::_internal_add_dimensions(int64_t value) {
  _impl_.dimensions_.Add(value);
}
inline void TensorDescriptorProto::add_dimensions(int64_t value) {
  _internal_add_dimensions(value);
  // @@protoc_insertion_point(field_add:stream_executor.dnn.TensorDescriptorProto.dimensions)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >&
TensorDescriptorProto::_internal_dimensions() const {
  return _impl_.dimensions_;
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >&
TensorDescriptorProto::dimensions() const {
  // @@protoc_insertion_point(field_list:stream_executor.dnn.TensorDescriptorProto.dimensions)
  return _internal_dimensions();
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >*
TensorDescriptorProto::_internal_mutable_dimensions() {
  return &_impl_.dimensions_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >*
TensorDescriptorProto::mutable_dimensions() {
  // @@protoc_insertion_point(field_mutable_list:stream_executor.dnn.TensorDescriptorProto.dimensions)
  return _internal_mutable_dimensions();
}

// .stream_executor.dnn.DataType data_type = 2;
inline void TensorDescriptorProto::clear_data_type() {
  _impl_.data_type_ = 0;
}
inline ::stream_executor::dnn::DataType TensorDescriptorProto::_internal_data_type() const {
  return static_cast< ::stream_executor::dnn::DataType >(_impl_.data_type_);
}
inline ::stream_executor::dnn::DataType TensorDescriptorProto::data_type() const {
  // @@protoc_insertion_point(field_get:stream_executor.dnn.TensorDescriptorProto.data_type)
  return _internal_data_type();
}
inline void TensorDescriptorProto::_internal_set_data_type(::stream_executor::dnn::DataType value) {

  _impl_.data_type_ = value;
}
inline void TensorDescriptorProto::set_data_type(::stream_executor::dnn::DataType value) {
  _internal_set_data_type(value);
  // @@protoc_insertion_point(field_set:stream_executor.dnn.TensorDescriptorProto.data_type)
}

// .stream_executor.dnn.DataLayout data_layout = 3;
// data_layout and filter_layout share the `layout_oneof` oneof: setting one
// clears the other, tracked via _impl_._oneof_case_[0].
inline bool TensorDescriptorProto::_internal_has_data_layout() const {
  return layout_oneof_case() == kDataLayout;
}
inline bool TensorDescriptorProto::has_data_layout() const {
  return _internal_has_data_layout();
}
inline void TensorDescriptorProto::set_has_data_layout() {
  _impl_._oneof_case_[0] = kDataLayout;
}
inline void TensorDescriptorProto::clear_data_layout() {
  if (_internal_has_data_layout()) {
    _impl_.layout_oneof_.data_layout_ = 0;
    clear_has_layout_oneof();
  }
}
inline ::stream_executor::dnn::DataLayout TensorDescriptorProto::_internal_data_layout() const {
  if (_internal_has_data_layout()) {
    return static_cast< ::stream_executor::dnn::DataLayout >(_impl_.layout_oneof_.data_layout_);
  }
  // Returns the zero enum value when this oneof member is not set.
  return static_cast< ::stream_executor::dnn::DataLayout >(0);
}
inline ::stream_executor::dnn::DataLayout TensorDescriptorProto::data_layout() const {
  // @@protoc_insertion_point(field_get:stream_executor.dnn.TensorDescriptorProto.data_layout)
  return _internal_data_layout();
}
inline void TensorDescriptorProto::_internal_set_data_layout(::stream_executor::dnn::DataLayout value) {
  if (!_internal_has_data_layout()) {
    clear_layout_oneof();
    set_has_data_layout();
  }
  _impl_.layout_oneof_.data_layout_ = value;
}
inline void TensorDescriptorProto::set_data_layout(::stream_executor::dnn::DataLayout value) {
  _internal_set_data_layout(value);
  // @@protoc_insertion_point(field_set:stream_executor.dnn.TensorDescriptorProto.data_layout)
}

// .stream_executor.dnn.FilterLayout filter_layout = 4;
inline bool TensorDescriptorProto::_internal_has_filter_layout() const {
  return layout_oneof_case() == kFilterLayout;
}
inline bool TensorDescriptorProto::has_filter_layout() const {
  return _internal_has_filter_layout();
}
inline void TensorDescriptorProto::set_has_filter_layout() {
  _impl_._oneof_case_[0] = kFilterLayout;
}
inline void TensorDescriptorProto::clear_filter_layout() {
  if (_internal_has_filter_layout()) {
    _impl_.layout_oneof_.filter_layout_ = 0;
    clear_has_layout_oneof();
  }
}
inline ::stream_executor::dnn::FilterLayout TensorDescriptorProto::_internal_filter_layout() const {
  if (_internal_has_filter_layout()) {
    return static_cast< ::stream_executor::dnn::FilterLayout >(_impl_.layout_oneof_.filter_layout_);
  }
  return static_cast< ::stream_executor::dnn::FilterLayout >(0);
}
inline ::stream_executor::dnn::FilterLayout TensorDescriptorProto::filter_layout() const {
  // @@protoc_insertion_point(field_get:stream_executor.dnn.TensorDescriptorProto.filter_layout)
  return _internal_filter_layout();
}
inline void TensorDescriptorProto::_internal_set_filter_layout(::stream_executor::dnn::FilterLayout value) {
  if (!_internal_has_filter_layout()) {
    clear_layout_oneof();
    set_has_filter_layout();
  }
  _impl_.layout_oneof_.filter_layout_ = value;
}
inline void TensorDescriptorProto::set_filter_layout(::stream_executor::dnn::FilterLayout value) {
  _internal_set_filter_layout(value);
  // @@protoc_insertion_point(field_set:stream_executor.dnn.TensorDescriptorProto.filter_layout)
}

// Oneof case helpers for layout_oneof.
inline bool TensorDescriptorProto::has_layout_oneof() const {
  return layout_oneof_case() != LAYOUT_ONEOF_NOT_SET;
}
inline void TensorDescriptorProto::clear_has_layout_oneof() {
  _impl_._oneof_case_[0] = LAYOUT_ONEOF_NOT_SET;
}
inline TensorDescriptorProto::LayoutOneofCase TensorDescriptorProto::layout_oneof_case() const {
  return TensorDescriptorProto::LayoutOneofCase(_impl_._oneof_case_[0]);
}
// -------------------------------------------------------------------

// -------------------------------------------------------------------

// AlgorithmProto
// Inline accessor definitions for the generated AlgorithmProto message.

// int64 algo_id = 1;
inline void AlgorithmProto::clear_algo_id() {
  _impl_.algo_id_ = int64_t{0};
}
inline int64_t AlgorithmProto::_internal_algo_id() const {
  return _impl_.algo_id_;
}
inline int64_t AlgorithmProto::algo_id() const {
  // @@protoc_insertion_point(field_get:stream_executor.dnn.AlgorithmProto.algo_id)
  return _internal_algo_id();
}
inline void AlgorithmProto::_internal_set_algo_id(int64_t value) {

  _impl_.algo_id_ = value;
}
inline void AlgorithmProto::set_algo_id(int64_t value) {
  _internal_set_algo_id(value);
  // @@protoc_insertion_point(field_set:stream_executor.dnn.AlgorithmProto.algo_id)
}

// .stream_executor.dnn.AlgorithmProto.MathType math_type = 2;
inline void AlgorithmProto::clear_math_type() {
  _impl_.math_type_ = 0;
}
+inline ::stream_executor::dnn::AlgorithmProto_MathType AlgorithmProto::_internal_math_type() const { + return static_cast< ::stream_executor::dnn::AlgorithmProto_MathType >(_impl_.math_type_); +} +inline ::stream_executor::dnn::AlgorithmProto_MathType AlgorithmProto::math_type() const { + // @@protoc_insertion_point(field_get:stream_executor.dnn.AlgorithmProto.math_type) + return _internal_math_type(); +} +inline void AlgorithmProto::_internal_set_math_type(::stream_executor::dnn::AlgorithmProto_MathType value) { + + _impl_.math_type_ = value; +} +inline void AlgorithmProto::set_math_type(::stream_executor::dnn::AlgorithmProto_MathType value) { + _internal_set_math_type(value); + // @@protoc_insertion_point(field_set:stream_executor.dnn.AlgorithmProto.math_type) +} + +// map tuning_knobs = 4; +inline int AlgorithmProto::_internal_tuning_knobs_size() const { + return _impl_.tuning_knobs_.size(); +} +inline int AlgorithmProto::tuning_knobs_size() const { + return _internal_tuning_knobs_size(); +} +inline void AlgorithmProto::clear_tuning_knobs() { + _impl_.tuning_knobs_.Clear(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< int64_t, int64_t >& +AlgorithmProto::_internal_tuning_knobs() const { + return _impl_.tuning_knobs_.GetMap(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< int64_t, int64_t >& +AlgorithmProto::tuning_knobs() const { + // @@protoc_insertion_point(field_map:stream_executor.dnn.AlgorithmProto.tuning_knobs) + return _internal_tuning_knobs(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< int64_t, int64_t >* +AlgorithmProto::_internal_mutable_tuning_knobs() { + return _impl_.tuning_knobs_.MutableMap(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< int64_t, int64_t >* +AlgorithmProto::mutable_tuning_knobs() { + // @@protoc_insertion_point(field_mutable_map:stream_executor.dnn.AlgorithmProto.tuning_knobs) + return _internal_mutable_tuning_knobs(); +} + +// bool is_cudnn_frontend = 5; +inline void AlgorithmProto::clear_is_cudnn_frontend() { + 
_impl_.is_cudnn_frontend_ = false; +} +inline bool AlgorithmProto::_internal_is_cudnn_frontend() const { + return _impl_.is_cudnn_frontend_; +} +inline bool AlgorithmProto::is_cudnn_frontend() const { + // @@protoc_insertion_point(field_get:stream_executor.dnn.AlgorithmProto.is_cudnn_frontend) + return _internal_is_cudnn_frontend(); +} +inline void AlgorithmProto::_internal_set_is_cudnn_frontend(bool value) { + + _impl_.is_cudnn_frontend_ = value; +} +inline void AlgorithmProto::set_is_cudnn_frontend(bool value) { + _internal_set_is_cudnn_frontend(value); + // @@protoc_insertion_point(field_set:stream_executor.dnn.AlgorithmProto.is_cudnn_frontend) +} + +// .google.protobuf.UInt64Value workspace_size = 6; +inline bool AlgorithmProto::_internal_has_workspace_size() const { + return this != internal_default_instance() && _impl_.workspace_size_ != nullptr; +} +inline bool AlgorithmProto::has_workspace_size() const { + return _internal_has_workspace_size(); +} +inline const ::PROTOBUF_NAMESPACE_ID::UInt64Value& AlgorithmProto::_internal_workspace_size() const { + const ::PROTOBUF_NAMESPACE_ID::UInt64Value* p = _impl_.workspace_size_; + return p != nullptr ? 
*p : reinterpret_cast( + ::PROTOBUF_NAMESPACE_ID::_UInt64Value_default_instance_); +} +inline const ::PROTOBUF_NAMESPACE_ID::UInt64Value& AlgorithmProto::workspace_size() const { + // @@protoc_insertion_point(field_get:stream_executor.dnn.AlgorithmProto.workspace_size) + return _internal_workspace_size(); +} +inline void AlgorithmProto::unsafe_arena_set_allocated_workspace_size( + ::PROTOBUF_NAMESPACE_ID::UInt64Value* workspace_size) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.workspace_size_); + } + _impl_.workspace_size_ = workspace_size; + if (workspace_size) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:stream_executor.dnn.AlgorithmProto.workspace_size) +} +inline ::PROTOBUF_NAMESPACE_ID::UInt64Value* AlgorithmProto::release_workspace_size() { + + ::PROTOBUF_NAMESPACE_ID::UInt64Value* temp = _impl_.workspace_size_; + _impl_.workspace_size_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::PROTOBUF_NAMESPACE_ID::UInt64Value* AlgorithmProto::unsafe_arena_release_workspace_size() { + // @@protoc_insertion_point(field_release:stream_executor.dnn.AlgorithmProto.workspace_size) + + ::PROTOBUF_NAMESPACE_ID::UInt64Value* temp = _impl_.workspace_size_; + _impl_.workspace_size_ = nullptr; + return temp; +} +inline ::PROTOBUF_NAMESPACE_ID::UInt64Value* AlgorithmProto::_internal_mutable_workspace_size() { + + if (_impl_.workspace_size_ == nullptr) { + auto* p = 
CreateMaybeMessage<::PROTOBUF_NAMESPACE_ID::UInt64Value>(GetArenaForAllocation()); + _impl_.workspace_size_ = p; + } + return _impl_.workspace_size_; +} +inline ::PROTOBUF_NAMESPACE_ID::UInt64Value* AlgorithmProto::mutable_workspace_size() { + ::PROTOBUF_NAMESPACE_ID::UInt64Value* _msg = _internal_mutable_workspace_size(); + // @@protoc_insertion_point(field_mutable:stream_executor.dnn.AlgorithmProto.workspace_size) + return _msg; +} +inline void AlgorithmProto::set_allocated_workspace_size(::PROTOBUF_NAMESPACE_ID::UInt64Value* workspace_size) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.workspace_size_); + } + if (workspace_size) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena( + reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(workspace_size)); + if (message_arena != submessage_arena) { + workspace_size = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, workspace_size, submessage_arena); + } + + } else { + + } + _impl_.workspace_size_ = workspace_size; + // @@protoc_insertion_point(field_set_allocated:stream_executor.dnn.AlgorithmProto.workspace_size) +} + +// ------------------------------------------------------------------- + +// AlgorithmConfigProto + +// .stream_executor.dnn.AlgorithmProto algorithm = 1; +inline bool AlgorithmConfigProto::_internal_has_algorithm() const { + return optional_algorithm_case() == kAlgorithm; +} +inline bool AlgorithmConfigProto::has_algorithm() const { + return _internal_has_algorithm(); +} +inline void AlgorithmConfigProto::set_has_algorithm() { + _impl_._oneof_case_[0] = kAlgorithm; +} +inline void AlgorithmConfigProto::clear_algorithm() { + if (_internal_has_algorithm()) { + if (GetArenaForAllocation() == nullptr) { + delete _impl_.optional_algorithm_.algorithm_; + } + clear_has_optional_algorithm(); 
+ } +} +inline ::stream_executor::dnn::AlgorithmProto* AlgorithmConfigProto::release_algorithm() { + // @@protoc_insertion_point(field_release:stream_executor.dnn.AlgorithmConfigProto.algorithm) + if (_internal_has_algorithm()) { + clear_has_optional_algorithm(); + ::stream_executor::dnn::AlgorithmProto* temp = _impl_.optional_algorithm_.algorithm_; + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } + _impl_.optional_algorithm_.algorithm_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline const ::stream_executor::dnn::AlgorithmProto& AlgorithmConfigProto::_internal_algorithm() const { + return _internal_has_algorithm() + ? *_impl_.optional_algorithm_.algorithm_ + : reinterpret_cast< ::stream_executor::dnn::AlgorithmProto&>(::stream_executor::dnn::_AlgorithmProto_default_instance_); +} +inline const ::stream_executor::dnn::AlgorithmProto& AlgorithmConfigProto::algorithm() const { + // @@protoc_insertion_point(field_get:stream_executor.dnn.AlgorithmConfigProto.algorithm) + return _internal_algorithm(); +} +inline ::stream_executor::dnn::AlgorithmProto* AlgorithmConfigProto::unsafe_arena_release_algorithm() { + // @@protoc_insertion_point(field_unsafe_arena_release:stream_executor.dnn.AlgorithmConfigProto.algorithm) + if (_internal_has_algorithm()) { + clear_has_optional_algorithm(); + ::stream_executor::dnn::AlgorithmProto* temp = _impl_.optional_algorithm_.algorithm_; + _impl_.optional_algorithm_.algorithm_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline void AlgorithmConfigProto::unsafe_arena_set_allocated_algorithm(::stream_executor::dnn::AlgorithmProto* algorithm) { + clear_optional_algorithm(); + if (algorithm) { + set_has_algorithm(); + _impl_.optional_algorithm_.algorithm_ = algorithm; + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:stream_executor.dnn.AlgorithmConfigProto.algorithm) +} +inline ::stream_executor::dnn::AlgorithmProto* 
AlgorithmConfigProto::_internal_mutable_algorithm() { + if (!_internal_has_algorithm()) { + clear_optional_algorithm(); + set_has_algorithm(); + _impl_.optional_algorithm_.algorithm_ = CreateMaybeMessage< ::stream_executor::dnn::AlgorithmProto >(GetArenaForAllocation()); + } + return _impl_.optional_algorithm_.algorithm_; +} +inline ::stream_executor::dnn::AlgorithmProto* AlgorithmConfigProto::mutable_algorithm() { + ::stream_executor::dnn::AlgorithmProto* _msg = _internal_mutable_algorithm(); + // @@protoc_insertion_point(field_mutable:stream_executor.dnn.AlgorithmConfigProto.algorithm) + return _msg; +} + +// .stream_executor.dnn.AlgorithmProto algorithm_no_scratch = 2; +inline bool AlgorithmConfigProto::_internal_has_algorithm_no_scratch() const { + return optional_algorithm_no_scratch_case() == kAlgorithmNoScratch; +} +inline bool AlgorithmConfigProto::has_algorithm_no_scratch() const { + return _internal_has_algorithm_no_scratch(); +} +inline void AlgorithmConfigProto::set_has_algorithm_no_scratch() { + _impl_._oneof_case_[1] = kAlgorithmNoScratch; +} +inline void AlgorithmConfigProto::clear_algorithm_no_scratch() { + if (_internal_has_algorithm_no_scratch()) { + if (GetArenaForAllocation() == nullptr) { + delete _impl_.optional_algorithm_no_scratch_.algorithm_no_scratch_; + } + clear_has_optional_algorithm_no_scratch(); + } +} +inline ::stream_executor::dnn::AlgorithmProto* AlgorithmConfigProto::release_algorithm_no_scratch() { + // @@protoc_insertion_point(field_release:stream_executor.dnn.AlgorithmConfigProto.algorithm_no_scratch) + if (_internal_has_algorithm_no_scratch()) { + clear_has_optional_algorithm_no_scratch(); + ::stream_executor::dnn::AlgorithmProto* temp = _impl_.optional_algorithm_no_scratch_.algorithm_no_scratch_; + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } + _impl_.optional_algorithm_no_scratch_.algorithm_no_scratch_ = nullptr; + return temp; + } else { + return 
nullptr; + } +} +inline const ::stream_executor::dnn::AlgorithmProto& AlgorithmConfigProto::_internal_algorithm_no_scratch() const { + return _internal_has_algorithm_no_scratch() + ? *_impl_.optional_algorithm_no_scratch_.algorithm_no_scratch_ + : reinterpret_cast< ::stream_executor::dnn::AlgorithmProto&>(::stream_executor::dnn::_AlgorithmProto_default_instance_); +} +inline const ::stream_executor::dnn::AlgorithmProto& AlgorithmConfigProto::algorithm_no_scratch() const { + // @@protoc_insertion_point(field_get:stream_executor.dnn.AlgorithmConfigProto.algorithm_no_scratch) + return _internal_algorithm_no_scratch(); +} +inline ::stream_executor::dnn::AlgorithmProto* AlgorithmConfigProto::unsafe_arena_release_algorithm_no_scratch() { + // @@protoc_insertion_point(field_unsafe_arena_release:stream_executor.dnn.AlgorithmConfigProto.algorithm_no_scratch) + if (_internal_has_algorithm_no_scratch()) { + clear_has_optional_algorithm_no_scratch(); + ::stream_executor::dnn::AlgorithmProto* temp = _impl_.optional_algorithm_no_scratch_.algorithm_no_scratch_; + _impl_.optional_algorithm_no_scratch_.algorithm_no_scratch_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline void AlgorithmConfigProto::unsafe_arena_set_allocated_algorithm_no_scratch(::stream_executor::dnn::AlgorithmProto* algorithm_no_scratch) { + clear_optional_algorithm_no_scratch(); + if (algorithm_no_scratch) { + set_has_algorithm_no_scratch(); + _impl_.optional_algorithm_no_scratch_.algorithm_no_scratch_ = algorithm_no_scratch; + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:stream_executor.dnn.AlgorithmConfigProto.algorithm_no_scratch) +} +inline ::stream_executor::dnn::AlgorithmProto* AlgorithmConfigProto::_internal_mutable_algorithm_no_scratch() { + if (!_internal_has_algorithm_no_scratch()) { + clear_optional_algorithm_no_scratch(); + set_has_algorithm_no_scratch(); + _impl_.optional_algorithm_no_scratch_.algorithm_no_scratch_ = CreateMaybeMessage< 
::stream_executor::dnn::AlgorithmProto >(GetArenaForAllocation()); + } + return _impl_.optional_algorithm_no_scratch_.algorithm_no_scratch_; +} +inline ::stream_executor::dnn::AlgorithmProto* AlgorithmConfigProto::mutable_algorithm_no_scratch() { + ::stream_executor::dnn::AlgorithmProto* _msg = _internal_mutable_algorithm_no_scratch(); + // @@protoc_insertion_point(field_mutable:stream_executor.dnn.AlgorithmConfigProto.algorithm_no_scratch) + return _msg; +} + +// int64 scratch_size = 3; +inline bool AlgorithmConfigProto::_internal_has_scratch_size() const { + return optional_scratch_size_case() == kScratchSize; +} +inline bool AlgorithmConfigProto::has_scratch_size() const { + return _internal_has_scratch_size(); +} +inline void AlgorithmConfigProto::set_has_scratch_size() { + _impl_._oneof_case_[2] = kScratchSize; +} +inline void AlgorithmConfigProto::clear_scratch_size() { + if (_internal_has_scratch_size()) { + _impl_.optional_scratch_size_.scratch_size_ = int64_t{0}; + clear_has_optional_scratch_size(); + } +} +inline int64_t AlgorithmConfigProto::_internal_scratch_size() const { + if (_internal_has_scratch_size()) { + return _impl_.optional_scratch_size_.scratch_size_; + } + return int64_t{0}; +} +inline void AlgorithmConfigProto::_internal_set_scratch_size(int64_t value) { + if (!_internal_has_scratch_size()) { + clear_optional_scratch_size(); + set_has_scratch_size(); + } + _impl_.optional_scratch_size_.scratch_size_ = value; +} +inline int64_t AlgorithmConfigProto::scratch_size() const { + // @@protoc_insertion_point(field_get:stream_executor.dnn.AlgorithmConfigProto.scratch_size) + return _internal_scratch_size(); +} +inline void AlgorithmConfigProto::set_scratch_size(int64_t value) { + _internal_set_scratch_size(value); + // @@protoc_insertion_point(field_set:stream_executor.dnn.AlgorithmConfigProto.scratch_size) +} + +inline bool AlgorithmConfigProto::has_optional_algorithm() const { + return optional_algorithm_case() != OPTIONAL_ALGORITHM_NOT_SET; +} 
// Oneof case helpers for AlgorithmConfigProto's three single-member oneofs
// (indices 0..2 of _impl_._oneof_case_).
inline void AlgorithmConfigProto::clear_has_optional_algorithm() {
  _impl_._oneof_case_[0] = OPTIONAL_ALGORITHM_NOT_SET;
}
inline bool AlgorithmConfigProto::has_optional_algorithm_no_scratch() const {
  return optional_algorithm_no_scratch_case() != OPTIONAL_ALGORITHM_NO_SCRATCH_NOT_SET;
}
inline void AlgorithmConfigProto::clear_has_optional_algorithm_no_scratch() {
  _impl_._oneof_case_[1] = OPTIONAL_ALGORITHM_NO_SCRATCH_NOT_SET;
}
inline bool AlgorithmConfigProto::has_optional_scratch_size() const {
  return optional_scratch_size_case() != OPTIONAL_SCRATCH_SIZE_NOT_SET;
}
inline void AlgorithmConfigProto::clear_has_optional_scratch_size() {
  _impl_._oneof_case_[2] = OPTIONAL_SCRATCH_SIZE_NOT_SET;
}
inline AlgorithmConfigProto::OptionalAlgorithmCase AlgorithmConfigProto::optional_algorithm_case() const {
  return AlgorithmConfigProto::OptionalAlgorithmCase(_impl_._oneof_case_[0]);
}
inline AlgorithmConfigProto::OptionalAlgorithmNoScratchCase AlgorithmConfigProto::optional_algorithm_no_scratch_case() const {
  return AlgorithmConfigProto::OptionalAlgorithmNoScratchCase(_impl_._oneof_case_[1]);
}
inline AlgorithmConfigProto::OptionalScratchSizeCase AlgorithmConfigProto::optional_scratch_size_case() const {
  return AlgorithmConfigProto::OptionalScratchSizeCase(_impl_._oneof_case_[2]);
}
// -------------------------------------------------------------------

// ConvolutionDescriptorProto
// Inline accessor definitions for the generated ConvolutionDescriptorProto
// message (class declared above this chunk).

// repeated int64 paddings = 1;
inline int ConvolutionDescriptorProto::_internal_paddings_size() const {
  return _impl_.paddings_.size();
}
inline int ConvolutionDescriptorProto::paddings_size() const {
  return _internal_paddings_size();
}
inline void ConvolutionDescriptorProto::clear_paddings() {
  _impl_.paddings_.Clear();
}
inline int64_t ConvolutionDescriptorProto::_internal_paddings(int index) const {
  return _impl_.paddings_.Get(index);
}
inline int64_t ConvolutionDescriptorProto::paddings(int index) const {
  // @@protoc_insertion_point(field_get:stream_executor.dnn.ConvolutionDescriptorProto.paddings)
  return _internal_paddings(index);
}
inline void ConvolutionDescriptorProto::set_paddings(int index, int64_t value) {
  _impl_.paddings_.Set(index, value);
  // @@protoc_insertion_point(field_set:stream_executor.dnn.ConvolutionDescriptorProto.paddings)
}
inline void ConvolutionDescriptorProto::_internal_add_paddings(int64_t value) {
  _impl_.paddings_.Add(value);
}
inline void ConvolutionDescriptorProto::add_paddings(int64_t value) {
  _internal_add_paddings(value);
  // @@protoc_insertion_point(field_add:stream_executor.dnn.ConvolutionDescriptorProto.paddings)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >&
ConvolutionDescriptorProto::_internal_paddings() const {
  return _impl_.paddings_;
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >&
ConvolutionDescriptorProto::paddings() const {
  // @@protoc_insertion_point(field_list:stream_executor.dnn.ConvolutionDescriptorProto.paddings)
  return _internal_paddings();
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >*
ConvolutionDescriptorProto::_internal_mutable_paddings() {
  return &_impl_.paddings_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >*
ConvolutionDescriptorProto::mutable_paddings() {
  // @@protoc_insertion_point(field_mutable_list:stream_executor.dnn.ConvolutionDescriptorProto.paddings)
  return _internal_mutable_paddings();
}

// repeated int64 strides = 2;
inline int ConvolutionDescriptorProto::_internal_strides_size() const {
  return _impl_.strides_.size();
}
inline int ConvolutionDescriptorProto::strides_size() const {
  return _internal_strides_size();
}
inline void ConvolutionDescriptorProto::clear_strides() {
  _impl_.strides_.Clear();
}
inline int64_t ConvolutionDescriptorProto::_internal_strides(int index) const {
  return _impl_.strides_.Get(index);
}
inline int64_t ConvolutionDescriptorProto::strides(int index) const {
  // @@protoc_insertion_point(field_get:stream_executor.dnn.ConvolutionDescriptorProto.strides)
  return _internal_strides(index);
}
inline void ConvolutionDescriptorProto::set_strides(int index, int64_t value) {
  _impl_.strides_.Set(index, value);
  // @@protoc_insertion_point(field_set:stream_executor.dnn.ConvolutionDescriptorProto.strides)
}
inline void ConvolutionDescriptorProto::_internal_add_strides(int64_t value) {
  _impl_.strides_.Add(value);
}
inline void ConvolutionDescriptorProto::add_strides(int64_t value) {
  _internal_add_strides(value);
  // @@protoc_insertion_point(field_add:stream_executor.dnn.ConvolutionDescriptorProto.strides)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >&
ConvolutionDescriptorProto::_internal_strides() const {
  return _impl_.strides_;
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >&
ConvolutionDescriptorProto::strides() const {
  // @@protoc_insertion_point(field_list:stream_executor.dnn.ConvolutionDescriptorProto.strides)
  return _internal_strides();
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >*
ConvolutionDescriptorProto::_internal_mutable_strides() {
  return &_impl_.strides_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >*
ConvolutionDescriptorProto::mutable_strides() {
  // @@protoc_insertion_point(field_mutable_list:stream_executor.dnn.ConvolutionDescriptorProto.strides)
  return _internal_mutable_strides();
}

// repeated int64 dilations = 3;
inline int ConvolutionDescriptorProto::_internal_dilations_size() const {
  return _impl_.dilations_.size();
}
inline int ConvolutionDescriptorProto::dilations_size() const {
  return _internal_dilations_size();
}
inline void ConvolutionDescriptorProto::clear_dilations() {
  _impl_.dilations_.Clear();
}
inline int64_t ConvolutionDescriptorProto::_internal_dilations(int index) const {
  return _impl_.dilations_.Get(index);
}
inline int64_t ConvolutionDescriptorProto::dilations(int index) const {
  // @@protoc_insertion_point(field_get:stream_executor.dnn.ConvolutionDescriptorProto.dilations)
  return _internal_dilations(index);
}
inline void ConvolutionDescriptorProto::set_dilations(int index, int64_t value) {
  _impl_.dilations_.Set(index, value);
  // @@protoc_insertion_point(field_set:stream_executor.dnn.ConvolutionDescriptorProto.dilations)
}
inline void ConvolutionDescriptorProto::_internal_add_dilations(int64_t value) {
  _impl_.dilations_.Add(value);
}
inline void ConvolutionDescriptorProto::add_dilations(int64_t value) {
  _internal_add_dilations(value);
  // @@protoc_insertion_point(field_add:stream_executor.dnn.ConvolutionDescriptorProto.dilations)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >&
ConvolutionDescriptorProto::_internal_dilations() const {
  return _impl_.dilations_;
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >&
ConvolutionDescriptorProto::dilations() const {
  // @@protoc_insertion_point(field_list:stream_executor.dnn.ConvolutionDescriptorProto.dilations)
  return _internal_dilations();
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >*
ConvolutionDescriptorProto::_internal_mutable_dilations() {
  return &_impl_.dilations_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< int64_t >*
ConvolutionDescriptorProto::mutable_dilations() {
  // @@protoc_insertion_point(field_mutable_list:stream_executor.dnn.ConvolutionDescriptorProto.dilations)
  return _internal_mutable_dilations();
}

// .stream_executor.dnn.DataType compute_mode = 4;
inline void ConvolutionDescriptorProto::clear_compute_mode() {
  _impl_.compute_mode_ = 0;
}
inline ::stream_executor::dnn::DataType ConvolutionDescriptorProto::_internal_compute_mode() const {
  return static_cast< ::stream_executor::dnn::DataType >(_impl_.compute_mode_);
}
inline ::stream_executor::dnn::DataType ConvolutionDescriptorProto::compute_mode() const {
  // @@protoc_insertion_point(field_get:stream_executor.dnn.ConvolutionDescriptorProto.compute_mode)
  return _internal_compute_mode();
}
inline void ConvolutionDescriptorProto::_internal_set_compute_mode(::stream_executor::dnn::DataType value) {

  _impl_.compute_mode_ = value;
}
inline void ConvolutionDescriptorProto::set_compute_mode(::stream_executor::dnn::DataType value) {
  _internal_set_compute_mode(value);
  // @@protoc_insertion_point(field_set:stream_executor.dnn.ConvolutionDescriptorProto.compute_mode)
}

// int32 group_count = 5;
inline void ConvolutionDescriptorProto::clear_group_count() {
  _impl_.group_count_ = 0;
}
inline int32_t ConvolutionDescriptorProto::_internal_group_count() const {
  return _impl_.group_count_;
}
inline int32_t ConvolutionDescriptorProto::group_count() const {
  // @@protoc_insertion_point(field_get:stream_executor.dnn.ConvolutionDescriptorProto.group_count)
  return _internal_group_count();
}
inline void ConvolutionDescriptorProto::_internal_set_group_count(int32_t value) {

  _impl_.group_count_ = value;
}
inline void ConvolutionDescriptorProto::set_group_count(int32_t value) {
  _internal_set_group_count(value);
  // @@protoc_insertion_point(field_set:stream_executor.dnn.ConvolutionDescriptorProto.group_count)
}

// .stream_executor.dnn.ConvolutionMode convolution_mode = 6;
inline void ConvolutionDescriptorProto::clear_convolution_mode() {
  _impl_.convolution_mode_ = 0;
}
inline ::stream_executor::dnn::ConvolutionMode ConvolutionDescriptorProto::_internal_convolution_mode() const {
  return static_cast< ::stream_executor::dnn::ConvolutionMode >(_impl_.convolution_mode_);
}
inline ::stream_executor::dnn::ConvolutionMode ConvolutionDescriptorProto::convolution_mode() const {
  // @@protoc_insertion_point(field_get:stream_executor.dnn.ConvolutionDescriptorProto.convolution_mode)
  return _internal_convolution_mode();
}
inline void
ConvolutionDescriptorProto::_internal_set_convolution_mode(::stream_executor::dnn::ConvolutionMode value) { + + _impl_.convolution_mode_ = value; +} +inline void ConvolutionDescriptorProto::set_convolution_mode(::stream_executor::dnn::ConvolutionMode value) { + _internal_set_convolution_mode(value); + // @@protoc_insertion_point(field_set:stream_executor.dnn.ConvolutionDescriptorProto.convolution_mode) +} + +// string name = 7; +inline void ConvolutionDescriptorProto::clear_name() { + _impl_.name_.ClearToEmpty(); +} +inline const std::string& ConvolutionDescriptorProto::name() const { + // @@protoc_insertion_point(field_get:stream_executor.dnn.ConvolutionDescriptorProto.name) + return _internal_name(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void ConvolutionDescriptorProto::set_name(ArgT0&& arg0, ArgT... args) { + + _impl_.name_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:stream_executor.dnn.ConvolutionDescriptorProto.name) +} +inline std::string* ConvolutionDescriptorProto::mutable_name() { + std::string* _s = _internal_mutable_name(); + // @@protoc_insertion_point(field_mutable:stream_executor.dnn.ConvolutionDescriptorProto.name) + return _s; +} +inline const std::string& ConvolutionDescriptorProto::_internal_name() const { + return _impl_.name_.Get(); +} +inline void ConvolutionDescriptorProto::_internal_set_name(const std::string& value) { + + _impl_.name_.Set(value, GetArenaForAllocation()); +} +inline std::string* ConvolutionDescriptorProto::_internal_mutable_name() { + + return _impl_.name_.Mutable(GetArenaForAllocation()); +} +inline std::string* ConvolutionDescriptorProto::release_name() { + // @@protoc_insertion_point(field_release:stream_executor.dnn.ConvolutionDescriptorProto.name) + return _impl_.name_.Release(); +} +inline void ConvolutionDescriptorProto::set_allocated_name(std::string* name) { + if (name != nullptr) { + + } else { + + } + _impl_.name_.SetAllocated(name, 
GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.name_.IsDefault()) { + _impl_.name_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:stream_executor.dnn.ConvolutionDescriptorProto.name) +} + +#ifdef __GNUC__ + #pragma GCC diagnostic pop +#endif // __GNUC__ +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + + +// @@protoc_insertion_point(namespace_scope) + +} // namespace dnn +} // namespace stream_executor + +PROTOBUF_NAMESPACE_OPEN + +template <> struct is_proto_enum< ::stream_executor::dnn::AlgorithmProto_MathType> : ::std::true_type {}; +template <> +inline const EnumDescriptor* GetEnumDescriptor< ::stream_executor::dnn::AlgorithmProto_MathType>() { + return ::stream_executor::dnn::AlgorithmProto_MathType_descriptor(); +} +template <> struct is_proto_enum< ::stream_executor::dnn::DataType> : ::std::true_type {}; +template <> +inline const EnumDescriptor* GetEnumDescriptor< ::stream_executor::dnn::DataType>() { + return ::stream_executor::dnn::DataType_descriptor(); +} +template <> struct is_proto_enum< ::stream_executor::dnn::DataLayout> : ::std::true_type {}; +template <> +inline const EnumDescriptor* GetEnumDescriptor< ::stream_executor::dnn::DataLayout>() { + return ::stream_executor::dnn::DataLayout_descriptor(); +} +template <> struct is_proto_enum< ::stream_executor::dnn::FilterLayout> : ::std::true_type {}; +template <> +inline const EnumDescriptor* GetEnumDescriptor< ::stream_executor::dnn::FilterLayout>() { + return ::stream_executor::dnn::FilterLayout_descriptor(); +} +template <> struct is_proto_enum< ::stream_executor::dnn::ActivationMode> : ::std::true_type {}; +template <> +inline const 
EnumDescriptor* GetEnumDescriptor< ::stream_executor::dnn::ActivationMode>() { + return ::stream_executor::dnn::ActivationMode_descriptor(); +} +template <> struct is_proto_enum< ::stream_executor::dnn::ConvolutionMode> : ::std::true_type {}; +template <> +inline const EnumDescriptor* GetEnumDescriptor< ::stream_executor::dnn::ConvolutionMode>() { + return ::stream_executor::dnn::ConvolutionMode_descriptor(); +} +template <> struct is_proto_enum< ::stream_executor::dnn::ConvolutionKind> : ::std::true_type {}; +template <> +inline const EnumDescriptor* GetEnumDescriptor< ::stream_executor::dnn::ConvolutionKind>() { + return ::stream_executor::dnn::ConvolutionKind_descriptor(); +} +template <> struct is_proto_enum< ::stream_executor::dnn::FusedMHAKind> : ::std::true_type {}; +template <> +inline const EnumDescriptor* GetEnumDescriptor< ::stream_executor::dnn::FusedMHAKind>() { + return ::stream_executor::dnn::FusedMHAKind_descriptor(); +} + +PROTOBUF_NAMESPACE_CLOSE + +// @@protoc_insertion_point(global_scope) + +#include +#endif // GOOGLE_PROTOBUF_INCLUDED_GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2fdnn_2eproto diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/error_codes.pb.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/error_codes.pb.h new file mode 100644 index 0000000000000000000000000000000000000000..61afe2b115f89db61395447d2edc7ca535bf9c8a --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/error_codes.pb.h @@ -0,0 +1,126 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tsl/protobuf/error_codes.proto + +#ifndef GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2ferror_5fcodes_2eproto +#define GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2ferror_5fcodes_2eproto + +#include +#include + +#include +#if PROTOBUF_VERSION < 3021000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. 
Please update +#error your headers. +#endif +#if 3021009 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#include +// @@protoc_insertion_point(includes) +#include +#define PROTOBUF_INTERNAL_EXPORT_tsl_2fprotobuf_2ferror_5fcodes_2eproto +PROTOBUF_NAMESPACE_OPEN +namespace internal { +class AnyMetadata; +} // namespace internal +PROTOBUF_NAMESPACE_CLOSE + +// Internal implementation detail -- do not use these members. +struct TableStruct_tsl_2fprotobuf_2ferror_5fcodes_2eproto { + static const uint32_t offsets[]; +}; +extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_tsl_2fprotobuf_2ferror_5fcodes_2eproto; +PROTOBUF_NAMESPACE_OPEN +PROTOBUF_NAMESPACE_CLOSE +namespace tensorflow { +namespace error { + +enum Code : int { + OK = 0, + CANCELLED = 1, + UNKNOWN = 2, + INVALID_ARGUMENT = 3, + DEADLINE_EXCEEDED = 4, + NOT_FOUND = 5, + ALREADY_EXISTS = 6, + PERMISSION_DENIED = 7, + UNAUTHENTICATED = 16, + RESOURCE_EXHAUSTED = 8, + FAILED_PRECONDITION = 9, + ABORTED = 10, + OUT_OF_RANGE = 11, + UNIMPLEMENTED = 12, + INTERNAL = 13, + UNAVAILABLE = 14, + DATA_LOSS = 15, + DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_ = 20, + Code_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + Code_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() +}; +bool Code_IsValid(int value); +constexpr Code Code_MIN = OK; +constexpr Code Code_MAX = DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_; +constexpr int Code_ARRAYSIZE = Code_MAX + 1; + +const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* Code_descriptor(); +template +inline const std::string& Code_Name(T enum_t_value) { + 
static_assert(::std::is_same::value || + ::std::is_integral::value, + "Incorrect type passed to function Code_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + Code_descriptor(), enum_t_value); +} +inline bool Code_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, Code* value) { + return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( + Code_descriptor(), name, value); +} +// =================================================================== + + +// =================================================================== + + +// =================================================================== + +#ifdef __GNUC__ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif // __GNUC__ +#ifdef __GNUC__ + #pragma GCC diagnostic pop +#endif // __GNUC__ + +// @@protoc_insertion_point(namespace_scope) + +} // namespace error +} // namespace tensorflow + +PROTOBUF_NAMESPACE_OPEN + +template <> struct is_proto_enum< ::tensorflow::error::Code> : ::std::true_type {}; +template <> +inline const EnumDescriptor* GetEnumDescriptor< ::tensorflow::error::Code>() { + return ::tensorflow::error::Code_descriptor(); +} + +PROTOBUF_NAMESPACE_CLOSE + +// @@protoc_insertion_point(global_scope) + +#include +#endif // GOOGLE_PROTOBUF_INCLUDED_GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2ferror_5fcodes_2eproto diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/error_codes.proto b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/error_codes.proto new file mode 100644 index 0000000000000000000000000000000000000000..c873d55887f1ddba8d38b6bc95fbe94f5cd34855 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/error_codes.proto @@ -0,0 +1,155 @@ +syntax = "proto3"; + +// TODO(b/247876220): Change package and java_package once we figure out how to +// migrate. 
+ +package tensorflow.error; + +option cc_enable_arenas = true; +option java_outer_classname = "ErrorCodesProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/google/tsl/tsl/go/protobuf/for_core_protos_go_proto"; + +// The canonical error codes for TensorFlow APIs. +// +// Warnings: +// +// - Do not change any numeric assignments. +// - Changes to this list should only be made if there is a compelling +// need that can't be satisfied in another way. Such changes +// must be approved by at least two OWNERS. +// - These error codes must match gRPC and protobuf error codes (except for +// DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_). +// +// Sometimes multiple error codes may apply. Services should return +// the most specific error code that applies. For example, prefer +// OUT_OF_RANGE over FAILED_PRECONDITION if both codes apply. +// Similarly prefer NOT_FOUND or ALREADY_EXISTS over FAILED_PRECONDITION. +enum Code { + // Not an error; returned on success + OK = 0; + + // The operation was cancelled (typically by the caller). + CANCELLED = 1; + + // Unknown error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + UNKNOWN = 2; + + // Client specified an invalid argument. Note that this differs + // from FAILED_PRECONDITION. INVALID_ARGUMENT indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + INVALID_ARGUMENT = 3; + + // Deadline expired before operation could complete. For operations + // that change the state of the system, this error may be returned + // even if the operation has completed successfully. 
For example, a + // successful response from a server could have been delayed long + // enough for the deadline to expire. + DEADLINE_EXCEEDED = 4; + + // Some requested entity (e.g., file or directory) was not found. + // For privacy reasons, this code *may* be returned when the client + // does not have the access right to the entity. + NOT_FOUND = 5; + + // Some entity that we attempted to create (e.g., file or directory) + // already exists. + ALREADY_EXISTS = 6; + + // The caller does not have permission to execute the specified + // operation. PERMISSION_DENIED must not be used for rejections + // caused by exhausting some resource (use RESOURCE_EXHAUSTED + // instead for those errors). PERMISSION_DENIED must not be + // used if the caller can not be identified (use UNAUTHENTICATED + // instead for those errors). + PERMISSION_DENIED = 7; + + // The request does not have valid authentication credentials for the + // operation. + UNAUTHENTICATED = 16; + + // Some resource has been exhausted, perhaps a per-user quota, or + // perhaps the entire file system is out of space. + RESOURCE_EXHAUSTED = 8; + + // Operation was rejected because the system is not in a state + // required for the operation's execution. For example, directory + // to be deleted may be non-empty, an rmdir operation is applied to + // a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FAILED_PRECONDITION, ABORTED, and UNAVAILABLE: + // (a) Use UNAVAILABLE if the client can retry just the failing call. + // (b) Use ABORTED if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FAILED_PRECONDITION if the client should not retry until + // the system state has been explicitly fixed. 
E.g., if an "rmdir" + // fails because the directory is non-empty, FAILED_PRECONDITION + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FAILED_PRECONDITION if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + FAILED_PRECONDITION = 9; + + // The operation was aborted, typically due to a concurrency issue + // like sequencer check failures, transaction aborts, etc. + // + // See litmus test above for deciding between FAILED_PRECONDITION, + // ABORTED, and UNAVAILABLE. + ABORTED = 10; + + // Operation tried to iterate past the valid input range. E.g., seeking or + // reading past end of file. + // + // Unlike INVALID_ARGUMENT, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate INVALID_ARGUMENT if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OUT_OF_RANGE if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between FAILED_PRECONDITION and + // OUT_OF_RANGE. We recommend using OUT_OF_RANGE (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OUT_OF_RANGE error to detect when + // they are done. + OUT_OF_RANGE = 11; + + // Operation is not implemented or not supported/enabled in this service. + UNIMPLEMENTED = 12; + + // Internal errors. Means some invariant expected by the underlying + // system has been broken. If you see one of these errors, + // something is very broken. + INTERNAL = 13; + + // The service is currently unavailable. This is a most likely a + // transient condition and may be corrected by retrying with + // a backoff. 
+ // + // See litmus test above for deciding between FAILED_PRECONDITION, + // ABORTED, and UNAVAILABLE. + UNAVAILABLE = 14; + + // Unrecoverable data loss or corruption. + DATA_LOSS = 15; + + // An extra enum entry to prevent people from writing code that + // fails to compile when a new code is added. + // + // Nobody should ever reference this enumeration entry. In particular, + // if you write C++ code that switches on this enumeration, add a default: + // case instead of a case that mentions this enumeration entry. + // + // Nobody should rely on the value (currently 20) listed here. It + // may change in the future. + DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_ = 20; +} diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/histogram.pb.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/histogram.pb.h new file mode 100644 index 0000000000000000000000000000000000000000..412dcbb1a817dbf0a28ec3ab8968378f0565871a --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/histogram.pb.h @@ -0,0 +1,513 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tsl/protobuf/histogram.proto + +#ifndef GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2fhistogram_2eproto +#define GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2fhistogram_2eproto + +#include +#include + +#include +#if PROTOBUF_VERSION < 3021000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. +#endif +#if 3021009 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. 
+#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#include +// @@protoc_insertion_point(includes) +#include +#define PROTOBUF_INTERNAL_EXPORT_tsl_2fprotobuf_2fhistogram_2eproto +PROTOBUF_NAMESPACE_OPEN +namespace internal { +class AnyMetadata; +} // namespace internal +PROTOBUF_NAMESPACE_CLOSE + +// Internal implementation detail -- do not use these members. +struct TableStruct_tsl_2fprotobuf_2fhistogram_2eproto { + static const uint32_t offsets[]; +}; +extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_tsl_2fprotobuf_2fhistogram_2eproto; +namespace tensorflow { +class HistogramProto; +struct HistogramProtoDefaultTypeInternal; +extern HistogramProtoDefaultTypeInternal _HistogramProto_default_instance_; +} // namespace tensorflow +PROTOBUF_NAMESPACE_OPEN +template<> ::tensorflow::HistogramProto* Arena::CreateMaybeMessage<::tensorflow::HistogramProto>(Arena*); +PROTOBUF_NAMESPACE_CLOSE +namespace tensorflow { + +// =================================================================== + +class HistogramProto final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.HistogramProto) */ { + public: + inline HistogramProto() : HistogramProto(nullptr) {} + ~HistogramProto() override; + explicit PROTOBUF_CONSTEXPR HistogramProto(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + HistogramProto(const HistogramProto& from); + HistogramProto(HistogramProto&& from) noexcept + : HistogramProto() { + *this = ::std::move(from); + } + + inline HistogramProto& operator=(const HistogramProto& from) { + CopyFrom(from); + return *this; + } + inline HistogramProto& operator=(HistogramProto&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // 
!PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const HistogramProto& default_instance() { + return *internal_default_instance(); + } + static inline const HistogramProto* internal_default_instance() { + return reinterpret_cast( + &_HistogramProto_default_instance_); + } + static constexpr int kIndexInFileMessages = + 0; + + friend void swap(HistogramProto& a, HistogramProto& b) { + a.Swap(&b); + } + inline void Swap(HistogramProto* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(HistogramProto* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + HistogramProto* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const HistogramProto& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const HistogramProto& from) { + HistogramProto::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& 
from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(HistogramProto* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.HistogramProto"; + } + protected: + explicit HistogramProto(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kBucketLimitFieldNumber = 6, + kBucketFieldNumber = 7, + kMinFieldNumber = 1, + kMaxFieldNumber = 2, + kNumFieldNumber = 3, + kSumFieldNumber = 4, + kSumSquaresFieldNumber = 5, + }; + // repeated double bucket_limit = 6 [packed = true]; + int bucket_limit_size() const; + private: + int _internal_bucket_limit_size() const; + public: + void clear_bucket_limit(); + private: + double _internal_bucket_limit(int index) const; + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& + _internal_bucket_limit() const; + void _internal_add_bucket_limit(double value); + ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* + _internal_mutable_bucket_limit(); + public: + double 
bucket_limit(int index) const; + void set_bucket_limit(int index, double value); + void add_bucket_limit(double value); + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& + bucket_limit() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* + mutable_bucket_limit(); + + // repeated double bucket = 7 [packed = true]; + int bucket_size() const; + private: + int _internal_bucket_size() const; + public: + void clear_bucket(); + private: + double _internal_bucket(int index) const; + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& + _internal_bucket() const; + void _internal_add_bucket(double value); + ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* + _internal_mutable_bucket(); + public: + double bucket(int index) const; + void set_bucket(int index, double value); + void add_bucket(double value); + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& + bucket() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* + mutable_bucket(); + + // double min = 1; + void clear_min(); + double min() const; + void set_min(double value); + private: + double _internal_min() const; + void _internal_set_min(double value); + public: + + // double max = 2; + void clear_max(); + double max() const; + void set_max(double value); + private: + double _internal_max() const; + void _internal_set_max(double value); + public: + + // double num = 3; + void clear_num(); + double num() const; + void set_num(double value); + private: + double _internal_num() const; + void _internal_set_num(double value); + public: + + // double sum = 4; + void clear_sum(); + double sum() const; + void set_sum(double value); + private: + double _internal_sum() const; + void _internal_set_sum(double value); + public: + + // double sum_squares = 5; + void clear_sum_squares(); + double sum_squares() const; + void set_sum_squares(double value); + private: + double _internal_sum_squares() const; + void _internal_set_sum_squares(double value); + public: + + // 
@@protoc_insertion_point(class_scope:tensorflow.HistogramProto) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::RepeatedField< double > bucket_limit_; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< double > bucket_; + double min_; + double max_; + double num_; + double sum_; + double sum_squares_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2fhistogram_2eproto; +}; +// =================================================================== + + +// =================================================================== + +#ifdef __GNUC__ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif // __GNUC__ +// HistogramProto + +// double min = 1; +inline void HistogramProto::clear_min() { + _impl_.min_ = 0; +} +inline double HistogramProto::_internal_min() const { + return _impl_.min_; +} +inline double HistogramProto::min() const { + // @@protoc_insertion_point(field_get:tensorflow.HistogramProto.min) + return _internal_min(); +} +inline void HistogramProto::_internal_set_min(double value) { + + _impl_.min_ = value; +} +inline void HistogramProto::set_min(double value) { + _internal_set_min(value); + // @@protoc_insertion_point(field_set:tensorflow.HistogramProto.min) +} + +// double max = 2; +inline void HistogramProto::clear_max() { + _impl_.max_ = 0; +} +inline double HistogramProto::_internal_max() const { + return _impl_.max_; +} +inline double HistogramProto::max() const { + // @@protoc_insertion_point(field_get:tensorflow.HistogramProto.max) + return _internal_max(); +} +inline void HistogramProto::_internal_set_max(double value) { + + _impl_.max_ = value; +} +inline void HistogramProto::set_max(double value) { + _internal_set_max(value); + // 
@@protoc_insertion_point(field_set:tensorflow.HistogramProto.max) +} + +// double num = 3; +inline void HistogramProto::clear_num() { + _impl_.num_ = 0; +} +inline double HistogramProto::_internal_num() const { + return _impl_.num_; +} +inline double HistogramProto::num() const { + // @@protoc_insertion_point(field_get:tensorflow.HistogramProto.num) + return _internal_num(); +} +inline void HistogramProto::_internal_set_num(double value) { + + _impl_.num_ = value; +} +inline void HistogramProto::set_num(double value) { + _internal_set_num(value); + // @@protoc_insertion_point(field_set:tensorflow.HistogramProto.num) +} + +// double sum = 4; +inline void HistogramProto::clear_sum() { + _impl_.sum_ = 0; +} +inline double HistogramProto::_internal_sum() const { + return _impl_.sum_; +} +inline double HistogramProto::sum() const { + // @@protoc_insertion_point(field_get:tensorflow.HistogramProto.sum) + return _internal_sum(); +} +inline void HistogramProto::_internal_set_sum(double value) { + + _impl_.sum_ = value; +} +inline void HistogramProto::set_sum(double value) { + _internal_set_sum(value); + // @@protoc_insertion_point(field_set:tensorflow.HistogramProto.sum) +} + +// double sum_squares = 5; +inline void HistogramProto::clear_sum_squares() { + _impl_.sum_squares_ = 0; +} +inline double HistogramProto::_internal_sum_squares() const { + return _impl_.sum_squares_; +} +inline double HistogramProto::sum_squares() const { + // @@protoc_insertion_point(field_get:tensorflow.HistogramProto.sum_squares) + return _internal_sum_squares(); +} +inline void HistogramProto::_internal_set_sum_squares(double value) { + + _impl_.sum_squares_ = value; +} +inline void HistogramProto::set_sum_squares(double value) { + _internal_set_sum_squares(value); + // @@protoc_insertion_point(field_set:tensorflow.HistogramProto.sum_squares) +} + +// repeated double bucket_limit = 6 [packed = true]; +inline int HistogramProto::_internal_bucket_limit_size() const { + return 
_impl_.bucket_limit_.size(); +} +inline int HistogramProto::bucket_limit_size() const { + return _internal_bucket_limit_size(); +} +inline void HistogramProto::clear_bucket_limit() { + _impl_.bucket_limit_.Clear(); +} +inline double HistogramProto::_internal_bucket_limit(int index) const { + return _impl_.bucket_limit_.Get(index); +} +inline double HistogramProto::bucket_limit(int index) const { + // @@protoc_insertion_point(field_get:tensorflow.HistogramProto.bucket_limit) + return _internal_bucket_limit(index); +} +inline void HistogramProto::set_bucket_limit(int index, double value) { + _impl_.bucket_limit_.Set(index, value); + // @@protoc_insertion_point(field_set:tensorflow.HistogramProto.bucket_limit) +} +inline void HistogramProto::_internal_add_bucket_limit(double value) { + _impl_.bucket_limit_.Add(value); +} +inline void HistogramProto::add_bucket_limit(double value) { + _internal_add_bucket_limit(value); + // @@protoc_insertion_point(field_add:tensorflow.HistogramProto.bucket_limit) +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& +HistogramProto::_internal_bucket_limit() const { + return _impl_.bucket_limit_; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& +HistogramProto::bucket_limit() const { + // @@protoc_insertion_point(field_list:tensorflow.HistogramProto.bucket_limit) + return _internal_bucket_limit(); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* +HistogramProto::_internal_mutable_bucket_limit() { + return &_impl_.bucket_limit_; +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* +HistogramProto::mutable_bucket_limit() { + // @@protoc_insertion_point(field_mutable_list:tensorflow.HistogramProto.bucket_limit) + return _internal_mutable_bucket_limit(); +} + +// repeated double bucket = 7 [packed = true]; +inline int HistogramProto::_internal_bucket_size() const { + return _impl_.bucket_.size(); +} +inline int HistogramProto::bucket_size() const { + return _internal_bucket_size(); +} 
+inline void HistogramProto::clear_bucket() { + _impl_.bucket_.Clear(); +} +inline double HistogramProto::_internal_bucket(int index) const { + return _impl_.bucket_.Get(index); +} +inline double HistogramProto::bucket(int index) const { + // @@protoc_insertion_point(field_get:tensorflow.HistogramProto.bucket) + return _internal_bucket(index); +} +inline void HistogramProto::set_bucket(int index, double value) { + _impl_.bucket_.Set(index, value); + // @@protoc_insertion_point(field_set:tensorflow.HistogramProto.bucket) +} +inline void HistogramProto::_internal_add_bucket(double value) { + _impl_.bucket_.Add(value); +} +inline void HistogramProto::add_bucket(double value) { + _internal_add_bucket(value); + // @@protoc_insertion_point(field_add:tensorflow.HistogramProto.bucket) +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& +HistogramProto::_internal_bucket() const { + return _impl_.bucket_; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& +HistogramProto::bucket() const { + // @@protoc_insertion_point(field_list:tensorflow.HistogramProto.bucket) + return _internal_bucket(); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* +HistogramProto::_internal_mutable_bucket() { + return &_impl_.bucket_; +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* +HistogramProto::mutable_bucket() { + // @@protoc_insertion_point(field_mutable_list:tensorflow.HistogramProto.bucket) + return _internal_mutable_bucket(); +} + +#ifdef __GNUC__ + #pragma GCC diagnostic pop +#endif // __GNUC__ + +// @@protoc_insertion_point(namespace_scope) + +} // namespace tensorflow + +// @@protoc_insertion_point(global_scope) + +#include +#endif // GOOGLE_PROTOBUF_INCLUDED_GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2fhistogram_2eproto diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/rpc_options.pb.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/rpc_options.pb.h new file mode 100644 index 
0000000000000000000000000000000000000000..c8f1bcc71c8952b915800383aabc398be74d6d12 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/rpc_options.pb.h @@ -0,0 +1,437 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tsl/protobuf/rpc_options.proto + +#ifndef GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2frpc_5foptions_2eproto +#define GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2frpc_5foptions_2eproto + +#include +#include + +#include +#if PROTOBUF_VERSION < 3021000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. +#endif +#if 3021009 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#include +// @@protoc_insertion_point(includes) +#include +#define PROTOBUF_INTERNAL_EXPORT_tsl_2fprotobuf_2frpc_5foptions_2eproto +PROTOBUF_NAMESPACE_OPEN +namespace internal { +class AnyMetadata; +} // namespace internal +PROTOBUF_NAMESPACE_CLOSE + +// Internal implementation detail -- do not use these members. 
+struct TableStruct_tsl_2fprotobuf_2frpc_5foptions_2eproto { + static const uint32_t offsets[]; +}; +extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_tsl_2fprotobuf_2frpc_5foptions_2eproto; +namespace tensorflow { +class RPCOptions; +struct RPCOptionsDefaultTypeInternal; +extern RPCOptionsDefaultTypeInternal _RPCOptions_default_instance_; +} // namespace tensorflow +PROTOBUF_NAMESPACE_OPEN +template<> ::tensorflow::RPCOptions* Arena::CreateMaybeMessage<::tensorflow::RPCOptions>(Arena*); +PROTOBUF_NAMESPACE_CLOSE +namespace tensorflow { + +// =================================================================== + +class RPCOptions final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.RPCOptions) */ { + public: + inline RPCOptions() : RPCOptions(nullptr) {} + ~RPCOptions() override; + explicit PROTOBUF_CONSTEXPR RPCOptions(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + RPCOptions(const RPCOptions& from); + RPCOptions(RPCOptions&& from) noexcept + : RPCOptions() { + *this = ::std::move(from); + } + + inline RPCOptions& operator=(const RPCOptions& from) { + CopyFrom(from); + return *this; + } + inline RPCOptions& operator=(RPCOptions&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const RPCOptions& default_instance() { + return *internal_default_instance(); + } + static 
inline const RPCOptions* internal_default_instance() { + return reinterpret_cast( + &_RPCOptions_default_instance_); + } + static constexpr int kIndexInFileMessages = + 0; + + friend void swap(RPCOptions& a, RPCOptions& b) { + a.Swap(&b); + } + inline void Swap(RPCOptions* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(RPCOptions* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + RPCOptions* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const RPCOptions& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const RPCOptions& from) { + RPCOptions::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void 
InternalSwap(RPCOptions* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.RPCOptions"; + } + protected: + explicit RPCOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kCompressionAlgorithmFieldNumber = 2, + kCompressionLevelFieldNumber = 3, + kUseRpcForInprocessMasterFieldNumber = 1, + kCacheRpcResponseFieldNumber = 4, + kDisableSessionConnectionSharingFieldNumber = 5, + kNumChannelsPerTargetFieldNumber = 6, + }; + // string compression_algorithm = 2; + void clear_compression_algorithm(); + const std::string& compression_algorithm() const; + template + void set_compression_algorithm(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_compression_algorithm(); + PROTOBUF_NODISCARD std::string* release_compression_algorithm(); + void set_allocated_compression_algorithm(std::string* compression_algorithm); + private: + const std::string& _internal_compression_algorithm() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_compression_algorithm(const std::string& value); + std::string* _internal_mutable_compression_algorithm(); + public: + + // int32 compression_level = 3; + void clear_compression_level(); + int32_t compression_level() const; + void set_compression_level(int32_t value); + private: + int32_t _internal_compression_level() const; + void _internal_set_compression_level(int32_t value); + public: + + // bool use_rpc_for_inprocess_master = 1; + void clear_use_rpc_for_inprocess_master(); + bool use_rpc_for_inprocess_master() const; + void set_use_rpc_for_inprocess_master(bool value); + private: + bool _internal_use_rpc_for_inprocess_master() const; + void _internal_set_use_rpc_for_inprocess_master(bool value); + public: + + // bool cache_rpc_response = 4; + void clear_cache_rpc_response(); + bool cache_rpc_response() const; + void set_cache_rpc_response(bool value); + private: + bool _internal_cache_rpc_response() const; + void _internal_set_cache_rpc_response(bool value); + public: + + // bool disable_session_connection_sharing = 5; + void clear_disable_session_connection_sharing(); + bool disable_session_connection_sharing() const; + void set_disable_session_connection_sharing(bool value); + private: + bool _internal_disable_session_connection_sharing() const; + void _internal_set_disable_session_connection_sharing(bool value); + public: + + // int32 num_channels_per_target = 6; + void clear_num_channels_per_target(); + int32_t num_channels_per_target() const; + void set_num_channels_per_target(int32_t value); + private: + int32_t _internal_num_channels_per_target() const; + void _internal_set_num_channels_per_target(int32_t value); + public: + + // 
@@protoc_insertion_point(class_scope:tensorflow.RPCOptions) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr compression_algorithm_; + int32_t compression_level_; + bool use_rpc_for_inprocess_master_; + bool cache_rpc_response_; + bool disable_session_connection_sharing_; + int32_t num_channels_per_target_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2frpc_5foptions_2eproto; +}; +// =================================================================== + + +// =================================================================== + +#ifdef __GNUC__ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif // __GNUC__ +// RPCOptions + +// bool use_rpc_for_inprocess_master = 1; +inline void RPCOptions::clear_use_rpc_for_inprocess_master() { + _impl_.use_rpc_for_inprocess_master_ = false; +} +inline bool RPCOptions::_internal_use_rpc_for_inprocess_master() const { + return _impl_.use_rpc_for_inprocess_master_; +} +inline bool RPCOptions::use_rpc_for_inprocess_master() const { + // @@protoc_insertion_point(field_get:tensorflow.RPCOptions.use_rpc_for_inprocess_master) + return _internal_use_rpc_for_inprocess_master(); +} +inline void RPCOptions::_internal_set_use_rpc_for_inprocess_master(bool value) { + + _impl_.use_rpc_for_inprocess_master_ = value; +} +inline void RPCOptions::set_use_rpc_for_inprocess_master(bool value) { + _internal_set_use_rpc_for_inprocess_master(value); + // @@protoc_insertion_point(field_set:tensorflow.RPCOptions.use_rpc_for_inprocess_master) +} + +// string compression_algorithm = 2; +inline void RPCOptions::clear_compression_algorithm() { + _impl_.compression_algorithm_.ClearToEmpty(); +} +inline const std::string& 
RPCOptions::compression_algorithm() const { + // @@protoc_insertion_point(field_get:tensorflow.RPCOptions.compression_algorithm) + return _internal_compression_algorithm(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void RPCOptions::set_compression_algorithm(ArgT0&& arg0, ArgT... args) { + + _impl_.compression_algorithm_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.RPCOptions.compression_algorithm) +} +inline std::string* RPCOptions::mutable_compression_algorithm() { + std::string* _s = _internal_mutable_compression_algorithm(); + // @@protoc_insertion_point(field_mutable:tensorflow.RPCOptions.compression_algorithm) + return _s; +} +inline const std::string& RPCOptions::_internal_compression_algorithm() const { + return _impl_.compression_algorithm_.Get(); +} +inline void RPCOptions::_internal_set_compression_algorithm(const std::string& value) { + + _impl_.compression_algorithm_.Set(value, GetArenaForAllocation()); +} +inline std::string* RPCOptions::_internal_mutable_compression_algorithm() { + + return _impl_.compression_algorithm_.Mutable(GetArenaForAllocation()); +} +inline std::string* RPCOptions::release_compression_algorithm() { + // @@protoc_insertion_point(field_release:tensorflow.RPCOptions.compression_algorithm) + return _impl_.compression_algorithm_.Release(); +} +inline void RPCOptions::set_allocated_compression_algorithm(std::string* compression_algorithm) { + if (compression_algorithm != nullptr) { + + } else { + + } + _impl_.compression_algorithm_.SetAllocated(compression_algorithm, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.compression_algorithm_.IsDefault()) { + _impl_.compression_algorithm_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.RPCOptions.compression_algorithm) +} + +// int32 compression_level = 3; +inline void 
RPCOptions::clear_compression_level() { + _impl_.compression_level_ = 0; +} +inline int32_t RPCOptions::_internal_compression_level() const { + return _impl_.compression_level_; +} +inline int32_t RPCOptions::compression_level() const { + // @@protoc_insertion_point(field_get:tensorflow.RPCOptions.compression_level) + return _internal_compression_level(); +} +inline void RPCOptions::_internal_set_compression_level(int32_t value) { + + _impl_.compression_level_ = value; +} +inline void RPCOptions::set_compression_level(int32_t value) { + _internal_set_compression_level(value); + // @@protoc_insertion_point(field_set:tensorflow.RPCOptions.compression_level) +} + +// bool cache_rpc_response = 4; +inline void RPCOptions::clear_cache_rpc_response() { + _impl_.cache_rpc_response_ = false; +} +inline bool RPCOptions::_internal_cache_rpc_response() const { + return _impl_.cache_rpc_response_; +} +inline bool RPCOptions::cache_rpc_response() const { + // @@protoc_insertion_point(field_get:tensorflow.RPCOptions.cache_rpc_response) + return _internal_cache_rpc_response(); +} +inline void RPCOptions::_internal_set_cache_rpc_response(bool value) { + + _impl_.cache_rpc_response_ = value; +} +inline void RPCOptions::set_cache_rpc_response(bool value) { + _internal_set_cache_rpc_response(value); + // @@protoc_insertion_point(field_set:tensorflow.RPCOptions.cache_rpc_response) +} + +// bool disable_session_connection_sharing = 5; +inline void RPCOptions::clear_disable_session_connection_sharing() { + _impl_.disable_session_connection_sharing_ = false; +} +inline bool RPCOptions::_internal_disable_session_connection_sharing() const { + return _impl_.disable_session_connection_sharing_; +} +inline bool RPCOptions::disable_session_connection_sharing() const { + // @@protoc_insertion_point(field_get:tensorflow.RPCOptions.disable_session_connection_sharing) + return _internal_disable_session_connection_sharing(); +} +inline void 
RPCOptions::_internal_set_disable_session_connection_sharing(bool value) { + + _impl_.disable_session_connection_sharing_ = value; +} +inline void RPCOptions::set_disable_session_connection_sharing(bool value) { + _internal_set_disable_session_connection_sharing(value); + // @@protoc_insertion_point(field_set:tensorflow.RPCOptions.disable_session_connection_sharing) +} + +// int32 num_channels_per_target = 6; +inline void RPCOptions::clear_num_channels_per_target() { + _impl_.num_channels_per_target_ = 0; +} +inline int32_t RPCOptions::_internal_num_channels_per_target() const { + return _impl_.num_channels_per_target_; +} +inline int32_t RPCOptions::num_channels_per_target() const { + // @@protoc_insertion_point(field_get:tensorflow.RPCOptions.num_channels_per_target) + return _internal_num_channels_per_target(); +} +inline void RPCOptions::_internal_set_num_channels_per_target(int32_t value) { + + _impl_.num_channels_per_target_ = value; +} +inline void RPCOptions::set_num_channels_per_target(int32_t value) { + _internal_set_num_channels_per_target(value); + // @@protoc_insertion_point(field_set:tensorflow.RPCOptions.num_channels_per_target) +} + +#ifdef __GNUC__ + #pragma GCC diagnostic pop +#endif // __GNUC__ + +// @@protoc_insertion_point(namespace_scope) + +} // namespace tensorflow + +// @@protoc_insertion_point(global_scope) + +#include +#endif // GOOGLE_PROTOBUF_INCLUDED_GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2frpc_5foptions_2eproto diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/rpc_options.proto b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/rpc_options.proto new file mode 100644 index 0000000000000000000000000000000000000000..35c5dbe3b1bf4eccd581fe1ae6c5b66a657f6ecd --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/rpc_options.proto @@ -0,0 +1,41 @@ +syntax = "proto3"; + +package tensorflow; + +option go_package = 
"github.com/google/tsl/tsl/go/protobuf/for_core_protos_go_proto"; + +// RPC options for distributed runtime. +message RPCOptions { + // If true, always use RPC to contact the session target. + // + // If false (the default option), TensorFlow may use an optimized + // transport for client-master communication that avoids the RPC + // stack. This option is primarily for used testing the RPC stack. + bool use_rpc_for_inprocess_master = 1; + + // The compression algorithm to be used. One of "deflate", "gzip". + string compression_algorithm = 2; + + // If compression_algorithm is set, the compression level to be used. + // From 0 (no compression), up to 3. + int32 compression_level = 3; + + // Setting cache_rpc_response to true will enable sender side caching of + // response for RecvTensorAsync and RecvBufAsync to allow receiver to retry + // requests . This is only necessary when the network fabric is experiencing a + // significant error rate. Without it we'll fail a step on an network error, + // while with it we'll be able to complete long steps (like complex + // initializations) in the face of some network errors during RecvTensor. + bool cache_rpc_response = 4; + + // Disables TCP connection sharing when opening a new RPC channel. + bool disable_session_connection_sharing = 5; + + // Setting num_channels_per_target > 0 allows uses of multiple channels to + // communicate to the same target. This can be used to improve the aggregate + // throughput on high speed links (e.g 100G) where single connection is not + // sufficient to maximize link utilization. Note that a single RPC only goes + // on a single channel, this only helps in situations where there are multiple + // transfers to the same target overlapping in time. 
+ int32 num_channels_per_target = 6; +} diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/status.pb.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/status.pb.h new file mode 100644 index 0000000000000000000000000000000000000000..da97ca3fd192ce05b6218001988e87ea25a9945e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/status.pb.h @@ -0,0 +1,314 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tsl/protobuf/status.proto + +#ifndef GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2fstatus_2eproto +#define GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2fstatus_2eproto + +#include +#include + +#include +#if PROTOBUF_VERSION < 3021000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. +#endif +#if 3021009 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#include +#include "tsl/protobuf/error_codes.pb.h" +// @@protoc_insertion_point(includes) +#include +#define PROTOBUF_INTERNAL_EXPORT_tsl_2fprotobuf_2fstatus_2eproto +PROTOBUF_NAMESPACE_OPEN +namespace internal { +class AnyMetadata; +} // namespace internal +PROTOBUF_NAMESPACE_CLOSE + +// Internal implementation detail -- do not use these members. 
+struct TableStruct_tsl_2fprotobuf_2fstatus_2eproto { + static const uint32_t offsets[]; +}; +extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_tsl_2fprotobuf_2fstatus_2eproto; +namespace tensorflow { +class StatusProto; +struct StatusProtoDefaultTypeInternal; +extern StatusProtoDefaultTypeInternal _StatusProto_default_instance_; +} // namespace tensorflow +PROTOBUF_NAMESPACE_OPEN +template<> ::tensorflow::StatusProto* Arena::CreateMaybeMessage<::tensorflow::StatusProto>(Arena*); +PROTOBUF_NAMESPACE_CLOSE +namespace tensorflow { + +// =================================================================== + +class StatusProto final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.StatusProto) */ { + public: + inline StatusProto() : StatusProto(nullptr) {} + ~StatusProto() override; + explicit PROTOBUF_CONSTEXPR StatusProto(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + StatusProto(const StatusProto& from); + StatusProto(StatusProto&& from) noexcept + : StatusProto() { + *this = ::std::move(from); + } + + inline StatusProto& operator=(const StatusProto& from) { + CopyFrom(from); + return *this; + } + inline StatusProto& operator=(StatusProto&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const StatusProto& default_instance() { + return *internal_default_instance(); + } + 
static inline const StatusProto* internal_default_instance() { + return reinterpret_cast( + &_StatusProto_default_instance_); + } + static constexpr int kIndexInFileMessages = + 0; + + friend void swap(StatusProto& a, StatusProto& b) { + a.Swap(&b); + } + inline void Swap(StatusProto* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(StatusProto* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + StatusProto* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const StatusProto& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const StatusProto& from) { + StatusProto::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) 
const final; + void InternalSwap(StatusProto* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.StatusProto"; + } + protected: + explicit StatusProto(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kMessageFieldNumber = 2, + kCodeFieldNumber = 1, + }; + // string message = 2; + void clear_message(); + const std::string& message() const; + template + void set_message(ArgT0&& arg0, ArgT... args); + std::string* mutable_message(); + PROTOBUF_NODISCARD std::string* release_message(); + void set_allocated_message(std::string* message); + private: + const std::string& _internal_message() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_message(const std::string& value); + std::string* _internal_mutable_message(); + public: + + // .tensorflow.error.Code code = 1; + void clear_code(); + ::tensorflow::error::Code code() const; + void set_code(::tensorflow::error::Code value); + private: + ::tensorflow::error::Code _internal_code() const; + void _internal_set_code(::tensorflow::error::Code value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.StatusProto) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr message_; + int code_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct 
::TableStruct_tsl_2fprotobuf_2fstatus_2eproto; +}; +// =================================================================== + + +// =================================================================== + +#ifdef __GNUC__ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif // __GNUC__ +// StatusProto + +// .tensorflow.error.Code code = 1; +inline void StatusProto::clear_code() { + _impl_.code_ = 0; +} +inline ::tensorflow::error::Code StatusProto::_internal_code() const { + return static_cast< ::tensorflow::error::Code >(_impl_.code_); +} +inline ::tensorflow::error::Code StatusProto::code() const { + // @@protoc_insertion_point(field_get:tensorflow.StatusProto.code) + return _internal_code(); +} +inline void StatusProto::_internal_set_code(::tensorflow::error::Code value) { + + _impl_.code_ = value; +} +inline void StatusProto::set_code(::tensorflow::error::Code value) { + _internal_set_code(value); + // @@protoc_insertion_point(field_set:tensorflow.StatusProto.code) +} + +// string message = 2; +inline void StatusProto::clear_message() { + _impl_.message_.ClearToEmpty(); +} +inline const std::string& StatusProto::message() const { + // @@protoc_insertion_point(field_get:tensorflow.StatusProto.message) + return _internal_message(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void StatusProto::set_message(ArgT0&& arg0, ArgT... 
args) { + + _impl_.message_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.StatusProto.message) +} +inline std::string* StatusProto::mutable_message() { + std::string* _s = _internal_mutable_message(); + // @@protoc_insertion_point(field_mutable:tensorflow.StatusProto.message) + return _s; +} +inline const std::string& StatusProto::_internal_message() const { + return _impl_.message_.Get(); +} +inline void StatusProto::_internal_set_message(const std::string& value) { + + _impl_.message_.Set(value, GetArenaForAllocation()); +} +inline std::string* StatusProto::_internal_mutable_message() { + + return _impl_.message_.Mutable(GetArenaForAllocation()); +} +inline std::string* StatusProto::release_message() { + // @@protoc_insertion_point(field_release:tensorflow.StatusProto.message) + return _impl_.message_.Release(); +} +inline void StatusProto::set_allocated_message(std::string* message) { + if (message != nullptr) { + + } else { + + } + _impl_.message_.SetAllocated(message, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.message_.IsDefault()) { + _impl_.message_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.StatusProto.message) +} + +#ifdef __GNUC__ + #pragma GCC diagnostic pop +#endif // __GNUC__ + +// @@protoc_insertion_point(namespace_scope) + +} // namespace tensorflow + +// @@protoc_insertion_point(global_scope) + +#include +#endif // GOOGLE_PROTOBUF_INCLUDED_GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2fstatus_2eproto diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/status.proto b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/status.proto new file mode 100644 index 0000000000000000000000000000000000000000..09d72218941ee20cd1c5bc1d84615e789fc0a0e9 --- /dev/null +++ 
b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/status.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package tensorflow; + +import "tsl/protobuf/error_codes.proto"; + +option cc_enable_arenas = true; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/google/tsl/tsl/go/protobuf/for_core_protos_go_proto"; + +// Wire-format for Status. +// Next tag: 3 +message StatusProto { + // Status code as defined in tensorflow/tsl/protobuf/error_codes.proto. + error.Code code = 1; + + // Detail error message. + string message = 2; +} diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/test_log.pb.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/test_log.pb.h new file mode 100644 index 0000000000000000000000000000000000000000..2dcc8e97bac10f8e9c5e368ae5462620777294b2 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/test_log.pb.h @@ -0,0 +1,6514 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tsl/protobuf/test_log.proto + +#ifndef GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2ftest_5flog_2eproto +#define GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2ftest_5flog_2eproto + +#include +#include + +#include +#if PROTOBUF_VERSION < 3021000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. +#endif +#if 3021009 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. 
+#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#include // IWYU pragma: export +#include +#include +#include +#include +#include +#include +// @@protoc_insertion_point(includes) +#include +#define PROTOBUF_INTERNAL_EXPORT_tsl_2fprotobuf_2ftest_5flog_2eproto +PROTOBUF_NAMESPACE_OPEN +namespace internal { +class AnyMetadata; +} // namespace internal +PROTOBUF_NAMESPACE_CLOSE + +// Internal implementation detail -- do not use these members. +struct TableStruct_tsl_2fprotobuf_2ftest_5flog_2eproto { + static const uint32_t offsets[]; +}; +extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_tsl_2fprotobuf_2ftest_5flog_2eproto; +namespace tensorflow { +class AvailableDeviceInfo; +struct AvailableDeviceInfoDefaultTypeInternal; +extern AvailableDeviceInfoDefaultTypeInternal _AvailableDeviceInfo_default_instance_; +class BenchmarkEntries; +struct BenchmarkEntriesDefaultTypeInternal; +extern BenchmarkEntriesDefaultTypeInternal _BenchmarkEntries_default_instance_; +class BenchmarkEntry; +struct BenchmarkEntryDefaultTypeInternal; +extern BenchmarkEntryDefaultTypeInternal _BenchmarkEntry_default_instance_; +class BenchmarkEntry_ExtrasEntry_DoNotUse; +struct BenchmarkEntry_ExtrasEntry_DoNotUseDefaultTypeInternal; +extern BenchmarkEntry_ExtrasEntry_DoNotUseDefaultTypeInternal _BenchmarkEntry_ExtrasEntry_DoNotUse_default_instance_; +class BuildConfiguration; +struct BuildConfigurationDefaultTypeInternal; +extern BuildConfigurationDefaultTypeInternal _BuildConfiguration_default_instance_; +class CPUInfo; +struct CPUInfoDefaultTypeInternal; +extern CPUInfoDefaultTypeInternal _CPUInfo_default_instance_; +class CPUInfo_CacheSizeEntry_DoNotUse; +struct CPUInfo_CacheSizeEntry_DoNotUseDefaultTypeInternal; +extern CPUInfo_CacheSizeEntry_DoNotUseDefaultTypeInternal _CPUInfo_CacheSizeEntry_DoNotUse_default_instance_; +class CommitId; +struct 
CommitIdDefaultTypeInternal; +extern CommitIdDefaultTypeInternal _CommitId_default_instance_; +class EntryValue; +struct EntryValueDefaultTypeInternal; +extern EntryValueDefaultTypeInternal _EntryValue_default_instance_; +class GPUInfo; +struct GPUInfoDefaultTypeInternal; +extern GPUInfoDefaultTypeInternal _GPUInfo_default_instance_; +class MachineConfiguration; +struct MachineConfigurationDefaultTypeInternal; +extern MachineConfigurationDefaultTypeInternal _MachineConfiguration_default_instance_; +class MemoryInfo; +struct MemoryInfoDefaultTypeInternal; +extern MemoryInfoDefaultTypeInternal _MemoryInfo_default_instance_; +class MetricEntry; +struct MetricEntryDefaultTypeInternal; +extern MetricEntryDefaultTypeInternal _MetricEntry_default_instance_; +class PlatformInfo; +struct PlatformInfoDefaultTypeInternal; +extern PlatformInfoDefaultTypeInternal _PlatformInfo_default_instance_; +class RunConfiguration; +struct RunConfigurationDefaultTypeInternal; +extern RunConfigurationDefaultTypeInternal _RunConfiguration_default_instance_; +class RunConfiguration_EnvVarsEntry_DoNotUse; +struct RunConfiguration_EnvVarsEntry_DoNotUseDefaultTypeInternal; +extern RunConfiguration_EnvVarsEntry_DoNotUseDefaultTypeInternal _RunConfiguration_EnvVarsEntry_DoNotUse_default_instance_; +class TestResults; +struct TestResultsDefaultTypeInternal; +extern TestResultsDefaultTypeInternal _TestResults_default_instance_; +} // namespace tensorflow +PROTOBUF_NAMESPACE_OPEN +template<> ::tensorflow::AvailableDeviceInfo* Arena::CreateMaybeMessage<::tensorflow::AvailableDeviceInfo>(Arena*); +template<> ::tensorflow::BenchmarkEntries* Arena::CreateMaybeMessage<::tensorflow::BenchmarkEntries>(Arena*); +template<> ::tensorflow::BenchmarkEntry* Arena::CreateMaybeMessage<::tensorflow::BenchmarkEntry>(Arena*); +template<> ::tensorflow::BenchmarkEntry_ExtrasEntry_DoNotUse* Arena::CreateMaybeMessage<::tensorflow::BenchmarkEntry_ExtrasEntry_DoNotUse>(Arena*); +template<> ::tensorflow::BuildConfiguration* 
Arena::CreateMaybeMessage<::tensorflow::BuildConfiguration>(Arena*); +template<> ::tensorflow::CPUInfo* Arena::CreateMaybeMessage<::tensorflow::CPUInfo>(Arena*); +template<> ::tensorflow::CPUInfo_CacheSizeEntry_DoNotUse* Arena::CreateMaybeMessage<::tensorflow::CPUInfo_CacheSizeEntry_DoNotUse>(Arena*); +template<> ::tensorflow::CommitId* Arena::CreateMaybeMessage<::tensorflow::CommitId>(Arena*); +template<> ::tensorflow::EntryValue* Arena::CreateMaybeMessage<::tensorflow::EntryValue>(Arena*); +template<> ::tensorflow::GPUInfo* Arena::CreateMaybeMessage<::tensorflow::GPUInfo>(Arena*); +template<> ::tensorflow::MachineConfiguration* Arena::CreateMaybeMessage<::tensorflow::MachineConfiguration>(Arena*); +template<> ::tensorflow::MemoryInfo* Arena::CreateMaybeMessage<::tensorflow::MemoryInfo>(Arena*); +template<> ::tensorflow::MetricEntry* Arena::CreateMaybeMessage<::tensorflow::MetricEntry>(Arena*); +template<> ::tensorflow::PlatformInfo* Arena::CreateMaybeMessage<::tensorflow::PlatformInfo>(Arena*); +template<> ::tensorflow::RunConfiguration* Arena::CreateMaybeMessage<::tensorflow::RunConfiguration>(Arena*); +template<> ::tensorflow::RunConfiguration_EnvVarsEntry_DoNotUse* Arena::CreateMaybeMessage<::tensorflow::RunConfiguration_EnvVarsEntry_DoNotUse>(Arena*); +template<> ::tensorflow::TestResults* Arena::CreateMaybeMessage<::tensorflow::TestResults>(Arena*); +PROTOBUF_NAMESPACE_CLOSE +namespace tensorflow { + +enum TestResults_BenchmarkType : int { + TestResults_BenchmarkType_UNKNOWN = 0, + TestResults_BenchmarkType_CPP_MICROBENCHMARK = 1, + TestResults_BenchmarkType_PYTHON_BENCHMARK = 2, + TestResults_BenchmarkType_ANDROID_BENCHMARK = 3, + TestResults_BenchmarkType_EDGE_BENCHMARK = 4, + TestResults_BenchmarkType_IOS_BENCHMARK = 5, + TestResults_BenchmarkType_TestResults_BenchmarkType_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + TestResults_BenchmarkType_TestResults_BenchmarkType_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() +}; +bool 
TestResults_BenchmarkType_IsValid(int value); +constexpr TestResults_BenchmarkType TestResults_BenchmarkType_BenchmarkType_MIN = TestResults_BenchmarkType_UNKNOWN; +constexpr TestResults_BenchmarkType TestResults_BenchmarkType_BenchmarkType_MAX = TestResults_BenchmarkType_IOS_BENCHMARK; +constexpr int TestResults_BenchmarkType_BenchmarkType_ARRAYSIZE = TestResults_BenchmarkType_BenchmarkType_MAX + 1; + +const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* TestResults_BenchmarkType_descriptor(); +template +inline const std::string& TestResults_BenchmarkType_Name(T enum_t_value) { + static_assert(::std::is_same::value || + ::std::is_integral::value, + "Incorrect type passed to function TestResults_BenchmarkType_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + TestResults_BenchmarkType_descriptor(), enum_t_value); +} +inline bool TestResults_BenchmarkType_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, TestResults_BenchmarkType* value) { + return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( + TestResults_BenchmarkType_descriptor(), name, value); +} +// =================================================================== + +class EntryValue final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.EntryValue) */ { + public: + inline EntryValue() : EntryValue(nullptr) {} + ~EntryValue() override; + explicit PROTOBUF_CONSTEXPR EntryValue(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + EntryValue(const EntryValue& from); + EntryValue(EntryValue&& from) noexcept + : EntryValue() { + *this = ::std::move(from); + } + + inline EntryValue& operator=(const EntryValue& from) { + CopyFrom(from); + return *this; + } + inline EntryValue& operator=(EntryValue&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); 
+ } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const EntryValue& default_instance() { + return *internal_default_instance(); + } + enum KindCase { + kDoubleValue = 1, + kStringValue = 2, + KIND_NOT_SET = 0, + }; + + static inline const EntryValue* internal_default_instance() { + return reinterpret_cast( + &_EntryValue_default_instance_); + } + static constexpr int kIndexInFileMessages = + 0; + + friend void swap(EntryValue& a, EntryValue& b) { + a.Swap(&b); + } + inline void Swap(EntryValue* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(EntryValue* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + EntryValue* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const EntryValue& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const EntryValue& from) { + EntryValue::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + 
PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(EntryValue* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.EntryValue"; + } + protected: + explicit EntryValue(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kDoubleValueFieldNumber = 1, + kStringValueFieldNumber = 2, + }; + // double double_value = 1; + bool has_double_value() const; + private: + bool _internal_has_double_value() const; + public: + void clear_double_value(); + double double_value() const; + void set_double_value(double value); + private: + double _internal_double_value() const; + void _internal_set_double_value(double value); + public: + + // string string_value = 2; + bool has_string_value() const; + private: + bool _internal_has_string_value() const; + public: + void clear_string_value(); + const std::string& string_value() const; + template + void set_string_value(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_string_value(); + PROTOBUF_NODISCARD std::string* release_string_value(); + void set_allocated_string_value(std::string* string_value); + private: + const std::string& _internal_string_value() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_string_value(const std::string& value); + std::string* _internal_mutable_string_value(); + public: + + void clear_kind(); + KindCase kind_case() const; + // @@protoc_insertion_point(class_scope:tensorflow.EntryValue) + private: + class _Internal; + void set_has_double_value(); + void set_has_string_value(); + + inline bool has_kind() const; + inline void clear_has_kind(); + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + union KindUnion { + constexpr KindUnion() : _constinit_{} {} + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_; + double double_value_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr string_value_; + } kind_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + uint32_t _oneof_case_[1]; + + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2ftest_5flog_2eproto; +}; +// ------------------------------------------------------------------- + +class MetricEntry final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.MetricEntry) */ { + public: + inline MetricEntry() : MetricEntry(nullptr) {} + ~MetricEntry() override; + explicit PROTOBUF_CONSTEXPR MetricEntry(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + MetricEntry(const MetricEntry& from); + MetricEntry(MetricEntry&& from) noexcept + : MetricEntry() { + *this = ::std::move(from); + } + + inline MetricEntry& operator=(const MetricEntry& from) { + CopyFrom(from); + return *this; + } + inline MetricEntry& operator=(MetricEntry&& from) noexcept { + if (this == &from) return 
*this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const MetricEntry& default_instance() { + return *internal_default_instance(); + } + static inline const MetricEntry* internal_default_instance() { + return reinterpret_cast( + &_MetricEntry_default_instance_); + } + static constexpr int kIndexInFileMessages = + 1; + + friend void swap(MetricEntry& a, MetricEntry& b) { + a.Swap(&b); + } + inline void Swap(MetricEntry* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(MetricEntry* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + MetricEntry* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const MetricEntry& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const MetricEntry& from) { + MetricEntry::MergeImpl(*this, from); + } + private: + static 
void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(MetricEntry* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.MetricEntry"; + } + protected: + explicit MetricEntry(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kNameFieldNumber = 1, + kMinValueFieldNumber = 3, + kMaxValueFieldNumber = 4, + kValueFieldNumber = 2, + }; + // string name = 1; + void clear_name(); + const std::string& name() const; + template + void set_name(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_name(); + PROTOBUF_NODISCARD std::string* release_name(); + void set_allocated_name(std::string* name); + private: + const std::string& _internal_name() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_name(const std::string& value); + std::string* _internal_mutable_name(); + public: + + // .google.protobuf.DoubleValue min_value = 3; + bool has_min_value() const; + private: + bool _internal_has_min_value() const; + public: + void clear_min_value(); + const ::PROTOBUF_NAMESPACE_ID::DoubleValue& min_value() const; + PROTOBUF_NODISCARD ::PROTOBUF_NAMESPACE_ID::DoubleValue* release_min_value(); + ::PROTOBUF_NAMESPACE_ID::DoubleValue* mutable_min_value(); + void set_allocated_min_value(::PROTOBUF_NAMESPACE_ID::DoubleValue* min_value); + private: + const ::PROTOBUF_NAMESPACE_ID::DoubleValue& _internal_min_value() const; + ::PROTOBUF_NAMESPACE_ID::DoubleValue* _internal_mutable_min_value(); + public: + void unsafe_arena_set_allocated_min_value( + ::PROTOBUF_NAMESPACE_ID::DoubleValue* min_value); + ::PROTOBUF_NAMESPACE_ID::DoubleValue* unsafe_arena_release_min_value(); + + // .google.protobuf.DoubleValue max_value = 4; + bool has_max_value() const; + private: + bool _internal_has_max_value() const; + public: + void clear_max_value(); + const ::PROTOBUF_NAMESPACE_ID::DoubleValue& max_value() const; + PROTOBUF_NODISCARD ::PROTOBUF_NAMESPACE_ID::DoubleValue* release_max_value(); + ::PROTOBUF_NAMESPACE_ID::DoubleValue* mutable_max_value(); + void set_allocated_max_value(::PROTOBUF_NAMESPACE_ID::DoubleValue* max_value); + private: + const ::PROTOBUF_NAMESPACE_ID::DoubleValue& _internal_max_value() const; + ::PROTOBUF_NAMESPACE_ID::DoubleValue* _internal_mutable_max_value(); + public: + void unsafe_arena_set_allocated_max_value( + ::PROTOBUF_NAMESPACE_ID::DoubleValue* max_value); + ::PROTOBUF_NAMESPACE_ID::DoubleValue* unsafe_arena_release_max_value(); + + // double value = 2; + void clear_value(); + double value() const; + void 
set_value(double value); + private: + double _internal_value() const; + void _internal_set_value(double value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.MetricEntry) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr name_; + ::PROTOBUF_NAMESPACE_ID::DoubleValue* min_value_; + ::PROTOBUF_NAMESPACE_ID::DoubleValue* max_value_; + double value_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2ftest_5flog_2eproto; +}; +// ------------------------------------------------------------------- + +class BenchmarkEntry_ExtrasEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntry { +public: + typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntry SuperType; + BenchmarkEntry_ExtrasEntry_DoNotUse(); + explicit PROTOBUF_CONSTEXPR BenchmarkEntry_ExtrasEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + explicit BenchmarkEntry_ExtrasEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void MergeFrom(const BenchmarkEntry_ExtrasEntry_DoNotUse& other); + static const BenchmarkEntry_ExtrasEntry_DoNotUse* internal_default_instance() { return reinterpret_cast(&_BenchmarkEntry_ExtrasEntry_DoNotUse_default_instance_); } + static bool ValidateKey(std::string* s) { + return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "tensorflow.BenchmarkEntry.ExtrasEntry.key"); + } + static bool ValidateValue(void*) { return true; } + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + friend struct ::TableStruct_tsl_2fprotobuf_2ftest_5flog_2eproto; +}; + +// 
------------------------------------------------------------------- + +class BenchmarkEntry final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.BenchmarkEntry) */ { + public: + inline BenchmarkEntry() : BenchmarkEntry(nullptr) {} + ~BenchmarkEntry() override; + explicit PROTOBUF_CONSTEXPR BenchmarkEntry(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + BenchmarkEntry(const BenchmarkEntry& from); + BenchmarkEntry(BenchmarkEntry&& from) noexcept + : BenchmarkEntry() { + *this = ::std::move(from); + } + + inline BenchmarkEntry& operator=(const BenchmarkEntry& from) { + CopyFrom(from); + return *this; + } + inline BenchmarkEntry& operator=(BenchmarkEntry&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const BenchmarkEntry& default_instance() { + return *internal_default_instance(); + } + static inline const BenchmarkEntry* internal_default_instance() { + return reinterpret_cast( + &_BenchmarkEntry_default_instance_); + } + static constexpr int kIndexInFileMessages = + 3; + + friend void swap(BenchmarkEntry& a, BenchmarkEntry& b) { + a.Swap(&b); + } + inline void Swap(BenchmarkEntry* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == 
other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(BenchmarkEntry* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + BenchmarkEntry* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const BenchmarkEntry& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const BenchmarkEntry& from) { + BenchmarkEntry::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(BenchmarkEntry* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.BenchmarkEntry"; + } + protected: + explicit BenchmarkEntry(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + public: + + static const ClassData _class_data_; + const 
::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + + // accessors ------------------------------------------------------- + + enum : int { + kExtrasFieldNumber = 6, + kMetricsFieldNumber = 7, + kNameFieldNumber = 1, + kItersFieldNumber = 2, + kCpuTimeFieldNumber = 3, + kWallTimeFieldNumber = 4, + kThroughputFieldNumber = 5, + }; + // map extras = 6; + int extras_size() const; + private: + int _internal_extras_size() const; + public: + void clear_extras(); + private: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::tensorflow::EntryValue >& + _internal_extras() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::tensorflow::EntryValue >* + _internal_mutable_extras(); + public: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::tensorflow::EntryValue >& + extras() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::tensorflow::EntryValue >* + mutable_extras(); + + // repeated .tensorflow.MetricEntry metrics = 7; + int metrics_size() const; + private: + int _internal_metrics_size() const; + public: + void clear_metrics(); + ::tensorflow::MetricEntry* mutable_metrics(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::MetricEntry >* + mutable_metrics(); + private: + const ::tensorflow::MetricEntry& _internal_metrics(int index) const; + ::tensorflow::MetricEntry* _internal_add_metrics(); + public: + const ::tensorflow::MetricEntry& metrics(int index) const; + ::tensorflow::MetricEntry* add_metrics(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::MetricEntry >& + metrics() const; + + // string name = 1; + void clear_name(); + const std::string& name() const; + template + void set_name(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_name(); + PROTOBUF_NODISCARD std::string* release_name(); + void set_allocated_name(std::string* name); + private: + const std::string& _internal_name() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_name(const std::string& value); + std::string* _internal_mutable_name(); + public: + + // int64 iters = 2; + void clear_iters(); + int64_t iters() const; + void set_iters(int64_t value); + private: + int64_t _internal_iters() const; + void _internal_set_iters(int64_t value); + public: + + // double cpu_time = 3; + void clear_cpu_time(); + double cpu_time() const; + void set_cpu_time(double value); + private: + double _internal_cpu_time() const; + void _internal_set_cpu_time(double value); + public: + + // double wall_time = 4; + void clear_wall_time(); + double wall_time() const; + void set_wall_time(double value); + private: + double _internal_wall_time() const; + void _internal_set_wall_time(double value); + public: + + // double throughput = 5; + void clear_throughput(); + double throughput() const; + void set_throughput(double value); + private: + double _internal_throughput() const; + void _internal_set_throughput(double value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.BenchmarkEntry) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::MapField< + BenchmarkEntry_ExtrasEntry_DoNotUse, + std::string, ::tensorflow::EntryValue, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_MESSAGE> extras_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::MetricEntry > metrics_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr name_; + int64_t iters_; + double cpu_time_; + double wall_time_; + double throughput_; + mutable 
::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2ftest_5flog_2eproto; +}; +// ------------------------------------------------------------------- + +class BenchmarkEntries final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.BenchmarkEntries) */ { + public: + inline BenchmarkEntries() : BenchmarkEntries(nullptr) {} + ~BenchmarkEntries() override; + explicit PROTOBUF_CONSTEXPR BenchmarkEntries(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + BenchmarkEntries(const BenchmarkEntries& from); + BenchmarkEntries(BenchmarkEntries&& from) noexcept + : BenchmarkEntries() { + *this = ::std::move(from); + } + + inline BenchmarkEntries& operator=(const BenchmarkEntries& from) { + CopyFrom(from); + return *this; + } + inline BenchmarkEntries& operator=(BenchmarkEntries&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const BenchmarkEntries& default_instance() { + return *internal_default_instance(); + } + static inline const BenchmarkEntries* internal_default_instance() { + return reinterpret_cast( + &_BenchmarkEntries_default_instance_); + } + static constexpr int kIndexInFileMessages = + 4; + + friend void swap(BenchmarkEntries& a, BenchmarkEntries& b) { + a.Swap(&b); + } + inline void Swap(BenchmarkEntries* other) { + if 
(other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(BenchmarkEntries* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + BenchmarkEntries* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const BenchmarkEntries& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const BenchmarkEntries& from) { + BenchmarkEntries::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(BenchmarkEntries* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.BenchmarkEntries"; + } + protected: + explicit 
BenchmarkEntries(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kEntryFieldNumber = 1, + }; + // repeated .tensorflow.BenchmarkEntry entry = 1; + int entry_size() const; + private: + int _internal_entry_size() const; + public: + void clear_entry(); + ::tensorflow::BenchmarkEntry* mutable_entry(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::BenchmarkEntry >* + mutable_entry(); + private: + const ::tensorflow::BenchmarkEntry& _internal_entry(int index) const; + ::tensorflow::BenchmarkEntry* _internal_add_entry(); + public: + const ::tensorflow::BenchmarkEntry& entry(int index) const; + ::tensorflow::BenchmarkEntry* add_entry(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::BenchmarkEntry >& + entry() const; + + // @@protoc_insertion_point(class_scope:tensorflow.BenchmarkEntries) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::BenchmarkEntry > entry_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2ftest_5flog_2eproto; +}; +// ------------------------------------------------------------------- + +class BuildConfiguration final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.BuildConfiguration) */ { + public: + inline BuildConfiguration() : BuildConfiguration(nullptr) {} + 
~BuildConfiguration() override; + explicit PROTOBUF_CONSTEXPR BuildConfiguration(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + BuildConfiguration(const BuildConfiguration& from); + BuildConfiguration(BuildConfiguration&& from) noexcept + : BuildConfiguration() { + *this = ::std::move(from); + } + + inline BuildConfiguration& operator=(const BuildConfiguration& from) { + CopyFrom(from); + return *this; + } + inline BuildConfiguration& operator=(BuildConfiguration&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const BuildConfiguration& default_instance() { + return *internal_default_instance(); + } + static inline const BuildConfiguration* internal_default_instance() { + return reinterpret_cast( + &_BuildConfiguration_default_instance_); + } + static constexpr int kIndexInFileMessages = + 5; + + friend void swap(BuildConfiguration& a, BuildConfiguration& b) { + a.Swap(&b); + } + inline void Swap(BuildConfiguration* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(BuildConfiguration* 
other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + BuildConfiguration* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const BuildConfiguration& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const BuildConfiguration& from) { + BuildConfiguration::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(BuildConfiguration* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.BuildConfiguration"; + } + protected: + explicit BuildConfiguration(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors 
------------------------------------------------------- + + enum : int { + kCcFlagsFieldNumber = 2, + kOptsFieldNumber = 3, + kModeFieldNumber = 1, + }; + // repeated string cc_flags = 2; + int cc_flags_size() const; + private: + int _internal_cc_flags_size() const; + public: + void clear_cc_flags(); + const std::string& cc_flags(int index) const; + std::string* mutable_cc_flags(int index); + void set_cc_flags(int index, const std::string& value); + void set_cc_flags(int index, std::string&& value); + void set_cc_flags(int index, const char* value); + void set_cc_flags(int index, const char* value, size_t size); + std::string* add_cc_flags(); + void add_cc_flags(const std::string& value); + void add_cc_flags(std::string&& value); + void add_cc_flags(const char* value); + void add_cc_flags(const char* value, size_t size); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& cc_flags() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* mutable_cc_flags(); + private: + const std::string& _internal_cc_flags(int index) const; + std::string* _internal_add_cc_flags(); + public: + + // repeated string opts = 3; + int opts_size() const; + private: + int _internal_opts_size() const; + public: + void clear_opts(); + const std::string& opts(int index) const; + std::string* mutable_opts(int index); + void set_opts(int index, const std::string& value); + void set_opts(int index, std::string&& value); + void set_opts(int index, const char* value); + void set_opts(int index, const char* value, size_t size); + std::string* add_opts(); + void add_opts(const std::string& value); + void add_opts(std::string&& value); + void add_opts(const char* value); + void add_opts(const char* value, size_t size); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& opts() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* mutable_opts(); + private: + const std::string& _internal_opts(int index) const; + std::string* _internal_add_opts(); + public: + + // string mode = 1; + void clear_mode(); + const 
std::string& mode() const; + template + void set_mode(ArgT0&& arg0, ArgT... args); + std::string* mutable_mode(); + PROTOBUF_NODISCARD std::string* release_mode(); + void set_allocated_mode(std::string* mode); + private: + const std::string& _internal_mode() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_mode(const std::string& value); + std::string* _internal_mutable_mode(); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.BuildConfiguration) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField cc_flags_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField opts_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr mode_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2ftest_5flog_2eproto; +}; +// ------------------------------------------------------------------- + +class CommitId final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.CommitId) */ { + public: + inline CommitId() : CommitId(nullptr) {} + ~CommitId() override; + explicit PROTOBUF_CONSTEXPR CommitId(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + CommitId(const CommitId& from); + CommitId(CommitId&& from) noexcept + : CommitId() { + *this = ::std::move(from); + } + + inline CommitId& operator=(const CommitId& from) { + CopyFrom(from); + return *this; + } + inline CommitId& operator=(CommitId&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const 
::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const CommitId& default_instance() { + return *internal_default_instance(); + } + enum KindCase { + kChangelist = 1, + kHash = 2, + KIND_NOT_SET = 0, + }; + + static inline const CommitId* internal_default_instance() { + return reinterpret_cast( + &_CommitId_default_instance_); + } + static constexpr int kIndexInFileMessages = + 6; + + friend void swap(CommitId& a, CommitId& b) { + a.Swap(&b); + } + inline void Swap(CommitId* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(CommitId* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + CommitId* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const CommitId& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const CommitId& from) { + CommitId::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t 
ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(CommitId* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.CommitId"; + } + protected: + explicit CommitId(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kSnapshotFieldNumber = 3, + kPendingChangelistFieldNumber = 4, + kChangelistFieldNumber = 1, + kHashFieldNumber = 2, + }; + // string snapshot = 3; + void clear_snapshot(); + const std::string& snapshot() const; + template + void set_snapshot(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_snapshot(); + PROTOBUF_NODISCARD std::string* release_snapshot(); + void set_allocated_snapshot(std::string* snapshot); + private: + const std::string& _internal_snapshot() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_snapshot(const std::string& value); + std::string* _internal_mutable_snapshot(); + public: + + // int64 pending_changelist = 4; + void clear_pending_changelist(); + int64_t pending_changelist() const; + void set_pending_changelist(int64_t value); + private: + int64_t _internal_pending_changelist() const; + void _internal_set_pending_changelist(int64_t value); + public: + + // int64 changelist = 1; + bool has_changelist() const; + private: + bool _internal_has_changelist() const; + public: + void clear_changelist(); + int64_t changelist() const; + void set_changelist(int64_t value); + private: + int64_t _internal_changelist() const; + void _internal_set_changelist(int64_t value); + public: + + // string hash = 2; + bool has_hash() const; + private: + bool _internal_has_hash() const; + public: + void clear_hash(); + const std::string& hash() const; + template + void set_hash(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_hash(); + PROTOBUF_NODISCARD std::string* release_hash(); + void set_allocated_hash(std::string* hash); + private: + const std::string& _internal_hash() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_hash(const std::string& value); + std::string* _internal_mutable_hash(); + public: + + void clear_kind(); + KindCase kind_case() const; + // @@protoc_insertion_point(class_scope:tensorflow.CommitId) + private: + class _Internal; + void set_has_changelist(); + void set_has_hash(); + + inline bool has_kind() const; + inline void clear_has_kind(); + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr snapshot_; + int64_t pending_changelist_; + union KindUnion { + constexpr KindUnion() : _constinit_{} {} + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_; + int64_t changelist_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr hash_; + } kind_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + uint32_t _oneof_case_[1]; + + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2ftest_5flog_2eproto; +}; +// ------------------------------------------------------------------- + +class CPUInfo_CacheSizeEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntry { +public: + typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntry SuperType; + CPUInfo_CacheSizeEntry_DoNotUse(); + explicit PROTOBUF_CONSTEXPR CPUInfo_CacheSizeEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + explicit CPUInfo_CacheSizeEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void MergeFrom(const CPUInfo_CacheSizeEntry_DoNotUse& other); + static const CPUInfo_CacheSizeEntry_DoNotUse* internal_default_instance() { return reinterpret_cast(&_CPUInfo_CacheSizeEntry_DoNotUse_default_instance_); } + static 
bool ValidateKey(std::string* s) { + return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "tensorflow.CPUInfo.CacheSizeEntry.key"); + } + static bool ValidateValue(void*) { return true; } + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + friend struct ::TableStruct_tsl_2fprotobuf_2ftest_5flog_2eproto; +}; + +// ------------------------------------------------------------------- + +class CPUInfo final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.CPUInfo) */ { + public: + inline CPUInfo() : CPUInfo(nullptr) {} + ~CPUInfo() override; + explicit PROTOBUF_CONSTEXPR CPUInfo(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + CPUInfo(const CPUInfo& from); + CPUInfo(CPUInfo&& from) noexcept + : CPUInfo() { + *this = ::std::move(from); + } + + inline CPUInfo& operator=(const CPUInfo& from) { + CopyFrom(from); + return *this; + } + inline CPUInfo& operator=(CPUInfo&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const CPUInfo& default_instance() { + return *internal_default_instance(); + } + static inline const CPUInfo* internal_default_instance() { + return reinterpret_cast( + &_CPUInfo_default_instance_); + } + static constexpr int 
kIndexInFileMessages = + 8; + + friend void swap(CPUInfo& a, CPUInfo& b) { + a.Swap(&b); + } + inline void Swap(CPUInfo* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(CPUInfo* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + CPUInfo* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const CPUInfo& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const CPUInfo& from) { + CPUInfo::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(CPUInfo* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() 
{ + return "tensorflow.CPUInfo"; + } + protected: + explicit CPUInfo(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + + // accessors ------------------------------------------------------- + + enum : int { + kCacheSizeFieldNumber = 6, + kCpuInfoFieldNumber = 4, + kCpuGovernorFieldNumber = 5, + kNumCoresFieldNumber = 1, + kNumCoresAllowedFieldNumber = 2, + kMhzPerCpuFieldNumber = 3, + }; + // map cache_size = 6; + int cache_size_size() const; + private: + int _internal_cache_size_size() const; + public: + void clear_cache_size(); + private: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, int64_t >& + _internal_cache_size() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, int64_t >* + _internal_mutable_cache_size(); + public: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, int64_t >& + cache_size() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, int64_t >* + mutable_cache_size(); + + // string cpu_info = 4; + void clear_cpu_info(); + const std::string& cpu_info() const; + template + void set_cpu_info(ArgT0&& arg0, ArgT... args); + std::string* mutable_cpu_info(); + PROTOBUF_NODISCARD std::string* release_cpu_info(); + void set_allocated_cpu_info(std::string* cpu_info); + private: + const std::string& _internal_cpu_info() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_cpu_info(const std::string& value); + std::string* _internal_mutable_cpu_info(); + public: + + // string cpu_governor = 5; + void clear_cpu_governor(); + const std::string& cpu_governor() const; + template + void set_cpu_governor(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_cpu_governor(); + PROTOBUF_NODISCARD std::string* release_cpu_governor(); + void set_allocated_cpu_governor(std::string* cpu_governor); + private: + const std::string& _internal_cpu_governor() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_cpu_governor(const std::string& value); + std::string* _internal_mutable_cpu_governor(); + public: + + // int64 num_cores = 1; + void clear_num_cores(); + int64_t num_cores() const; + void set_num_cores(int64_t value); + private: + int64_t _internal_num_cores() const; + void _internal_set_num_cores(int64_t value); + public: + + // int64 num_cores_allowed = 2; + void clear_num_cores_allowed(); + int64_t num_cores_allowed() const; + void set_num_cores_allowed(int64_t value); + private: + int64_t _internal_num_cores_allowed() const; + void _internal_set_num_cores_allowed(int64_t value); + public: + + // double mhz_per_cpu = 3; + void clear_mhz_per_cpu(); + double mhz_per_cpu() const; + void set_mhz_per_cpu(double value); + private: + double _internal_mhz_per_cpu() const; + void _internal_set_mhz_per_cpu(double value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.CPUInfo) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::MapField< + CPUInfo_CacheSizeEntry_DoNotUse, + std::string, int64_t, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64> cache_size_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr cpu_info_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr cpu_governor_; + int64_t num_cores_; + int64_t num_cores_allowed_; + double mhz_per_cpu_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct 
::TableStruct_tsl_2fprotobuf_2ftest_5flog_2eproto; +}; +// ------------------------------------------------------------------- + +class MemoryInfo final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.MemoryInfo) */ { + public: + inline MemoryInfo() : MemoryInfo(nullptr) {} + ~MemoryInfo() override; + explicit PROTOBUF_CONSTEXPR MemoryInfo(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + MemoryInfo(const MemoryInfo& from); + MemoryInfo(MemoryInfo&& from) noexcept + : MemoryInfo() { + *this = ::std::move(from); + } + + inline MemoryInfo& operator=(const MemoryInfo& from) { + CopyFrom(from); + return *this; + } + inline MemoryInfo& operator=(MemoryInfo&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const MemoryInfo& default_instance() { + return *internal_default_instance(); + } + static inline const MemoryInfo* internal_default_instance() { + return reinterpret_cast( + &_MemoryInfo_default_instance_); + } + static constexpr int kIndexInFileMessages = + 9; + + friend void swap(MemoryInfo& a, MemoryInfo& b) { + a.Swap(&b); + } + inline void Swap(MemoryInfo* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { 
+ #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(MemoryInfo* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + MemoryInfo* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const MemoryInfo& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const MemoryInfo& from) { + MemoryInfo::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(MemoryInfo* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.MemoryInfo"; + } + protected: + explicit MemoryInfo(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // 
nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kTotalFieldNumber = 1, + kAvailableFieldNumber = 2, + }; + // int64 total = 1; + void clear_total(); + int64_t total() const; + void set_total(int64_t value); + private: + int64_t _internal_total() const; + void _internal_set_total(int64_t value); + public: + + // int64 available = 2; + void clear_available(); + int64_t available() const; + void set_available(int64_t value); + private: + int64_t _internal_available() const; + void _internal_set_available(int64_t value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.MemoryInfo) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + int64_t total_; + int64_t available_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2ftest_5flog_2eproto; +}; +// ------------------------------------------------------------------- + +class GPUInfo final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.GPUInfo) */ { + public: + inline GPUInfo() : GPUInfo(nullptr) {} + ~GPUInfo() override; + explicit PROTOBUF_CONSTEXPR GPUInfo(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + GPUInfo(const GPUInfo& from); + GPUInfo(GPUInfo&& from) noexcept + : GPUInfo() { + *this = ::std::move(from); + } + + inline GPUInfo& operator=(const GPUInfo& from) { + CopyFrom(from); + return *this; + } + inline GPUInfo& operator=(GPUInfo&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else 
{ + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const GPUInfo& default_instance() { + return *internal_default_instance(); + } + static inline const GPUInfo* internal_default_instance() { + return reinterpret_cast( + &_GPUInfo_default_instance_); + } + static constexpr int kIndexInFileMessages = + 10; + + friend void swap(GPUInfo& a, GPUInfo& b) { + a.Swap(&b); + } + inline void Swap(GPUInfo* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(GPUInfo* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + GPUInfo* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const GPUInfo& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const GPUInfo& from) { + GPUInfo::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + 
const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(GPUInfo* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.GPUInfo"; + } + protected: + explicit GPUInfo(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kModelFieldNumber = 1, + kUuidFieldNumber = 2, + kBusIdFieldNumber = 3, + }; + // string model = 1; + void clear_model(); + const std::string& model() const; + template + void set_model(ArgT0&& arg0, ArgT... args); + std::string* mutable_model(); + PROTOBUF_NODISCARD std::string* release_model(); + void set_allocated_model(std::string* model); + private: + const std::string& _internal_model() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_model(const std::string& value); + std::string* _internal_mutable_model(); + public: + + // string uuid = 2; + void clear_uuid(); + const std::string& uuid() const; + template + void set_uuid(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_uuid(); + PROTOBUF_NODISCARD std::string* release_uuid(); + void set_allocated_uuid(std::string* uuid); + private: + const std::string& _internal_uuid() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_uuid(const std::string& value); + std::string* _internal_mutable_uuid(); + public: + + // string bus_id = 3; + void clear_bus_id(); + const std::string& bus_id() const; + template + void set_bus_id(ArgT0&& arg0, ArgT... args); + std::string* mutable_bus_id(); + PROTOBUF_NODISCARD std::string* release_bus_id(); + void set_allocated_bus_id(std::string* bus_id); + private: + const std::string& _internal_bus_id() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_bus_id(const std::string& value); + std::string* _internal_mutable_bus_id(); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.GPUInfo) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr model_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr uuid_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr bus_id_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2ftest_5flog_2eproto; +}; +// ------------------------------------------------------------------- + +class PlatformInfo final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.PlatformInfo) */ { + public: + inline PlatformInfo() : PlatformInfo(nullptr) {} + ~PlatformInfo() override; + explicit PROTOBUF_CONSTEXPR PlatformInfo(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + PlatformInfo(const PlatformInfo& from); + PlatformInfo(PlatformInfo&& from) noexcept + : PlatformInfo() { + *this = ::std::move(from); + } + + inline PlatformInfo& 
operator=(const PlatformInfo& from) { + CopyFrom(from); + return *this; + } + inline PlatformInfo& operator=(PlatformInfo&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const PlatformInfo& default_instance() { + return *internal_default_instance(); + } + static inline const PlatformInfo* internal_default_instance() { + return reinterpret_cast( + &_PlatformInfo_default_instance_); + } + static constexpr int kIndexInFileMessages = + 11; + + friend void swap(PlatformInfo& a, PlatformInfo& b) { + a.Swap(&b); + } + inline void Swap(PlatformInfo* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(PlatformInfo* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + PlatformInfo* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const 
PlatformInfo& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const PlatformInfo& from) { + PlatformInfo::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(PlatformInfo* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.PlatformInfo"; + } + protected: + explicit PlatformInfo(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kBitsFieldNumber = 1, + kLinkageFieldNumber = 2, + kMachineFieldNumber = 3, + kReleaseFieldNumber = 4, + kSystemFieldNumber = 5, + kVersionFieldNumber = 6, + }; + // string bits = 1; + void clear_bits(); + const std::string& bits() const; + template + void set_bits(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_bits(); + PROTOBUF_NODISCARD std::string* release_bits(); + void set_allocated_bits(std::string* bits); + private: + const std::string& _internal_bits() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_bits(const std::string& value); + std::string* _internal_mutable_bits(); + public: + + // string linkage = 2; + void clear_linkage(); + const std::string& linkage() const; + template + void set_linkage(ArgT0&& arg0, ArgT... args); + std::string* mutable_linkage(); + PROTOBUF_NODISCARD std::string* release_linkage(); + void set_allocated_linkage(std::string* linkage); + private: + const std::string& _internal_linkage() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_linkage(const std::string& value); + std::string* _internal_mutable_linkage(); + public: + + // string machine = 3; + void clear_machine(); + const std::string& machine() const; + template + void set_machine(ArgT0&& arg0, ArgT... args); + std::string* mutable_machine(); + PROTOBUF_NODISCARD std::string* release_machine(); + void set_allocated_machine(std::string* machine); + private: + const std::string& _internal_machine() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_machine(const std::string& value); + std::string* _internal_mutable_machine(); + public: + + // string release = 4; + void clear_release(); + const std::string& release() const; + template + void set_release(ArgT0&& arg0, ArgT... args); + std::string* mutable_release(); + PROTOBUF_NODISCARD std::string* release_release(); + void set_allocated_release(std::string* release); + private: + const std::string& _internal_release() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_release(const std::string& value); + std::string* _internal_mutable_release(); + public: + + // string system = 5; + void clear_system(); + const std::string& system() const; + template + void set_system(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_system(); + PROTOBUF_NODISCARD std::string* release_system(); + void set_allocated_system(std::string* system); + private: + const std::string& _internal_system() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_system(const std::string& value); + std::string* _internal_mutable_system(); + public: + + // string version = 6; + void clear_version(); + const std::string& version() const; + template + void set_version(ArgT0&& arg0, ArgT... args); + std::string* mutable_version(); + PROTOBUF_NODISCARD std::string* release_version(); + void set_allocated_version(std::string* version); + private: + const std::string& _internal_version() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_version(const std::string& value); + std::string* _internal_mutable_version(); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.PlatformInfo) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr bits_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr linkage_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr machine_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr release_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr system_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr version_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2ftest_5flog_2eproto; +}; +// ------------------------------------------------------------------- + +class AvailableDeviceInfo final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.AvailableDeviceInfo) */ { + public: + inline AvailableDeviceInfo() : AvailableDeviceInfo(nullptr) {} + ~AvailableDeviceInfo() override; + explicit 
PROTOBUF_CONSTEXPR AvailableDeviceInfo(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + AvailableDeviceInfo(const AvailableDeviceInfo& from); + AvailableDeviceInfo(AvailableDeviceInfo&& from) noexcept + : AvailableDeviceInfo() { + *this = ::std::move(from); + } + + inline AvailableDeviceInfo& operator=(const AvailableDeviceInfo& from) { + CopyFrom(from); + return *this; + } + inline AvailableDeviceInfo& operator=(AvailableDeviceInfo&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const AvailableDeviceInfo& default_instance() { + return *internal_default_instance(); + } + static inline const AvailableDeviceInfo* internal_default_instance() { + return reinterpret_cast( + &_AvailableDeviceInfo_default_instance_); + } + static constexpr int kIndexInFileMessages = + 12; + + friend void swap(AvailableDeviceInfo& a, AvailableDeviceInfo& b) { + a.Swap(&b); + } + inline void Swap(AvailableDeviceInfo* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(AvailableDeviceInfo* other) { + if (other == 
this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + AvailableDeviceInfo* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const AvailableDeviceInfo& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const AvailableDeviceInfo& from) { + AvailableDeviceInfo::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(AvailableDeviceInfo* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.AvailableDeviceInfo"; + } + protected: + explicit AvailableDeviceInfo(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + 
enum : int { + kNameFieldNumber = 1, + kTypeFieldNumber = 2, + kPhysicalDescriptionFieldNumber = 4, + kMemoryLimitFieldNumber = 3, + }; + // string name = 1; + void clear_name(); + const std::string& name() const; + template + void set_name(ArgT0&& arg0, ArgT... args); + std::string* mutable_name(); + PROTOBUF_NODISCARD std::string* release_name(); + void set_allocated_name(std::string* name); + private: + const std::string& _internal_name() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_name(const std::string& value); + std::string* _internal_mutable_name(); + public: + + // string type = 2; + void clear_type(); + const std::string& type() const; + template + void set_type(ArgT0&& arg0, ArgT... args); + std::string* mutable_type(); + PROTOBUF_NODISCARD std::string* release_type(); + void set_allocated_type(std::string* type); + private: + const std::string& _internal_type() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_type(const std::string& value); + std::string* _internal_mutable_type(); + public: + + // string physical_description = 4; + void clear_physical_description(); + const std::string& physical_description() const; + template + void set_physical_description(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_physical_description(); + PROTOBUF_NODISCARD std::string* release_physical_description(); + void set_allocated_physical_description(std::string* physical_description); + private: + const std::string& _internal_physical_description() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_physical_description(const std::string& value); + std::string* _internal_mutable_physical_description(); + public: + + // int64 memory_limit = 3; + void clear_memory_limit(); + int64_t memory_limit() const; + void set_memory_limit(int64_t value); + private: + int64_t _internal_memory_limit() const; + void _internal_set_memory_limit(int64_t value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.AvailableDeviceInfo) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr name_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr type_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr physical_description_; + int64_t memory_limit_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2ftest_5flog_2eproto; +}; +// ------------------------------------------------------------------- + +class MachineConfiguration final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.MachineConfiguration) */ { + public: + inline MachineConfiguration() : MachineConfiguration(nullptr) {} + ~MachineConfiguration() override; + explicit PROTOBUF_CONSTEXPR MachineConfiguration(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + MachineConfiguration(const MachineConfiguration& from); + MachineConfiguration(MachineConfiguration&& from) noexcept + : MachineConfiguration() { + *this = ::std::move(from); + } + + 
inline MachineConfiguration& operator=(const MachineConfiguration& from) { + CopyFrom(from); + return *this; + } + inline MachineConfiguration& operator=(MachineConfiguration&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const MachineConfiguration& default_instance() { + return *internal_default_instance(); + } + static inline const MachineConfiguration* internal_default_instance() { + return reinterpret_cast( + &_MachineConfiguration_default_instance_); + } + static constexpr int kIndexInFileMessages = + 13; + + friend void swap(MachineConfiguration& a, MachineConfiguration& b) { + a.Swap(&b); + } + inline void Swap(MachineConfiguration* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(MachineConfiguration* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + MachineConfiguration* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + 
return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const MachineConfiguration& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const MachineConfiguration& from) { + MachineConfiguration::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(MachineConfiguration* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.MachineConfiguration"; + } + protected: + explicit MachineConfiguration(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kDeviceInfoFieldNumber = 4, + kAvailableDeviceInfoFieldNumber = 5, + kHostnameFieldNumber = 1, + kSerialIdentifierFieldNumber = 7, + kPlatformInfoFieldNumber = 2, + kCpuInfoFieldNumber = 3, + kMemoryInfoFieldNumber = 6, + }; + // repeated 
.google.protobuf.Any device_info = 4; + int device_info_size() const; + private: + int _internal_device_info_size() const; + public: + void clear_device_info(); + ::PROTOBUF_NAMESPACE_ID::Any* mutable_device_info(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::PROTOBUF_NAMESPACE_ID::Any >* + mutable_device_info(); + private: + const ::PROTOBUF_NAMESPACE_ID::Any& _internal_device_info(int index) const; + ::PROTOBUF_NAMESPACE_ID::Any* _internal_add_device_info(); + public: + const ::PROTOBUF_NAMESPACE_ID::Any& device_info(int index) const; + ::PROTOBUF_NAMESPACE_ID::Any* add_device_info(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::PROTOBUF_NAMESPACE_ID::Any >& + device_info() const; + + // repeated .tensorflow.AvailableDeviceInfo available_device_info = 5; + int available_device_info_size() const; + private: + int _internal_available_device_info_size() const; + public: + void clear_available_device_info(); + ::tensorflow::AvailableDeviceInfo* mutable_available_device_info(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::AvailableDeviceInfo >* + mutable_available_device_info(); + private: + const ::tensorflow::AvailableDeviceInfo& _internal_available_device_info(int index) const; + ::tensorflow::AvailableDeviceInfo* _internal_add_available_device_info(); + public: + const ::tensorflow::AvailableDeviceInfo& available_device_info(int index) const; + ::tensorflow::AvailableDeviceInfo* add_available_device_info(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::AvailableDeviceInfo >& + available_device_info() const; + + // string hostname = 1; + void clear_hostname(); + const std::string& hostname() const; + template + void set_hostname(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_hostname(); + PROTOBUF_NODISCARD std::string* release_hostname(); + void set_allocated_hostname(std::string* hostname); + private: + const std::string& _internal_hostname() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_hostname(const std::string& value); + std::string* _internal_mutable_hostname(); + public: + + // string serial_identifier = 7; + void clear_serial_identifier(); + const std::string& serial_identifier() const; + template + void set_serial_identifier(ArgT0&& arg0, ArgT... args); + std::string* mutable_serial_identifier(); + PROTOBUF_NODISCARD std::string* release_serial_identifier(); + void set_allocated_serial_identifier(std::string* serial_identifier); + private: + const std::string& _internal_serial_identifier() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_serial_identifier(const std::string& value); + std::string* _internal_mutable_serial_identifier(); + public: + + // .tensorflow.PlatformInfo platform_info = 2; + bool has_platform_info() const; + private: + bool _internal_has_platform_info() const; + public: + void clear_platform_info(); + const ::tensorflow::PlatformInfo& platform_info() const; + PROTOBUF_NODISCARD ::tensorflow::PlatformInfo* release_platform_info(); + ::tensorflow::PlatformInfo* mutable_platform_info(); + void set_allocated_platform_info(::tensorflow::PlatformInfo* platform_info); + private: + const ::tensorflow::PlatformInfo& _internal_platform_info() const; + ::tensorflow::PlatformInfo* _internal_mutable_platform_info(); + public: + void unsafe_arena_set_allocated_platform_info( + ::tensorflow::PlatformInfo* platform_info); + ::tensorflow::PlatformInfo* unsafe_arena_release_platform_info(); + + // .tensorflow.CPUInfo cpu_info = 3; + bool has_cpu_info() const; + private: + bool _internal_has_cpu_info() const; + public: + void clear_cpu_info(); + const ::tensorflow::CPUInfo& cpu_info() const; + PROTOBUF_NODISCARD ::tensorflow::CPUInfo* release_cpu_info(); + 
::tensorflow::CPUInfo* mutable_cpu_info(); + void set_allocated_cpu_info(::tensorflow::CPUInfo* cpu_info); + private: + const ::tensorflow::CPUInfo& _internal_cpu_info() const; + ::tensorflow::CPUInfo* _internal_mutable_cpu_info(); + public: + void unsafe_arena_set_allocated_cpu_info( + ::tensorflow::CPUInfo* cpu_info); + ::tensorflow::CPUInfo* unsafe_arena_release_cpu_info(); + + // .tensorflow.MemoryInfo memory_info = 6; + bool has_memory_info() const; + private: + bool _internal_has_memory_info() const; + public: + void clear_memory_info(); + const ::tensorflow::MemoryInfo& memory_info() const; + PROTOBUF_NODISCARD ::tensorflow::MemoryInfo* release_memory_info(); + ::tensorflow::MemoryInfo* mutable_memory_info(); + void set_allocated_memory_info(::tensorflow::MemoryInfo* memory_info); + private: + const ::tensorflow::MemoryInfo& _internal_memory_info() const; + ::tensorflow::MemoryInfo* _internal_mutable_memory_info(); + public: + void unsafe_arena_set_allocated_memory_info( + ::tensorflow::MemoryInfo* memory_info); + ::tensorflow::MemoryInfo* unsafe_arena_release_memory_info(); + + // @@protoc_insertion_point(class_scope:tensorflow.MachineConfiguration) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::PROTOBUF_NAMESPACE_ID::Any > device_info_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::AvailableDeviceInfo > available_device_info_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr hostname_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr serial_identifier_; + ::tensorflow::PlatformInfo* platform_info_; + ::tensorflow::CPUInfo* cpu_info_; + ::tensorflow::MemoryInfo* memory_info_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct 
::TableStruct_tsl_2fprotobuf_2ftest_5flog_2eproto; +}; +// ------------------------------------------------------------------- + +class RunConfiguration_EnvVarsEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntry { +public: + typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntry SuperType; + RunConfiguration_EnvVarsEntry_DoNotUse(); + explicit PROTOBUF_CONSTEXPR RunConfiguration_EnvVarsEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + explicit RunConfiguration_EnvVarsEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void MergeFrom(const RunConfiguration_EnvVarsEntry_DoNotUse& other); + static const RunConfiguration_EnvVarsEntry_DoNotUse* internal_default_instance() { return reinterpret_cast(&_RunConfiguration_EnvVarsEntry_DoNotUse_default_instance_); } + static bool ValidateKey(std::string* s) { + return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "tensorflow.RunConfiguration.EnvVarsEntry.key"); + } + static bool ValidateValue(std::string* s) { + return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "tensorflow.RunConfiguration.EnvVarsEntry.value"); + } + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + friend struct ::TableStruct_tsl_2fprotobuf_2ftest_5flog_2eproto; +}; + +// ------------------------------------------------------------------- + +class RunConfiguration final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.RunConfiguration) */ { + public: + inline RunConfiguration() : RunConfiguration(nullptr) {} + ~RunConfiguration() override; + explicit PROTOBUF_CONSTEXPR RunConfiguration(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + RunConfiguration(const 
RunConfiguration& from); + RunConfiguration(RunConfiguration&& from) noexcept + : RunConfiguration() { + *this = ::std::move(from); + } + + inline RunConfiguration& operator=(const RunConfiguration& from) { + CopyFrom(from); + return *this; + } + inline RunConfiguration& operator=(RunConfiguration&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const RunConfiguration& default_instance() { + return *internal_default_instance(); + } + static inline const RunConfiguration* internal_default_instance() { + return reinterpret_cast( + &_RunConfiguration_default_instance_); + } + static constexpr int kIndexInFileMessages = + 15; + + friend void swap(RunConfiguration& a, RunConfiguration& b) { + a.Swap(&b); + } + inline void Swap(RunConfiguration* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(RunConfiguration* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- 
+ + RunConfiguration* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const RunConfiguration& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const RunConfiguration& from) { + RunConfiguration::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(RunConfiguration* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.RunConfiguration"; + } + protected: + explicit RunConfiguration(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + + // accessors ------------------------------------------------------- + + enum : int { + kArgumentFieldNumber = 1, + kEnvVarsFieldNumber = 2, + }; + // repeated string argument = 1; + int argument_size() const; + private: 
+ int _internal_argument_size() const; + public: + void clear_argument(); + const std::string& argument(int index) const; + std::string* mutable_argument(int index); + void set_argument(int index, const std::string& value); + void set_argument(int index, std::string&& value); + void set_argument(int index, const char* value); + void set_argument(int index, const char* value, size_t size); + std::string* add_argument(); + void add_argument(const std::string& value); + void add_argument(std::string&& value); + void add_argument(const char* value); + void add_argument(const char* value, size_t size); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& argument() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* mutable_argument(); + private: + const std::string& _internal_argument(int index) const; + std::string* _internal_add_argument(); + public: + + // map env_vars = 2; + int env_vars_size() const; + private: + int _internal_env_vars_size() const; + public: + void clear_env_vars(); + private: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >& + _internal_env_vars() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >* + _internal_mutable_env_vars(); + public: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >& + env_vars() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >* + mutable_env_vars(); + + // @@protoc_insertion_point(class_scope:tensorflow.RunConfiguration) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField argument_; + ::PROTOBUF_NAMESPACE_ID::internal::MapField< + RunConfiguration_EnvVarsEntry_DoNotUse, + std::string, std::string, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING> env_vars_; + mutable 
::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2ftest_5flog_2eproto; +}; +// ------------------------------------------------------------------- + +class TestResults final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:tensorflow.TestResults) */ { + public: + inline TestResults() : TestResults(nullptr) {} + ~TestResults() override; + explicit PROTOBUF_CONSTEXPR TestResults(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + TestResults(const TestResults& from); + TestResults(TestResults&& from) noexcept + : TestResults() { + *this = ::std::move(from); + } + + inline TestResults& operator=(const TestResults& from) { + CopyFrom(from); + return *this; + } + inline TestResults& operator=(TestResults&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const TestResults& default_instance() { + return *internal_default_instance(); + } + static inline const TestResults* internal_default_instance() { + return reinterpret_cast( + &_TestResults_default_instance_); + } + static constexpr int kIndexInFileMessages = + 16; + + friend void swap(TestResults& a, TestResults& b) { + a.Swap(&b); + } + inline void Swap(TestResults* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + 
GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(TestResults* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + TestResults* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const TestResults& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom( const TestResults& from) { + TestResults::MergeImpl(*this, from); + } + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _impl_._cached_size_.Get(); } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(TestResults* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "tensorflow.TestResults"; + } + protected: + explicit TestResults(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const 
::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + typedef TestResults_BenchmarkType BenchmarkType; + static constexpr BenchmarkType UNKNOWN = + TestResults_BenchmarkType_UNKNOWN; + static constexpr BenchmarkType CPP_MICROBENCHMARK = + TestResults_BenchmarkType_CPP_MICROBENCHMARK; + static constexpr BenchmarkType PYTHON_BENCHMARK = + TestResults_BenchmarkType_PYTHON_BENCHMARK; + static constexpr BenchmarkType ANDROID_BENCHMARK = + TestResults_BenchmarkType_ANDROID_BENCHMARK; + static constexpr BenchmarkType EDGE_BENCHMARK = + TestResults_BenchmarkType_EDGE_BENCHMARK; + static constexpr BenchmarkType IOS_BENCHMARK = + TestResults_BenchmarkType_IOS_BENCHMARK; + static inline bool BenchmarkType_IsValid(int value) { + return TestResults_BenchmarkType_IsValid(value); + } + static constexpr BenchmarkType BenchmarkType_MIN = + TestResults_BenchmarkType_BenchmarkType_MIN; + static constexpr BenchmarkType BenchmarkType_MAX = + TestResults_BenchmarkType_BenchmarkType_MAX; + static constexpr int BenchmarkType_ARRAYSIZE = + TestResults_BenchmarkType_BenchmarkType_ARRAYSIZE; + static inline const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* + BenchmarkType_descriptor() { + return TestResults_BenchmarkType_descriptor(); + } + template + static inline const std::string& BenchmarkType_Name(T enum_t_value) { + static_assert(::std::is_same::value || + ::std::is_integral::value, + "Incorrect type passed to function BenchmarkType_Name."); + return TestResults_BenchmarkType_Name(enum_t_value); + } + static inline bool BenchmarkType_Parse(::PROTOBUF_NAMESPACE_ID::ConstStringParam name, + BenchmarkType* value) { + return TestResults_BenchmarkType_Parse(name, value); + } + + // accessors ------------------------------------------------------- + + enum : int { + kTargetFieldNumber = 1, + kNameFieldNumber = 9, + kRunModeFieldNumber = 
11, + kTfVersionFieldNumber = 12, + kEntriesFieldNumber = 2, + kBuildConfigurationFieldNumber = 3, + kCommitIdFieldNumber = 4, + kMachineConfigurationFieldNumber = 7, + kRunConfigurationFieldNumber = 8, + kStartTimeFieldNumber = 5, + kRunTimeFieldNumber = 6, + kBenchmarkTypeFieldNumber = 10, + }; + // string target = 1; + void clear_target(); + const std::string& target() const; + template + void set_target(ArgT0&& arg0, ArgT... args); + std::string* mutable_target(); + PROTOBUF_NODISCARD std::string* release_target(); + void set_allocated_target(std::string* target); + private: + const std::string& _internal_target() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_target(const std::string& value); + std::string* _internal_mutable_target(); + public: + + // string name = 9; + void clear_name(); + const std::string& name() const; + template + void set_name(ArgT0&& arg0, ArgT... args); + std::string* mutable_name(); + PROTOBUF_NODISCARD std::string* release_name(); + void set_allocated_name(std::string* name); + private: + const std::string& _internal_name() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_name(const std::string& value); + std::string* _internal_mutable_name(); + public: + + // string run_mode = 11; + void clear_run_mode(); + const std::string& run_mode() const; + template + void set_run_mode(ArgT0&& arg0, ArgT... args); + std::string* mutable_run_mode(); + PROTOBUF_NODISCARD std::string* release_run_mode(); + void set_allocated_run_mode(std::string* run_mode); + private: + const std::string& _internal_run_mode() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_run_mode(const std::string& value); + std::string* _internal_mutable_run_mode(); + public: + + // string tf_version = 12; + void clear_tf_version(); + const std::string& tf_version() const; + template + void set_tf_version(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_tf_version(); + PROTOBUF_NODISCARD std::string* release_tf_version(); + void set_allocated_tf_version(std::string* tf_version); + private: + const std::string& _internal_tf_version() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_tf_version(const std::string& value); + std::string* _internal_mutable_tf_version(); + public: + + // .tensorflow.BenchmarkEntries entries = 2; + bool has_entries() const; + private: + bool _internal_has_entries() const; + public: + void clear_entries(); + const ::tensorflow::BenchmarkEntries& entries() const; + PROTOBUF_NODISCARD ::tensorflow::BenchmarkEntries* release_entries(); + ::tensorflow::BenchmarkEntries* mutable_entries(); + void set_allocated_entries(::tensorflow::BenchmarkEntries* entries); + private: + const ::tensorflow::BenchmarkEntries& _internal_entries() const; + ::tensorflow::BenchmarkEntries* _internal_mutable_entries(); + public: + void unsafe_arena_set_allocated_entries( + ::tensorflow::BenchmarkEntries* entries); + ::tensorflow::BenchmarkEntries* unsafe_arena_release_entries(); + + // .tensorflow.BuildConfiguration build_configuration = 3; + bool has_build_configuration() const; + private: + bool _internal_has_build_configuration() const; + public: + void clear_build_configuration(); + const ::tensorflow::BuildConfiguration& build_configuration() const; + PROTOBUF_NODISCARD ::tensorflow::BuildConfiguration* release_build_configuration(); + ::tensorflow::BuildConfiguration* mutable_build_configuration(); + void set_allocated_build_configuration(::tensorflow::BuildConfiguration* build_configuration); + private: + const ::tensorflow::BuildConfiguration& _internal_build_configuration() const; + ::tensorflow::BuildConfiguration* _internal_mutable_build_configuration(); + public: + void unsafe_arena_set_allocated_build_configuration( + ::tensorflow::BuildConfiguration* build_configuration); + ::tensorflow::BuildConfiguration* unsafe_arena_release_build_configuration(); + + // 
.tensorflow.CommitId commit_id = 4; + bool has_commit_id() const; + private: + bool _internal_has_commit_id() const; + public: + void clear_commit_id(); + const ::tensorflow::CommitId& commit_id() const; + PROTOBUF_NODISCARD ::tensorflow::CommitId* release_commit_id(); + ::tensorflow::CommitId* mutable_commit_id(); + void set_allocated_commit_id(::tensorflow::CommitId* commit_id); + private: + const ::tensorflow::CommitId& _internal_commit_id() const; + ::tensorflow::CommitId* _internal_mutable_commit_id(); + public: + void unsafe_arena_set_allocated_commit_id( + ::tensorflow::CommitId* commit_id); + ::tensorflow::CommitId* unsafe_arena_release_commit_id(); + + // .tensorflow.MachineConfiguration machine_configuration = 7; + bool has_machine_configuration() const; + private: + bool _internal_has_machine_configuration() const; + public: + void clear_machine_configuration(); + const ::tensorflow::MachineConfiguration& machine_configuration() const; + PROTOBUF_NODISCARD ::tensorflow::MachineConfiguration* release_machine_configuration(); + ::tensorflow::MachineConfiguration* mutable_machine_configuration(); + void set_allocated_machine_configuration(::tensorflow::MachineConfiguration* machine_configuration); + private: + const ::tensorflow::MachineConfiguration& _internal_machine_configuration() const; + ::tensorflow::MachineConfiguration* _internal_mutable_machine_configuration(); + public: + void unsafe_arena_set_allocated_machine_configuration( + ::tensorflow::MachineConfiguration* machine_configuration); + ::tensorflow::MachineConfiguration* unsafe_arena_release_machine_configuration(); + + // .tensorflow.RunConfiguration run_configuration = 8; + bool has_run_configuration() const; + private: + bool _internal_has_run_configuration() const; + public: + void clear_run_configuration(); + const ::tensorflow::RunConfiguration& run_configuration() const; + PROTOBUF_NODISCARD ::tensorflow::RunConfiguration* release_run_configuration(); + ::tensorflow::RunConfiguration* 
mutable_run_configuration(); + void set_allocated_run_configuration(::tensorflow::RunConfiguration* run_configuration); + private: + const ::tensorflow::RunConfiguration& _internal_run_configuration() const; + ::tensorflow::RunConfiguration* _internal_mutable_run_configuration(); + public: + void unsafe_arena_set_allocated_run_configuration( + ::tensorflow::RunConfiguration* run_configuration); + ::tensorflow::RunConfiguration* unsafe_arena_release_run_configuration(); + + // int64 start_time = 5; + void clear_start_time(); + int64_t start_time() const; + void set_start_time(int64_t value); + private: + int64_t _internal_start_time() const; + void _internal_set_start_time(int64_t value); + public: + + // double run_time = 6; + void clear_run_time(); + double run_time() const; + void set_run_time(double value); + private: + double _internal_run_time() const; + void _internal_set_run_time(double value); + public: + + // .tensorflow.TestResults.BenchmarkType benchmark_type = 10; + void clear_benchmark_type(); + ::tensorflow::TestResults_BenchmarkType benchmark_type() const; + void set_benchmark_type(::tensorflow::TestResults_BenchmarkType value); + private: + ::tensorflow::TestResults_BenchmarkType _internal_benchmark_type() const; + void _internal_set_benchmark_type(::tensorflow::TestResults_BenchmarkType value); + public: + + // @@protoc_insertion_point(class_scope:tensorflow.TestResults) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr target_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr name_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr run_mode_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr tf_version_; + ::tensorflow::BenchmarkEntries* entries_; + ::tensorflow::BuildConfiguration* build_configuration_; + ::tensorflow::CommitId* 
commit_id_; + ::tensorflow::MachineConfiguration* machine_configuration_; + ::tensorflow::RunConfiguration* run_configuration_; + int64_t start_time_; + double run_time_; + int benchmark_type_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union { Impl_ _impl_; }; + friend struct ::TableStruct_tsl_2fprotobuf_2ftest_5flog_2eproto; +}; +// =================================================================== + + +// =================================================================== + +#ifdef __GNUC__ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif // __GNUC__ +// EntryValue + +// double double_value = 1; +inline bool EntryValue::_internal_has_double_value() const { + return kind_case() == kDoubleValue; +} +inline bool EntryValue::has_double_value() const { + return _internal_has_double_value(); +} +inline void EntryValue::set_has_double_value() { + _impl_._oneof_case_[0] = kDoubleValue; +} +inline void EntryValue::clear_double_value() { + if (_internal_has_double_value()) { + _impl_.kind_.double_value_ = 0; + clear_has_kind(); + } +} +inline double EntryValue::_internal_double_value() const { + if (_internal_has_double_value()) { + return _impl_.kind_.double_value_; + } + return 0; +} +inline void EntryValue::_internal_set_double_value(double value) { + if (!_internal_has_double_value()) { + clear_kind(); + set_has_double_value(); + } + _impl_.kind_.double_value_ = value; +} +inline double EntryValue::double_value() const { + // @@protoc_insertion_point(field_get:tensorflow.EntryValue.double_value) + return _internal_double_value(); +} +inline void EntryValue::set_double_value(double value) { + _internal_set_double_value(value); + // @@protoc_insertion_point(field_set:tensorflow.EntryValue.double_value) +} + +// string string_value = 2; +inline bool EntryValue::_internal_has_string_value() const { + return kind_case() == kStringValue; +} +inline bool EntryValue::has_string_value() const { + 
return _internal_has_string_value(); +} +inline void EntryValue::set_has_string_value() { + _impl_._oneof_case_[0] = kStringValue; +} +inline void EntryValue::clear_string_value() { + if (_internal_has_string_value()) { + _impl_.kind_.string_value_.Destroy(); + clear_has_kind(); + } +} +inline const std::string& EntryValue::string_value() const { + // @@protoc_insertion_point(field_get:tensorflow.EntryValue.string_value) + return _internal_string_value(); +} +template +inline void EntryValue::set_string_value(ArgT0&& arg0, ArgT... args) { + if (!_internal_has_string_value()) { + clear_kind(); + set_has_string_value(); + _impl_.kind_.string_value_.InitDefault(); + } + _impl_.kind_.string_value_.Set( static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.EntryValue.string_value) +} +inline std::string* EntryValue::mutable_string_value() { + std::string* _s = _internal_mutable_string_value(); + // @@protoc_insertion_point(field_mutable:tensorflow.EntryValue.string_value) + return _s; +} +inline const std::string& EntryValue::_internal_string_value() const { + if (_internal_has_string_value()) { + return _impl_.kind_.string_value_.Get(); + } + return ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(); +} +inline void EntryValue::_internal_set_string_value(const std::string& value) { + if (!_internal_has_string_value()) { + clear_kind(); + set_has_string_value(); + _impl_.kind_.string_value_.InitDefault(); + } + _impl_.kind_.string_value_.Set(value, GetArenaForAllocation()); +} +inline std::string* EntryValue::_internal_mutable_string_value() { + if (!_internal_has_string_value()) { + clear_kind(); + set_has_string_value(); + _impl_.kind_.string_value_.InitDefault(); + } + return _impl_.kind_.string_value_.Mutable( GetArenaForAllocation()); +} +inline std::string* EntryValue::release_string_value() { + // @@protoc_insertion_point(field_release:tensorflow.EntryValue.string_value) + if 
(_internal_has_string_value()) { + clear_has_kind(); + return _impl_.kind_.string_value_.Release(); + } else { + return nullptr; + } +} +inline void EntryValue::set_allocated_string_value(std::string* string_value) { + if (has_kind()) { + clear_kind(); + } + if (string_value != nullptr) { + set_has_string_value(); + _impl_.kind_.string_value_.InitAllocated(string_value, GetArenaForAllocation()); + } + // @@protoc_insertion_point(field_set_allocated:tensorflow.EntryValue.string_value) +} + +inline bool EntryValue::has_kind() const { + return kind_case() != KIND_NOT_SET; +} +inline void EntryValue::clear_has_kind() { + _impl_._oneof_case_[0] = KIND_NOT_SET; +} +inline EntryValue::KindCase EntryValue::kind_case() const { + return EntryValue::KindCase(_impl_._oneof_case_[0]); +} +// ------------------------------------------------------------------- + +// MetricEntry + +// string name = 1; +inline void MetricEntry::clear_name() { + _impl_.name_.ClearToEmpty(); +} +inline const std::string& MetricEntry::name() const { + // @@protoc_insertion_point(field_get:tensorflow.MetricEntry.name) + return _internal_name(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void MetricEntry::set_name(ArgT0&& arg0, ArgT... 
args) { + + _impl_.name_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.MetricEntry.name) +} +inline std::string* MetricEntry::mutable_name() { + std::string* _s = _internal_mutable_name(); + // @@protoc_insertion_point(field_mutable:tensorflow.MetricEntry.name) + return _s; +} +inline const std::string& MetricEntry::_internal_name() const { + return _impl_.name_.Get(); +} +inline void MetricEntry::_internal_set_name(const std::string& value) { + + _impl_.name_.Set(value, GetArenaForAllocation()); +} +inline std::string* MetricEntry::_internal_mutable_name() { + + return _impl_.name_.Mutable(GetArenaForAllocation()); +} +inline std::string* MetricEntry::release_name() { + // @@protoc_insertion_point(field_release:tensorflow.MetricEntry.name) + return _impl_.name_.Release(); +} +inline void MetricEntry::set_allocated_name(std::string* name) { + if (name != nullptr) { + + } else { + + } + _impl_.name_.SetAllocated(name, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.name_.IsDefault()) { + _impl_.name_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.MetricEntry.name) +} + +// double value = 2; +inline void MetricEntry::clear_value() { + _impl_.value_ = 0; +} +inline double MetricEntry::_internal_value() const { + return _impl_.value_; +} +inline double MetricEntry::value() const { + // @@protoc_insertion_point(field_get:tensorflow.MetricEntry.value) + return _internal_value(); +} +inline void MetricEntry::_internal_set_value(double value) { + + _impl_.value_ = value; +} +inline void MetricEntry::set_value(double value) { + _internal_set_value(value); + // @@protoc_insertion_point(field_set:tensorflow.MetricEntry.value) +} + +// .google.protobuf.DoubleValue min_value = 3; +inline bool MetricEntry::_internal_has_min_value() const { + return this != internal_default_instance() && 
_impl_.min_value_ != nullptr; +} +inline bool MetricEntry::has_min_value() const { + return _internal_has_min_value(); +} +inline const ::PROTOBUF_NAMESPACE_ID::DoubleValue& MetricEntry::_internal_min_value() const { + const ::PROTOBUF_NAMESPACE_ID::DoubleValue* p = _impl_.min_value_; + return p != nullptr ? *p : reinterpret_cast( + ::PROTOBUF_NAMESPACE_ID::_DoubleValue_default_instance_); +} +inline const ::PROTOBUF_NAMESPACE_ID::DoubleValue& MetricEntry::min_value() const { + // @@protoc_insertion_point(field_get:tensorflow.MetricEntry.min_value) + return _internal_min_value(); +} +inline void MetricEntry::unsafe_arena_set_allocated_min_value( + ::PROTOBUF_NAMESPACE_ID::DoubleValue* min_value) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.min_value_); + } + _impl_.min_value_ = min_value; + if (min_value) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.MetricEntry.min_value) +} +inline ::PROTOBUF_NAMESPACE_ID::DoubleValue* MetricEntry::release_min_value() { + + ::PROTOBUF_NAMESPACE_ID::DoubleValue* temp = _impl_.min_value_; + _impl_.min_value_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::PROTOBUF_NAMESPACE_ID::DoubleValue* MetricEntry::unsafe_arena_release_min_value() { + // @@protoc_insertion_point(field_release:tensorflow.MetricEntry.min_value) + + ::PROTOBUF_NAMESPACE_ID::DoubleValue* temp = _impl_.min_value_; + _impl_.min_value_ = nullptr; + return temp; +} +inline ::PROTOBUF_NAMESPACE_ID::DoubleValue* 
MetricEntry::_internal_mutable_min_value() { + + if (_impl_.min_value_ == nullptr) { + auto* p = CreateMaybeMessage<::PROTOBUF_NAMESPACE_ID::DoubleValue>(GetArenaForAllocation()); + _impl_.min_value_ = p; + } + return _impl_.min_value_; +} +inline ::PROTOBUF_NAMESPACE_ID::DoubleValue* MetricEntry::mutable_min_value() { + ::PROTOBUF_NAMESPACE_ID::DoubleValue* _msg = _internal_mutable_min_value(); + // @@protoc_insertion_point(field_mutable:tensorflow.MetricEntry.min_value) + return _msg; +} +inline void MetricEntry::set_allocated_min_value(::PROTOBUF_NAMESPACE_ID::DoubleValue* min_value) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.min_value_); + } + if (min_value) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena( + reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(min_value)); + if (message_arena != submessage_arena) { + min_value = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, min_value, submessage_arena); + } + + } else { + + } + _impl_.min_value_ = min_value; + // @@protoc_insertion_point(field_set_allocated:tensorflow.MetricEntry.min_value) +} + +// .google.protobuf.DoubleValue max_value = 4; +inline bool MetricEntry::_internal_has_max_value() const { + return this != internal_default_instance() && _impl_.max_value_ != nullptr; +} +inline bool MetricEntry::has_max_value() const { + return _internal_has_max_value(); +} +inline const ::PROTOBUF_NAMESPACE_ID::DoubleValue& MetricEntry::_internal_max_value() const { + const ::PROTOBUF_NAMESPACE_ID::DoubleValue* p = _impl_.max_value_; + return p != nullptr ? 
*p : reinterpret_cast( + ::PROTOBUF_NAMESPACE_ID::_DoubleValue_default_instance_); +} +inline const ::PROTOBUF_NAMESPACE_ID::DoubleValue& MetricEntry::max_value() const { + // @@protoc_insertion_point(field_get:tensorflow.MetricEntry.max_value) + return _internal_max_value(); +} +inline void MetricEntry::unsafe_arena_set_allocated_max_value( + ::PROTOBUF_NAMESPACE_ID::DoubleValue* max_value) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.max_value_); + } + _impl_.max_value_ = max_value; + if (max_value) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.MetricEntry.max_value) +} +inline ::PROTOBUF_NAMESPACE_ID::DoubleValue* MetricEntry::release_max_value() { + + ::PROTOBUF_NAMESPACE_ID::DoubleValue* temp = _impl_.max_value_; + _impl_.max_value_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::PROTOBUF_NAMESPACE_ID::DoubleValue* MetricEntry::unsafe_arena_release_max_value() { + // @@protoc_insertion_point(field_release:tensorflow.MetricEntry.max_value) + + ::PROTOBUF_NAMESPACE_ID::DoubleValue* temp = _impl_.max_value_; + _impl_.max_value_ = nullptr; + return temp; +} +inline ::PROTOBUF_NAMESPACE_ID::DoubleValue* MetricEntry::_internal_mutable_max_value() { + + if (_impl_.max_value_ == nullptr) { + auto* p = CreateMaybeMessage<::PROTOBUF_NAMESPACE_ID::DoubleValue>(GetArenaForAllocation()); + _impl_.max_value_ = p; + } + return _impl_.max_value_; +} +inline ::PROTOBUF_NAMESPACE_ID::DoubleValue* MetricEntry::mutable_max_value() 
{ + ::PROTOBUF_NAMESPACE_ID::DoubleValue* _msg = _internal_mutable_max_value(); + // @@protoc_insertion_point(field_mutable:tensorflow.MetricEntry.max_value) + return _msg; +} +inline void MetricEntry::set_allocated_max_value(::PROTOBUF_NAMESPACE_ID::DoubleValue* max_value) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.max_value_); + } + if (max_value) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena( + reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(max_value)); + if (message_arena != submessage_arena) { + max_value = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, max_value, submessage_arena); + } + + } else { + + } + _impl_.max_value_ = max_value; + // @@protoc_insertion_point(field_set_allocated:tensorflow.MetricEntry.max_value) +} + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// BenchmarkEntry + +// string name = 1; +inline void BenchmarkEntry::clear_name() { + _impl_.name_.ClearToEmpty(); +} +inline const std::string& BenchmarkEntry::name() const { + // @@protoc_insertion_point(field_get:tensorflow.BenchmarkEntry.name) + return _internal_name(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void BenchmarkEntry::set_name(ArgT0&& arg0, ArgT... 
args) { + + _impl_.name_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.BenchmarkEntry.name) +} +inline std::string* BenchmarkEntry::mutable_name() { + std::string* _s = _internal_mutable_name(); + // @@protoc_insertion_point(field_mutable:tensorflow.BenchmarkEntry.name) + return _s; +} +inline const std::string& BenchmarkEntry::_internal_name() const { + return _impl_.name_.Get(); +} +inline void BenchmarkEntry::_internal_set_name(const std::string& value) { + + _impl_.name_.Set(value, GetArenaForAllocation()); +} +inline std::string* BenchmarkEntry::_internal_mutable_name() { + + return _impl_.name_.Mutable(GetArenaForAllocation()); +} +inline std::string* BenchmarkEntry::release_name() { + // @@protoc_insertion_point(field_release:tensorflow.BenchmarkEntry.name) + return _impl_.name_.Release(); +} +inline void BenchmarkEntry::set_allocated_name(std::string* name) { + if (name != nullptr) { + + } else { + + } + _impl_.name_.SetAllocated(name, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.name_.IsDefault()) { + _impl_.name_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.BenchmarkEntry.name) +} + +// int64 iters = 2; +inline void BenchmarkEntry::clear_iters() { + _impl_.iters_ = int64_t{0}; +} +inline int64_t BenchmarkEntry::_internal_iters() const { + return _impl_.iters_; +} +inline int64_t BenchmarkEntry::iters() const { + // @@protoc_insertion_point(field_get:tensorflow.BenchmarkEntry.iters) + return _internal_iters(); +} +inline void BenchmarkEntry::_internal_set_iters(int64_t value) { + + _impl_.iters_ = value; +} +inline void BenchmarkEntry::set_iters(int64_t value) { + _internal_set_iters(value); + // @@protoc_insertion_point(field_set:tensorflow.BenchmarkEntry.iters) +} + +// double cpu_time = 3; +inline void BenchmarkEntry::clear_cpu_time() { + _impl_.cpu_time_ = 
0; +} +inline double BenchmarkEntry::_internal_cpu_time() const { + return _impl_.cpu_time_; +} +inline double BenchmarkEntry::cpu_time() const { + // @@protoc_insertion_point(field_get:tensorflow.BenchmarkEntry.cpu_time) + return _internal_cpu_time(); +} +inline void BenchmarkEntry::_internal_set_cpu_time(double value) { + + _impl_.cpu_time_ = value; +} +inline void BenchmarkEntry::set_cpu_time(double value) { + _internal_set_cpu_time(value); + // @@protoc_insertion_point(field_set:tensorflow.BenchmarkEntry.cpu_time) +} + +// double wall_time = 4; +inline void BenchmarkEntry::clear_wall_time() { + _impl_.wall_time_ = 0; +} +inline double BenchmarkEntry::_internal_wall_time() const { + return _impl_.wall_time_; +} +inline double BenchmarkEntry::wall_time() const { + // @@protoc_insertion_point(field_get:tensorflow.BenchmarkEntry.wall_time) + return _internal_wall_time(); +} +inline void BenchmarkEntry::_internal_set_wall_time(double value) { + + _impl_.wall_time_ = value; +} +inline void BenchmarkEntry::set_wall_time(double value) { + _internal_set_wall_time(value); + // @@protoc_insertion_point(field_set:tensorflow.BenchmarkEntry.wall_time) +} + +// double throughput = 5; +inline void BenchmarkEntry::clear_throughput() { + _impl_.throughput_ = 0; +} +inline double BenchmarkEntry::_internal_throughput() const { + return _impl_.throughput_; +} +inline double BenchmarkEntry::throughput() const { + // @@protoc_insertion_point(field_get:tensorflow.BenchmarkEntry.throughput) + return _internal_throughput(); +} +inline void BenchmarkEntry::_internal_set_throughput(double value) { + + _impl_.throughput_ = value; +} +inline void BenchmarkEntry::set_throughput(double value) { + _internal_set_throughput(value); + // @@protoc_insertion_point(field_set:tensorflow.BenchmarkEntry.throughput) +} + +// map extras = 6; +inline int BenchmarkEntry::_internal_extras_size() const { + return _impl_.extras_.size(); +} +inline int BenchmarkEntry::extras_size() const { + return 
_internal_extras_size(); +} +inline void BenchmarkEntry::clear_extras() { + _impl_.extras_.Clear(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::tensorflow::EntryValue >& +BenchmarkEntry::_internal_extras() const { + return _impl_.extras_.GetMap(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::tensorflow::EntryValue >& +BenchmarkEntry::extras() const { + // @@protoc_insertion_point(field_map:tensorflow.BenchmarkEntry.extras) + return _internal_extras(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::tensorflow::EntryValue >* +BenchmarkEntry::_internal_mutable_extras() { + return _impl_.extras_.MutableMap(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::tensorflow::EntryValue >* +BenchmarkEntry::mutable_extras() { + // @@protoc_insertion_point(field_mutable_map:tensorflow.BenchmarkEntry.extras) + return _internal_mutable_extras(); +} + +// repeated .tensorflow.MetricEntry metrics = 7; +inline int BenchmarkEntry::_internal_metrics_size() const { + return _impl_.metrics_.size(); +} +inline int BenchmarkEntry::metrics_size() const { + return _internal_metrics_size(); +} +inline void BenchmarkEntry::clear_metrics() { + _impl_.metrics_.Clear(); +} +inline ::tensorflow::MetricEntry* BenchmarkEntry::mutable_metrics(int index) { + // @@protoc_insertion_point(field_mutable:tensorflow.BenchmarkEntry.metrics) + return _impl_.metrics_.Mutable(index); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::MetricEntry >* +BenchmarkEntry::mutable_metrics() { + // @@protoc_insertion_point(field_mutable_list:tensorflow.BenchmarkEntry.metrics) + return &_impl_.metrics_; +} +inline const ::tensorflow::MetricEntry& BenchmarkEntry::_internal_metrics(int index) const { + return _impl_.metrics_.Get(index); +} +inline const ::tensorflow::MetricEntry& BenchmarkEntry::metrics(int index) const { + // @@protoc_insertion_point(field_get:tensorflow.BenchmarkEntry.metrics) + return _internal_metrics(index); +} +inline 
::tensorflow::MetricEntry* BenchmarkEntry::_internal_add_metrics() { + return _impl_.metrics_.Add(); +} +inline ::tensorflow::MetricEntry* BenchmarkEntry::add_metrics() { + ::tensorflow::MetricEntry* _add = _internal_add_metrics(); + // @@protoc_insertion_point(field_add:tensorflow.BenchmarkEntry.metrics) + return _add; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::MetricEntry >& +BenchmarkEntry::metrics() const { + // @@protoc_insertion_point(field_list:tensorflow.BenchmarkEntry.metrics) + return _impl_.metrics_; +} + +// ------------------------------------------------------------------- + +// BenchmarkEntries + +// repeated .tensorflow.BenchmarkEntry entry = 1; +inline int BenchmarkEntries::_internal_entry_size() const { + return _impl_.entry_.size(); +} +inline int BenchmarkEntries::entry_size() const { + return _internal_entry_size(); +} +inline void BenchmarkEntries::clear_entry() { + _impl_.entry_.Clear(); +} +inline ::tensorflow::BenchmarkEntry* BenchmarkEntries::mutable_entry(int index) { + // @@protoc_insertion_point(field_mutable:tensorflow.BenchmarkEntries.entry) + return _impl_.entry_.Mutable(index); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::BenchmarkEntry >* +BenchmarkEntries::mutable_entry() { + // @@protoc_insertion_point(field_mutable_list:tensorflow.BenchmarkEntries.entry) + return &_impl_.entry_; +} +inline const ::tensorflow::BenchmarkEntry& BenchmarkEntries::_internal_entry(int index) const { + return _impl_.entry_.Get(index); +} +inline const ::tensorflow::BenchmarkEntry& BenchmarkEntries::entry(int index) const { + // @@protoc_insertion_point(field_get:tensorflow.BenchmarkEntries.entry) + return _internal_entry(index); +} +inline ::tensorflow::BenchmarkEntry* BenchmarkEntries::_internal_add_entry() { + return _impl_.entry_.Add(); +} +inline ::tensorflow::BenchmarkEntry* BenchmarkEntries::add_entry() { + ::tensorflow::BenchmarkEntry* _add = _internal_add_entry(); + // 
@@protoc_insertion_point(field_add:tensorflow.BenchmarkEntries.entry) + return _add; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::BenchmarkEntry >& +BenchmarkEntries::entry() const { + // @@protoc_insertion_point(field_list:tensorflow.BenchmarkEntries.entry) + return _impl_.entry_; +} + +// ------------------------------------------------------------------- + +// BuildConfiguration + +// string mode = 1; +inline void BuildConfiguration::clear_mode() { + _impl_.mode_.ClearToEmpty(); +} +inline const std::string& BuildConfiguration::mode() const { + // @@protoc_insertion_point(field_get:tensorflow.BuildConfiguration.mode) + return _internal_mode(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void BuildConfiguration::set_mode(ArgT0&& arg0, ArgT... args) { + + _impl_.mode_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.BuildConfiguration.mode) +} +inline std::string* BuildConfiguration::mutable_mode() { + std::string* _s = _internal_mutable_mode(); + // @@protoc_insertion_point(field_mutable:tensorflow.BuildConfiguration.mode) + return _s; +} +inline const std::string& BuildConfiguration::_internal_mode() const { + return _impl_.mode_.Get(); +} +inline void BuildConfiguration::_internal_set_mode(const std::string& value) { + + _impl_.mode_.Set(value, GetArenaForAllocation()); +} +inline std::string* BuildConfiguration::_internal_mutable_mode() { + + return _impl_.mode_.Mutable(GetArenaForAllocation()); +} +inline std::string* BuildConfiguration::release_mode() { + // @@protoc_insertion_point(field_release:tensorflow.BuildConfiguration.mode) + return _impl_.mode_.Release(); +} +inline void BuildConfiguration::set_allocated_mode(std::string* mode) { + if (mode != nullptr) { + + } else { + + } + _impl_.mode_.SetAllocated(mode, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.mode_.IsDefault()) { + _impl_.mode_.Set("", GetArenaForAllocation()); + } 
+#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.BuildConfiguration.mode) +} + +// repeated string cc_flags = 2; +inline int BuildConfiguration::_internal_cc_flags_size() const { + return _impl_.cc_flags_.size(); +} +inline int BuildConfiguration::cc_flags_size() const { + return _internal_cc_flags_size(); +} +inline void BuildConfiguration::clear_cc_flags() { + _impl_.cc_flags_.Clear(); +} +inline std::string* BuildConfiguration::add_cc_flags() { + std::string* _s = _internal_add_cc_flags(); + // @@protoc_insertion_point(field_add_mutable:tensorflow.BuildConfiguration.cc_flags) + return _s; +} +inline const std::string& BuildConfiguration::_internal_cc_flags(int index) const { + return _impl_.cc_flags_.Get(index); +} +inline const std::string& BuildConfiguration::cc_flags(int index) const { + // @@protoc_insertion_point(field_get:tensorflow.BuildConfiguration.cc_flags) + return _internal_cc_flags(index); +} +inline std::string* BuildConfiguration::mutable_cc_flags(int index) { + // @@protoc_insertion_point(field_mutable:tensorflow.BuildConfiguration.cc_flags) + return _impl_.cc_flags_.Mutable(index); +} +inline void BuildConfiguration::set_cc_flags(int index, const std::string& value) { + _impl_.cc_flags_.Mutable(index)->assign(value); + // @@protoc_insertion_point(field_set:tensorflow.BuildConfiguration.cc_flags) +} +inline void BuildConfiguration::set_cc_flags(int index, std::string&& value) { + _impl_.cc_flags_.Mutable(index)->assign(std::move(value)); + // @@protoc_insertion_point(field_set:tensorflow.BuildConfiguration.cc_flags) +} +inline void BuildConfiguration::set_cc_flags(int index, const char* value) { + GOOGLE_DCHECK(value != nullptr); + _impl_.cc_flags_.Mutable(index)->assign(value); + // @@protoc_insertion_point(field_set_char:tensorflow.BuildConfiguration.cc_flags) +} +inline void BuildConfiguration::set_cc_flags(int index, const char* value, size_t size) { + 
_impl_.cc_flags_.Mutable(index)->assign( + reinterpret_cast(value), size); + // @@protoc_insertion_point(field_set_pointer:tensorflow.BuildConfiguration.cc_flags) +} +inline std::string* BuildConfiguration::_internal_add_cc_flags() { + return _impl_.cc_flags_.Add(); +} +inline void BuildConfiguration::add_cc_flags(const std::string& value) { + _impl_.cc_flags_.Add()->assign(value); + // @@protoc_insertion_point(field_add:tensorflow.BuildConfiguration.cc_flags) +} +inline void BuildConfiguration::add_cc_flags(std::string&& value) { + _impl_.cc_flags_.Add(std::move(value)); + // @@protoc_insertion_point(field_add:tensorflow.BuildConfiguration.cc_flags) +} +inline void BuildConfiguration::add_cc_flags(const char* value) { + GOOGLE_DCHECK(value != nullptr); + _impl_.cc_flags_.Add()->assign(value); + // @@protoc_insertion_point(field_add_char:tensorflow.BuildConfiguration.cc_flags) +} +inline void BuildConfiguration::add_cc_flags(const char* value, size_t size) { + _impl_.cc_flags_.Add()->assign(reinterpret_cast(value), size); + // @@protoc_insertion_point(field_add_pointer:tensorflow.BuildConfiguration.cc_flags) +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& +BuildConfiguration::cc_flags() const { + // @@protoc_insertion_point(field_list:tensorflow.BuildConfiguration.cc_flags) + return _impl_.cc_flags_; +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* +BuildConfiguration::mutable_cc_flags() { + // @@protoc_insertion_point(field_mutable_list:tensorflow.BuildConfiguration.cc_flags) + return &_impl_.cc_flags_; +} + +// repeated string opts = 3; +inline int BuildConfiguration::_internal_opts_size() const { + return _impl_.opts_.size(); +} +inline int BuildConfiguration::opts_size() const { + return _internal_opts_size(); +} +inline void BuildConfiguration::clear_opts() { + _impl_.opts_.Clear(); +} +inline std::string* BuildConfiguration::add_opts() { + std::string* _s = _internal_add_opts(); + // 
@@protoc_insertion_point(field_add_mutable:tensorflow.BuildConfiguration.opts) + return _s; +} +inline const std::string& BuildConfiguration::_internal_opts(int index) const { + return _impl_.opts_.Get(index); +} +inline const std::string& BuildConfiguration::opts(int index) const { + // @@protoc_insertion_point(field_get:tensorflow.BuildConfiguration.opts) + return _internal_opts(index); +} +inline std::string* BuildConfiguration::mutable_opts(int index) { + // @@protoc_insertion_point(field_mutable:tensorflow.BuildConfiguration.opts) + return _impl_.opts_.Mutable(index); +} +inline void BuildConfiguration::set_opts(int index, const std::string& value) { + _impl_.opts_.Mutable(index)->assign(value); + // @@protoc_insertion_point(field_set:tensorflow.BuildConfiguration.opts) +} +inline void BuildConfiguration::set_opts(int index, std::string&& value) { + _impl_.opts_.Mutable(index)->assign(std::move(value)); + // @@protoc_insertion_point(field_set:tensorflow.BuildConfiguration.opts) +} +inline void BuildConfiguration::set_opts(int index, const char* value) { + GOOGLE_DCHECK(value != nullptr); + _impl_.opts_.Mutable(index)->assign(value); + // @@protoc_insertion_point(field_set_char:tensorflow.BuildConfiguration.opts) +} +inline void BuildConfiguration::set_opts(int index, const char* value, size_t size) { + _impl_.opts_.Mutable(index)->assign( + reinterpret_cast(value), size); + // @@protoc_insertion_point(field_set_pointer:tensorflow.BuildConfiguration.opts) +} +inline std::string* BuildConfiguration::_internal_add_opts() { + return _impl_.opts_.Add(); +} +inline void BuildConfiguration::add_opts(const std::string& value) { + _impl_.opts_.Add()->assign(value); + // @@protoc_insertion_point(field_add:tensorflow.BuildConfiguration.opts) +} +inline void BuildConfiguration::add_opts(std::string&& value) { + _impl_.opts_.Add(std::move(value)); + // @@protoc_insertion_point(field_add:tensorflow.BuildConfiguration.opts) +} +inline void BuildConfiguration::add_opts(const 
char* value) { + GOOGLE_DCHECK(value != nullptr); + _impl_.opts_.Add()->assign(value); + // @@protoc_insertion_point(field_add_char:tensorflow.BuildConfiguration.opts) +} +inline void BuildConfiguration::add_opts(const char* value, size_t size) { + _impl_.opts_.Add()->assign(reinterpret_cast(value), size); + // @@protoc_insertion_point(field_add_pointer:tensorflow.BuildConfiguration.opts) +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& +BuildConfiguration::opts() const { + // @@protoc_insertion_point(field_list:tensorflow.BuildConfiguration.opts) + return _impl_.opts_; +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* +BuildConfiguration::mutable_opts() { + // @@protoc_insertion_point(field_mutable_list:tensorflow.BuildConfiguration.opts) + return &_impl_.opts_; +} + +// ------------------------------------------------------------------- + +// CommitId + +// int64 changelist = 1; +inline bool CommitId::_internal_has_changelist() const { + return kind_case() == kChangelist; +} +inline bool CommitId::has_changelist() const { + return _internal_has_changelist(); +} +inline void CommitId::set_has_changelist() { + _impl_._oneof_case_[0] = kChangelist; +} +inline void CommitId::clear_changelist() { + if (_internal_has_changelist()) { + _impl_.kind_.changelist_ = int64_t{0}; + clear_has_kind(); + } +} +inline int64_t CommitId::_internal_changelist() const { + if (_internal_has_changelist()) { + return _impl_.kind_.changelist_; + } + return int64_t{0}; +} +inline void CommitId::_internal_set_changelist(int64_t value) { + if (!_internal_has_changelist()) { + clear_kind(); + set_has_changelist(); + } + _impl_.kind_.changelist_ = value; +} +inline int64_t CommitId::changelist() const { + // @@protoc_insertion_point(field_get:tensorflow.CommitId.changelist) + return _internal_changelist(); +} +inline void CommitId::set_changelist(int64_t value) { + _internal_set_changelist(value); + // @@protoc_insertion_point(field_set:tensorflow.CommitId.changelist) +} + +// 
string hash = 2; +inline bool CommitId::_internal_has_hash() const { + return kind_case() == kHash; +} +inline bool CommitId::has_hash() const { + return _internal_has_hash(); +} +inline void CommitId::set_has_hash() { + _impl_._oneof_case_[0] = kHash; +} +inline void CommitId::clear_hash() { + if (_internal_has_hash()) { + _impl_.kind_.hash_.Destroy(); + clear_has_kind(); + } +} +inline const std::string& CommitId::hash() const { + // @@protoc_insertion_point(field_get:tensorflow.CommitId.hash) + return _internal_hash(); +} +template +inline void CommitId::set_hash(ArgT0&& arg0, ArgT... args) { + if (!_internal_has_hash()) { + clear_kind(); + set_has_hash(); + _impl_.kind_.hash_.InitDefault(); + } + _impl_.kind_.hash_.Set( static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.CommitId.hash) +} +inline std::string* CommitId::mutable_hash() { + std::string* _s = _internal_mutable_hash(); + // @@protoc_insertion_point(field_mutable:tensorflow.CommitId.hash) + return _s; +} +inline const std::string& CommitId::_internal_hash() const { + if (_internal_has_hash()) { + return _impl_.kind_.hash_.Get(); + } + return ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(); +} +inline void CommitId::_internal_set_hash(const std::string& value) { + if (!_internal_has_hash()) { + clear_kind(); + set_has_hash(); + _impl_.kind_.hash_.InitDefault(); + } + _impl_.kind_.hash_.Set(value, GetArenaForAllocation()); +} +inline std::string* CommitId::_internal_mutable_hash() { + if (!_internal_has_hash()) { + clear_kind(); + set_has_hash(); + _impl_.kind_.hash_.InitDefault(); + } + return _impl_.kind_.hash_.Mutable( GetArenaForAllocation()); +} +inline std::string* CommitId::release_hash() { + // @@protoc_insertion_point(field_release:tensorflow.CommitId.hash) + if (_internal_has_hash()) { + clear_has_kind(); + return _impl_.kind_.hash_.Release(); + } else { + return nullptr; + } +} +inline void 
CommitId::set_allocated_hash(std::string* hash) { + if (has_kind()) { + clear_kind(); + } + if (hash != nullptr) { + set_has_hash(); + _impl_.kind_.hash_.InitAllocated(hash, GetArenaForAllocation()); + } + // @@protoc_insertion_point(field_set_allocated:tensorflow.CommitId.hash) +} + +// string snapshot = 3; +inline void CommitId::clear_snapshot() { + _impl_.snapshot_.ClearToEmpty(); +} +inline const std::string& CommitId::snapshot() const { + // @@protoc_insertion_point(field_get:tensorflow.CommitId.snapshot) + return _internal_snapshot(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void CommitId::set_snapshot(ArgT0&& arg0, ArgT... args) { + + _impl_.snapshot_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.CommitId.snapshot) +} +inline std::string* CommitId::mutable_snapshot() { + std::string* _s = _internal_mutable_snapshot(); + // @@protoc_insertion_point(field_mutable:tensorflow.CommitId.snapshot) + return _s; +} +inline const std::string& CommitId::_internal_snapshot() const { + return _impl_.snapshot_.Get(); +} +inline void CommitId::_internal_set_snapshot(const std::string& value) { + + _impl_.snapshot_.Set(value, GetArenaForAllocation()); +} +inline std::string* CommitId::_internal_mutable_snapshot() { + + return _impl_.snapshot_.Mutable(GetArenaForAllocation()); +} +inline std::string* CommitId::release_snapshot() { + // @@protoc_insertion_point(field_release:tensorflow.CommitId.snapshot) + return _impl_.snapshot_.Release(); +} +inline void CommitId::set_allocated_snapshot(std::string* snapshot) { + if (snapshot != nullptr) { + + } else { + + } + _impl_.snapshot_.SetAllocated(snapshot, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.snapshot_.IsDefault()) { + _impl_.snapshot_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.CommitId.snapshot) +} + +// int64 
pending_changelist = 4; +inline void CommitId::clear_pending_changelist() { + _impl_.pending_changelist_ = int64_t{0}; +} +inline int64_t CommitId::_internal_pending_changelist() const { + return _impl_.pending_changelist_; +} +inline int64_t CommitId::pending_changelist() const { + // @@protoc_insertion_point(field_get:tensorflow.CommitId.pending_changelist) + return _internal_pending_changelist(); +} +inline void CommitId::_internal_set_pending_changelist(int64_t value) { + + _impl_.pending_changelist_ = value; +} +inline void CommitId::set_pending_changelist(int64_t value) { + _internal_set_pending_changelist(value); + // @@protoc_insertion_point(field_set:tensorflow.CommitId.pending_changelist) +} + +inline bool CommitId::has_kind() const { + return kind_case() != KIND_NOT_SET; +} +inline void CommitId::clear_has_kind() { + _impl_._oneof_case_[0] = KIND_NOT_SET; +} +inline CommitId::KindCase CommitId::kind_case() const { + return CommitId::KindCase(_impl_._oneof_case_[0]); +} +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// CPUInfo + +// int64 num_cores = 1; +inline void CPUInfo::clear_num_cores() { + _impl_.num_cores_ = int64_t{0}; +} +inline int64_t CPUInfo::_internal_num_cores() const { + return _impl_.num_cores_; +} +inline int64_t CPUInfo::num_cores() const { + // @@protoc_insertion_point(field_get:tensorflow.CPUInfo.num_cores) + return _internal_num_cores(); +} +inline void CPUInfo::_internal_set_num_cores(int64_t value) { + + _impl_.num_cores_ = value; +} +inline void CPUInfo::set_num_cores(int64_t value) { + _internal_set_num_cores(value); + // @@protoc_insertion_point(field_set:tensorflow.CPUInfo.num_cores) +} + +// int64 num_cores_allowed = 2; +inline void CPUInfo::clear_num_cores_allowed() { + _impl_.num_cores_allowed_ = int64_t{0}; +} +inline int64_t CPUInfo::_internal_num_cores_allowed() const { + return _impl_.num_cores_allowed_; +} +inline 
int64_t CPUInfo::num_cores_allowed() const { + // @@protoc_insertion_point(field_get:tensorflow.CPUInfo.num_cores_allowed) + return _internal_num_cores_allowed(); +} +inline void CPUInfo::_internal_set_num_cores_allowed(int64_t value) { + + _impl_.num_cores_allowed_ = value; +} +inline void CPUInfo::set_num_cores_allowed(int64_t value) { + _internal_set_num_cores_allowed(value); + // @@protoc_insertion_point(field_set:tensorflow.CPUInfo.num_cores_allowed) +} + +// double mhz_per_cpu = 3; +inline void CPUInfo::clear_mhz_per_cpu() { + _impl_.mhz_per_cpu_ = 0; +} +inline double CPUInfo::_internal_mhz_per_cpu() const { + return _impl_.mhz_per_cpu_; +} +inline double CPUInfo::mhz_per_cpu() const { + // @@protoc_insertion_point(field_get:tensorflow.CPUInfo.mhz_per_cpu) + return _internal_mhz_per_cpu(); +} +inline void CPUInfo::_internal_set_mhz_per_cpu(double value) { + + _impl_.mhz_per_cpu_ = value; +} +inline void CPUInfo::set_mhz_per_cpu(double value) { + _internal_set_mhz_per_cpu(value); + // @@protoc_insertion_point(field_set:tensorflow.CPUInfo.mhz_per_cpu) +} + +// string cpu_info = 4; +inline void CPUInfo::clear_cpu_info() { + _impl_.cpu_info_.ClearToEmpty(); +} +inline const std::string& CPUInfo::cpu_info() const { + // @@protoc_insertion_point(field_get:tensorflow.CPUInfo.cpu_info) + return _internal_cpu_info(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void CPUInfo::set_cpu_info(ArgT0&& arg0, ArgT... 
args) { + + _impl_.cpu_info_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.CPUInfo.cpu_info) +} +inline std::string* CPUInfo::mutable_cpu_info() { + std::string* _s = _internal_mutable_cpu_info(); + // @@protoc_insertion_point(field_mutable:tensorflow.CPUInfo.cpu_info) + return _s; +} +inline const std::string& CPUInfo::_internal_cpu_info() const { + return _impl_.cpu_info_.Get(); +} +inline void CPUInfo::_internal_set_cpu_info(const std::string& value) { + + _impl_.cpu_info_.Set(value, GetArenaForAllocation()); +} +inline std::string* CPUInfo::_internal_mutable_cpu_info() { + + return _impl_.cpu_info_.Mutable(GetArenaForAllocation()); +} +inline std::string* CPUInfo::release_cpu_info() { + // @@protoc_insertion_point(field_release:tensorflow.CPUInfo.cpu_info) + return _impl_.cpu_info_.Release(); +} +inline void CPUInfo::set_allocated_cpu_info(std::string* cpu_info) { + if (cpu_info != nullptr) { + + } else { + + } + _impl_.cpu_info_.SetAllocated(cpu_info, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.cpu_info_.IsDefault()) { + _impl_.cpu_info_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.CPUInfo.cpu_info) +} + +// string cpu_governor = 5; +inline void CPUInfo::clear_cpu_governor() { + _impl_.cpu_governor_.ClearToEmpty(); +} +inline const std::string& CPUInfo::cpu_governor() const { + // @@protoc_insertion_point(field_get:tensorflow.CPUInfo.cpu_governor) + return _internal_cpu_governor(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void CPUInfo::set_cpu_governor(ArgT0&& arg0, ArgT... 
args) { + + _impl_.cpu_governor_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.CPUInfo.cpu_governor) +} +inline std::string* CPUInfo::mutable_cpu_governor() { + std::string* _s = _internal_mutable_cpu_governor(); + // @@protoc_insertion_point(field_mutable:tensorflow.CPUInfo.cpu_governor) + return _s; +} +inline const std::string& CPUInfo::_internal_cpu_governor() const { + return _impl_.cpu_governor_.Get(); +} +inline void CPUInfo::_internal_set_cpu_governor(const std::string& value) { + + _impl_.cpu_governor_.Set(value, GetArenaForAllocation()); +} +inline std::string* CPUInfo::_internal_mutable_cpu_governor() { + + return _impl_.cpu_governor_.Mutable(GetArenaForAllocation()); +} +inline std::string* CPUInfo::release_cpu_governor() { + // @@protoc_insertion_point(field_release:tensorflow.CPUInfo.cpu_governor) + return _impl_.cpu_governor_.Release(); +} +inline void CPUInfo::set_allocated_cpu_governor(std::string* cpu_governor) { + if (cpu_governor != nullptr) { + + } else { + + } + _impl_.cpu_governor_.SetAllocated(cpu_governor, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.cpu_governor_.IsDefault()) { + _impl_.cpu_governor_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.CPUInfo.cpu_governor) +} + +// map cache_size = 6; +inline int CPUInfo::_internal_cache_size_size() const { + return _impl_.cache_size_.size(); +} +inline int CPUInfo::cache_size_size() const { + return _internal_cache_size_size(); +} +inline void CPUInfo::clear_cache_size() { + _impl_.cache_size_.Clear(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, int64_t >& +CPUInfo::_internal_cache_size() const { + return _impl_.cache_size_.GetMap(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, int64_t >& +CPUInfo::cache_size() const { + // 
@@protoc_insertion_point(field_map:tensorflow.CPUInfo.cache_size) + return _internal_cache_size(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, int64_t >* +CPUInfo::_internal_mutable_cache_size() { + return _impl_.cache_size_.MutableMap(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, int64_t >* +CPUInfo::mutable_cache_size() { + // @@protoc_insertion_point(field_mutable_map:tensorflow.CPUInfo.cache_size) + return _internal_mutable_cache_size(); +} + +// ------------------------------------------------------------------- + +// MemoryInfo + +// int64 total = 1; +inline void MemoryInfo::clear_total() { + _impl_.total_ = int64_t{0}; +} +inline int64_t MemoryInfo::_internal_total() const { + return _impl_.total_; +} +inline int64_t MemoryInfo::total() const { + // @@protoc_insertion_point(field_get:tensorflow.MemoryInfo.total) + return _internal_total(); +} +inline void MemoryInfo::_internal_set_total(int64_t value) { + + _impl_.total_ = value; +} +inline void MemoryInfo::set_total(int64_t value) { + _internal_set_total(value); + // @@protoc_insertion_point(field_set:tensorflow.MemoryInfo.total) +} + +// int64 available = 2; +inline void MemoryInfo::clear_available() { + _impl_.available_ = int64_t{0}; +} +inline int64_t MemoryInfo::_internal_available() const { + return _impl_.available_; +} +inline int64_t MemoryInfo::available() const { + // @@protoc_insertion_point(field_get:tensorflow.MemoryInfo.available) + return _internal_available(); +} +inline void MemoryInfo::_internal_set_available(int64_t value) { + + _impl_.available_ = value; +} +inline void MemoryInfo::set_available(int64_t value) { + _internal_set_available(value); + // @@protoc_insertion_point(field_set:tensorflow.MemoryInfo.available) +} + +// ------------------------------------------------------------------- + +// GPUInfo + +// string model = 1; +inline void GPUInfo::clear_model() { + _impl_.model_.ClearToEmpty(); +} +inline const std::string& GPUInfo::model() const { + // 
@@protoc_insertion_point(field_get:tensorflow.GPUInfo.model) + return _internal_model(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void GPUInfo::set_model(ArgT0&& arg0, ArgT... args) { + + _impl_.model_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.GPUInfo.model) +} +inline std::string* GPUInfo::mutable_model() { + std::string* _s = _internal_mutable_model(); + // @@protoc_insertion_point(field_mutable:tensorflow.GPUInfo.model) + return _s; +} +inline const std::string& GPUInfo::_internal_model() const { + return _impl_.model_.Get(); +} +inline void GPUInfo::_internal_set_model(const std::string& value) { + + _impl_.model_.Set(value, GetArenaForAllocation()); +} +inline std::string* GPUInfo::_internal_mutable_model() { + + return _impl_.model_.Mutable(GetArenaForAllocation()); +} +inline std::string* GPUInfo::release_model() { + // @@protoc_insertion_point(field_release:tensorflow.GPUInfo.model) + return _impl_.model_.Release(); +} +inline void GPUInfo::set_allocated_model(std::string* model) { + if (model != nullptr) { + + } else { + + } + _impl_.model_.SetAllocated(model, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.model_.IsDefault()) { + _impl_.model_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.GPUInfo.model) +} + +// string uuid = 2; +inline void GPUInfo::clear_uuid() { + _impl_.uuid_.ClearToEmpty(); +} +inline const std::string& GPUInfo::uuid() const { + // @@protoc_insertion_point(field_get:tensorflow.GPUInfo.uuid) + return _internal_uuid(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void GPUInfo::set_uuid(ArgT0&& arg0, ArgT... 
args) { + + _impl_.uuid_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.GPUInfo.uuid) +} +inline std::string* GPUInfo::mutable_uuid() { + std::string* _s = _internal_mutable_uuid(); + // @@protoc_insertion_point(field_mutable:tensorflow.GPUInfo.uuid) + return _s; +} +inline const std::string& GPUInfo::_internal_uuid() const { + return _impl_.uuid_.Get(); +} +inline void GPUInfo::_internal_set_uuid(const std::string& value) { + + _impl_.uuid_.Set(value, GetArenaForAllocation()); +} +inline std::string* GPUInfo::_internal_mutable_uuid() { + + return _impl_.uuid_.Mutable(GetArenaForAllocation()); +} +inline std::string* GPUInfo::release_uuid() { + // @@protoc_insertion_point(field_release:tensorflow.GPUInfo.uuid) + return _impl_.uuid_.Release(); +} +inline void GPUInfo::set_allocated_uuid(std::string* uuid) { + if (uuid != nullptr) { + + } else { + + } + _impl_.uuid_.SetAllocated(uuid, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.uuid_.IsDefault()) { + _impl_.uuid_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.GPUInfo.uuid) +} + +// string bus_id = 3; +inline void GPUInfo::clear_bus_id() { + _impl_.bus_id_.ClearToEmpty(); +} +inline const std::string& GPUInfo::bus_id() const { + // @@protoc_insertion_point(field_get:tensorflow.GPUInfo.bus_id) + return _internal_bus_id(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void GPUInfo::set_bus_id(ArgT0&& arg0, ArgT... 
args) { + + _impl_.bus_id_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.GPUInfo.bus_id) +} +inline std::string* GPUInfo::mutable_bus_id() { + std::string* _s = _internal_mutable_bus_id(); + // @@protoc_insertion_point(field_mutable:tensorflow.GPUInfo.bus_id) + return _s; +} +inline const std::string& GPUInfo::_internal_bus_id() const { + return _impl_.bus_id_.Get(); +} +inline void GPUInfo::_internal_set_bus_id(const std::string& value) { + + _impl_.bus_id_.Set(value, GetArenaForAllocation()); +} +inline std::string* GPUInfo::_internal_mutable_bus_id() { + + return _impl_.bus_id_.Mutable(GetArenaForAllocation()); +} +inline std::string* GPUInfo::release_bus_id() { + // @@protoc_insertion_point(field_release:tensorflow.GPUInfo.bus_id) + return _impl_.bus_id_.Release(); +} +inline void GPUInfo::set_allocated_bus_id(std::string* bus_id) { + if (bus_id != nullptr) { + + } else { + + } + _impl_.bus_id_.SetAllocated(bus_id, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.bus_id_.IsDefault()) { + _impl_.bus_id_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.GPUInfo.bus_id) +} + +// ------------------------------------------------------------------- + +// PlatformInfo + +// string bits = 1; +inline void PlatformInfo::clear_bits() { + _impl_.bits_.ClearToEmpty(); +} +inline const std::string& PlatformInfo::bits() const { + // @@protoc_insertion_point(field_get:tensorflow.PlatformInfo.bits) + return _internal_bits(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void PlatformInfo::set_bits(ArgT0&& arg0, ArgT... 
args) { + + _impl_.bits_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.PlatformInfo.bits) +} +inline std::string* PlatformInfo::mutable_bits() { + std::string* _s = _internal_mutable_bits(); + // @@protoc_insertion_point(field_mutable:tensorflow.PlatformInfo.bits) + return _s; +} +inline const std::string& PlatformInfo::_internal_bits() const { + return _impl_.bits_.Get(); +} +inline void PlatformInfo::_internal_set_bits(const std::string& value) { + + _impl_.bits_.Set(value, GetArenaForAllocation()); +} +inline std::string* PlatformInfo::_internal_mutable_bits() { + + return _impl_.bits_.Mutable(GetArenaForAllocation()); +} +inline std::string* PlatformInfo::release_bits() { + // @@protoc_insertion_point(field_release:tensorflow.PlatformInfo.bits) + return _impl_.bits_.Release(); +} +inline void PlatformInfo::set_allocated_bits(std::string* bits) { + if (bits != nullptr) { + + } else { + + } + _impl_.bits_.SetAllocated(bits, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.bits_.IsDefault()) { + _impl_.bits_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.PlatformInfo.bits) +} + +// string linkage = 2; +inline void PlatformInfo::clear_linkage() { + _impl_.linkage_.ClearToEmpty(); +} +inline const std::string& PlatformInfo::linkage() const { + // @@protoc_insertion_point(field_get:tensorflow.PlatformInfo.linkage) + return _internal_linkage(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void PlatformInfo::set_linkage(ArgT0&& arg0, ArgT... 
args) { + + _impl_.linkage_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.PlatformInfo.linkage) +} +inline std::string* PlatformInfo::mutable_linkage() { + std::string* _s = _internal_mutable_linkage(); + // @@protoc_insertion_point(field_mutable:tensorflow.PlatformInfo.linkage) + return _s; +} +inline const std::string& PlatformInfo::_internal_linkage() const { + return _impl_.linkage_.Get(); +} +inline void PlatformInfo::_internal_set_linkage(const std::string& value) { + + _impl_.linkage_.Set(value, GetArenaForAllocation()); +} +inline std::string* PlatformInfo::_internal_mutable_linkage() { + + return _impl_.linkage_.Mutable(GetArenaForAllocation()); +} +inline std::string* PlatformInfo::release_linkage() { + // @@protoc_insertion_point(field_release:tensorflow.PlatformInfo.linkage) + return _impl_.linkage_.Release(); +} +inline void PlatformInfo::set_allocated_linkage(std::string* linkage) { + if (linkage != nullptr) { + + } else { + + } + _impl_.linkage_.SetAllocated(linkage, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.linkage_.IsDefault()) { + _impl_.linkage_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.PlatformInfo.linkage) +} + +// string machine = 3; +inline void PlatformInfo::clear_machine() { + _impl_.machine_.ClearToEmpty(); +} +inline const std::string& PlatformInfo::machine() const { + // @@protoc_insertion_point(field_get:tensorflow.PlatformInfo.machine) + return _internal_machine(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void PlatformInfo::set_machine(ArgT0&& arg0, ArgT... 
args) { + + _impl_.machine_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.PlatformInfo.machine) +} +inline std::string* PlatformInfo::mutable_machine() { + std::string* _s = _internal_mutable_machine(); + // @@protoc_insertion_point(field_mutable:tensorflow.PlatformInfo.machine) + return _s; +} +inline const std::string& PlatformInfo::_internal_machine() const { + return _impl_.machine_.Get(); +} +inline void PlatformInfo::_internal_set_machine(const std::string& value) { + + _impl_.machine_.Set(value, GetArenaForAllocation()); +} +inline std::string* PlatformInfo::_internal_mutable_machine() { + + return _impl_.machine_.Mutable(GetArenaForAllocation()); +} +inline std::string* PlatformInfo::release_machine() { + // @@protoc_insertion_point(field_release:tensorflow.PlatformInfo.machine) + return _impl_.machine_.Release(); +} +inline void PlatformInfo::set_allocated_machine(std::string* machine) { + if (machine != nullptr) { + + } else { + + } + _impl_.machine_.SetAllocated(machine, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.machine_.IsDefault()) { + _impl_.machine_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.PlatformInfo.machine) +} + +// string release = 4; +inline void PlatformInfo::clear_release() { + _impl_.release_.ClearToEmpty(); +} +inline const std::string& PlatformInfo::release() const { + // @@protoc_insertion_point(field_get:tensorflow.PlatformInfo.release) + return _internal_release(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void PlatformInfo::set_release(ArgT0&& arg0, ArgT... 
args) { + + _impl_.release_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.PlatformInfo.release) +} +inline std::string* PlatformInfo::mutable_release() { + std::string* _s = _internal_mutable_release(); + // @@protoc_insertion_point(field_mutable:tensorflow.PlatformInfo.release) + return _s; +} +inline const std::string& PlatformInfo::_internal_release() const { + return _impl_.release_.Get(); +} +inline void PlatformInfo::_internal_set_release(const std::string& value) { + + _impl_.release_.Set(value, GetArenaForAllocation()); +} +inline std::string* PlatformInfo::_internal_mutable_release() { + + return _impl_.release_.Mutable(GetArenaForAllocation()); +} +inline std::string* PlatformInfo::release_release() { + // @@protoc_insertion_point(field_release:tensorflow.PlatformInfo.release) + return _impl_.release_.Release(); +} +inline void PlatformInfo::set_allocated_release(std::string* release) { + if (release != nullptr) { + + } else { + + } + _impl_.release_.SetAllocated(release, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.release_.IsDefault()) { + _impl_.release_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.PlatformInfo.release) +} + +// string system = 5; +inline void PlatformInfo::clear_system() { + _impl_.system_.ClearToEmpty(); +} +inline const std::string& PlatformInfo::system() const { + // @@protoc_insertion_point(field_get:tensorflow.PlatformInfo.system) + return _internal_system(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void PlatformInfo::set_system(ArgT0&& arg0, ArgT... 
args) { + + _impl_.system_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.PlatformInfo.system) +} +inline std::string* PlatformInfo::mutable_system() { + std::string* _s = _internal_mutable_system(); + // @@protoc_insertion_point(field_mutable:tensorflow.PlatformInfo.system) + return _s; +} +inline const std::string& PlatformInfo::_internal_system() const { + return _impl_.system_.Get(); +} +inline void PlatformInfo::_internal_set_system(const std::string& value) { + + _impl_.system_.Set(value, GetArenaForAllocation()); +} +inline std::string* PlatformInfo::_internal_mutable_system() { + + return _impl_.system_.Mutable(GetArenaForAllocation()); +} +inline std::string* PlatformInfo::release_system() { + // @@protoc_insertion_point(field_release:tensorflow.PlatformInfo.system) + return _impl_.system_.Release(); +} +inline void PlatformInfo::set_allocated_system(std::string* system) { + if (system != nullptr) { + + } else { + + } + _impl_.system_.SetAllocated(system, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.system_.IsDefault()) { + _impl_.system_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.PlatformInfo.system) +} + +// string version = 6; +inline void PlatformInfo::clear_version() { + _impl_.version_.ClearToEmpty(); +} +inline const std::string& PlatformInfo::version() const { + // @@protoc_insertion_point(field_get:tensorflow.PlatformInfo.version) + return _internal_version(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void PlatformInfo::set_version(ArgT0&& arg0, ArgT... 
args) { + + _impl_.version_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.PlatformInfo.version) +} +inline std::string* PlatformInfo::mutable_version() { + std::string* _s = _internal_mutable_version(); + // @@protoc_insertion_point(field_mutable:tensorflow.PlatformInfo.version) + return _s; +} +inline const std::string& PlatformInfo::_internal_version() const { + return _impl_.version_.Get(); +} +inline void PlatformInfo::_internal_set_version(const std::string& value) { + + _impl_.version_.Set(value, GetArenaForAllocation()); +} +inline std::string* PlatformInfo::_internal_mutable_version() { + + return _impl_.version_.Mutable(GetArenaForAllocation()); +} +inline std::string* PlatformInfo::release_version() { + // @@protoc_insertion_point(field_release:tensorflow.PlatformInfo.version) + return _impl_.version_.Release(); +} +inline void PlatformInfo::set_allocated_version(std::string* version) { + if (version != nullptr) { + + } else { + + } + _impl_.version_.SetAllocated(version, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.version_.IsDefault()) { + _impl_.version_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.PlatformInfo.version) +} + +// ------------------------------------------------------------------- + +// AvailableDeviceInfo + +// string name = 1; +inline void AvailableDeviceInfo::clear_name() { + _impl_.name_.ClearToEmpty(); +} +inline const std::string& AvailableDeviceInfo::name() const { + // @@protoc_insertion_point(field_get:tensorflow.AvailableDeviceInfo.name) + return _internal_name(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void AvailableDeviceInfo::set_name(ArgT0&& arg0, ArgT... 
args) { + + _impl_.name_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.AvailableDeviceInfo.name) +} +inline std::string* AvailableDeviceInfo::mutable_name() { + std::string* _s = _internal_mutable_name(); + // @@protoc_insertion_point(field_mutable:tensorflow.AvailableDeviceInfo.name) + return _s; +} +inline const std::string& AvailableDeviceInfo::_internal_name() const { + return _impl_.name_.Get(); +} +inline void AvailableDeviceInfo::_internal_set_name(const std::string& value) { + + _impl_.name_.Set(value, GetArenaForAllocation()); +} +inline std::string* AvailableDeviceInfo::_internal_mutable_name() { + + return _impl_.name_.Mutable(GetArenaForAllocation()); +} +inline std::string* AvailableDeviceInfo::release_name() { + // @@protoc_insertion_point(field_release:tensorflow.AvailableDeviceInfo.name) + return _impl_.name_.Release(); +} +inline void AvailableDeviceInfo::set_allocated_name(std::string* name) { + if (name != nullptr) { + + } else { + + } + _impl_.name_.SetAllocated(name, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.name_.IsDefault()) { + _impl_.name_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.AvailableDeviceInfo.name) +} + +// string type = 2; +inline void AvailableDeviceInfo::clear_type() { + _impl_.type_.ClearToEmpty(); +} +inline const std::string& AvailableDeviceInfo::type() const { + // @@protoc_insertion_point(field_get:tensorflow.AvailableDeviceInfo.type) + return _internal_type(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void AvailableDeviceInfo::set_type(ArgT0&& arg0, ArgT... 
args) { + + _impl_.type_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.AvailableDeviceInfo.type) +} +inline std::string* AvailableDeviceInfo::mutable_type() { + std::string* _s = _internal_mutable_type(); + // @@protoc_insertion_point(field_mutable:tensorflow.AvailableDeviceInfo.type) + return _s; +} +inline const std::string& AvailableDeviceInfo::_internal_type() const { + return _impl_.type_.Get(); +} +inline void AvailableDeviceInfo::_internal_set_type(const std::string& value) { + + _impl_.type_.Set(value, GetArenaForAllocation()); +} +inline std::string* AvailableDeviceInfo::_internal_mutable_type() { + + return _impl_.type_.Mutable(GetArenaForAllocation()); +} +inline std::string* AvailableDeviceInfo::release_type() { + // @@protoc_insertion_point(field_release:tensorflow.AvailableDeviceInfo.type) + return _impl_.type_.Release(); +} +inline void AvailableDeviceInfo::set_allocated_type(std::string* type) { + if (type != nullptr) { + + } else { + + } + _impl_.type_.SetAllocated(type, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.type_.IsDefault()) { + _impl_.type_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.AvailableDeviceInfo.type) +} + +// int64 memory_limit = 3; +inline void AvailableDeviceInfo::clear_memory_limit() { + _impl_.memory_limit_ = int64_t{0}; +} +inline int64_t AvailableDeviceInfo::_internal_memory_limit() const { + return _impl_.memory_limit_; +} +inline int64_t AvailableDeviceInfo::memory_limit() const { + // @@protoc_insertion_point(field_get:tensorflow.AvailableDeviceInfo.memory_limit) + return _internal_memory_limit(); +} +inline void AvailableDeviceInfo::_internal_set_memory_limit(int64_t value) { + + _impl_.memory_limit_ = value; +} +inline void AvailableDeviceInfo::set_memory_limit(int64_t value) { + _internal_set_memory_limit(value); + // 
@@protoc_insertion_point(field_set:tensorflow.AvailableDeviceInfo.memory_limit) +} + +// string physical_description = 4; +inline void AvailableDeviceInfo::clear_physical_description() { + _impl_.physical_description_.ClearToEmpty(); +} +inline const std::string& AvailableDeviceInfo::physical_description() const { + // @@protoc_insertion_point(field_get:tensorflow.AvailableDeviceInfo.physical_description) + return _internal_physical_description(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void AvailableDeviceInfo::set_physical_description(ArgT0&& arg0, ArgT... args) { + + _impl_.physical_description_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.AvailableDeviceInfo.physical_description) +} +inline std::string* AvailableDeviceInfo::mutable_physical_description() { + std::string* _s = _internal_mutable_physical_description(); + // @@protoc_insertion_point(field_mutable:tensorflow.AvailableDeviceInfo.physical_description) + return _s; +} +inline const std::string& AvailableDeviceInfo::_internal_physical_description() const { + return _impl_.physical_description_.Get(); +} +inline void AvailableDeviceInfo::_internal_set_physical_description(const std::string& value) { + + _impl_.physical_description_.Set(value, GetArenaForAllocation()); +} +inline std::string* AvailableDeviceInfo::_internal_mutable_physical_description() { + + return _impl_.physical_description_.Mutable(GetArenaForAllocation()); +} +inline std::string* AvailableDeviceInfo::release_physical_description() { + // @@protoc_insertion_point(field_release:tensorflow.AvailableDeviceInfo.physical_description) + return _impl_.physical_description_.Release(); +} +inline void AvailableDeviceInfo::set_allocated_physical_description(std::string* physical_description) { + if (physical_description != nullptr) { + + } else { + + } + _impl_.physical_description_.SetAllocated(physical_description, GetArenaForAllocation()); +#ifdef 
PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.physical_description_.IsDefault()) { + _impl_.physical_description_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.AvailableDeviceInfo.physical_description) +} + +// ------------------------------------------------------------------- + +// MachineConfiguration + +// string hostname = 1; +inline void MachineConfiguration::clear_hostname() { + _impl_.hostname_.ClearToEmpty(); +} +inline const std::string& MachineConfiguration::hostname() const { + // @@protoc_insertion_point(field_get:tensorflow.MachineConfiguration.hostname) + return _internal_hostname(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void MachineConfiguration::set_hostname(ArgT0&& arg0, ArgT... args) { + + _impl_.hostname_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.MachineConfiguration.hostname) +} +inline std::string* MachineConfiguration::mutable_hostname() { + std::string* _s = _internal_mutable_hostname(); + // @@protoc_insertion_point(field_mutable:tensorflow.MachineConfiguration.hostname) + return _s; +} +inline const std::string& MachineConfiguration::_internal_hostname() const { + return _impl_.hostname_.Get(); +} +inline void MachineConfiguration::_internal_set_hostname(const std::string& value) { + + _impl_.hostname_.Set(value, GetArenaForAllocation()); +} +inline std::string* MachineConfiguration::_internal_mutable_hostname() { + + return _impl_.hostname_.Mutable(GetArenaForAllocation()); +} +inline std::string* MachineConfiguration::release_hostname() { + // @@protoc_insertion_point(field_release:tensorflow.MachineConfiguration.hostname) + return _impl_.hostname_.Release(); +} +inline void MachineConfiguration::set_allocated_hostname(std::string* hostname) { + if (hostname != nullptr) { + + } else { + + } + _impl_.hostname_.SetAllocated(hostname, GetArenaForAllocation()); +#ifdef 
PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.hostname_.IsDefault()) { + _impl_.hostname_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.MachineConfiguration.hostname) +} + +// string serial_identifier = 7; +inline void MachineConfiguration::clear_serial_identifier() { + _impl_.serial_identifier_.ClearToEmpty(); +} +inline const std::string& MachineConfiguration::serial_identifier() const { + // @@protoc_insertion_point(field_get:tensorflow.MachineConfiguration.serial_identifier) + return _internal_serial_identifier(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void MachineConfiguration::set_serial_identifier(ArgT0&& arg0, ArgT... args) { + + _impl_.serial_identifier_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.MachineConfiguration.serial_identifier) +} +inline std::string* MachineConfiguration::mutable_serial_identifier() { + std::string* _s = _internal_mutable_serial_identifier(); + // @@protoc_insertion_point(field_mutable:tensorflow.MachineConfiguration.serial_identifier) + return _s; +} +inline const std::string& MachineConfiguration::_internal_serial_identifier() const { + return _impl_.serial_identifier_.Get(); +} +inline void MachineConfiguration::_internal_set_serial_identifier(const std::string& value) { + + _impl_.serial_identifier_.Set(value, GetArenaForAllocation()); +} +inline std::string* MachineConfiguration::_internal_mutable_serial_identifier() { + + return _impl_.serial_identifier_.Mutable(GetArenaForAllocation()); +} +inline std::string* MachineConfiguration::release_serial_identifier() { + // @@protoc_insertion_point(field_release:tensorflow.MachineConfiguration.serial_identifier) + return _impl_.serial_identifier_.Release(); +} +inline void MachineConfiguration::set_allocated_serial_identifier(std::string* serial_identifier) { + if (serial_identifier != nullptr) { + + } else { + 
+ } + _impl_.serial_identifier_.SetAllocated(serial_identifier, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.serial_identifier_.IsDefault()) { + _impl_.serial_identifier_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.MachineConfiguration.serial_identifier) +} + +// .tensorflow.PlatformInfo platform_info = 2; +inline bool MachineConfiguration::_internal_has_platform_info() const { + return this != internal_default_instance() && _impl_.platform_info_ != nullptr; +} +inline bool MachineConfiguration::has_platform_info() const { + return _internal_has_platform_info(); +} +inline void MachineConfiguration::clear_platform_info() { + if (GetArenaForAllocation() == nullptr && _impl_.platform_info_ != nullptr) { + delete _impl_.platform_info_; + } + _impl_.platform_info_ = nullptr; +} +inline const ::tensorflow::PlatformInfo& MachineConfiguration::_internal_platform_info() const { + const ::tensorflow::PlatformInfo* p = _impl_.platform_info_; + return p != nullptr ? 
*p : reinterpret_cast( + ::tensorflow::_PlatformInfo_default_instance_); +} +inline const ::tensorflow::PlatformInfo& MachineConfiguration::platform_info() const { + // @@protoc_insertion_point(field_get:tensorflow.MachineConfiguration.platform_info) + return _internal_platform_info(); +} +inline void MachineConfiguration::unsafe_arena_set_allocated_platform_info( + ::tensorflow::PlatformInfo* platform_info) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.platform_info_); + } + _impl_.platform_info_ = platform_info; + if (platform_info) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.MachineConfiguration.platform_info) +} +inline ::tensorflow::PlatformInfo* MachineConfiguration::release_platform_info() { + + ::tensorflow::PlatformInfo* temp = _impl_.platform_info_; + _impl_.platform_info_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::PlatformInfo* MachineConfiguration::unsafe_arena_release_platform_info() { + // @@protoc_insertion_point(field_release:tensorflow.MachineConfiguration.platform_info) + + ::tensorflow::PlatformInfo* temp = _impl_.platform_info_; + _impl_.platform_info_ = nullptr; + return temp; +} +inline ::tensorflow::PlatformInfo* MachineConfiguration::_internal_mutable_platform_info() { + + if (_impl_.platform_info_ == nullptr) { + auto* p = CreateMaybeMessage<::tensorflow::PlatformInfo>(GetArenaForAllocation()); + _impl_.platform_info_ = p; + } + return _impl_.platform_info_; +} +inline 
::tensorflow::PlatformInfo* MachineConfiguration::mutable_platform_info() { + ::tensorflow::PlatformInfo* _msg = _internal_mutable_platform_info(); + // @@protoc_insertion_point(field_mutable:tensorflow.MachineConfiguration.platform_info) + return _msg; +} +inline void MachineConfiguration::set_allocated_platform_info(::tensorflow::PlatformInfo* platform_info) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.platform_info_; + } + if (platform_info) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(platform_info); + if (message_arena != submessage_arena) { + platform_info = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, platform_info, submessage_arena); + } + + } else { + + } + _impl_.platform_info_ = platform_info; + // @@protoc_insertion_point(field_set_allocated:tensorflow.MachineConfiguration.platform_info) +} + +// .tensorflow.CPUInfo cpu_info = 3; +inline bool MachineConfiguration::_internal_has_cpu_info() const { + return this != internal_default_instance() && _impl_.cpu_info_ != nullptr; +} +inline bool MachineConfiguration::has_cpu_info() const { + return _internal_has_cpu_info(); +} +inline void MachineConfiguration::clear_cpu_info() { + if (GetArenaForAllocation() == nullptr && _impl_.cpu_info_ != nullptr) { + delete _impl_.cpu_info_; + } + _impl_.cpu_info_ = nullptr; +} +inline const ::tensorflow::CPUInfo& MachineConfiguration::_internal_cpu_info() const { + const ::tensorflow::CPUInfo* p = _impl_.cpu_info_; + return p != nullptr ? 
*p : reinterpret_cast( + ::tensorflow::_CPUInfo_default_instance_); +} +inline const ::tensorflow::CPUInfo& MachineConfiguration::cpu_info() const { + // @@protoc_insertion_point(field_get:tensorflow.MachineConfiguration.cpu_info) + return _internal_cpu_info(); +} +inline void MachineConfiguration::unsafe_arena_set_allocated_cpu_info( + ::tensorflow::CPUInfo* cpu_info) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.cpu_info_); + } + _impl_.cpu_info_ = cpu_info; + if (cpu_info) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.MachineConfiguration.cpu_info) +} +inline ::tensorflow::CPUInfo* MachineConfiguration::release_cpu_info() { + + ::tensorflow::CPUInfo* temp = _impl_.cpu_info_; + _impl_.cpu_info_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::CPUInfo* MachineConfiguration::unsafe_arena_release_cpu_info() { + // @@protoc_insertion_point(field_release:tensorflow.MachineConfiguration.cpu_info) + + ::tensorflow::CPUInfo* temp = _impl_.cpu_info_; + _impl_.cpu_info_ = nullptr; + return temp; +} +inline ::tensorflow::CPUInfo* MachineConfiguration::_internal_mutable_cpu_info() { + + if (_impl_.cpu_info_ == nullptr) { + auto* p = CreateMaybeMessage<::tensorflow::CPUInfo>(GetArenaForAllocation()); + _impl_.cpu_info_ = p; + } + return _impl_.cpu_info_; +} +inline ::tensorflow::CPUInfo* MachineConfiguration::mutable_cpu_info() { + ::tensorflow::CPUInfo* _msg = _internal_mutable_cpu_info(); + // 
@@protoc_insertion_point(field_mutable:tensorflow.MachineConfiguration.cpu_info) + return _msg; +} +inline void MachineConfiguration::set_allocated_cpu_info(::tensorflow::CPUInfo* cpu_info) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.cpu_info_; + } + if (cpu_info) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(cpu_info); + if (message_arena != submessage_arena) { + cpu_info = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, cpu_info, submessage_arena); + } + + } else { + + } + _impl_.cpu_info_ = cpu_info; + // @@protoc_insertion_point(field_set_allocated:tensorflow.MachineConfiguration.cpu_info) +} + +// repeated .google.protobuf.Any device_info = 4; +inline int MachineConfiguration::_internal_device_info_size() const { + return _impl_.device_info_.size(); +} +inline int MachineConfiguration::device_info_size() const { + return _internal_device_info_size(); +} +inline ::PROTOBUF_NAMESPACE_ID::Any* MachineConfiguration::mutable_device_info(int index) { + // @@protoc_insertion_point(field_mutable:tensorflow.MachineConfiguration.device_info) + return _impl_.device_info_.Mutable(index); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::PROTOBUF_NAMESPACE_ID::Any >* +MachineConfiguration::mutable_device_info() { + // @@protoc_insertion_point(field_mutable_list:tensorflow.MachineConfiguration.device_info) + return &_impl_.device_info_; +} +inline const ::PROTOBUF_NAMESPACE_ID::Any& MachineConfiguration::_internal_device_info(int index) const { + return _impl_.device_info_.Get(index); +} +inline const ::PROTOBUF_NAMESPACE_ID::Any& MachineConfiguration::device_info(int index) const { + // @@protoc_insertion_point(field_get:tensorflow.MachineConfiguration.device_info) + return _internal_device_info(index); +} +inline ::PROTOBUF_NAMESPACE_ID::Any* MachineConfiguration::_internal_add_device_info() { + 
return _impl_.device_info_.Add(); +} +inline ::PROTOBUF_NAMESPACE_ID::Any* MachineConfiguration::add_device_info() { + ::PROTOBUF_NAMESPACE_ID::Any* _add = _internal_add_device_info(); + // @@protoc_insertion_point(field_add:tensorflow.MachineConfiguration.device_info) + return _add; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::PROTOBUF_NAMESPACE_ID::Any >& +MachineConfiguration::device_info() const { + // @@protoc_insertion_point(field_list:tensorflow.MachineConfiguration.device_info) + return _impl_.device_info_; +} + +// repeated .tensorflow.AvailableDeviceInfo available_device_info = 5; +inline int MachineConfiguration::_internal_available_device_info_size() const { + return _impl_.available_device_info_.size(); +} +inline int MachineConfiguration::available_device_info_size() const { + return _internal_available_device_info_size(); +} +inline void MachineConfiguration::clear_available_device_info() { + _impl_.available_device_info_.Clear(); +} +inline ::tensorflow::AvailableDeviceInfo* MachineConfiguration::mutable_available_device_info(int index) { + // @@protoc_insertion_point(field_mutable:tensorflow.MachineConfiguration.available_device_info) + return _impl_.available_device_info_.Mutable(index); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::AvailableDeviceInfo >* +MachineConfiguration::mutable_available_device_info() { + // @@protoc_insertion_point(field_mutable_list:tensorflow.MachineConfiguration.available_device_info) + return &_impl_.available_device_info_; +} +inline const ::tensorflow::AvailableDeviceInfo& MachineConfiguration::_internal_available_device_info(int index) const { + return _impl_.available_device_info_.Get(index); +} +inline const ::tensorflow::AvailableDeviceInfo& MachineConfiguration::available_device_info(int index) const { + // @@protoc_insertion_point(field_get:tensorflow.MachineConfiguration.available_device_info) + return _internal_available_device_info(index); +} +inline 
::tensorflow::AvailableDeviceInfo* MachineConfiguration::_internal_add_available_device_info() { + return _impl_.available_device_info_.Add(); +} +inline ::tensorflow::AvailableDeviceInfo* MachineConfiguration::add_available_device_info() { + ::tensorflow::AvailableDeviceInfo* _add = _internal_add_available_device_info(); + // @@protoc_insertion_point(field_add:tensorflow.MachineConfiguration.available_device_info) + return _add; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::tensorflow::AvailableDeviceInfo >& +MachineConfiguration::available_device_info() const { + // @@protoc_insertion_point(field_list:tensorflow.MachineConfiguration.available_device_info) + return _impl_.available_device_info_; +} + +// .tensorflow.MemoryInfo memory_info = 6; +inline bool MachineConfiguration::_internal_has_memory_info() const { + return this != internal_default_instance() && _impl_.memory_info_ != nullptr; +} +inline bool MachineConfiguration::has_memory_info() const { + return _internal_has_memory_info(); +} +inline void MachineConfiguration::clear_memory_info() { + if (GetArenaForAllocation() == nullptr && _impl_.memory_info_ != nullptr) { + delete _impl_.memory_info_; + } + _impl_.memory_info_ = nullptr; +} +inline const ::tensorflow::MemoryInfo& MachineConfiguration::_internal_memory_info() const { + const ::tensorflow::MemoryInfo* p = _impl_.memory_info_; + return p != nullptr ? 
*p : reinterpret_cast( + ::tensorflow::_MemoryInfo_default_instance_); +} +inline const ::tensorflow::MemoryInfo& MachineConfiguration::memory_info() const { + // @@protoc_insertion_point(field_get:tensorflow.MachineConfiguration.memory_info) + return _internal_memory_info(); +} +inline void MachineConfiguration::unsafe_arena_set_allocated_memory_info( + ::tensorflow::MemoryInfo* memory_info) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.memory_info_); + } + _impl_.memory_info_ = memory_info; + if (memory_info) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.MachineConfiguration.memory_info) +} +inline ::tensorflow::MemoryInfo* MachineConfiguration::release_memory_info() { + + ::tensorflow::MemoryInfo* temp = _impl_.memory_info_; + _impl_.memory_info_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::MemoryInfo* MachineConfiguration::unsafe_arena_release_memory_info() { + // @@protoc_insertion_point(field_release:tensorflow.MachineConfiguration.memory_info) + + ::tensorflow::MemoryInfo* temp = _impl_.memory_info_; + _impl_.memory_info_ = nullptr; + return temp; +} +inline ::tensorflow::MemoryInfo* MachineConfiguration::_internal_mutable_memory_info() { + + if (_impl_.memory_info_ == nullptr) { + auto* p = CreateMaybeMessage<::tensorflow::MemoryInfo>(GetArenaForAllocation()); + _impl_.memory_info_ = p; + } + return _impl_.memory_info_; +} +inline ::tensorflow::MemoryInfo* 
MachineConfiguration::mutable_memory_info() { + ::tensorflow::MemoryInfo* _msg = _internal_mutable_memory_info(); + // @@protoc_insertion_point(field_mutable:tensorflow.MachineConfiguration.memory_info) + return _msg; +} +inline void MachineConfiguration::set_allocated_memory_info(::tensorflow::MemoryInfo* memory_info) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.memory_info_; + } + if (memory_info) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(memory_info); + if (message_arena != submessage_arena) { + memory_info = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, memory_info, submessage_arena); + } + + } else { + + } + _impl_.memory_info_ = memory_info; + // @@protoc_insertion_point(field_set_allocated:tensorflow.MachineConfiguration.memory_info) +} + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// RunConfiguration + +// repeated string argument = 1; +inline int RunConfiguration::_internal_argument_size() const { + return _impl_.argument_.size(); +} +inline int RunConfiguration::argument_size() const { + return _internal_argument_size(); +} +inline void RunConfiguration::clear_argument() { + _impl_.argument_.Clear(); +} +inline std::string* RunConfiguration::add_argument() { + std::string* _s = _internal_add_argument(); + // @@protoc_insertion_point(field_add_mutable:tensorflow.RunConfiguration.argument) + return _s; +} +inline const std::string& RunConfiguration::_internal_argument(int index) const { + return _impl_.argument_.Get(index); +} +inline const std::string& RunConfiguration::argument(int index) const { + // @@protoc_insertion_point(field_get:tensorflow.RunConfiguration.argument) + return _internal_argument(index); +} +inline std::string* RunConfiguration::mutable_argument(int index) { + 
// @@protoc_insertion_point(field_mutable:tensorflow.RunConfiguration.argument) + return _impl_.argument_.Mutable(index); +} +inline void RunConfiguration::set_argument(int index, const std::string& value) { + _impl_.argument_.Mutable(index)->assign(value); + // @@protoc_insertion_point(field_set:tensorflow.RunConfiguration.argument) +} +inline void RunConfiguration::set_argument(int index, std::string&& value) { + _impl_.argument_.Mutable(index)->assign(std::move(value)); + // @@protoc_insertion_point(field_set:tensorflow.RunConfiguration.argument) +} +inline void RunConfiguration::set_argument(int index, const char* value) { + GOOGLE_DCHECK(value != nullptr); + _impl_.argument_.Mutable(index)->assign(value); + // @@protoc_insertion_point(field_set_char:tensorflow.RunConfiguration.argument) +} +inline void RunConfiguration::set_argument(int index, const char* value, size_t size) { + _impl_.argument_.Mutable(index)->assign( + reinterpret_cast(value), size); + // @@protoc_insertion_point(field_set_pointer:tensorflow.RunConfiguration.argument) +} +inline std::string* RunConfiguration::_internal_add_argument() { + return _impl_.argument_.Add(); +} +inline void RunConfiguration::add_argument(const std::string& value) { + _impl_.argument_.Add()->assign(value); + // @@protoc_insertion_point(field_add:tensorflow.RunConfiguration.argument) +} +inline void RunConfiguration::add_argument(std::string&& value) { + _impl_.argument_.Add(std::move(value)); + // @@protoc_insertion_point(field_add:tensorflow.RunConfiguration.argument) +} +inline void RunConfiguration::add_argument(const char* value) { + GOOGLE_DCHECK(value != nullptr); + _impl_.argument_.Add()->assign(value); + // @@protoc_insertion_point(field_add_char:tensorflow.RunConfiguration.argument) +} +inline void RunConfiguration::add_argument(const char* value, size_t size) { + _impl_.argument_.Add()->assign(reinterpret_cast(value), size); + // 
@@protoc_insertion_point(field_add_pointer:tensorflow.RunConfiguration.argument) +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& +RunConfiguration::argument() const { + // @@protoc_insertion_point(field_list:tensorflow.RunConfiguration.argument) + return _impl_.argument_; +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* +RunConfiguration::mutable_argument() { + // @@protoc_insertion_point(field_mutable_list:tensorflow.RunConfiguration.argument) + return &_impl_.argument_; +} + +// map env_vars = 2; +inline int RunConfiguration::_internal_env_vars_size() const { + return _impl_.env_vars_.size(); +} +inline int RunConfiguration::env_vars_size() const { + return _internal_env_vars_size(); +} +inline void RunConfiguration::clear_env_vars() { + _impl_.env_vars_.Clear(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >& +RunConfiguration::_internal_env_vars() const { + return _impl_.env_vars_.GetMap(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >& +RunConfiguration::env_vars() const { + // @@protoc_insertion_point(field_map:tensorflow.RunConfiguration.env_vars) + return _internal_env_vars(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >* +RunConfiguration::_internal_mutable_env_vars() { + return _impl_.env_vars_.MutableMap(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >* +RunConfiguration::mutable_env_vars() { + // @@protoc_insertion_point(field_mutable_map:tensorflow.RunConfiguration.env_vars) + return _internal_mutable_env_vars(); +} + +// ------------------------------------------------------------------- + +// TestResults + +// string target = 1; +inline void TestResults::clear_target() { + _impl_.target_.ClearToEmpty(); +} +inline const std::string& TestResults::target() const { + // @@protoc_insertion_point(field_get:tensorflow.TestResults.target) + return _internal_target(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void 
TestResults::set_target(ArgT0&& arg0, ArgT... args) { + + _impl_.target_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.TestResults.target) +} +inline std::string* TestResults::mutable_target() { + std::string* _s = _internal_mutable_target(); + // @@protoc_insertion_point(field_mutable:tensorflow.TestResults.target) + return _s; +} +inline const std::string& TestResults::_internal_target() const { + return _impl_.target_.Get(); +} +inline void TestResults::_internal_set_target(const std::string& value) { + + _impl_.target_.Set(value, GetArenaForAllocation()); +} +inline std::string* TestResults::_internal_mutable_target() { + + return _impl_.target_.Mutable(GetArenaForAllocation()); +} +inline std::string* TestResults::release_target() { + // @@protoc_insertion_point(field_release:tensorflow.TestResults.target) + return _impl_.target_.Release(); +} +inline void TestResults::set_allocated_target(std::string* target) { + if (target != nullptr) { + + } else { + + } + _impl_.target_.SetAllocated(target, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.target_.IsDefault()) { + _impl_.target_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.TestResults.target) +} + +// .tensorflow.BenchmarkEntries entries = 2; +inline bool TestResults::_internal_has_entries() const { + return this != internal_default_instance() && _impl_.entries_ != nullptr; +} +inline bool TestResults::has_entries() const { + return _internal_has_entries(); +} +inline void TestResults::clear_entries() { + if (GetArenaForAllocation() == nullptr && _impl_.entries_ != nullptr) { + delete _impl_.entries_; + } + _impl_.entries_ = nullptr; +} +inline const ::tensorflow::BenchmarkEntries& TestResults::_internal_entries() const { + const ::tensorflow::BenchmarkEntries* p = _impl_.entries_; + return p != nullptr ? 
*p : reinterpret_cast( + ::tensorflow::_BenchmarkEntries_default_instance_); +} +inline const ::tensorflow::BenchmarkEntries& TestResults::entries() const { + // @@protoc_insertion_point(field_get:tensorflow.TestResults.entries) + return _internal_entries(); +} +inline void TestResults::unsafe_arena_set_allocated_entries( + ::tensorflow::BenchmarkEntries* entries) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.entries_); + } + _impl_.entries_ = entries; + if (entries) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.TestResults.entries) +} +inline ::tensorflow::BenchmarkEntries* TestResults::release_entries() { + + ::tensorflow::BenchmarkEntries* temp = _impl_.entries_; + _impl_.entries_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::BenchmarkEntries* TestResults::unsafe_arena_release_entries() { + // @@protoc_insertion_point(field_release:tensorflow.TestResults.entries) + + ::tensorflow::BenchmarkEntries* temp = _impl_.entries_; + _impl_.entries_ = nullptr; + return temp; +} +inline ::tensorflow::BenchmarkEntries* TestResults::_internal_mutable_entries() { + + if (_impl_.entries_ == nullptr) { + auto* p = CreateMaybeMessage<::tensorflow::BenchmarkEntries>(GetArenaForAllocation()); + _impl_.entries_ = p; + } + return _impl_.entries_; +} +inline ::tensorflow::BenchmarkEntries* TestResults::mutable_entries() { + ::tensorflow::BenchmarkEntries* _msg = _internal_mutable_entries(); + // 
@@protoc_insertion_point(field_mutable:tensorflow.TestResults.entries) + return _msg; +} +inline void TestResults::set_allocated_entries(::tensorflow::BenchmarkEntries* entries) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.entries_; + } + if (entries) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(entries); + if (message_arena != submessage_arena) { + entries = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, entries, submessage_arena); + } + + } else { + + } + _impl_.entries_ = entries; + // @@protoc_insertion_point(field_set_allocated:tensorflow.TestResults.entries) +} + +// .tensorflow.BuildConfiguration build_configuration = 3; +inline bool TestResults::_internal_has_build_configuration() const { + return this != internal_default_instance() && _impl_.build_configuration_ != nullptr; +} +inline bool TestResults::has_build_configuration() const { + return _internal_has_build_configuration(); +} +inline void TestResults::clear_build_configuration() { + if (GetArenaForAllocation() == nullptr && _impl_.build_configuration_ != nullptr) { + delete _impl_.build_configuration_; + } + _impl_.build_configuration_ = nullptr; +} +inline const ::tensorflow::BuildConfiguration& TestResults::_internal_build_configuration() const { + const ::tensorflow::BuildConfiguration* p = _impl_.build_configuration_; + return p != nullptr ? 
*p : reinterpret_cast( + ::tensorflow::_BuildConfiguration_default_instance_); +} +inline const ::tensorflow::BuildConfiguration& TestResults::build_configuration() const { + // @@protoc_insertion_point(field_get:tensorflow.TestResults.build_configuration) + return _internal_build_configuration(); +} +inline void TestResults::unsafe_arena_set_allocated_build_configuration( + ::tensorflow::BuildConfiguration* build_configuration) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.build_configuration_); + } + _impl_.build_configuration_ = build_configuration; + if (build_configuration) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.TestResults.build_configuration) +} +inline ::tensorflow::BuildConfiguration* TestResults::release_build_configuration() { + + ::tensorflow::BuildConfiguration* temp = _impl_.build_configuration_; + _impl_.build_configuration_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::BuildConfiguration* TestResults::unsafe_arena_release_build_configuration() { + // @@protoc_insertion_point(field_release:tensorflow.TestResults.build_configuration) + + ::tensorflow::BuildConfiguration* temp = _impl_.build_configuration_; + _impl_.build_configuration_ = nullptr; + return temp; +} +inline ::tensorflow::BuildConfiguration* TestResults::_internal_mutable_build_configuration() { + + if (_impl_.build_configuration_ == nullptr) { + auto* p = 
CreateMaybeMessage<::tensorflow::BuildConfiguration>(GetArenaForAllocation()); + _impl_.build_configuration_ = p; + } + return _impl_.build_configuration_; +} +inline ::tensorflow::BuildConfiguration* TestResults::mutable_build_configuration() { + ::tensorflow::BuildConfiguration* _msg = _internal_mutable_build_configuration(); + // @@protoc_insertion_point(field_mutable:tensorflow.TestResults.build_configuration) + return _msg; +} +inline void TestResults::set_allocated_build_configuration(::tensorflow::BuildConfiguration* build_configuration) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.build_configuration_; + } + if (build_configuration) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(build_configuration); + if (message_arena != submessage_arena) { + build_configuration = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, build_configuration, submessage_arena); + } + + } else { + + } + _impl_.build_configuration_ = build_configuration; + // @@protoc_insertion_point(field_set_allocated:tensorflow.TestResults.build_configuration) +} + +// .tensorflow.CommitId commit_id = 4; +inline bool TestResults::_internal_has_commit_id() const { + return this != internal_default_instance() && _impl_.commit_id_ != nullptr; +} +inline bool TestResults::has_commit_id() const { + return _internal_has_commit_id(); +} +inline void TestResults::clear_commit_id() { + if (GetArenaForAllocation() == nullptr && _impl_.commit_id_ != nullptr) { + delete _impl_.commit_id_; + } + _impl_.commit_id_ = nullptr; +} +inline const ::tensorflow::CommitId& TestResults::_internal_commit_id() const { + const ::tensorflow::CommitId* p = _impl_.commit_id_; + return p != nullptr ? 
*p : reinterpret_cast( + ::tensorflow::_CommitId_default_instance_); +} +inline const ::tensorflow::CommitId& TestResults::commit_id() const { + // @@protoc_insertion_point(field_get:tensorflow.TestResults.commit_id) + return _internal_commit_id(); +} +inline void TestResults::unsafe_arena_set_allocated_commit_id( + ::tensorflow::CommitId* commit_id) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.commit_id_); + } + _impl_.commit_id_ = commit_id; + if (commit_id) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.TestResults.commit_id) +} +inline ::tensorflow::CommitId* TestResults::release_commit_id() { + + ::tensorflow::CommitId* temp = _impl_.commit_id_; + _impl_.commit_id_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::CommitId* TestResults::unsafe_arena_release_commit_id() { + // @@protoc_insertion_point(field_release:tensorflow.TestResults.commit_id) + + ::tensorflow::CommitId* temp = _impl_.commit_id_; + _impl_.commit_id_ = nullptr; + return temp; +} +inline ::tensorflow::CommitId* TestResults::_internal_mutable_commit_id() { + + if (_impl_.commit_id_ == nullptr) { + auto* p = CreateMaybeMessage<::tensorflow::CommitId>(GetArenaForAllocation()); + _impl_.commit_id_ = p; + } + return _impl_.commit_id_; +} +inline ::tensorflow::CommitId* TestResults::mutable_commit_id() { + ::tensorflow::CommitId* _msg = _internal_mutable_commit_id(); + // 
@@protoc_insertion_point(field_mutable:tensorflow.TestResults.commit_id) + return _msg; +} +inline void TestResults::set_allocated_commit_id(::tensorflow::CommitId* commit_id) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.commit_id_; + } + if (commit_id) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(commit_id); + if (message_arena != submessage_arena) { + commit_id = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, commit_id, submessage_arena); + } + + } else { + + } + _impl_.commit_id_ = commit_id; + // @@protoc_insertion_point(field_set_allocated:tensorflow.TestResults.commit_id) +} + +// int64 start_time = 5; +inline void TestResults::clear_start_time() { + _impl_.start_time_ = int64_t{0}; +} +inline int64_t TestResults::_internal_start_time() const { + return _impl_.start_time_; +} +inline int64_t TestResults::start_time() const { + // @@protoc_insertion_point(field_get:tensorflow.TestResults.start_time) + return _internal_start_time(); +} +inline void TestResults::_internal_set_start_time(int64_t value) { + + _impl_.start_time_ = value; +} +inline void TestResults::set_start_time(int64_t value) { + _internal_set_start_time(value); + // @@protoc_insertion_point(field_set:tensorflow.TestResults.start_time) +} + +// double run_time = 6; +inline void TestResults::clear_run_time() { + _impl_.run_time_ = 0; +} +inline double TestResults::_internal_run_time() const { + return _impl_.run_time_; +} +inline double TestResults::run_time() const { + // @@protoc_insertion_point(field_get:tensorflow.TestResults.run_time) + return _internal_run_time(); +} +inline void TestResults::_internal_set_run_time(double value) { + + _impl_.run_time_ = value; +} +inline void TestResults::set_run_time(double value) { + _internal_set_run_time(value); + // @@protoc_insertion_point(field_set:tensorflow.TestResults.run_time) 
+} + +// .tensorflow.MachineConfiguration machine_configuration = 7; +inline bool TestResults::_internal_has_machine_configuration() const { + return this != internal_default_instance() && _impl_.machine_configuration_ != nullptr; +} +inline bool TestResults::has_machine_configuration() const { + return _internal_has_machine_configuration(); +} +inline void TestResults::clear_machine_configuration() { + if (GetArenaForAllocation() == nullptr && _impl_.machine_configuration_ != nullptr) { + delete _impl_.machine_configuration_; + } + _impl_.machine_configuration_ = nullptr; +} +inline const ::tensorflow::MachineConfiguration& TestResults::_internal_machine_configuration() const { + const ::tensorflow::MachineConfiguration* p = _impl_.machine_configuration_; + return p != nullptr ? *p : reinterpret_cast( + ::tensorflow::_MachineConfiguration_default_instance_); +} +inline const ::tensorflow::MachineConfiguration& TestResults::machine_configuration() const { + // @@protoc_insertion_point(field_get:tensorflow.TestResults.machine_configuration) + return _internal_machine_configuration(); +} +inline void TestResults::unsafe_arena_set_allocated_machine_configuration( + ::tensorflow::MachineConfiguration* machine_configuration) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.machine_configuration_); + } + _impl_.machine_configuration_ = machine_configuration; + if (machine_configuration) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.TestResults.machine_configuration) +} +inline ::tensorflow::MachineConfiguration* TestResults::release_machine_configuration() { + + ::tensorflow::MachineConfiguration* temp = _impl_.machine_configuration_; + _impl_.machine_configuration_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = 
::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::MachineConfiguration* TestResults::unsafe_arena_release_machine_configuration() { + // @@protoc_insertion_point(field_release:tensorflow.TestResults.machine_configuration) + + ::tensorflow::MachineConfiguration* temp = _impl_.machine_configuration_; + _impl_.machine_configuration_ = nullptr; + return temp; +} +inline ::tensorflow::MachineConfiguration* TestResults::_internal_mutable_machine_configuration() { + + if (_impl_.machine_configuration_ == nullptr) { + auto* p = CreateMaybeMessage<::tensorflow::MachineConfiguration>(GetArenaForAllocation()); + _impl_.machine_configuration_ = p; + } + return _impl_.machine_configuration_; +} +inline ::tensorflow::MachineConfiguration* TestResults::mutable_machine_configuration() { + ::tensorflow::MachineConfiguration* _msg = _internal_mutable_machine_configuration(); + // @@protoc_insertion_point(field_mutable:tensorflow.TestResults.machine_configuration) + return _msg; +} +inline void TestResults::set_allocated_machine_configuration(::tensorflow::MachineConfiguration* machine_configuration) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.machine_configuration_; + } + if (machine_configuration) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(machine_configuration); + if (message_arena != submessage_arena) { + machine_configuration = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, machine_configuration, submessage_arena); + } + + } else { + + } + _impl_.machine_configuration_ = machine_configuration; + // 
@@protoc_insertion_point(field_set_allocated:tensorflow.TestResults.machine_configuration) +} + +// .tensorflow.RunConfiguration run_configuration = 8; +inline bool TestResults::_internal_has_run_configuration() const { + return this != internal_default_instance() && _impl_.run_configuration_ != nullptr; +} +inline bool TestResults::has_run_configuration() const { + return _internal_has_run_configuration(); +} +inline void TestResults::clear_run_configuration() { + if (GetArenaForAllocation() == nullptr && _impl_.run_configuration_ != nullptr) { + delete _impl_.run_configuration_; + } + _impl_.run_configuration_ = nullptr; +} +inline const ::tensorflow::RunConfiguration& TestResults::_internal_run_configuration() const { + const ::tensorflow::RunConfiguration* p = _impl_.run_configuration_; + return p != nullptr ? *p : reinterpret_cast( + ::tensorflow::_RunConfiguration_default_instance_); +} +inline const ::tensorflow::RunConfiguration& TestResults::run_configuration() const { + // @@protoc_insertion_point(field_get:tensorflow.TestResults.run_configuration) + return _internal_run_configuration(); +} +inline void TestResults::unsafe_arena_set_allocated_run_configuration( + ::tensorflow::RunConfiguration* run_configuration) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.run_configuration_); + } + _impl_.run_configuration_ = run_configuration; + if (run_configuration) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.TestResults.run_configuration) +} +inline ::tensorflow::RunConfiguration* TestResults::release_run_configuration() { + + ::tensorflow::RunConfiguration* temp = _impl_.run_configuration_; + _impl_.run_configuration_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == 
nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::tensorflow::RunConfiguration* TestResults::unsafe_arena_release_run_configuration() { + // @@protoc_insertion_point(field_release:tensorflow.TestResults.run_configuration) + + ::tensorflow::RunConfiguration* temp = _impl_.run_configuration_; + _impl_.run_configuration_ = nullptr; + return temp; +} +inline ::tensorflow::RunConfiguration* TestResults::_internal_mutable_run_configuration() { + + if (_impl_.run_configuration_ == nullptr) { + auto* p = CreateMaybeMessage<::tensorflow::RunConfiguration>(GetArenaForAllocation()); + _impl_.run_configuration_ = p; + } + return _impl_.run_configuration_; +} +inline ::tensorflow::RunConfiguration* TestResults::mutable_run_configuration() { + ::tensorflow::RunConfiguration* _msg = _internal_mutable_run_configuration(); + // @@protoc_insertion_point(field_mutable:tensorflow.TestResults.run_configuration) + return _msg; +} +inline void TestResults::set_allocated_run_configuration(::tensorflow::RunConfiguration* run_configuration) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete _impl_.run_configuration_; + } + if (run_configuration) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(run_configuration); + if (message_arena != submessage_arena) { + run_configuration = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, run_configuration, submessage_arena); + } + + } else { + + } + _impl_.run_configuration_ = run_configuration; + // @@protoc_insertion_point(field_set_allocated:tensorflow.TestResults.run_configuration) +} + +// string name = 9; +inline void TestResults::clear_name() { + _impl_.name_.ClearToEmpty(); +} +inline const 
std::string& TestResults::name() const { + // @@protoc_insertion_point(field_get:tensorflow.TestResults.name) + return _internal_name(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void TestResults::set_name(ArgT0&& arg0, ArgT... args) { + + _impl_.name_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.TestResults.name) +} +inline std::string* TestResults::mutable_name() { + std::string* _s = _internal_mutable_name(); + // @@protoc_insertion_point(field_mutable:tensorflow.TestResults.name) + return _s; +} +inline const std::string& TestResults::_internal_name() const { + return _impl_.name_.Get(); +} +inline void TestResults::_internal_set_name(const std::string& value) { + + _impl_.name_.Set(value, GetArenaForAllocation()); +} +inline std::string* TestResults::_internal_mutable_name() { + + return _impl_.name_.Mutable(GetArenaForAllocation()); +} +inline std::string* TestResults::release_name() { + // @@protoc_insertion_point(field_release:tensorflow.TestResults.name) + return _impl_.name_.Release(); +} +inline void TestResults::set_allocated_name(std::string* name) { + if (name != nullptr) { + + } else { + + } + _impl_.name_.SetAllocated(name, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.name_.IsDefault()) { + _impl_.name_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.TestResults.name) +} + +// .tensorflow.TestResults.BenchmarkType benchmark_type = 10; +inline void TestResults::clear_benchmark_type() { + _impl_.benchmark_type_ = 0; +} +inline ::tensorflow::TestResults_BenchmarkType TestResults::_internal_benchmark_type() const { + return static_cast< ::tensorflow::TestResults_BenchmarkType >(_impl_.benchmark_type_); +} +inline ::tensorflow::TestResults_BenchmarkType TestResults::benchmark_type() const { + // 
@@protoc_insertion_point(field_get:tensorflow.TestResults.benchmark_type) + return _internal_benchmark_type(); +} +inline void TestResults::_internal_set_benchmark_type(::tensorflow::TestResults_BenchmarkType value) { + + _impl_.benchmark_type_ = value; +} +inline void TestResults::set_benchmark_type(::tensorflow::TestResults_BenchmarkType value) { + _internal_set_benchmark_type(value); + // @@protoc_insertion_point(field_set:tensorflow.TestResults.benchmark_type) +} + +// string run_mode = 11; +inline void TestResults::clear_run_mode() { + _impl_.run_mode_.ClearToEmpty(); +} +inline const std::string& TestResults::run_mode() const { + // @@protoc_insertion_point(field_get:tensorflow.TestResults.run_mode) + return _internal_run_mode(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void TestResults::set_run_mode(ArgT0&& arg0, ArgT... args) { + + _impl_.run_mode_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.TestResults.run_mode) +} +inline std::string* TestResults::mutable_run_mode() { + std::string* _s = _internal_mutable_run_mode(); + // @@protoc_insertion_point(field_mutable:tensorflow.TestResults.run_mode) + return _s; +} +inline const std::string& TestResults::_internal_run_mode() const { + return _impl_.run_mode_.Get(); +} +inline void TestResults::_internal_set_run_mode(const std::string& value) { + + _impl_.run_mode_.Set(value, GetArenaForAllocation()); +} +inline std::string* TestResults::_internal_mutable_run_mode() { + + return _impl_.run_mode_.Mutable(GetArenaForAllocation()); +} +inline std::string* TestResults::release_run_mode() { + // @@protoc_insertion_point(field_release:tensorflow.TestResults.run_mode) + return _impl_.run_mode_.Release(); +} +inline void TestResults::set_allocated_run_mode(std::string* run_mode) { + if (run_mode != nullptr) { + + } else { + + } + _impl_.run_mode_.SetAllocated(run_mode, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if 
(_impl_.run_mode_.IsDefault()) { + _impl_.run_mode_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:tensorflow.TestResults.run_mode) +} + +// string tf_version = 12; +inline void TestResults::clear_tf_version() { + _impl_.tf_version_.ClearToEmpty(); +} +inline const std::string& TestResults::tf_version() const { + // @@protoc_insertion_point(field_get:tensorflow.TestResults.tf_version) + return _internal_tf_version(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void TestResults::set_tf_version(ArgT0&& arg0, ArgT... args) { + + _impl_.tf_version_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:tensorflow.TestResults.tf_version) +} +inline std::string* TestResults::mutable_tf_version() { + std::string* _s = _internal_mutable_tf_version(); + // @@protoc_insertion_point(field_mutable:tensorflow.TestResults.tf_version) + return _s; +} +inline const std::string& TestResults::_internal_tf_version() const { + return _impl_.tf_version_.Get(); +} +inline void TestResults::_internal_set_tf_version(const std::string& value) { + + _impl_.tf_version_.Set(value, GetArenaForAllocation()); +} +inline std::string* TestResults::_internal_mutable_tf_version() { + + return _impl_.tf_version_.Mutable(GetArenaForAllocation()); +} +inline std::string* TestResults::release_tf_version() { + // @@protoc_insertion_point(field_release:tensorflow.TestResults.tf_version) + return _impl_.tf_version_.Release(); +} +inline void TestResults::set_allocated_tf_version(std::string* tf_version) { + if (tf_version != nullptr) { + + } else { + + } + _impl_.tf_version_.SetAllocated(tf_version, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.tf_version_.IsDefault()) { + _impl_.tf_version_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // 
@@protoc_insertion_point(field_set_allocated:tensorflow.TestResults.tf_version) +} + +#ifdef __GNUC__ + #pragma GCC diagnostic pop +#endif // __GNUC__ +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + + +// @@protoc_insertion_point(namespace_scope) + +} // namespace tensorflow + +PROTOBUF_NAMESPACE_OPEN + +template <> struct is_proto_enum< ::tensorflow::TestResults_BenchmarkType> : ::std::true_type {}; +template <> +inline const EnumDescriptor* GetEnumDescriptor< ::tensorflow::TestResults_BenchmarkType>() { + return ::tensorflow::TestResults_BenchmarkType_descriptor(); +} + +PROTOBUF_NAMESPACE_CLOSE + +// @@protoc_insertion_point(global_scope) + +#include +#endif // GOOGLE_PROTOBUF_INCLUDED_GOOGLE_PROTOBUF_INCLUDED_tsl_2fprotobuf_2ftest_5flog_2eproto diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/test_log.proto 
b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/test_log.proto new file mode 100644 index 0000000000000000000000000000000000000000..6d3af02e65767be895dafa319cc05b2be6582581 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/test_log.proto @@ -0,0 +1,223 @@ +// Protocol messages for describing the results of benchmarks and unit tests. +syntax = "proto3"; + +package tensorflow; + +import "google/protobuf/any.proto"; +import "google/protobuf/wrappers.proto"; + +option cc_enable_arenas = true; +option java_outer_classname = "TestLogProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.util.testlog"; + +message EntryValue { + oneof kind { + double double_value = 1; + string string_value = 2; + } +} + +message MetricEntry { + // Metric name + string name = 1; + + // Metric value + double value = 2; + + // The minimum acceptable value for the metric if specified + google.protobuf.DoubleValue min_value = 3; + + // The maximum acceptable value for the metric if specified + google.protobuf.DoubleValue max_value = 4; +} + +// Each unit test or benchmark in a test or benchmark run provides +// some set of information. Here we provide some reasonable keys +// one would expect to see, with optional key/value pairs for things +// we haven't considered. +// +// This BenchmarkEntry should be emitted by each unit test or benchmark +// reporter. +message BenchmarkEntry { + // The name of the specific benchmark or test + // (e.g. BM_AdjustContrast_gpu_B_W_H) + string name = 1; + + // If a benchmark, how many iterations it was run for + int64 iters = 2; + + // Total cpu time used for all iterations (in seconds) + double cpu_time = 3; + + // Total wall time used for all iterations (in seconds) + double wall_time = 4; + + // Throughput (in MB/s) + double throughput = 5; + + // Generic map from result key to value. + map extras = 6; + + // Metric name, value and expected range. 
This can include accuracy metrics + // typically used to determine whether the accuracy test has passed + repeated MetricEntry metrics = 7; +} + +message BenchmarkEntries { + repeated BenchmarkEntry entry = 1; +} + +message BuildConfiguration { + string mode = 1; // opt, dbg, etc + repeated string cc_flags = 2; // CC compiler flags, if known + repeated string opts = 3; // Bazel compilation options, if known +} + +message CommitId { + oneof kind { + // Submitted changelist. + int64 changelist = 1; + string hash = 2; + } + // Hash of intermediate change between hash/changelist and what was tested. + // Not used if the build is from a commit without modifications. + string snapshot = 3; + // Changelist tested if the change list is not already submitted. + int64 pending_changelist = 4; +} + +message CPUInfo { + int64 num_cores = 1; + + int64 num_cores_allowed = 2; + + // How fast are these cpus? + double mhz_per_cpu = 3; + + // Additional cpu information. For example, + // Intel Ivybridge with HyperThreading (24 cores) dL1:32KB dL2:256KB dL3:30MB + string cpu_info = 4; + + // What kind of cpu scaling is enabled on the host. + // Examples include "performance", "ondemand", "conservative", "mixed". + string cpu_governor = 5; + + // Cache sizes (in bytes), e.g. "L2": 262144 (for 256KB) + map cache_size = 6; +} + +message MemoryInfo { + int64 total = 1; // Total virtual memory in bytes + int64 available = 2; // Immediately available memory in bytes +} + +message GPUInfo { + string model = 1; // e.g. "Tesla K40c" + string uuid = 2; // Final entry in output of "nvidia-smi -L" + string bus_id = 3; // e.g. "0000:04:00.0" +} + +message PlatformInfo { + string bits = 1; // e.g. '64bit' + string linkage = 2; // e.g. 'ELF' + string machine = 3; // e.g. 'i386' + string release = 4; // e.g. '3.13.0-76-generic' + string system = 5; // e.g. 'Linux' + string version = 6; // e.g. 
'#120-Ubuntu SMP Mon Jan 18 15:59:10 UTC 2016' +} + +message AvailableDeviceInfo { // Matches DeviceAttributes + string name = 1; // Device name. + string type = 2; // Device type, e.g. 'CPU' or 'GPU'. + int64 memory_limit = 3; // Memory capacity in bytes. + string physical_description = 4; // The physical description of this device. +} + +message MachineConfiguration { + // Host name of machine that ran the benchmark. + string hostname = 1; + + // Unique serial number of the machine. + string serial_identifier = 7; + + // Additional platform information. + PlatformInfo platform_info = 2; + + // CPU Information. + CPUInfo cpu_info = 3; + + // Other devices that are attached and relevant (e.g. GPUInfo). + repeated google.protobuf.Any device_info = 4; + + // Devices accessible to the test (e.g. as given by list_local_devices). + repeated AvailableDeviceInfo available_device_info = 5; + + MemoryInfo memory_info = 6; +} + +// Run-specific items such as arguments to the test / benchmark. +message RunConfiguration { + repeated string argument = 1; + // Environment variables used to run the test/benchmark. + map env_vars = 2; +} + +// The output of one benchmark / test run. Each run contains a list of +// tests or benchmarks, stored as BenchmarkEntry messages. +// +// This message should be emitted by the reporter (which runs the +// test / BM in a subprocess and then reads the emitted BenchmarkEntry messages; +// usually from a serialized json file, finally collecting them along +// with additional information about the test run. +message TestResults { + // The target of the run, e.g.: + // //tensorflow/core:kernels_adjust_contrast_op_benchmark_test + string target = 1; + + // The list of tests or benchmarks in this run. + BenchmarkEntries entries = 2; + + // The configuration of the build (compiled opt? with cuda? any copts?) 
+ BuildConfiguration build_configuration = 3; + + // The commit id (git hash or changelist) + CommitId commit_id = 4; + + // The time the run started (in seconds of UTC time since Unix epoch) + int64 start_time = 5; + + // The amount of time the total run took (wall time in seconds) + double run_time = 6; + + // Machine-specific parameters (Platform and CPU info) + MachineConfiguration machine_configuration = 7; + + // Run-specific parameters (arguments, etc) + RunConfiguration run_configuration = 8; + + // Benchmark target identifier. + string name = 9; + + // The type of benchmark. + enum BenchmarkType { + UNKNOWN = 0; // Fallback for protos written before Type was introduced. + CPP_MICROBENCHMARK = 1; + PYTHON_BENCHMARK = 2; + ANDROID_BENCHMARK = 3; + EDGE_BENCHMARK = 4; + IOS_BENCHMARK = 5; + } + BenchmarkType benchmark_type = 10; + + // Used for differentiating between continuous and debug builds. + // Must be one of: + // * cbuild: results from continuous build. + // * presubmit: results from oneshot requests. + // * culprit: results from culprit finder rerun. + string run_mode = 11; + + // TensorFlow version this benchmark runs against. + // This can be either set to full version or just the major version. + string tf_version = 12; +}