diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/coordination/coordination_service_agent.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/coordination/coordination_service_agent.h new file mode 100644 index 0000000000000000000000000000000000000000..dd192e07c3b9a404a8201216e0f24b1c7b5d8fa0 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/coordination/coordination_service_agent.h @@ -0,0 +1,283 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_SERVICE_AGENT_H_ +#define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_SERVICE_AGENT_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/time/time.h" +#include "tsl/distributed_runtime/call_options.h" +#include "tsl/distributed_runtime/coordination/coordination_client.h" +#include "tsl/platform/status.h" +#include "tsl/platform/statusor.h" +#include "tsl/protobuf/coordination_service.pb.h" + +namespace tensorflow { +class CoordinationServiceConfig; +}; // namespace tensorflow + +namespace tsl { +class Env; + +// CoordinationServiceAgent defines the interface for tasks to communicate with +// the coordination service instance (which implements +// CoordinationServiceInterface). One instance of the agent should be deployed +// on each task for it to send various requests and stores / retrieves config +// key-value data to the service. +// +// See CoordinationServiceInterface for more details on coordination service. +// +// All coordination service errors will have an additional +// CoordinationServiceError payload to distinguish themselves from RPC failures. +// The payload can optionally specify the error origin, and if the error is +// reported by the user via `agent->ReportError()`. +// +// Possible service errors: +// - Internal: Coordination service is not enabled. +// If it was previously accessible, coordination service +// has been shut down. +// - Aborted: Incarnation mismatch during heartbeat (either remote +// task or coordination service has restarted). +// - Unavailable: Heartbeat timeout from remote task (failed, +// crashed or got preempted). +// - InvalidArgument: Unexpected heartbeat from remote task (not +// registered or wrong config). 
+class CoordinationServiceAgent { + public: + using StatusOrValueCallback = + std::function&)>; + // Collection of key-value pairs in the same directory. + using StatusOrValueDirCallback = std::function>&)>; + using ChangedKeyValuesCallback = + std::function&)>; + + virtual ~CoordinationServiceAgent() = default; + + // Initialize coordination service agent. + virtual Status Initialize( + tsl::Env* env, std::string_view job_name, int task_id, + const tensorflow::CoordinationServiceConfig& configs, + std::unique_ptr leader_client, + StatusCallback error_fn) = 0; + virtual Status Initialize( + tsl::Env* env, const tensorflow::CoordinatedTask& task, + const tensorflow::CoordinationServiceConfig& configs, + std::unique_ptr leader_client, + StatusCallback error_fn) = 0; + + // Return true if the coordination service agent has been initialized. + virtual bool IsInitialized() = 0; + + // Return true if the coordination service agent has successfully connected + // with the Coordination Service + virtual bool IsConnected() = 0; + + // Return true if the coordination service agent has an error state. + virtual bool IsError() = 0; + + // Connect to coordination service with the following steps: + // - connect to service address specified in the config of `server_def` + // - register itself as a task to the service + // - start a thread to periodically send heartbeat message with the service + // Possible service errors: + // - FailedPrecondition: Agent is not in DISCONNECTED state. + // - InvalidArgument: Unexpected task registration + // - Aborted: Duplicate task registration (agent will retry connecting until + // the configured timeout) + virtual Status Connect() = 0; + + // Wait for all tasks to be up and registered. The call blocks until all tasks + // in the cluster are up, or some error occurs. + // Possible service errors: + // - FailedPrecondition: Agent is not in CONNECTED state. 
+ // - InvalidArgument: Unexpected task request + virtual Status WaitForAllTasks( + const tensorflow::DeviceInfo& local_devices) = 0; + + // Get the device attributes of tasks from remote tasks in the cluster. + virtual const tensorflow::DeviceInfo& GetClusterDeviceInfo() = 0; + + // State transition in coordination service agent: + // + // Init Connect SetError + // UNINITIALIZED ---> DISCONNECTED ------> CONNECTED -------> ERROR + // ^ | + // |__________________________________| + // Reset + + // Get task associated with this agent. + virtual StatusOr GetOwnTask() = 0; + + // Get status of a remote task. + virtual StatusOr> + GetTaskState(const std::vector& task) = 0; + + // Report error to coordination service. This will invoke the error callback. + // Note that the error payload will set `is_reported_error` to true, to + // distinguish user-specified errors from internal service or RPC failures. + // Possible service errors: + // - FailedPrecondition: Uninitialized/disconnected/already in error state. + // - InvalidArgument: Unexpected task request + virtual Status ReportError(const Status& error) = 0; + + // Shuts down by disconnecting from the service. Should only be called if + // agent is connected and no further agent calls (except the destructor) are + // expected. If `shutdown_barrier_timeout_in_ms` is specified in the config, + // blocks until all tasks reach the barrier before shutting down together. If + // the barrier times out, this agent will still disconnect, while an error is + // reported to other agents that did not reach the barrier on time. + // Possible service errors: + // - InvalidArgument: Unexpected task request. + // - FailedPrecondition: Task was in error state (note: agent is still + // shut down forcefully). + virtual Status Shutdown() = 0; + + // Disconnect from the service, and clean up the internal error status. + // Possible service errors: + // - InvalidArgument: Unexpected task request. 
+ // - FailedPrecondition: task is not in error state/has already + // disconnected. + virtual Status Reset() = 0; + + // Key-value store API. + // The agent does not need to be connected to utilize the key-value store. + // There are no concurrency guarantees. To avoid a race / impose an ordering + // on potentially concurrent ops (e.g. set, delete), use WaitAtBarrier(). + + // Get config key-value from the service. + // If the key-value is not inserted yet, this is a blocking call that waits + // until the corresponding key is inserted. + // - DeadlineExceeded: timed out waiting for key. + virtual StatusOr GetKeyValue(std::string_view key) = 0; + virtual StatusOr GetKeyValue(std::string_view key, + absl::Duration timeout) = 0; + // Note: Cancel the underlying RPC call with `call_opts->StartCancel()` and + // `call_opts->ClearCancelCallback()`. + virtual std::shared_ptr GetKeyValueAsync( + std::string_view, StatusOrValueCallback done) = 0; + + // Get config key-value from the service. + // - NotFound: the requested key does not exist. + virtual StatusOr TryGetKeyValue(std::string_view key) = 0; + + // Get all values under a directory (key). + // A value is considered to be in the directory if its key is prefixed with + // the directory. + // This is not a blocking call. If no keys are found, an empty vector is + // returned immediately. + virtual StatusOr> GetKeyValueDir( + std::string_view key) = 0; + virtual void GetKeyValueDirAsync(std::string_view key, + StatusOrValueDirCallback done) = 0; + + // Insert config key-value to the service. + // - AlreadyExists: key is already set. + virtual Status InsertKeyValue(std::string_view key, + std::string_view value) = 0; + + // Delete config keys in the coordination service. + virtual Status DeleteKeyValue(std::string_view key) = 0; + + // Update the value of a config key. 
+ virtual Status UpdateKeyValue(std::string_view key, + std::string_view value) = 0; + + // Register a callback that will be invoked when the key or keys under the key + // directory are changed (inserted, deleted, or updated). + virtual Status StartWatchKey(std::string_view key, + ChangedKeyValuesCallback on_change) = 0; + virtual Status StopWatchKey(std::string_view key) = 0; + + // Blocks until all (or a subset of) tasks are at the barrier or the barrier + // fails. + // + // `barrier_id` should be unique across barriers. + // + // The first WaitAtBarrier() call received by the service for a particular + // barrier_id is special in that it determines the barrier deadline based on + // timeout duration. + // However, if subsequent calls by different agents specify a different set of + // `tasks` for the same `barrier_id`, the barrier will fail instantly. + // For example, + // agent_1->WaitAtBarrier(“barrier”, 10min, <<”worker”, 1>, <”worker”, 2>>); + // agent_2->WaitAtBarrier(“barrier”, 10min, <<”worker”, 2>, <”worker”, 3>>); + // Barrier fails after agent_2’s call because it specifies a different set of + // participating tasks. + // + // If no tasks are specified (default), the barrier will block for all the + // connected tasks. + // + // Possible service errors: + // - DeadlineExceeded: Timed out waiting for specified tasks at the barrier. + // Deadline is determined by the server timestamp when it receives the + // first WaitAtBarrier() + timeout duration. + // - Cancelled: One of the tasks called CancelBarrier(). + // - Aborted: Service is shutting down. + // - Internal: Any participating task is in ERROR state. + // - InvalidArgument: (1) Conflicting tasks specified by different agents + // for the same barrier, (2) one of the participating tasks is not in + // the cluster, or (3) task making the request is not included in the + // list of participating tasks. + // - FailedPrecondition: Agent is in UNINITIALIZED or ERROR state. 
Or the + // same barrier_id was already used previously. + virtual Status WaitAtBarrier( + std::string_view barrier_id, absl::Duration timeout, + const std::vector& tasks) = 0; + + virtual void WaitAtBarrierAsync( + std::string_view barrier_id, absl::Duration timeout, + const std::vector& tasks, + StatusCallback done) = 0; + + // Aborts the barrier if it is ongoing. + // Current and future WaitAtBarrier() calls with the same id will return a + // CANCELLED error status. + // Possible service errors: + // - FailedPrecondition: Barrier has already been passed. + virtual Status CancelBarrier(std::string_view barrier_id) = 0; + virtual void CancelBarrierAsync(std::string_view barrier_id, + StatusCallback done) = 0; + + // Get unowned Env* that the agent was initialized with. + virtual StatusOr GetEnv() = 0; + + protected: + // Set the service agent to error status and invoke the error callback. + // Note: different from ReportError, this does not report the error status to + // remote coordination service. + virtual void SetError(const Status& error) = 0; + + // Activate the key-value callback watch. 
+ virtual Status ActivateWatch(std::string_view, + const std::map&) = 0; + + private: + friend class CoordinationServiceRpcHandler; +}; + +std::unique_ptr CreateCoordinationServiceAgent(); + +} // namespace tsl + +#endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_SERVICE_AGENT_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/coordination/coordination_service_error_util.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/coordination/coordination_service_error_util.h new file mode 100644 index 0000000000000000000000000000000000000000..79156778f40d9e332f26c1e36a44744e88ae3134 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/coordination/coordination_service_error_util.h @@ -0,0 +1,60 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_SERVICE_ERROR_UTIL_H_ +#define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_SERVICE_ERROR_UTIL_H_ + +#include "absl/strings/string_view.h" +#include "tsl/platform/errors.h" +#include "tsl/platform/status.h" +#include "tsl/protobuf/coordination_service.pb.h" + +namespace tsl { + +constexpr absl::string_view CoordinationErrorPayloadKey() { + return "type.googleapis.com/tensorflow.CoordinationServiceError"; +} + +// Mark error as a coordination service error (as opposed to RPC +// errors). +inline Status MakeCoordinationError(Status s) { + s.SetPayload(CoordinationErrorPayloadKey(), absl::Cord("")); + return s; +} + +// Mark error as a coordination service error (as opposed to RPC +// errors), and indicate error origin. +// Errors reported via the agent API by the user should set `is_reported_error` +// to true. +inline Status MakeCoordinationError(Status s, + const tensorflow::CoordinatedTask& origin, + bool is_reported_error = false) { + tensorflow::CoordinationServiceError error; + *error.mutable_source_task() = origin; + error.set_is_reported_error(is_reported_error); + s.SetPayload(CoordinationErrorPayloadKey(), + absl::Cord(error.SerializeAsString())); + return s; +} + +// Mark error as a coordination service error with payload. 
+inline Status MakeCoordinationError( + Status s, const tensorflow::CoordinationServiceError& payload) { + s.SetPayload(CoordinationErrorPayloadKey(), + absl::Cord(payload.SerializeAsString())); + return s; +} +} // namespace tsl + +#endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_SERVICE_ERROR_UTIL_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/async_service_interface.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/async_service_interface.h new file mode 100644 index 0000000000000000000000000000000000000000..479ca0ec8a4c6ae1cebc85cd10a43f2fc9641572 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/async_service_interface.h @@ -0,0 +1,41 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_ASYNC_SERVICE_INTERFACE_H_ +#define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_ASYNC_SERVICE_INTERFACE_H_ + +namespace tsl { + +// Represents an abstract asynchronous service that handles incoming +// RPCs with a polling loop. +class AsyncServiceInterface { + public: + virtual ~AsyncServiceInterface() {} + + // A blocking method that should be called to handle incoming RPCs. + // This method will block until the service shuts down. 
+ virtual void HandleRPCsLoop() = 0; + + // Starts shutting down this service. + // + // NOTE(mrry): To shut down this service completely, the caller must + // also shut down any servers that might share ownership of this + // service's resources (e.g. completion queues). + virtual void Shutdown() = 0; +}; + +} // namespace tsl + +#endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_ASYNC_SERVICE_INTERFACE_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/coordination/grpc_coordination_client.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/coordination/grpc_coordination_client.h new file mode 100644 index 0000000000000000000000000000000000000000..969722bac3376f4835909a4c1168a8ab65a60aff --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/coordination/grpc_coordination_client.h @@ -0,0 +1,34 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_COORDINATION_GRPC_COORDINATION_CLIENT_H_ +#define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_COORDINATION_GRPC_COORDINATION_CLIENT_H_ + +#include + +#include "tsl/distributed_runtime/coordination/coordination_client.h" +#include "tsl/distributed_runtime/rpc/grpc_channel.h" + +namespace tsl { + +CoordinationClientCache* NewGrpcCoordinationClientCache( + std::shared_ptr channel); + +CoordinationClient* NewGrpcCoordinationClient( + std::shared_ptr<::grpc::Channel> channel); + +} // namespace tsl + +#endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_COORDINATION_GRPC_COORDINATION_CLIENT_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/coordination/grpc_coordination_service_impl.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/coordination/grpc_coordination_service_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..daa7e65225f6607c72a394c63c32522840e71d9c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/coordination/grpc_coordination_service_impl.h @@ -0,0 +1,117 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_COORDINATION_GRPC_COORDINATION_SERVICE_IMPL_H_ +#define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_COORDINATION_GRPC_COORDINATION_SERVICE_IMPL_H_ + +#include + +#include "grpcpp/alarm.h" +#include "grpcpp/completion_queue.h" +#include "grpcpp/server_builder.h" +#include "tsl/distributed_runtime/coordination/coordination_service_agent.h" +#include "tsl/distributed_runtime/coordination/coordination_service_rpc_handler.h" +#include "tsl/distributed_runtime/rpc/async_service_interface.h" +#include "tsl/distributed_runtime/rpc/grpc_call.h" +#include "tsl/distributed_runtime/rpc/grpc_util.h" +#include "tsl/platform/mutex.h" +#include "tsl/platform/thread_annotations.h" +#include "tsl/platform/threadpool.h" +#include "tsl/protobuf/coordination_service.grpc.pb.h" +#include "tsl/protobuf/coordination_service.pb.h" + +namespace tsl { + +class GrpcCoordinationServiceImpl : public AsyncServiceInterface { + public: + template + using CoordCall = Call; + + GrpcCoordinationServiceImpl(thread::ThreadPool* compute_pool, + ::grpc::ServerBuilder* server_builder); + ~GrpcCoordinationServiceImpl() override {} + + void HandleRPCsLoop() override; + void Shutdown() override; + void SetCoordinationServiceAgentInstance(CoordinationServiceAgent* agent) { + rpc_handler_.SetAgentInstance(agent); + } + void SetCoordinationServiceInstance(CoordinationServiceInterface* service) { + rpc_handler_.SetServiceInstance(service); + } + CoordinationServiceRpcHandler* GetRpcHandler() { return &rpc_handler_; } + + private: +#define HANDLER(method) \ + void method##Handler(CoordCall* call) { \ + tf_shared_lock l(shutdown_mu_); \ + if (shutdown_) { \ + call->SendResponse(ToGrpcStatus( \ + errors::Internal("Coordination service has been shut down."))); \ + return; \ + } \ + compute_pool_.Schedule([this, call]() { \ + rpc_handler_.method##Async(&call->request, &call->response, \ + 
[call](const Status& s) { \ + call->ClearCancelCallback(); \ + call->SendResponse(ToGrpcStatus(s)); \ + }); \ + }); \ + Call:: \ + EnqueueRequest(&service_, cq_.get(), \ + &tensorflow::grpc::CoordinationService::AsyncService:: \ + Request##method, \ + &GrpcCoordinationServiceImpl::method##Handler, \ + /*supports_cancel=*/false); \ + } + HANDLER(RegisterTask); + HANDLER(WaitForAllTasks); + HANDLER(ShutdownTask); + HANDLER(ResetTask); + HANDLER(Heartbeat); + HANDLER(ReportErrorToTask); + HANDLER(ReportErrorToService); + HANDLER(GetTaskState); + HANDLER(InsertKeyValue); + HANDLER(GetKeyValue); + HANDLER(TryGetKeyValue); + HANDLER(GetKeyValueDir); + HANDLER(DeleteKeyValue); + HANDLER(Barrier); + HANDLER(CancelBarrier); +#undef HANDLER + + thread::ThreadPool& compute_pool_; + CoordinationServiceRpcHandler rpc_handler_; + + mutex shutdown_mu_; + bool shutdown_ TF_GUARDED_BY(shutdown_mu_); + std::unique_ptr<::grpc::Alarm> shutdown_alarm_; + + std::unique_ptr<::grpc::ServerCompletionQueue> cq_; + tensorflow::grpc::CoordinationService::AsyncService service_; + + GrpcCoordinationServiceImpl(const GrpcCoordinationServiceImpl&) = delete; + void operator=(const GrpcCoordinationServiceImpl&) = delete; +}; + +} // namespace tsl + +#endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_COORDINATION_GRPC_COORDINATION_SERVICE_IMPL_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_state.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_state.h new file mode 100644 index 0000000000000000000000000000000000000000..37b41edc0a01038ff280ee40b2f6c7df7157a62c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/distributed_runtime/rpc/grpc_state.h @@ -0,0 +1,254 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_STATE_H_ +#define TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_STATE_H_ + +#include +#include +#include + +#include "grpcpp/generic/generic_stub.h" +#include "grpcpp/grpcpp.h" +#include "absl/status/status.h" +#include "tsl/distributed_runtime/call_options.h" +#include "tsl/distributed_runtime/rpc/grpc_client_cq_tag.h" +#include "tsl/distributed_runtime/rpc/grpc_util.h" +#include "tsl/platform/errors.h" +#include "tsl/platform/status.h" +#include "tsl/platform/strcat.h" +#include "tsl/platform/threadpool.h" +#include "tsl/util/env_var.h" + +namespace tsl { + +// Object allocated per active RPC. +// Manage the state of a single asynchronous RPC request. If `max_retries` +// is greater than 0, the request will be retried for any transient failures. +// Note: `parse_proto_fn` is used solely to allow TensorFlow's worker service +// to pass in an optimized function that avoids an unnecessary copy of tensors. +// That is not implemented as an overload of tsl::GrpcMaybeParseProto because it +// has dependencies on many TensorFlow-specific absractions. 
+template +class RPCState : public GrpcClientCQTag { + public: + RPCState( + ::grpc::GenericStub* stub, ::grpc::CompletionQueue* cq, + const ::grpc::string& method, const protobuf::Message& request, + Response* response, StatusCallback done, CallOptions* call_opts, + thread::ThreadPool* threadpool, int32_t max_retries = 0, + bool fail_fast = true, const string* target = nullptr, + std::function parse_proto_fn = + [](::grpc::ByteBuffer* src, Response* dst) { + return tsl::GrpcMaybeParseProto(src, dst); + }) + : RPCState( + stub, cq, method, request, response, std::move(done), call_opts, + threadpool, + // 1) If GRPC_FAIL_FAST is set to 'true' or 'false', + // fail_fast=$GRPC_FAIL_FAST. See b/141948186. + // 2) Otherwise if GRPC_FAIL_FAST is set to 'use_caller', use the + // fail_fast from the caller. See b/140260119. + // + // Current default: use caller's fail_fast argument. + // + // NOTE: Callers mostly set fail_fast=true to prevent job hanging + // on worker task failures, except a few cases such as GetStatus + // in cluster initialization and collective param resolution. + [fail_fast, &done]() -> bool { + string fail_fast_env; + TF_CHECK_OK(ReadStringFromEnvVar("GRPC_FAIL_FAST", "use_caller", + &fail_fast_env)); + string fail_fast_env_lower = absl::AsciiStrToLower(fail_fast_env); + if (fail_fast_env_lower == "true") { + return true; + } else if (fail_fast_env_lower == "use_caller") { + return fail_fast; + } else if (fail_fast_env_lower == "false") { + return false; + } else { + string error_message = strings::StrCat( + "Invalid GRPC_FAIL_FAST config: ", fail_fast_env); + LOG(WARNING) << error_message; + done(errors::InvalidArgument(error_message)); + return false; + } + }(), + (call_opts != nullptr ? 
call_opts->GetTimeout() : 0), max_retries, + target, parse_proto_fn) {} + + template + RPCState( + ::grpc::GenericStub* stub, ::grpc::CompletionQueue* cq, + const ::grpc::string& method, const Request& request, Response* response, + StatusCallback done, CallOptions* call_opts, + thread::ThreadPool* threadpool, bool fail_fast, int64_t timeout_in_ms, + int32_t max_retries, const string* target, + std::function parse_proto_fn = + [](::grpc::ByteBuffer* src, Response* dst) { + return tsl::GrpcMaybeParseProto(src, dst); + }) + : call_opts_(call_opts), + threadpool_(threadpool), + done_(std::move(done)), + timeout_in_ms_(timeout_in_ms), + max_retries_(max_retries), + cq_(cq), + stub_(stub), + method_(method), + fail_fast_(fail_fast), + target_(target), + parse_proto_fn_(std::move(parse_proto_fn)) { + response_ = response; + ::grpc::Status s = GrpcMaybeUnparseProto(request, &request_buf_); + if (!s.ok()) { + LOG(ERROR) << "GrpcMaybeUnparseProto returned with non-ok status: " + << s.error_message(); + // Skip retry logic if we fail to parse our request. 
+ done_(FromGrpcStatus(s)); + delete this; + return; + } + StartCall(); + } + + void StartCall() { + context_.reset(new ::grpc::ClientContext()); + context_->set_wait_for_ready(!fail_fast_); + if (timeout_in_ms_ > 0) { + context_->set_deadline( + gpr_time_from_millis(timeout_in_ms_, GPR_TIMESPAN)); + } + if (call_opts_) { + call_opts_->SetCancelCallback([this]() { context_->TryCancel(); }); + } + + VLOG(2) << "Starting call: " << method_; + + call_ = stub_->PrepareUnaryCall(context_.get(), method_, request_buf_, cq_); + call_->StartCall(); + call_->Finish(&response_buf_, &status_, this); + } + + void OnCompleted(bool ok) override { + if (call_opts_) { + call_opts_->ClearCancelCallback(); + } + + VLOG(2) << "Completed call: " << method_; + + Status s = FromGrpcStatus(status_); + if (s.ok() && !ok) { + // Since this function is only being used for processing the response + // to Finish for client-side unary calls, ok should never be false + s.Update( + errors::Internal("GRPC status is okay but CompletionQueueStatus is " + "not. This should never happen.")); + } + + if (s.ok()) { + if (threadpool_) { + // Run parse and callback in another thread, returning this + // one to service more RPCs. 
+ threadpool_->Schedule([this]() { ParseAndCallDone(); }); + } else { + ParseAndCallDone(); + } + return; + } + + VLOG(1) << method_ << " returned with non-ok status: " << s + << " Retries: " << num_retries_ << " Max: " << max_retries_ << "\n" + << context_->debug_error_string(); + // Retry if we have any attempts left + if (++num_retries_ <= max_retries_ && + (absl::IsUnavailable(s) || absl::IsUnknown(s))) { + response_buf_.Clear(); + VLOG(1) << "Retrying call for " << method_ << "Retry: " << num_retries_ + << " of " << max_retries_; + + ComputeRetryBackoffMs(/*min_backoff_ms=*/1, /*max_backoff_ms=*/10000); + int64_t backoff_us = retry_backoff_ms_ * 1000; + Env::Default()->SchedClosureAfter(/*micros=*/backoff_us, + [this]() { StartCall(); }); + } else { + // Attach additional GRPC error information if any to the final status + string error_msg = std::string(s.message()); + strings::StrAppend(&error_msg, "\nAdditional GRPC error information"); + if (target_) { + strings::StrAppend(&error_msg, " from remote target ", *target_); + } + strings::StrAppend(&error_msg, " while calling ", method_); + strings::StrAppend(&error_msg, ":\n:", context_->debug_error_string()); + s = errors::CreateWithUpdatedMessage(s, error_msg); + // Always treat gRPC cancellation as a derived error. This ensures that + // other error types are preferred during status aggregation. (gRPC + // cancellation messages do not contain the original status message). 
+ if (s.code() == absl::StatusCode::kCancelled) { + s = StatusGroup::MakeDerived(s); + } + + done_(s); + delete this; + } + } + + void ParseAndCallDone() { + Status s; + if (!parse_proto_fn_(&response_buf_, response_)) { + s.Update(errors::Internal("could not parse rpc response")); + } + done_(s); + delete this; + } + + private: + void ComputeRetryBackoffMs(int min_backoff_ms, int max_backoff_ms) { + constexpr float kBackoffBase = 1.3; + if (retry_backoff_ms_ < 0) { + retry_backoff_ms_ = min_backoff_ms; + } else { + retry_backoff_ms_ *= kBackoffBase; + if (retry_backoff_ms_ > max_backoff_ms) { + retry_backoff_ms_ = max_backoff_ms; + } + } + } + + CallOptions* call_opts_; + std::unique_ptr<::grpc::ClientContext> context_; + thread::ThreadPool* threadpool_; + std::unique_ptr<::grpc::GenericClientAsyncResponseReader> call_; + Response* response_; + ::grpc::ByteBuffer request_buf_; + ::grpc::ByteBuffer response_buf_; + ::grpc::Status status_; + StatusCallback done_; + int64_t timeout_in_ms_; + + size_t num_retries_ = 0; + size_t max_retries_; + double retry_backoff_ms_ = -1; + + ::grpc::CompletionQueue* cq_; + ::grpc::GenericStub* stub_; + ::grpc::string method_; + bool fail_fast_; + const string* target_; + std::function parse_proto_fn_ = nullptr; +}; +} // namespace tsl + +#endif // TENSORFLOW_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_STATE_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/abi.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/abi.h new file mode 100644 index 0000000000000000000000000000000000000000..b7106a0d7203a3ae93ae1671549d509b31ce1f1b --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/abi.h @@ -0,0 +1,31 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_ABI_H_ +#define TENSORFLOW_TSL_PLATFORM_ABI_H_ + +#include + +#include "tsl/platform/types.h" + +namespace tsl { +namespace port { + +std::string MaybeAbiDemangle(const char* name); + +} // namespace port +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_ABI_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/base64.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/base64.h new file mode 100644 index 0000000000000000000000000000000000000000..888a3ebb35545a26e2ebc089d2ad93de09b84fd2 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/base64.h @@ -0,0 +1,60 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_BASE64_H_ +#define TENSORFLOW_TSL_PLATFORM_BASE64_H_ + +#include + +#include "tsl/platform/status.h" +#include "tsl/platform/stringpiece.h" + +namespace tsl { + +/// \brief Converts data into web-safe base64 encoding. +/// +/// See https://en.wikipedia.org/wiki/Base64 +template +Status Base64Encode(StringPiece source, bool with_padding, T* encoded); +template +Status Base64Encode(StringPiece source, + T* encoded); // with_padding=false. + +/// \brief Converts data from web-safe base64 encoding. +/// +/// See https://en.wikipedia.org/wiki/Base64 +template +Status Base64Decode(StringPiece data, T* decoded); + +// Explicit instantiations defined in base64.cc. +extern template Status Base64Decode(StringPiece data, + std::string* decoded); +extern template Status Base64Encode(StringPiece source, + std::string* encoded); +extern template Status Base64Encode(StringPiece source, + bool with_padding, + std::string* encoded); + +extern template Status Base64Decode(StringPiece data, + tstring* decoded); +extern template Status Base64Encode(StringPiece source, + tstring* encoded); +extern template Status Base64Encode(StringPiece source, + bool with_padding, + tstring* encoded); + +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_BASE64_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/bfloat16.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/bfloat16.h new file mode 100644 index 0000000000000000000000000000000000000000..daf5e93eca56dcc65f2ea21176d6d0cf4962c410 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/bfloat16.h @@ -0,0 +1,27 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_BFLOAT16_H_ +#define TENSORFLOW_TSL_PLATFORM_BFLOAT16_H_ + +// clang-format off +#include "Eigen/Core" // from @eigen_archive +// clang-format on + +namespace tsl { +typedef Eigen::bfloat16 bfloat16; +} // end namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_BFLOAT16_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/blocking_counter.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/blocking_counter.h new file mode 100644 index 0000000000000000000000000000000000000000..c085e4d66af54e0c43e03adc4dbfdce973efd011 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/blocking_counter.h @@ -0,0 +1,80 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_BLOCKING_COUNTER_H_ +#define TENSORFLOW_TSL_PLATFORM_BLOCKING_COUNTER_H_ + +#include + +#include "tsl/platform/logging.h" +#include "tsl/platform/mutex.h" + +namespace tsl { + +class BlockingCounter { + public: + BlockingCounter(int initial_count) + : state_(initial_count << 1), notified_(false) { + CHECK_GE(initial_count, 0); + DCHECK_EQ((initial_count << 1) >> 1, initial_count); + } + + ~BlockingCounter() {} + + inline void DecrementCount() { + unsigned int v = state_.fetch_sub(2, std::memory_order_acq_rel) - 2; + if (v != 1) { + DCHECK_NE(((v + 2) & ~1), 0); + return; // either count has not dropped to 0, or waiter is not waiting + } + mutex_lock l(mu_); + DCHECK(!notified_); + notified_ = true; + cond_var_.notify_all(); + } + + inline void Wait() { + unsigned int v = state_.fetch_or(1, std::memory_order_acq_rel); + if ((v >> 1) == 0) return; + mutex_lock l(mu_); + while (!notified_) { + cond_var_.wait(l); + } + } + // Wait for the specified time, return false iff the count has not dropped to + // zero before the timeout expired. 
+ inline bool WaitFor(std::chrono::milliseconds ms) { + unsigned int v = state_.fetch_or(1, std::memory_order_acq_rel); + if ((v >> 1) == 0) return true; + mutex_lock l(mu_); + while (!notified_) { + const std::cv_status status = cond_var_.wait_for(l, ms); + if (status == std::cv_status::timeout) { + return false; + } + } + return true; + } + + private: + mutex mu_; + condition_variable cond_var_; + std::atomic state_; // low bit is waiter flag + bool notified_; +}; + +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_BLOCKING_COUNTER_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/casts.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/casts.h new file mode 100644 index 0000000000000000000000000000000000000000..c1253123b3716d42b62be38cdb76263ef800ac51 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/casts.h @@ -0,0 +1,31 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_CASTS_H_ +#define TENSORFLOW_TSL_PLATFORM_CASTS_H_ + +#include "tsl/platform/platform.h" + +#if defined(PLATFORM_GOOGLE) +#include "tsl/platform/google/casts.h" // IWYU pragma: export +#elif defined(PLATFORM_POSIX) || defined(PLATFORM_POSIX_ANDROID) || \ + defined(PLATFORM_GOOGLE_ANDROID) || defined(PLATFORM_POSIX_IOS) || \ + defined(PLATFORM_GOOGLE_IOS) || defined(PLATFORM_WINDOWS) +#include "tsl/platform/default/casts.h" // IWYU pragma: export +#else +#error Define the appropriate PLATFORM_ macro for this platform +#endif + +#endif // TENSORFLOW_TSL_PLATFORM_CASTS_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/coding.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/coding.h new file mode 100644 index 0000000000000000000000000000000000000000..65ce99287a9b8fe61cc96216bb69fbf9c05cbfad --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/coding.h @@ -0,0 +1,71 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +// Endian-neutral encoding: +// * Fixed-length numbers are encoded with least-significant byte first +// * In addition we support variable length "varint" encoding +// * Strings are encoded prefixed by their length in varint format + +#ifndef TENSORFLOW_TSL_PLATFORM_CODING_H_ +#define TENSORFLOW_TSL_PLATFORM_CODING_H_ + +#include "tsl/platform/stringpiece.h" +#include "tsl/platform/types.h" + +namespace tsl { +namespace core { + +// Maximum number of bytes occupied by a varint32. +static const int kMaxVarint32Bytes = 5; + +// Maximum number of bytes occupied by a varint64. +static const int kMaxVarint64Bytes = 10; + +// Lower-level versions of Put... that write directly into a character buffer +// REQUIRES: dst has enough space for the value being written +extern void EncodeFixed16(char* dst, uint16 value); +extern void EncodeFixed32(char* dst, uint32 value); +extern void EncodeFixed64(char* dst, uint64 value); +extern void PutFixed16(string* dst, uint16 value); +extern void PutFixed32(string* dst, uint32 value); +extern void PutFixed64(string* dst, uint64 value); + +extern void PutVarint32(string* dst, uint32 value); +extern void PutVarint64(string* dst, uint64 value); + +extern void PutVarint32(tstring* dst, uint32 value); +extern void PutVarint64(tstring* dst, uint64 value); + +extern bool GetVarint32(StringPiece* input, uint32* value); +extern bool GetVarint64(StringPiece* input, uint64* value); + +extern const char* GetVarint32Ptr(const char* p, const char* limit, uint32* v); +extern const char* GetVarint64Ptr(const char* p, const char* limit, uint64* v); + +// Internal routine for use by fallback path of GetVarint32Ptr +extern const char* GetVarint32PtrFallback(const char* p, const char* limit, + uint32* value); +extern const char* GetVarint32Ptr(const char* p, const char* limit, + uint32* value); +extern char* EncodeVarint32(char* dst, uint32 v); +extern char* 
EncodeVarint64(char* dst, uint64 v); + +// Returns the length of the varint32 or varint64 encoding of "v" +extern int VarintLength(uint64_t v); + +} // namespace core +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_CODING_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/context.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/context.h new file mode 100644 index 0000000000000000000000000000000000000000..045a2dca1dfb61731579c3375fa66334010f579c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/context.h @@ -0,0 +1,47 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_CONTEXT_H_ +#define TENSORFLOW_TSL_PLATFORM_CONTEXT_H_ + +#include "tsl/platform/platform.h" + +namespace tsl { + +enum class ContextKind { + // Initial state with default (empty) values. + kDefault, + // Initial state inherited from the creating or scheduling thread. + kThread, +}; + +// Context is a container for request-specific information that should be passed +// to threads that perform related work. The default constructor should capture +// all relevant context. +class Context; + +// Scoped object that sets the current thread's context until the object is +// destroyed. 
+class WithContext; + +} // namespace tsl + +#if defined(PLATFORM_GOOGLE) +#include "tsl/platform/google/context.h" // IWYU pragma: export +#else +#include "tsl/platform/default/context.h" // IWYU pragma: export +#endif + +#endif // TENSORFLOW_TSL_PLATFORM_CONTEXT_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cord.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cord.h new file mode 100644 index 0000000000000000000000000000000000000000..cb1233f576ae40805b2dc1c6a59a55cc555d6c03 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cord.h @@ -0,0 +1,26 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_CORD_H_ +#define TENSORFLOW_TSL_PLATFORM_CORD_H_ + +// It seems CORD doesn't work well with CUDA <= 10.2 +#if !defined(__CUDACC__) || ((defined(__CUDACC__) && CUDA_VERSION > 10020)) +#include "absl/strings/cord.h" // IWYU pragma: export +#define TF_CORD_SUPPORT 1 + +#endif // __CUDACC__ + +#endif // TENSORFLOW_TSL_PLATFORM_CORD_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cpu_info.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cpu_info.h new file mode 100644 index 0000000000000000000000000000000000000000..68506b1d34ae8e8eda2110cfbc800ee62a925b41 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cpu_info.h @@ -0,0 +1,171 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_CPU_INFO_H_ +#define TENSORFLOW_TSL_PLATFORM_CPU_INFO_H_ + +#include + +// TODO(ahentz): This is not strictly required here but, for historical +// reasons, many people depend on cpu_info.h in order to use kLittleEndian. 
+#include "tsl/platform/byte_order.h" + +#if defined(_MSC_VER) +// included so __cpuidex function is available for GETCPUID on Windows +#include +#endif + +namespace tsl { +namespace port { + +// Returns an estimate of the number of schedulable CPUs for this +// process. Usually, it's constant throughout the lifetime of a +// process, but it might change if the underlying cluster management +// software can change it dynamically. If the underlying call fails, a default +// value (e.g. `4`) may be returned. +int NumSchedulableCPUs(); + +// Returns an estimate for the maximum parallelism for this process. +// Applications should avoid running more than this number of threads with +// intensive workloads concurrently to avoid performance degradation and +// contention. +// This value is either the number of schedulable CPUs, or a value specific to +// the underlying cluster management. Applications should assume this value can +// change throughout the lifetime of the process. This function must not be +// called during initialization, i.e., before main() has started. +int MaxParallelism(); + +// Returns an estimate for the maximum parallelism for this process on the +// provided numa node, or any numa node if `numa_node` is kNUMANoAffinity. +// See MaxParallelism() for more information. +int MaxParallelism(int numa_node); + +// Returns the total number of CPUs on the system. This number should +// not change even if the underlying cluster management software may +// change the number of schedulable CPUs. Unlike `NumSchedulableCPUs`, if the +// underlying call fails, an invalid value of -1 will be returned; +// the user must check for validity. +static constexpr int kUnknownCPU = -1; +int NumTotalCPUs(); + +// Returns the id of the current CPU. Returns -1 if the current CPU cannot be +// identified. If successful, the return value will be in [0, NumTotalCPUs()). 
+int GetCurrentCPU(); + +// Returns an estimate of the number of hyperthreads per physical core +// on the CPU +int NumHyperthreadsPerCore(); + +// Mostly ISA related features that we care about +enum CPUFeature { + // Do not change numeric assignments. + MMX = 0, + SSE = 1, + SSE2 = 2, + SSE3 = 3, + SSSE3 = 4, + SSE4_1 = 5, + SSE4_2 = 6, + CMOV = 7, + CMPXCHG8B = 8, + CMPXCHG16B = 9, + POPCNT = 10, + AES = 11, + AVX = 12, + RDRAND = 13, + AVX2 = 14, + FMA = 15, + F16C = 16, + PCLMULQDQ = 17, + RDSEED = 18, + ADX = 19, + SMAP = 20, + + // Prefetch Vector Data Into Caches with Intent to Write and T1 Hint + // http://www.felixcloutier.com/x86/PREFETCHWT1.html. + // You probably want PREFETCHW instead. + PREFETCHWT1 = 21, + + BMI1 = 22, + BMI2 = 23, + HYPERVISOR = 25, // 0 when on a real CPU, 1 on (well-behaved) hypervisor. + + // Prefetch Data into Caches in Anticipation of a Write (3D Now!). + // http://www.felixcloutier.com/x86/PREFETCHW.html + PREFETCHW = 26, + + // AVX-512: 512-bit vectors (plus masking, etc.) in Knights Landing, + // Skylake, Xeon, etc. Each of these entries is a different subset of + // instructions, various combinations of which occur on various CPU types. + AVX512F = 27, // Foundation + AVX512CD = 28, // Conflict detection + AVX512ER = 29, // Exponential and reciprocal + AVX512PF = 30, // Prefetching + AVX512VL = 31, // Shorter vector lengths + AVX512BW = 32, // Byte and word + AVX512DQ = 33, // Dword and qword + AVX512VBMI = 34, // Bit manipulation + AVX512IFMA = 35, // Integer multiply-add + AVX512_4VNNIW = 36, // Integer neural network (Intel Xeon Phi only) + AVX512_4FMAPS = 37, // Floating point neural network (Intel Xeon Phi only) + AVX512_VNNI = 38, // Integer neural network + AVX512_BF16 = 39, // Bfloat16 neural network + + // AVX version of AVX512_VNNI in CPUs such as Alder Lake and Sapphire Rapids. + AVX_VNNI = 40, // Integer neural network + + // AMX: Advanced Matrix Extension in Sapphire Rapids. 
+ // Perform matrix multiplication on the Tile Matrix Multiply (TMUL) unit, + // supporting two popular data types in neural networks, int8 and bfloat16. + AMX_TILE = 41, // Tile configuration and load/store + AMX_INT8 = 42, // Int8 tile matrix multiplication + AMX_BF16 = 43, // Bfloat16 tile matrix multiplication + + AVX512_FP16 = 44, // Float16 neural network + AMX_FP16 = 45, // Float16 tile matrix multiplication + AVX_NE_CONVERT = 46, // Instructions for faster bfloat16, float16 convert. + AVX_VNNI_INT8 = 47, // VNNI instructions for combinations of u8, s8 dtypes. +}; + +enum Aarch64CPU { + ARM_NEOVERSE_N1 = 0, // ARM NEOVERSE N1 + ARM_NEOVERSE_V1 = 1, // ARM NEOVERSE V1 +}; +// Checks whether the current AArch64 processor is supported. +bool TestAarch64CPU(Aarch64CPU cpu); + +// Checks whether the current processor supports one of the features above. +// Checks CPU registers to return hardware capabilities. +bool TestCPUFeature(CPUFeature feature); + +// Returns CPU Vendor string (i.e. 'GenuineIntel', 'AuthenticAMD', etc.) +std::string CPUVendorIDString(); + +// Returns CPU family. +int CPUFamily(); + +// Returns CPU model number. +int CPUModelNum(); + +// Returns nominal core processor cycles per second of each processor. +double NominalCPUFrequency(); + +// Returns num of hyperthreads per physical core +int CPUIDNumSMT(); + +} // namespace port +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_CPU_INFO_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/ctstring.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/ctstring.h new file mode 100644 index 0000000000000000000000000000000000000000..f841e5f4d22af5634ac5d94ec6ca0e4d07c44516 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/ctstring.h @@ -0,0 +1,123 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_CTSTRING_H_ +#define TENSORFLOW_TSL_PLATFORM_CTSTRING_H_ + +#include +#include + +#include "tsl/platform/ctstring_internal.h" + +// Initialize a new tstring. This must be called before using any function +// below. +inline void TF_TString_Init(TF_TString *str); +// Deallocate a tstring. +inline void TF_TString_Dealloc(TF_TString *str); + +// Resizes `str' to `new_size'. This function will appropriately grow or shrink +// the string buffer to fit a `new_size' string. Grown regions of the string +// will be initialized with `c'. +inline char *TF_TString_Resize(TF_TString *str, size_t new_size, char c); +// Similar to TF_TString_Resize, except the newly allocated regions will remain +// uninitialized. This is useful if you plan on overwriting the newly grown +// regions immediately after allocation; doing so will elide a superfluous +// initialization of the new buffer. +inline char *TF_TString_ResizeUninitialized(TF_TString *str, size_t new_size); +// Reserves a string buffer with a capacity of at least `new_cap'. +// Reserve will not change the size, or the contents of the existing +// string. This is useful if you have a rough idea of `str's upperbound in +// size, and want to avoid allocations as you append to `str'. 
It should not be +// considered safe to write in the region between size and capacity; explicitly +// resize before doing so. +inline void TF_TString_Reserve(TF_TString *str, size_t new_cap); +// Similar to TF_TString_Reserve, except that we ensure amortized growth, i.e. +// that we grow the capacity by at least a constant factor >1. +inline void TF_TString_ReserveAmortized(TF_TString *str, size_t new_cap); + +// Returns the size of the string. +inline size_t TF_TString_GetSize(const TF_TString *str); +// Returns the capacity of the string buffer. It should not be considered safe +// to write in the region between size and capacity---call Resize or +// ResizeUninitialized before doing so. +inline size_t TF_TString_GetCapacity(const TF_TString *str); +// Returns the underlying type of the tstring: +// TF_TSTR_SMALL: +// Small string optimization; the contents of strings +// less than 22-bytes are stored in the TF_TString struct. This avoids any +// heap allocations. +// TF_TSTR_LARGE: +// Heap allocated string. +// TF_TSTR_OFFSET: (currently unused) +// An offset defined string. The string buffer begins at an internally +// defined little-endian offset from `str'; i.e. GetDataPointer() = str + +// offset. This type is useful for memory mapping or reading string tensors +// directly from file, without the need to deserialize the data. For +// security reasons, it is imperative that OFFSET based string tensors are +// validated before use, or are from a trusted source. +// TF_TSTR_VIEW: +// A view into an unowned character string. +// +// NOTE: +// VIEW and OFFSET types are immutable, so any modifcation via Append, +// AppendN, or GetMutableDataPointer of a VIEW/OFFSET based tstring will +// result in a conversion to an owned type (SMALL/LARGE). +inline TF_TString_Type TF_TString_GetType(const TF_TString *str); + +// Returns a const char pointer to the start of the underlying string. The +// underlying character buffer may not be null-terminated. 
+inline const char *TF_TString_GetDataPointer(const TF_TString *str); +// Returns a char pointer to a mutable representation of the underlying string. +// In the case of VIEW and OFFSET types, `src' is converted to an owned type +// (SMALL/LARGE). The underlying character buffer may not be null-terminated. +inline char *TF_TString_GetMutableDataPointer(TF_TString *str); + +// Sets `dst' as a VIEW type to `src'. `dst' will not take ownership of `src'. +// It is the user's responsibility to ensure that the lifetime of `src' exceeds +// `dst'. Any mutations to `dst' via Append, AppendN, or GetMutableDataPointer, +// will result in a copy into an owned SMALL or LARGE type, and will not modify +// `src'. +inline void TF_TString_AssignView(TF_TString *dst, const char *src, + size_t size); + +// Appends `src' onto `dst'. If `dst' is a VIEW or OFFSET type, it will first +// be converted to an owned LARGE or SMALL type. `dst' should not point to +// memory owned by `src'. +inline void TF_TString_Append(TF_TString *dst, const TF_TString *src); +inline void TF_TString_AppendN(TF_TString *dst, const char *src, size_t size); + +// Copy/Move/Assign semantics +// +// | src | dst | complexity +// Copy | * | SMALL/LARGE | fixed/O(size) +// Assign | SMALL | SMALL | fixed +// Assign | OFFSET | VIEW | fixed +// Assign | VIEW | VIEW | fixed +// Assign | LARGE | LARGE | O(size) +// Move | * | same as src | fixed + +// Copies `src' to `dst'. `dst' will be an owned type (SMALL/LARGE). `src' +// should not point to memory owned by `dst'. +inline void TF_TString_Copy(TF_TString *dst, const char *src, size_t size); +// Assigns a `src' tstring to `dst'. An OFFSET `src' type will yield a `VIEW' +// `dst'. LARGE `src' types will be copied to a new buffer; all other `src' +// types will incur a fixed cost. +inline void TF_TString_Assign(TF_TString *dst, const TF_TString *src); +// Moves a `src' tstring to `dst'. Moving a LARGE `src' to `dst' will result in +// a valid but unspecified `src'. 
This function incurs a fixed cost for all +// inputs. +inline void TF_TString_Move(TF_TString *dst, TF_TString *src); + +#endif // TENSORFLOW_TSL_PLATFORM_CTSTRING_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cuda_libdevice_path.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cuda_libdevice_path.h new file mode 100644 index 0000000000000000000000000000000000000000..d8c2b6d01daf43f0b85242be3e948ff14ec1691e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/cuda_libdevice_path.h @@ -0,0 +1,49 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_CUDA_LIBDEVICE_PATH_H_ +#define TENSORFLOW_TSL_PLATFORM_CUDA_LIBDEVICE_PATH_H_ + +#include +#include + +namespace tsl { + +// Returns, in order of preference, potential locations of the root directory of +// the CUDA SDK, which contains sub-folders such as bin, lib64, and nvvm. +std::vector CandidateCudaRoots(); + +// A convenient wrapper for CandidateCudaRoots, which allows supplying a +// preferred location (inserted first in the output vector), and a flag whether +// the current working directory should be searched (inserted last). 
+inline std::vector CandidateCudaRoots( + std::string preferred_location, bool use_working_directory = true) { + std::vector candidates = CandidateCudaRoots(); + if (!preferred_location.empty()) { + candidates.insert(candidates.begin(), preferred_location); + } + + // "." is our last resort, even though it probably won't work. + candidates.push_back("."); + + return candidates; +} + +// Returns true if we should prefer ptxas from PATH. +bool PreferPtxasFromPath(); + +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_CUDA_LIBDEVICE_PATH_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/demangle.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/demangle.h new file mode 100644 index 0000000000000000000000000000000000000000..95f07ff0ce1bcc7998bd0079edae5e7921db4322 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/demangle.h @@ -0,0 +1,34 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_DEMANGLE_H_ +#define TENSORFLOW_TSL_PLATFORM_DEMANGLE_H_ + +#include "tsl/platform/types.h" + +namespace tsl { +namespace port { + +// If the compiler supports, demangle a mangled symbol name and return +// the demangled name. Otherwise, returns 'mangled' as is. 
+string Demangle(const char* mangled); +inline string Demangle(const string mangled) { + return Demangle(mangled.c_str()); +} + +} // namespace port +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_DEMANGLE_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/denormal.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/denormal.h new file mode 100644 index 0000000000000000000000000000000000000000..5b13ab1b0d752c51f0a1426eac5710aad2b97d9f --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/denormal.h @@ -0,0 +1,94 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_DENORMAL_H_ +#define TENSORFLOW_TSL_PLATFORM_DENORMAL_H_ + +#include "tsl/platform/macros.h" + +namespace tsl { +namespace port { + +// State for handling of denormals. +class DenormalState { + public: + DenormalState(bool flush_to_zero, bool denormals_are_zero) + : flush_to_zero_(flush_to_zero), + denormals_are_zero_(denormals_are_zero) {} + + // Output denormals of floating-point operations are flushed to zero. + inline bool flush_to_zero() const { return flush_to_zero_; } + + // Input denormals to floating-point operations are treated as zero. 
+ inline bool denormals_are_zero() const { return denormals_are_zero_; } + + bool operator==(const DenormalState& other) const; + bool operator!=(const DenormalState& other) const; + + private: + bool flush_to_zero_; + bool denormals_are_zero_; +}; + +// Gets the platform's current state for handling denormals. +DenormalState GetDenormalState(); + +// Sets handling of denormals if the platform allows it. Returns `true` if the +// platform supports setting denormals to the specified state. Otherwise the +// denormal state remains unmodified and false is returned. +bool SetDenormalState(const DenormalState& state); + +// Remembers the flush denormal state on construction and restores that same +// state on destruction. +class ScopedRestoreFlushDenormalState { + public: + ScopedRestoreFlushDenormalState(); + ~ScopedRestoreFlushDenormalState(); + + private: + DenormalState denormal_state_; + ScopedRestoreFlushDenormalState(const ScopedRestoreFlushDenormalState&) = + delete; + void operator=(const ScopedRestoreFlushDenormalState&) = delete; +}; + +// While this class is active, denormal floating point numbers are flushed +// to zero. The destructor restores the original flags. +class ScopedFlushDenormal { + public: + ScopedFlushDenormal(); + + private: + ScopedRestoreFlushDenormalState restore_; + ScopedFlushDenormal(const ScopedFlushDenormal&) = delete; + void operator=(const ScopedFlushDenormal&) = delete; +}; + +// While this class is active, denormal floating point numbers are not flushed +// to zero. The destructor restores the original flags. 
+class ScopedDontFlushDenormal { + public: + ScopedDontFlushDenormal(); + + private: + ScopedRestoreFlushDenormalState restore_; + ScopedDontFlushDenormal(const ScopedDontFlushDenormal&) = delete; + void operator=(const ScopedDontFlushDenormal&) = delete; +}; + +} // namespace port +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_DENORMAL_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/dso_loader.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/dso_loader.h new file mode 100644 index 0000000000000000000000000000000000000000..5aa1f9e430df6e371c6df9e80f957224f6ce09e7 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/dso_loader.h @@ -0,0 +1,31 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_DSO_LOADER_H_ +#define TENSORFLOW_TSL_PLATFORM_DSO_LOADER_H_ + +#include "tsl/platform/platform.h" + +// Include appropriate platform-dependent implementations +#if defined(PLATFORM_GOOGLE) || defined(PLATFORM_CHROMIUMOS) +#include "tsl/platform/google/dso_loader.h" +#elif defined(PLATFORM_POSIX) || defined(PLATFORM_POSIX_ANDROID) || \ + defined(PLATFORM_GOOGLE_ANDROID) || defined(PLATFORM_WINDOWS) +#include "tsl/platform/default/dso_loader.h" +#else +#error Define the appropriate PLATFORM_ macro for this platform +#endif + +#endif // TENSORFLOW_TSL_PLATFORM_DSO_LOADER_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/dynamic_annotations.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/dynamic_annotations.h new file mode 100644 index 0000000000000000000000000000000000000000..e0c5867c9c5037bed97ff0c33ab9990558caebd7 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/dynamic_annotations.h @@ -0,0 +1,36 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_DYNAMIC_ANNOTATIONS_H_ +#define TENSORFLOW_TSL_PLATFORM_DYNAMIC_ANNOTATIONS_H_ + +#include "absl/base/dynamic_annotations.h" + +#define TF_ANNOTATE_MEMORY_IS_INITIALIZED(ptr, bytes) \ + ANNOTATE_MEMORY_IS_INITIALIZED(ptr, bytes) + +#define TF_ANNOTATE_BENIGN_RACE(ptr, description) \ + ANNOTATE_BENIGN_RACE(ptr, description) + +// Tell MemorySanitizer to relax the handling of a given function. All "Use of +// uninitialized value" warnings from such functions will be suppressed, and +// all values loaded from memory will be considered fully initialized. +#ifdef MEMORY_SANITIZER +#define TF_ATTRIBUTE_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory)) +#else +#define TF_ATTRIBUTE_NO_SANITIZE_MEMORY +#endif + +#endif // TENSORFLOW_TSL_PLATFORM_DYNAMIC_ANNOTATIONS_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/env.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/env.h new file mode 100644 index 0000000000000000000000000000000000000000..fe3354c765a06f310b6fe00e6f83666bed312f8c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/env.h @@ -0,0 +1,706 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_ENV_H_ +#define TENSORFLOW_TSL_PLATFORM_ENV_H_ + +#include + +#include +#include +#include +#include +#include + +#include "absl/functional/any_invocable.h" +#include "tsl/platform/env_time.h" +#include "tsl/platform/errors.h" +#include "tsl/platform/file_system.h" +#include "tsl/platform/macros.h" +#include "tsl/platform/mutex.h" +#include "tsl/platform/numa.h" +#include "tsl/platform/platform.h" +#include "tsl/platform/protobuf.h" +#include "tsl/platform/status.h" +#include "tsl/platform/stringpiece.h" +#include "tsl/platform/types.h" + +// Delete leaked Windows definitions. +#ifdef PLATFORM_WINDOWS +#undef CopyFile +#undef DeleteFile +#endif + +namespace tsl { + +class Thread; +struct ThreadOptions; + +/// \brief An interface used by the tensorflow implementation to +/// access operating system functionality like the filesystem etc. +/// +/// Callers may wish to provide a custom Env object to get fine grain +/// control. +/// +/// All Env implementations are safe for concurrent access from +/// multiple threads without any external synchronization. +class Env { + public: + Env(); + virtual ~Env() = default; + + /// \brief Returns a default environment suitable for the current operating + /// system. + /// + /// Sophisticated users may wish to provide their own Env + /// implementation instead of relying on this default environment. + /// + /// The result of Default() belongs to this library and must never be deleted. + static Env* Default(); + + /// \brief Returns the FileSystem object to handle operations on the file + /// specified by 'fname'. The FileSystem object is used as the implementation + /// for the file system related (non-virtual) functions that follow. + /// Returned FileSystem object is still owned by the Env object and will + // (might) be destroyed when the environment is destroyed. 
+ virtual Status GetFileSystemForFile(const std::string& fname, + FileSystem** result); + + /// \brief Returns the file system schemes registered for this Env. + virtual Status GetRegisteredFileSystemSchemes( + std::vector* schemes); + + /// \brief Register a file system for a scheme. + virtual Status RegisterFileSystem(const std::string& scheme, + FileSystemRegistry::Factory factory); + + /// \brief Register a modular file system for a scheme. + /// + /// Same as `RegisterFileSystem` but for filesystems provided by plugins. + /// + /// TODO(b/139060984): After all filesystems are converted, make this be the + /// canonical registration function. + virtual Status RegisterFileSystem(const std::string& scheme, + std::unique_ptr filesystem); + + Status SetOption(const std::string& scheme, const std::string& key, + const std::string& value); + + Status SetOption(const std::string& scheme, const std::string& key, + const std::vector& values); + + Status SetOption(const std::string& scheme, const std::string& key, + const std::vector& values); + + Status SetOption(const std::string& scheme, const std::string& key, + const std::vector& values); + + /// \brief Flush filesystem caches for all registered filesystems. + Status FlushFileSystemCaches(); + + /// \brief Creates a brand new random access read-only file with the + /// specified name. + + /// On success, stores a pointer to the new file in + /// *result and returns OK. On failure stores NULL in *result and + /// returns non-OK. If the file does not exist, returns a non-OK + /// status. + /// + /// The returned file may be concurrently accessed by multiple threads. + /// + /// The ownership of the returned RandomAccessFile is passed to the caller + /// and the object should be deleted when is not used. The file object + /// shouldn't live longer than the Env object. 
+ Status NewRandomAccessFile(const std::string& fname, + std::unique_ptr* result); + + Status NewRandomAccessFile(const std::string& fname, TransactionToken* token, + std::unique_ptr* result) { + // We duplicate these methods due to Google internal coding style prevents + // virtual functions with default arguments. See PR #41615. + return OkStatus(); + } + + /// \brief Creates an object that writes to a new file with the specified + /// name. + /// + /// Deletes any existing file with the same name and creates a + /// new file. On success, stores a pointer to the new file in + /// *result and returns OK. On failure stores NULL in *result and + /// returns non-OK. + /// + /// The returned file will only be accessed by one thread at a time. + /// + /// The ownership of the returned WritableFile is passed to the caller + /// and the object should be deleted when is not used. The file object + /// shouldn't live longer than the Env object. + Status NewWritableFile(const std::string& fname, + std::unique_ptr* result); + + Status NewWritableFile(const std::string& fname, TransactionToken* token, + std::unique_ptr* result) { + return OkStatus(); + } + + /// \brief Creates an object that either appends to an existing file, or + /// writes to a new file (if the file does not exist to begin with). + /// + /// On success, stores a pointer to the new file in *result and + /// returns OK. On failure stores NULL in *result and returns + /// non-OK. + /// + /// The returned file will only be accessed by one thread at a time. + /// + /// The ownership of the returned WritableFile is passed to the caller + /// and the object should be deleted when is not used. The file object + /// shouldn't live longer than the Env object. 
+ Status NewAppendableFile(const std::string& fname, + std::unique_ptr* result); + + Status NewAppendableFile(const std::string& fname, TransactionToken* token, + std::unique_ptr* result) { + return OkStatus(); + } + /// \brief Creates a readonly region of memory with the file context. + /// + /// On success, it returns a pointer to read-only memory region + /// from the content of file fname. The ownership of the region is passed to + /// the caller. On failure stores nullptr in *result and returns non-OK. + /// + /// The returned memory region can be accessed from many threads in parallel. + /// + /// The ownership of the returned ReadOnlyMemoryRegion is passed to the caller + /// and the object should be deleted when is not used. The memory region + /// object shouldn't live longer than the Env object. + Status NewReadOnlyMemoryRegionFromFile( + const std::string& fname, std::unique_ptr* result); + + Status NewReadOnlyMemoryRegionFromFile( + const std::string& fname, TransactionToken* token, + std::unique_ptr* result) { + return OkStatus(); + } + + /// Returns OK if the named path exists and NOT_FOUND otherwise. + Status FileExists(const std::string& fname); + + Status FileExists(const std::string& fname, TransactionToken* token) { + return OkStatus(); + } + + /// Returns true if all the listed files exist, false otherwise. + /// if status is not null, populate the vector with a detailed status + /// for each file. + bool FilesExist(const std::vector& files, + std::vector* status); + + bool FilesExist(const std::vector& files, TransactionToken* token, + std::vector* status) { + return true; + } + + /// \brief Stores in *result the names of the children of the specified + /// directory. The names are relative to "dir". + /// + /// Original contents of *results are dropped. 
+ Status GetChildren(const std::string& dir, std::vector* result); + + Status GetChildren(const std::string& dir, TransactionToken* token, + std::vector* result) { + return OkStatus(); + } + + /// \brief Returns true if the path matches the given pattern. The wildcards + /// allowed in pattern are described in FileSystem::GetMatchingPaths. + virtual bool MatchPath(const std::string& path, + const std::string& pattern) = 0; + + /// \brief Given a pattern, stores in *results the set of paths that matches + /// that pattern. *results is cleared. + /// + /// More details about `pattern` in FileSystem::GetMatchingPaths. + virtual Status GetMatchingPaths(const std::string& pattern, + std::vector* results); + + Status GetMatchingPaths(const std::string& pattern, TransactionToken* token, + std::vector* results) { + return OkStatus(); + } + + /// Deletes the named file. + Status DeleteFile(const std::string& fname); + + Status DeleteFile(const std::string& fname, TransactionToken* token) { + return OkStatus(); + } + + /// \brief Deletes the specified directory and all subdirectories and files + /// underneath it. This is accomplished by traversing the directory tree + /// rooted at dirname and deleting entries as they are encountered. + /// + /// If dirname itself is not readable or does not exist, *undeleted_dir_count + /// is set to 1, *undeleted_file_count is set to 0 and an appropriate status + /// (e.g. NOT_FOUND) is returned. + /// + /// If dirname and all its descendants were successfully deleted, TF_OK is + /// returned and both error counters are set to zero. + /// + /// Otherwise, while traversing the tree, undeleted_file_count and + /// undeleted_dir_count are updated if an entry of the corresponding type + /// could not be deleted. The returned error status represents the reason that + /// any one of these entries could not be deleted. + /// + /// REQUIRES: undeleted_files, undeleted_dirs to be not null. 
+ /// + /// Typical return codes: + /// * OK - dirname exists and we were able to delete everything underneath. + /// * NOT_FOUND - dirname doesn't exist + /// * PERMISSION_DENIED - dirname or some descendant is not writable + /// * UNIMPLEMENTED - Some underlying functions (like Delete) are not + /// implemented + Status DeleteRecursively(const std::string& dirname, int64_t* undeleted_files, + int64_t* undeleted_dirs); + + Status DeleteRecursively(const std::string& dirname, TransactionToken* token, + int64_t* undeleted_files, int64_t* undeleted_dirs) { + return OkStatus(); + } + + /// \brief Creates the specified directory and all the necessary + /// subdirectories. Typical return codes. + /// * OK - successfully created the directory and sub directories, even if + /// they were already created. + /// * PERMISSION_DENIED - dirname or some subdirectory is not writable. + Status RecursivelyCreateDir(const std::string& dirname); + + Status RecursivelyCreateDir(const std::string& dirname, + TransactionToken* token) { + return OkStatus(); + } + /// \brief Creates the specified directory. Typical return codes + /// * OK - successfully created the directory. + /// * ALREADY_EXISTS - directory already exists. + /// * PERMISSION_DENIED - dirname is not writable. + Status CreateDir(const std::string& dirname); + + Status CreateDir(const std::string& dirname, TransactionToken* token) { + return OkStatus(); + } + + /// Deletes the specified directory. + Status DeleteDir(const std::string& dirname); + + Status DeleteDir(const std::string& dirname, TransactionToken* token) { + return OkStatus(); + } + + /// Obtains statistics for the given path. + Status Stat(const std::string& fname, FileStatistics* stat); + + Status Stat(const std::string& fname, TransactionToken* token, + FileStatistics* stat) { + return OkStatus(); + } + + /// \brief Returns whether the given path is a directory or not. 
+ /// Typical return codes (not guaranteed exhaustive): + /// * OK - The path exists and is a directory. + /// * FAILED_PRECONDITION - The path exists and is not a directory. + /// * NOT_FOUND - The path entry does not exist. + /// * PERMISSION_DENIED - Insufficient permissions. + /// * UNIMPLEMENTED - The file factory doesn't support directories. + Status IsDirectory(const std::string& fname); + + /// \brief Returns whether the given path is on a file system + /// that has atomic move capabilities. This can be used + /// to determine if there needs to be a temp location to safely write objects. + /// The second boolean argument has_atomic_move contains this information. + /// + /// Returns one of the following status codes (not guaranteed exhaustive): + /// * OK - The path is on a recognized file system, + /// so has_atomic_move holds the above information. + /// * UNIMPLEMENTED - The file system of the path hasn't been implemented in + /// TF + Status HasAtomicMove(const std::string& path, bool* has_atomic_move); + + /// Stores the size of `fname` in `*file_size`. + Status GetFileSize(const std::string& fname, uint64* file_size); + + Status GetFileSize(const std::string& fname, TransactionToken* token, + uint64* file_size) { + return OkStatus(); + } + + /// \brief Renames file src to target. If target already exists, it will be + /// replaced. + Status RenameFile(const std::string& src, const std::string& target); + + Status RenameFile(const std::string& src, const std::string& target, + TransactionToken* token) { + return OkStatus(); + } + + /// \brief Copy the src to target. 
+ Status CopyFile(const std::string& src, const std::string& target); + + Status CopyFile(const std::string& src, const std::string& target, + TransactionToken* token) { + return OkStatus(); + } + + /// \brief starts a new transaction on the filesystem that handles filename + Status StartTransaction(const std::string& filename, + TransactionToken** token) { + *token = nullptr; + return OkStatus(); + } + + /// \brief Adds `path` to transaction in `token` if token belongs to + /// filesystem that handles the path. + Status AddToTransaction(const std::string& path, TransactionToken* token) { + return OkStatus(); + } + + /// \brief Get token for `path` or start a new transaction and add `path` to + /// it. + Status GetTokenOrStartTransaction(const std::string& path, + TransactionToken** token) { + *token = nullptr; + return OkStatus(); + } + + /// \brief Returns the transaction for `path` or nullptr in `token` + Status GetTransactionForPath(const std::string& path, + TransactionToken** token) { + *token = nullptr; + return OkStatus(); + } + + /// \brief Finalizes the transaction + Status EndTransaction(TransactionToken* token) { return OkStatus(); } + + /// \brief Returns the absolute path of the current executable. It resolves + /// symlinks if there is any. + std::string GetExecutablePath(); + + /// Creates a local unique temporary file name. Returns true if success. + bool LocalTempFilename(std::string* filename); + + /// Creates a local unique file name that starts with |prefix| and ends with + /// |suffix|. Returns true if success. + bool CreateUniqueFileName(std::string* prefix, const std::string& suffix); + + /// \brief Return the runfiles directory if running under bazel. Returns + /// the directory the executable is located in if not running under bazel. + virtual std::string GetRunfilesDir() = 0; + + // TODO(jeff,sanjay): Add back thread/thread-pool support if needed. 
+ // TODO(jeff,sanjay): if needed, tighten spec so relative to epoch, or + // provide a routine to get the absolute time. + + /// \brief Returns the number of nano-seconds since the Unix epoch. + virtual uint64 NowNanos() const { return EnvTime::NowNanos(); } + + /// \brief Returns the number of micro-seconds since the Unix epoch. + virtual uint64 NowMicros() const { return EnvTime::NowMicros(); } + + /// \brief Returns the number of seconds since the Unix epoch. + virtual uint64 NowSeconds() const { return EnvTime::NowSeconds(); } + + /// Sleeps/delays the thread for the prescribed number of micro-seconds. + virtual void SleepForMicroseconds(int64_t micros) = 0; + + /// Returns the process ID of the calling process. + int32 GetProcessId(); + + /// \brief Returns a new thread that is running fn() and is identified + /// (for debugging/performance-analysis) by "name". + /// + /// Caller takes ownership of the result and must delete it eventually + /// (the deletion will block until fn() stops running). + virtual Thread* StartThread( + const ThreadOptions& thread_options, const std::string& name, + absl::AnyInvocable fn) TF_MUST_USE_RESULT = 0; + + // Returns the thread id of calling thread. + // Posix: Returns pthread id which is only guaranteed to be unique within a + // process. + // Windows: Returns thread id which is unique. + virtual int32 GetCurrentThreadId() = 0; + + // Copies current thread name to "name". Returns true if success. + virtual bool GetCurrentThreadName(std::string* name) = 0; + + // \brief Schedules the given closure on a thread-pool. + // + // NOTE(mrry): This closure may block. + virtual void SchedClosure(absl::AnyInvocable closure) = 0; + + // \brief Schedules the given closure on a thread-pool after the given number + // of microseconds. + // + // NOTE(mrry): This closure must not block. + virtual void SchedClosureAfter(int64_t micros, + absl::AnyInvocable closure) = 0; + + // \brief Load a dynamic library. 
+ // + // Pass "library_filename" to a platform-specific mechanism for dynamically + // loading a library. The rules for determining the exact location of the + // library are platform-specific and are not documented here. + // + // On success, returns a handle to the library in "*handle" and returns + // OK from the function. + // Otherwise returns nullptr in "*handle" and an error status from the + // function. + virtual Status LoadDynamicLibrary(const char* library_filename, + void** handle) = 0; + + // \brief Get a pointer to a symbol from a dynamic library. + // + // "handle" should be a pointer returned from a previous call to LoadLibrary. + // On success, store a pointer to the located symbol in "*symbol" and return + // OK from the function. Otherwise, returns nullptr in "*symbol" and an error + // status from the function. + virtual Status GetSymbolFromLibrary(void* handle, const char* symbol_name, + void** symbol) = 0; + + // \brief build the name of dynamic library. + // + // "name" should be name of the library. + // "version" should be the version of the library or NULL + // returns the name that LoadLibrary() can use + virtual std::string FormatLibraryFileName(const std::string& name, + const std::string& version) = 0; + + // Returns a possible list of local temporary directories. + virtual void GetLocalTempDirectories(std::vector* list) = 0; + + private: + std::unique_ptr file_system_registry_; + Env(const Env&) = delete; + void operator=(const Env&) = delete; +}; + +/// \brief An implementation of Env that forwards all calls to another Env. +/// +/// May be useful to clients who wish to override just part of the +/// functionality of another Env. 
+class EnvWrapper : public Env { + public: + /// Initializes an EnvWrapper that delegates all calls to *t + explicit EnvWrapper(Env* t) : target_(t) {} + ~EnvWrapper() override; + + /// Returns the target to which this Env forwards all calls + Env* target() const { return target_; } + + Status GetFileSystemForFile(const std::string& fname, + FileSystem** result) override { + return target_->GetFileSystemForFile(fname, result); + } + + Status GetRegisteredFileSystemSchemes(std::vector* schemes) override { + return target_->GetRegisteredFileSystemSchemes(schemes); + } + + Status RegisterFileSystem(const std::string& scheme, + FileSystemRegistry::Factory factory) override { + return target_->RegisterFileSystem(scheme, factory); + } + + bool MatchPath(const std::string& path, const std::string& pattern) override { + return target_->MatchPath(path, pattern); + } + + uint64 NowMicros() const override { return target_->NowMicros(); } + void SleepForMicroseconds(int64_t micros) override { + target_->SleepForMicroseconds(micros); + } + Thread* StartThread(const ThreadOptions& thread_options, + const std::string& name, + absl::AnyInvocable fn) override { + return target_->StartThread(thread_options, name, std::move(fn)); + } + int32 GetCurrentThreadId() override { return target_->GetCurrentThreadId(); } + bool GetCurrentThreadName(std::string* name) override { + return target_->GetCurrentThreadName(name); + } + void SchedClosure(absl::AnyInvocable closure) override { + target_->SchedClosure(std::move(closure)); + } + void SchedClosureAfter(int64_t micros, + absl::AnyInvocable closure) override { + target_->SchedClosureAfter(micros, std::move(closure)); + } + Status LoadDynamicLibrary(const char* library_filename, + void** handle) override { + return target_->LoadDynamicLibrary(library_filename, handle); + } + Status GetSymbolFromLibrary(void* handle, const char* symbol_name, + void** symbol) override { + return target_->GetSymbolFromLibrary(handle, symbol_name, symbol); + } 
+ std::string FormatLibraryFileName(const std::string& name, + const std::string& version) override { + return target_->FormatLibraryFileName(name, version); + } + + std::string GetRunfilesDir() override { return target_->GetRunfilesDir(); } + + private: + void GetLocalTempDirectories(std::vector* list) override { + target_->GetLocalTempDirectories(list); + } + + Env* target_; +}; + +/// Represents a thread used to run a TSL function. +class Thread { + public: + Thread() {} + + /// Blocks until the thread of control stops running. + virtual ~Thread(); + + private: + Thread(const Thread&) = delete; + void operator=(const Thread&) = delete; +}; + +/// \brief Cross-platform setenv. +/// +/// Since setenv() is not available on windows, we provide an +/// alternative with platform specific implementations here. +int setenv(const char* name, const char* value, int overwrite); + +/// Cross-platform unsetenv. +int unsetenv(const char* name); + +/// \brief Options to configure a Thread. +/// +/// Note that the options are all hints, and the +/// underlying implementation may choose to ignore it. +struct ThreadOptions { + /// Thread stack size to use (in bytes). + size_t stack_size = 0; // 0: use system default value + /// Guard area size to use near thread stacks to use (in bytes) + size_t guard_size = 0; // 0: use system default value + int numa_node = port::kNUMANoAffinity; +}; + +/// A utility routine: copy contents of `src` in file system `src_fs` +/// to `target` in file system `target_fs`. +Status FileSystemCopyFile(FileSystem* src_fs, const std::string& src, + FileSystem* target_fs, const std::string& target); + +/// A utility routine: reads contents of named file into `*data` +Status ReadFileToString(Env* env, const std::string& fname, std::string* data); + +/// A utility routine: write contents of `data` to file named `fname` +/// (overwriting existing contents, if any). 
+Status WriteStringToFile(Env* env, const std::string& fname, + const StringPiece& data); + +/// Write binary representation of "proto" to the named file. +Status WriteBinaryProto(Env* env, const std::string& fname, + const protobuf::MessageLite& proto); + +/// Reads contents of named file and parse as binary encoded proto data +/// and store into `*proto`. +Status ReadBinaryProto(Env* env, const std::string& fname, + protobuf::MessageLite* proto); + +/// Write the text representation of "proto" to the named file. +inline Status WriteTextProto(Env* /* env */, const std::string& /* fname */, + const protobuf::MessageLite& /* proto */) { + return errors::Unimplemented("Can't write text protos with protolite."); +} +Status WriteTextProto(Env* env, const std::string& fname, + const protobuf::Message& proto); + +/// Read contents of named file and parse as text encoded proto data +/// and store into `*proto`. +inline Status ReadTextProto(Env* /* env */, const std::string& /* fname */, + protobuf::MessageLite* /* proto */) { + return errors::Unimplemented("Can't parse text protos with protolite."); +} +Status ReadTextProto(Env* env, const std::string& fname, + protobuf::Message* proto); + +/// Read contents of named file and parse as either text or binary encoded proto +/// data and store into `*proto`. +Status ReadTextOrBinaryProto(Env* env, const std::string& fname, + protobuf::Message* proto); +Status ReadTextOrBinaryProto(Env* env, const std::string& fname, + protobuf::MessageLite* proto); + +// START_SKIP_DOXYGEN + +// The following approach to register filesystems is deprecated and will be +// replaced with modular filesystem plugins registration. +// TODO(b/139060984): After all filesystems are converted, remove this. +namespace register_file_system { + +template +struct Register { + Register(Env* env, const std::string& scheme, bool try_modular_filesystems) { + // TODO(yongtang): Remove legacy file system registration for hdfs/s3/gcs + // after TF 2.6+. 
+ if (try_modular_filesystems) { + const char* env_value = getenv("TF_USE_MODULAR_FILESYSTEM"); + string load_plugin = env_value ? absl::AsciiStrToLower(env_value) : ""; + if (load_plugin == "true" || load_plugin == "1") { + // We don't register the static filesystem and wait for SIG IO one + LOG(WARNING) << "Using modular file system for '" << scheme << "'." + << " Please switch to tensorflow-io" + << " (https://github.com/tensorflow/io) for file system" + << " support of '" << scheme << "'."; + return; + } + // If the envvar is missing or not "true"/"1", then fall back to legacy + // implementation to be backwards compatible. + } + // TODO(b/32704451): Don't just ignore the ::tensorflow::Status object! + env->RegisterFileSystem(scheme, []() -> FileSystem* { return new Factory; }) + .IgnoreError(); + } +}; + +} // namespace register_file_system + +// END_SKIP_DOXYGEN + +} // namespace tsl + +// Register a FileSystem implementation for a scheme. Files with names that have +// "scheme://" prefixes are routed to use this implementation. 
+#define REGISTER_FILE_SYSTEM_ENV(env, scheme, factory, modular) \ + REGISTER_FILE_SYSTEM_UNIQ_HELPER(__COUNTER__, env, scheme, factory, modular) +#define REGISTER_FILE_SYSTEM_UNIQ_HELPER(ctr, env, scheme, factory, modular) \ + REGISTER_FILE_SYSTEM_UNIQ(ctr, env, scheme, factory, modular) +#define REGISTER_FILE_SYSTEM_UNIQ(ctr, env, scheme, factory, modular) \ + static ::tsl::register_file_system::Register register_ff##ctr \ + TF_ATTRIBUTE_UNUSED = \ + ::tsl::register_file_system::Register(env, scheme, modular) + +#define REGISTER_FILE_SYSTEM(scheme, factory) \ + REGISTER_FILE_SYSTEM_ENV(::tsl::Env::Default(), scheme, factory, false); + +#define REGISTER_LEGACY_FILE_SYSTEM(scheme, factory) \ + REGISTER_FILE_SYSTEM_ENV(::tsl::Env::Default(), scheme, factory, true); + +#endif // TENSORFLOW_TSL_PLATFORM_ENV_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/error_logging.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/error_logging.h new file mode 100644 index 0000000000000000000000000000000000000000..0ee471d0fbab8395bba9af669a34ea4a27494077 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/error_logging.h @@ -0,0 +1,29 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_ERROR_LOGGING_H_ +#define TENSORFLOW_TSL_PLATFORM_ERROR_LOGGING_H_ + +#include "absl/status/status.h" +#include "absl/strings/string_view.h" + +namespace tsl::error_logging { + +absl::Status Log(absl::string_view component, absl::string_view subcomponent, + absl::string_view error_msg); + +} + +#endif // TENSORFLOW_TSL_PLATFORM_ERROR_LOGGING_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/errors.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/errors.h new file mode 100644 index 0000000000000000000000000000000000000000..9008dedad8270c9c581264e870b12f2b190136a9 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/errors.h @@ -0,0 +1,646 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_ERRORS_H_ +#define TENSORFLOW_TSL_PLATFORM_ERRORS_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/status/status.h" +#include "absl/strings/cord.h" +#include "absl/strings/str_join.h" +#include "tsl/platform/logging.h" +#include "tsl/platform/macros.h" +#include "tsl/platform/status.h" +#include "tsl/platform/str_util.h" +#include "tsl/platform/strcat.h" + +namespace tsl { +namespace error { +// NOLINTBEGIN(misc-unused-using-decls) +// TODO(aminim): figure out the protobuf migration story. +using tensorflow::error::ABORTED; +using tensorflow::error::ALREADY_EXISTS; +using tensorflow::error::CANCELLED; +using tensorflow::error::Code; +using tensorflow::error::DATA_LOSS; +using tensorflow::error::DEADLINE_EXCEEDED; +using tensorflow::error::FAILED_PRECONDITION; +using tensorflow::error::INTERNAL; +using tensorflow::error::INVALID_ARGUMENT; +using tensorflow::error::NOT_FOUND; +using tensorflow::error::OK; +using tensorflow::error::OUT_OF_RANGE; +using tensorflow::error::PERMISSION_DENIED; +using tensorflow::error::RESOURCE_EXHAUSTED; +using tensorflow::error::UNAUTHENTICATED; +using tensorflow::error::UNAVAILABLE; +using tensorflow::error::UNIMPLEMENTED; +using tensorflow::error::UNKNOWN; +// NOLINTEND(misc-unused-using-decls) +} // namespace error + +namespace errors { + +namespace internal { + +// The DECLARE_ERROR macro below only supports types that can be converted +// into StrCat's AlphaNum. For the other types we rely on a slower path +// through std::stringstream. 
To add support of a new type, it is enough to +// make sure there is an operator<<() for it: +// +// std::ostream& operator<<(std::ostream& os, const MyType& foo) { +// os << foo.ToString(); +// return os; +// } +// Eventually absl::strings will have native support for this and we will be +// able to completely remove PrepareForStrCat(). +template +typename std::enable_if::value, + std::string>::type +PrepareForStrCat(const T& t) { + std::stringstream ss; + ss << t; + return ss.str(); +} +inline const strings::AlphaNum& PrepareForStrCat(const strings::AlphaNum& a) { + return a; +} + +} // namespace internal + +// Maps UNIX errors into a Status. +Status IOError(const string& context, int err_number); + +// Returns all payloads from a Status as a key-value map. +inline std::unordered_map GetPayloads( + const ::tsl::Status& status) { + std::unordered_map payloads; + status.ForEachPayload( + [&payloads](::tsl::StringPiece key, const absl::Cord& value) { + payloads[std::string(key)] = std::string(value); + }); + return payloads; +} + +// Inserts all given payloads into the given status. Will overwrite existing +// payloads if they exist with the same key. +inline void InsertPayloads( + ::tsl::Status& status, + const std::unordered_map& payloads) { + for (const auto& payload : payloads) { + status.SetPayload(payload.first, absl::Cord(payload.second)); + } +} + +// Copies all payloads from one Status to another. Will overwrite existing +// payloads in the destination if they exist with the same key. +inline void CopyPayloads(const ::tsl::Status& from, ::tsl::Status& to) { + from.ForEachPayload([&to](::tsl::StringPiece key, const absl::Cord& value) { + to.SetPayload(key, value); + }); +} + +#if defined(PLATFORM_GOOGLE) +// Creates a new status with the given code, message and payloads. 
+inline ::tsl::Status Create( + absl::StatusCode code, ::tsl::StringPiece message, + const std::unordered_map& payloads, + absl::SourceLocation loc = absl::SourceLocation::current()) { + Status status(code, message, loc); + InsertPayloads(status, payloads); + return status; +} +// Returns a new Status, replacing its message with the given. +inline ::tsl::Status CreateWithUpdatedMessage(const ::tsl::Status& status, + ::tsl::StringPiece message) { + auto locations = status.GetSourceLocations(); + auto initial_loc = + locations.empty() ? absl::SourceLocation::current() : locations[0]; + Status new_status = Create(static_cast(status.code()), + message, GetPayloads(status), initial_loc); + if (locations.size() > 1) { + for (auto loc : locations.subspan(1)) { + new_status.AddSourceLocation(loc); + } + } + return new_status; +} + +#else +inline ::absl::Status Create( + absl::StatusCode code, ::tsl::StringPiece message, + const std::unordered_map& payloads) { + Status status(code, message); + InsertPayloads(status, payloads); + return status; +} +// Returns a new Status, replacing its message with the given. +inline ::tsl::Status CreateWithUpdatedMessage(const ::tsl::Status& status, + ::tsl::StringPiece message) { + return Create(static_cast(status.code()), message, + GetPayloads(status)); +} +#endif + +// Append some context to an error message. Each time we append +// context put it on a new line, since it is possible for there +// to be several layers of additional context. +template +void AppendToMessage(::tsl::Status* status, Args... args) { + auto new_status = CreateWithUpdatedMessage( + *status, ::tsl::strings::StrCat(status->message(), "\n\t", args...)); + CopyPayloads(*status, new_status); + *status = std::move(new_status); +} + +// For propagating errors when calling a function. +#define TF_RETURN_IF_ERROR(...) 
\ + do { \ + ::absl::Status _status = (__VA_ARGS__); \ + if (TF_PREDICT_FALSE(!_status.ok())) { \ + MAYBE_ADD_SOURCE_LOCATION(_status) \ + return _status; \ + } \ + } while (0) + +#define TF_RETURN_WITH_CONTEXT_IF_ERROR(expr, ...) \ + do { \ + ::tsl::Status _status = (expr); \ + if (TF_PREDICT_FALSE(!_status.ok())) { \ + ::tsl::errors::AppendToMessage(&_status, __VA_ARGS__); \ + return _status; \ + } \ + } while (0) + +// Convenience functions for generating and using error status. +// Example usage: +// status.Update(errors::InvalidArgument("The ", foo, " isn't right.")); +// if (errors::IsInvalidArgument(status)) { ... } +// switch (status.code()) { case error::INVALID_ARGUMENT: ... } + +// CANCELLED +template +::tsl::Status Cancelled(Args... args) { + return ::tsl::Status(absl::StatusCode::kCancelled, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); +} +template +::tsl::Status CancelledWithPayloads( + const ::tsl::StringPiece& message, + const std::unordered_map& payloads) { + return errors::Create(absl::StatusCode::kCancelled, message, payloads); +} + +// InvalidArgument +template +::tsl::Status InvalidArgument(Args... args) { + return ::tsl::Status(absl::StatusCode::kInvalidArgument, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); +} + +#if defined(PLATFORM_GOOGLE) +// Specialized overloads to capture source location for up to three arguments. 
+template +::absl::Status InvalidArgument( + Arg1 arg1, Arg2 arg2, Arg3 arg3, Arg4 arg4, + absl::SourceLocation loc = absl::SourceLocation::current()) { + return ::tsl::Status( + absl::StatusCode::kInvalidArgument, + ::tsl::strings::StrCat(::tsl::errors::internal::PrepareForStrCat(arg1), + ::tsl::errors::internal::PrepareForStrCat(arg2), + ::tsl::errors::internal::PrepareForStrCat(arg3), + ::tsl::errors::internal::PrepareForStrCat(arg4)), + loc); +} +template +::absl::Status InvalidArgument( + Arg1 arg1, Arg2 arg2, Arg3 arg3, + absl::SourceLocation loc = absl::SourceLocation::current()) { + return ::tsl::Status( + absl::StatusCode::kInvalidArgument, + ::tsl::strings::StrCat(::tsl::errors::internal::PrepareForStrCat(arg1), + ::tsl::errors::internal::PrepareForStrCat(arg2), + ::tsl::errors::internal::PrepareForStrCat(arg3)), + loc); +} +template +::absl::Status InvalidArgument( + Arg1 arg1, Arg2 arg2, + absl::SourceLocation loc = absl::SourceLocation::current()) { + return ::tsl::Status( + absl::StatusCode::kInvalidArgument, + ::tsl::strings::StrCat(::tsl::errors::internal::PrepareForStrCat(arg1), + ::tsl::errors::internal::PrepareForStrCat(arg2)), + loc); +} +template +::absl::Status InvalidArgument( + Arg1 arg1, absl::SourceLocation loc = absl::SourceLocation::current()) { + return ::tsl::Status( + absl::StatusCode::kInvalidArgument, + ::tsl::strings::StrCat(::tsl::errors::internal::PrepareForStrCat(arg1)), + loc); +} +template +::absl::Status InvalidArgumentWithPayloads( + const ::tsl::StringPiece& message, + const std::unordered_map& payloads, + absl::SourceLocation loc = absl::SourceLocation::current()) { + return errors::Create(absl::StatusCode::kInvalidArgument, message, payloads, + loc); +} +#else +template +::absl::Status InvalidArgument(Arg1 arg1, Arg2 arg2, Arg3 arg3) { + return ::absl::Status( + absl::StatusCode::kInvalidArgument, + ::tsl::strings::StrCat(::tsl::errors::internal::PrepareForStrCat(arg1), + ::tsl::errors::internal::PrepareForStrCat(arg2), + 
::tsl::errors::internal::PrepareForStrCat(arg3))); +} +template +::absl::Status InvalidArgument(Arg1 arg1, Arg2 arg2) { + return ::absl::Status( + absl::StatusCode::kInvalidArgument, + ::tsl::strings::StrCat(::tsl::errors::internal::PrepareForStrCat(arg1), + ::tsl::errors::internal::PrepareForStrCat(arg2))); +} +template +::absl::Status InvalidArgument(Arg1 arg1) { + return ::absl::Status( + absl::StatusCode::kInvalidArgument, + ::tsl::strings::StrCat(::tsl::errors::internal::PrepareForStrCat(arg1))); +} +template +::absl::Status InvalidArgumentWithPayloads( + const ::tsl::StringPiece& message, + const std::unordered_map& payloads) { + return errors::Create(absl::StatusCode::kInvalidArgument, message, payloads); +} +#endif + +// NotFound +template +::tsl::Status NotFound(Args... args) { + return ::tsl::Status(absl::StatusCode::kNotFound, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); +} +#if defined(PLATFORM_GOOGLE) +// Specialized overloads to capture source location for up to three arguments. 
+template +::absl::Status NotFound( + Arg1 arg1, Arg2 arg2, Arg3 arg3, + absl::SourceLocation loc = absl::SourceLocation::current()) { + return ::tsl::Status( + absl::StatusCode::kNotFound, + ::tsl::strings::StrCat(::tsl::errors::internal::PrepareForStrCat(arg1), + ::tsl::errors::internal::PrepareForStrCat(arg2), + ::tsl::errors::internal::PrepareForStrCat(arg3)), + loc); +} +template +::absl::Status NotFound( + Arg1 arg1, Arg2 arg2, + absl::SourceLocation loc = absl::SourceLocation::current()) { + return ::tsl::Status( + absl::StatusCode::kNotFound, + ::tsl::strings::StrCat(::tsl::errors::internal::PrepareForStrCat(arg1), + ::tsl::errors::internal::PrepareForStrCat(arg2)), + loc); +} +template +::absl::Status NotFound( + Arg1 arg1, absl::SourceLocation loc = absl::SourceLocation::current()) { + return ::tsl::Status( + absl::StatusCode::kNotFound, + ::tsl::strings::StrCat(::tsl::errors::internal::PrepareForStrCat(arg1)), + loc); +} +template +::absl::Status NotFoundWithPayloads( + const ::tsl::StringPiece& message, + const std::unordered_map& payloads, + absl::SourceLocation loc = absl::SourceLocation::current()) { + return errors::Create(absl::StatusCode::kNotFound, message, payloads, loc); +} +#else +template +::absl::Status NotFound(Arg1 arg1, Arg2 arg2, Arg3 arg3) { + return ::absl::Status( + absl::StatusCode::kNotFound, + ::tsl::strings::StrCat(::tsl::errors::internal::PrepareForStrCat(arg1), + ::tsl::errors::internal::PrepareForStrCat(arg2), + ::tsl::errors::internal::PrepareForStrCat(arg3))); +} +template +::absl::Status NotFound(Arg1 arg1, Arg2 arg2) { + return ::absl::Status( + absl::StatusCode::kNotFound, + ::tsl::strings::StrCat(::tsl::errors::internal::PrepareForStrCat(arg1), + ::tsl::errors::internal::PrepareForStrCat(arg2))); +} +template +::absl::Status NotFound(Arg1 arg1) { + return ::absl::Status( + absl::StatusCode::kNotFound, + ::tsl::strings::StrCat(::tsl::errors::internal::PrepareForStrCat(arg1))); +} +template +::absl::Status 
NotFoundWithPayloads( + const ::tsl::StringPiece& message, + const std::unordered_map& payloads) { + return errors::Create(absl::StatusCode::kNotFound, message, payloads); +} +#endif + +// AlreadyExists +template +::tsl::Status AlreadyExists(Args... args) { + return ::tsl::Status(absl::StatusCode::kAlreadyExists, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); +} +template +::tsl::Status AlreadyExistsWithPayloads( + const ::tsl::StringPiece& message, + const std::unordered_map& payloads) { + return errors::Create(absl::StatusCode::kAlreadyExists, message, payloads); +} + +// ResourceExhausted +template +::tsl::Status ResourceExhausted(Args... args) { + return ::tsl::Status(absl::StatusCode::kResourceExhausted, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); +} +template +::tsl::Status ResourceExhaustedWithPayloads( + const ::tsl::StringPiece& message, + const std::unordered_map& payloads) { + return errors::Create(absl::StatusCode::kResourceExhausted, message, + payloads); +} + +// Unavailable +template +::tsl::Status Unavailable(Args... args) { + return ::tsl::Status(absl::StatusCode::kUnavailable, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); +} +template +::tsl::Status UnavailableWithPayloads( + const ::tsl::StringPiece& message, + const std::unordered_map& payloads) { + return errors::Create(absl::StatusCode::kUnavailable, message, payloads); +} + +// FailedPrecondition +template +::tsl::Status FailedPrecondition(Args... args) { + return ::tsl::Status(absl::StatusCode::kFailedPrecondition, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); +} +template +::tsl::Status FailedPreconditionWithPayloads( + const ::tsl::StringPiece& message, + const std::unordered_map& payloads) { + return errors::Create(absl::StatusCode::kFailedPrecondition, message, + payloads); +} + +// OutOfRange +template +::tsl::Status OutOfRange(Args... 
args) { + return ::tsl::Status(absl::StatusCode::kOutOfRange, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); +} +template +::tsl::Status OutOfRangeWithPayloads( + const ::tsl::StringPiece& message, + const std::unordered_map& payloads) { + return errors::Create(absl::StatusCode::kOutOfRange, message, payloads); +} + +// Unimplemented +template +::tsl::Status Unimplemented(Args... args) { + return ::tsl::Status(absl::StatusCode::kUnimplemented, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); +} +template +::tsl::Status UnimplementedWithPayloads( + const ::tsl::StringPiece& message, + const std::unordered_map& payloads) { + return errors::Create(absl::StatusCode::kUnimplemented, message, payloads); +} + +// Internal +template +::tsl::Status Internal(Args... args) { + return ::tsl::Status(absl::StatusCode::kInternal, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); +} +template +::tsl::Status InternalWithPayloads( + const ::tsl::StringPiece& message, + const std::unordered_map& payloads) { + return errors::Create(absl::StatusCode::kInternal, message, payloads); +} + +// Aborted +template +::tsl::Status Aborted(Args... args) { + return ::tsl::Status(absl::StatusCode::kAborted, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); +} +template +::tsl::Status AbortedWithPayloads( + const ::tsl::StringPiece& message, + const std::unordered_map& payloads) { + return errors::Create(absl::StatusCode::kAborted, message, payloads); +} + +// DeadlineExceeded +template +::tsl::Status DeadlineExceeded(Args... 
args) { + return ::tsl::Status(absl::StatusCode::kDeadlineExceeded, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); +} +template +::tsl::Status DeadlineExceededWithPayloads( + const ::tsl::StringPiece& message, + const std::unordered_map& payloads) { + return errors::Create(absl::StatusCode::kDeadlineExceeded, message, payloads); +} + +// DataLoss +template +::tsl::Status DataLoss(Args... args) { + return ::tsl::Status(absl::StatusCode::kDataLoss, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); +} +template +::tsl::Status DataLossWithPayloads( + const ::tsl::StringPiece& message, + const std::unordered_map& payloads) { + return errors::Create(absl::StatusCode::kDataLoss, message, payloads); +} + +// Unknown +template +::tsl::Status Unknown(Args... args) { + return ::tsl::Status(absl::StatusCode::kUnknown, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); +} +template +::tsl::Status UnknownPayloads( + const ::tsl::StringPiece& message, + const std::unordered_map& payloads) { + return errors::Create(absl::StatusCode::kUnknown, message, payloads); +} +// PermissionDenied +template +::tsl::Status PermissionDenied(Args... args) { + return ::tsl::Status(absl::StatusCode::kPermissionDenied, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); +} +template +::tsl::Status PermissionDeniedWithPayloads( + const ::tsl::StringPiece& message, + const std::unordered_map& payloads) { + return errors::Create(absl::StatusCode::kPermissionDenied, message, payloads); +} + +// Unauthenticated +template +::tsl::Status Unauthenticated(Args... 
args) { + return ::tsl::Status(absl::StatusCode::kUnauthenticated, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); +} +template +::tsl::Status UnauthenticatedWithPayloads( + const ::tsl::StringPiece& message, + const std::unordered_map& payloads) { + return errors::Create(absl::StatusCode::kUnauthenticated, message, payloads); +} + +bool IsAborted(const Status& status); +bool IsAlreadyExists(const Status& status); +bool IsCancelled(const Status& status); +bool IsDataLoss(const Status& status); +bool IsDeadlineExceeded(const Status& status); +bool IsFailedPrecondition(const Status& status); +bool IsInternal(const Status& status); +bool IsInvalidArgument(const Status& status); +bool IsNotFound(const Status& status); +bool IsOutOfRange(const Status& status); +bool IsPermissionDenied(const Status& status); +bool IsResourceExhausted(const Status& status); +bool IsUnauthenticated(const Status& status); +bool IsUnavailable(const Status& status); +bool IsUnimplemented(const Status& status); +bool IsUnknown(const Status& status); + +// Produces a formatted string pattern from the name which can uniquely identify +// this node upstream to produce an informative error message. 
The pattern +// followed is: {{node }} +// Note: The pattern below determines the regex _NODEDEF_NAME_RE in the file +// tensorflow/python/client/session.py +// LINT.IfChange +inline std::string FormatNodeNameForError(absl::string_view name) { + return strings::StrCat("{{node ", name, "}}"); +} +// LINT.ThenChange(//tensorflow/python/client/session.py) +template +std::string FormatNodeNamesForError(const T& names) { + return absl::StrJoin( + names, ", ", [](std::string* output, absl::string_view s) { + ::tsl::strings::StrAppend(output, FormatNodeNameForError(s)); + }); +} +// LINT.IfChange +inline std::string FormatColocationNodeForError(absl::string_view name) { + return strings::StrCat("{{colocation_node ", name, "}}"); +} +// LINT.ThenChange(//tensorflow/python/framework/error_interpolation.py) +template >> +std::string FormatColocationNodeForError(const T& names) { + return absl::StrJoin( + names, ", ", [](std::string* output, absl::string_view s) { + ::tsl::strings::StrAppend(output, FormatColocationNodeForError(s)); + }); +} + +inline std::string FormatFunctionForError(absl::string_view name) { + return strings::StrCat("{{function_node ", name, "}}"); +} + +inline Status ReplaceErrorFromNonCommunicationOps(const Status s, + absl::string_view op_name) { + assert(::tsl::errors::IsUnavailable(s)); + return Status( + absl::StatusCode::kInternal, + strings::StrCat( + s.message(), "\nExecuting non-communication op <", op_name, + "> originally returned UnavailableError, and was replaced by " + "InternalError to avoid invoking TF network error handling logic.")); +} + +template +std::string FormatOriginalNodeLocationForError(const T& node_names, + const T& func_names) { + std::vector error_message; + for (int i = 0; i != node_names.size(); ++i) { + if (i != 0) { + error_message.push_back(", "); + } + if (i < func_names.size()) { + error_message.push_back(FormatFunctionForError(func_names[i])); + } + error_message.push_back(FormatNodeNameForError(node_names[i])); + } 
+ return absl::StrJoin(error_message, ""); +} + +// The CanonicalCode() for non-errors. +using ::tsl::error::OK; // NOLINT + +} // namespace errors +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_ERRORS_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/file_statistics.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/file_statistics.h new file mode 100644 index 0000000000000000000000000000000000000000..ebe50be46ae811e40c6f1eb0d6e5702df4318941 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/file_statistics.h @@ -0,0 +1,39 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_FILE_STATISTICS_H_ +#define TENSORFLOW_TSL_PLATFORM_FILE_STATISTICS_H_ + +#include "tsl/platform/types.h" + +namespace tsl { + +struct FileStatistics { + // The length of the file or -1 if finding file length is not supported. + int64_t length = -1; + // The last modified time in nanoseconds. + int64_t mtime_nsec = 0; + // True if the file is a directory, otherwise false. 
+ bool is_directory = false; + + FileStatistics() {} + FileStatistics(int64_t length, int64_t mtime_nsec, bool is_directory) + : length(length), mtime_nsec(mtime_nsec), is_directory(is_directory) {} + ~FileStatistics() {} +}; + +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_FILE_STATISTICS_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/file_system.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/file_system.h new file mode 100644 index 0000000000000000000000000000000000000000..8f7bd875e35bc3fdb008b3384e73314dd54ae442 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/file_system.h @@ -0,0 +1,923 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_FILE_SYSTEM_H_ +#define TENSORFLOW_TSL_PLATFORM_FILE_SYSTEM_H_ + +#include + +#include +#include +#include +#include +#include +#include + +#include "tsl/platform/cord.h" +#include "tsl/platform/errors.h" +#include "tsl/platform/file_statistics.h" +#include "tsl/platform/macros.h" +#include "tsl/platform/platform.h" +#include "tsl/platform/stringpiece.h" +#include "tsl/platform/types.h" + +#ifdef PLATFORM_WINDOWS +#undef DeleteFile +#undef CopyFile +#undef TranslateName +#endif + +namespace tsl { + +class FileAcl; +class RandomAccessFile; +class ReadOnlyMemoryRegion; +class WritableFile; + +class FileSystem; +struct TransactionToken { + FileSystem* owner; + void* token; +}; + +/// A generic interface for accessing a file system. Implementations +/// of custom filesystem adapters must implement this interface, +/// RandomAccessFile, WritableFile, and ReadOnlyMemoryRegion classes. +class FileSystem { + public: + /// \brief Creates a brand new random access read-only file with the + /// specified name. + /// + /// On success, stores a pointer to the new file in + /// *result and returns OK. On failure stores NULL in *result and + /// returns non-OK. If the file does not exist, returns a non-OK + /// status. + /// + /// The returned file may be concurrently accessed by multiple threads. + /// + /// The ownership of the returned RandomAccessFile is passed to the caller + /// and the object should be deleted when is not used. + virtual tsl::Status NewRandomAccessFile( + const std::string& fname, std::unique_ptr* result) { + return NewRandomAccessFile(fname, nullptr, result); + } + + virtual tsl::Status NewRandomAccessFile( + const std::string& fname, TransactionToken* token, + std::unique_ptr* result) { + // We duplicate these methods due to Google internal coding style prevents + // virtual functions with default arguments. See PR #41615. 
+ return OkStatus(); + } + + /// \brief Creates an object that writes to a new file with the specified + /// name. + /// + /// Deletes any existing file with the same name and creates a + /// new file. On success, stores a pointer to the new file in + /// *result and returns OK. On failure stores NULL in *result and + /// returns non-OK. + /// + /// The returned file will only be accessed by one thread at a time. + /// + /// The ownership of the returned WritableFile is passed to the caller + /// and the object should be deleted when is not used. + virtual tsl::Status NewWritableFile(const std::string& fname, + std::unique_ptr* result) { + return NewWritableFile(fname, nullptr, result); + } + + virtual tsl::Status NewWritableFile(const std::string& fname, + TransactionToken* token, + std::unique_ptr* result) { + return OkStatus(); + } + + /// \brief Creates an object that either appends to an existing file, or + /// writes to a new file (if the file does not exist to begin with). + /// + /// On success, stores a pointer to the new file in *result and + /// returns OK. On failure stores NULL in *result and returns + /// non-OK. + /// + /// The returned file will only be accessed by one thread at a time. + /// + /// The ownership of the returned WritableFile is passed to the caller + /// and the object should be deleted when is not used. + virtual tsl::Status NewAppendableFile(const std::string& fname, + std::unique_ptr* result) { + return NewAppendableFile(fname, nullptr, result); + } + + virtual tsl::Status NewAppendableFile(const std::string& fname, + TransactionToken* token, + std::unique_ptr* result) { + return OkStatus(); + } + + /// \brief Creates a readonly region of memory with the file context. + /// + /// On success, it returns a pointer to read-only memory region + /// from the content of file fname. The ownership of the region is passed to + /// the caller. On failure stores nullptr in *result and returns non-OK. 
+ /// + /// The returned memory region can be accessed from many threads in parallel. + /// + /// The ownership of the returned ReadOnlyMemoryRegion is passed to the caller + /// and the object should be deleted when is not used. + virtual tsl::Status NewReadOnlyMemoryRegionFromFile( + const std::string& fname, std::unique_ptr* result) { + return NewReadOnlyMemoryRegionFromFile(fname, nullptr, result); + } + + virtual tsl::Status NewReadOnlyMemoryRegionFromFile( + const std::string& fname, TransactionToken* token, + std::unique_ptr* result) { + return OkStatus(); + } + + /// Returns OK if the named path exists and NOT_FOUND otherwise. + virtual tsl::Status FileExists(const std::string& fname) { + return FileExists(fname, nullptr); + } + + virtual tsl::Status FileExists(const std::string& fname, + TransactionToken* token) { + return OkStatus(); + } + + /// Returns true if all the listed files exist, false otherwise. + /// if status is not null, populate the vector with a detailed status + /// for each file. + virtual bool FilesExist(const std::vector& files, + std::vector* status) { + return FilesExist(files, nullptr, status); + } + + virtual bool FilesExist(const std::vector& files, + TransactionToken* token, std::vector* status); + + /// \brief Returns the immediate children in the given directory. + /// + /// The returned paths are relative to 'dir'. + virtual tsl::Status GetChildren(const std::string& dir, + std::vector* result) { + return GetChildren(dir, nullptr, result); + } + + virtual tsl::Status GetChildren(const std::string& dir, + TransactionToken* token, + std::vector* result) { + return OkStatus(); + } + + /// \brief Given a pattern, stores in *results the set of paths that matches + /// that pattern. *results is cleared. + /// + /// pattern must match all of a name, not just a substring. 
+ /// + /// pattern: { term } + /// term: + /// '*': matches any sequence of non-'/' characters + /// '?': matches a single non-'/' character + /// '[' [ '^' ] { match-list } ']': + /// matches any single character (not) on the list + /// c: matches character c (c != '*', '?', '\\', '[') + /// '\\' c: matches character c + /// character-range: + /// c: matches character c (c != '\\', '-', ']') + /// '\\' c: matches character c + /// lo '-' hi: matches character c for lo <= c <= hi + /// + /// Typical return codes: + /// * OK - no errors + /// * UNIMPLEMENTED - Some underlying functions (like GetChildren) are not + /// implemented + virtual tsl::Status GetMatchingPaths(const std::string& pattern, + std::vector* results) { + return GetMatchingPaths(pattern, nullptr, results); + } + + virtual tsl::Status GetMatchingPaths(const std::string& pattern, + TransactionToken* token, + std::vector* results) { + return OkStatus(); + } + + /// \brief Checks if the given filename matches the pattern. + /// + /// This function provides the equivalent of posix fnmatch, however it is + /// implemented without fnmatch to ensure that this can be used for cloud + /// filesystems on windows. For windows filesystems, it uses PathMatchSpec. + virtual bool Match(const std::string& filename, const std::string& pattern); + + /// \brief Obtains statistics for the given path. + virtual tsl::Status Stat(const std::string& fname, FileStatistics* stat) { + return Stat(fname, nullptr, stat); + } + + virtual tsl::Status Stat(const std::string& fname, TransactionToken* token, + FileStatistics* stat) { + return OkStatus(); + } + + /// \brief Deletes the named file. + virtual tsl::Status DeleteFile(const std::string& fname) { + return DeleteFile(fname, nullptr); + } + + virtual tsl::Status DeleteFile(const std::string& fname, + TransactionToken* token) { + return OkStatus(); + } + + /// \brief Creates the specified directory. + /// Typical return codes: + /// * OK - successfully created the directory. 
+ /// * ALREADY_EXISTS - directory with name dirname already exists. + /// * PERMISSION_DENIED - dirname is not writable. + virtual tsl::Status CreateDir(const std::string& dirname) { + return CreateDir(dirname, nullptr); + } + + virtual tsl::Status CreateDir(const std::string& dirname, + TransactionToken* token) { + return OkStatus(); + } + + /// \brief Creates the specified directory and all the necessary + /// subdirectories. + /// Typical return codes: + /// * OK - successfully created the directory and sub directories, even if + /// they were already created. + /// * PERMISSION_DENIED - dirname or some subdirectory is not writable. + virtual tsl::Status RecursivelyCreateDir(const std::string& dirname) { + return RecursivelyCreateDir(dirname, nullptr); + } + + virtual tsl::Status RecursivelyCreateDir(const std::string& dirname, + TransactionToken* token); + + /// \brief Deletes the specified directory. + virtual tsl::Status DeleteDir(const std::string& dirname) { + return DeleteDir(dirname, nullptr); + } + + virtual tsl::Status DeleteDir(const std::string& dirname, + TransactionToken* token) { + return OkStatus(); + } + + /// \brief Deletes the specified directory and all subdirectories and files + /// underneath it. This is accomplished by traversing the directory tree + /// rooted at dirname and deleting entries as they are encountered. + /// + /// If dirname itself is not readable or does not exist, *undeleted_dir_count + /// is set to 1, *undeleted_file_count is set to 0 and an appropriate status + /// (e.g. NOT_FOUND) is returned. + /// + /// If dirname and all its descendants were successfully deleted, TF_OK is + /// returned and both error counters are set to zero. + /// + /// Otherwise, while traversing the tree, undeleted_file_count and + /// undeleted_dir_count are updated if an entry of the corresponding type + /// could not be deleted. The returned error status represents the reason that + /// any one of these entries could not be deleted. 
+ /// + /// REQUIRES: undeleted_files, undeleted_dirs to be not null. + /// + /// Typical return codes: + /// * OK - dirname exists and we were able to delete everything underneath. + /// * NOT_FOUND - dirname doesn't exist + /// * PERMISSION_DENIED - dirname or some descendant is not writable + /// * UNIMPLEMENTED - Some underlying functions (like Delete) are not + /// implemented + virtual tsl::Status DeleteRecursively(const std::string& dirname, + int64_t* undeleted_files, + int64_t* undeleted_dirs) { + return DeleteRecursively(dirname, nullptr, undeleted_files, undeleted_dirs); + } + + virtual tsl::Status DeleteRecursively(const std::string& dirname, + TransactionToken* token, + int64_t* undeleted_files, + int64_t* undeleted_dirs); + + /// \brief Stores the size of `fname` in `*file_size`. + virtual tsl::Status GetFileSize(const std::string& fname, uint64* file_size) { + return GetFileSize(fname, nullptr, file_size); + } + + virtual tsl::Status GetFileSize(const std::string& fname, + TransactionToken* token, uint64* file_size) { + return OkStatus(); + } + + /// \brief Overwrites the target if it exists. + virtual tsl::Status RenameFile(const std::string& src, + const std::string& target) { + return RenameFile(src, target, nullptr); + } + + virtual tsl::Status RenameFile(const std::string& src, + const std::string& target, + TransactionToken* token) { + return OkStatus(); + } + + /// \brief Copy the src to target. + virtual tsl::Status CopyFile(const std::string& src, + const std::string& target) { + return CopyFile(src, target, nullptr); + } + + virtual tsl::Status CopyFile(const std::string& src, + const std::string& target, + TransactionToken* token); + + /// \brief Translate an URI to a filename for the FileSystem implementation. + /// + /// The implementation in this class cleans up the path, removing + /// duplicate /'s, resolving .. and removing trailing '/'. + /// This respects relative vs. 
absolute paths, but does not + /// invoke any system calls (getcwd(2)) in order to resolve relative + /// paths with respect to the actual working directory. That is, this is + /// purely string manipulation, completely independent of process state. + virtual std::string TranslateName(const std::string& name) const; + + /// \brief Returns whether the given path is a directory or not. + /// + /// Typical return codes (not guaranteed exhaustive): + /// * OK - The path exists and is a directory. + /// * FAILED_PRECONDITION - The path exists and is not a directory. + /// * NOT_FOUND - The path entry does not exist. + /// * PERMISSION_DENIED - Insufficient permissions. + /// * UNIMPLEMENTED - The file factory doesn't support directories. + virtual tsl::Status IsDirectory(const std::string& fname) { + return IsDirectory(fname, nullptr); + } + + virtual tsl::Status IsDirectory(const std::string& fname, + TransactionToken* token); + + /// \brief Returns whether the given path is on a file system + /// that has atomic move capabilities. This can be used + /// to determine if there needs to be a temp location to safely write objects. + /// The second boolean argument has_atomic_move contains this information. + /// + /// Returns one of the following status codes (not guaranteed exhaustive): + /// * OK - The path is on a recognized file system, + /// so has_atomic_move holds the above information. + /// * UNIMPLEMENTED - The file system of the path hasn't been implemented in + /// TF + virtual Status HasAtomicMove(const std::string& path, bool* has_atomic_move); + + /// \brief Flushes any cached filesystem objects from memory. + virtual void FlushCaches() { FlushCaches(nullptr); } + + virtual void FlushCaches(TransactionToken* token); + + /// \brief The separator this filesystem uses. + /// + /// This is implemented as a part of the filesystem, because even on windows, + /// a user may need access to filesystems with '/' separators, such as cloud + /// filesystems. 
+ virtual char Separator() const; + + /// \brief Split a path to its basename and dirname. + /// + /// Helper function for Basename and Dirname. + std::pair SplitPath(StringPiece uri) const; + + /// \brief returns the final file name in the given path. + /// + /// Returns the part of the path after the final "/". If there is no + /// "/" in the path, the result is the same as the input. + virtual StringPiece Basename(StringPiece path) const; + + /// \brief Returns the part of the path before the final "/". + /// + /// If there is a single leading "/" in the path, the result will be the + /// leading "/". If there is no "/" in the path, the result is the empty + /// prefix of the input. + StringPiece Dirname(StringPiece path) const; + + /// \brief Returns the part of the basename of path after the final ".". + /// + /// If there is no "." in the basename, the result is empty. + StringPiece Extension(StringPiece path) const; + + /// \brief Clean duplicate and trailing, "/"s, and resolve ".." and ".". + /// + /// NOTE: This respects relative vs. absolute paths, but does not + /// invoke any system calls (getcwd(2)) in order to resolve relative + /// paths with respect to the actual working directory. That is, this is + /// purely string manipulation, completely independent of process state. + std::string CleanPath(StringPiece path) const; + + /// \brief Creates a URI from a scheme, host, and path. + /// + /// If the scheme is empty, we just return the path. + std::string CreateURI(StringPiece scheme, StringPiece host, + StringPiece path) const; + + /// \brief Return true if path is absolute. + bool IsAbsolutePath(tsl::StringPiece path) const; + +#ifndef SWIG // variadic templates + /// \brief Join multiple paths together. + /// + /// This function also removes the unnecessary path separators. 
+ /// For example: + /// + /// Arguments | JoinPath + /// ---------------------------+---------- + /// '/foo', 'bar' | /foo/bar + /// '/foo/', 'bar' | /foo/bar + /// '/foo', '/bar' | /foo/bar + /// + /// Usage: + /// string path = io::JoinPath("/mydir", filename); + /// string path = io::JoinPath(FLAGS_test_srcdir, filename); + /// string path = io::JoinPath("/full", "path", "to", "filename"); + template + std::string JoinPath(const T&... args) { + return JoinPathImpl({args...}); + } +#endif /* SWIG */ + + std::string JoinPathImpl(std::initializer_list paths); + + /// \brief Populates the scheme, host, and path from a URI. + /// + /// scheme, host, and path are guaranteed by this function to point into the + /// contents of uri, even if empty. + /// + /// Corner cases: + /// - If the URI is invalid, scheme and host are set to empty strings and the + /// passed string is assumed to be a path + /// - If the URI omits the path (e.g. file://host), then the path is left + /// empty. + void ParseURI(StringPiece remaining, StringPiece* scheme, StringPiece* host, + StringPiece* path) const; + + // Transaction related API + + /// \brief Starts a new transaction + virtual tsl::Status StartTransaction(TransactionToken** token) { + *token = nullptr; + return OkStatus(); + } + + /// \brief Adds `path` to transaction in `token` + virtual tsl::Status AddToTransaction(const std::string& path, + TransactionToken* token) { + return OkStatus(); + } + + /// \brief Ends transaction + virtual tsl::Status EndTransaction(TransactionToken* token) { + return OkStatus(); + } + + /// \brief Get token for `path` or start a new transaction and add `path` to + /// it. 
+ virtual tsl::Status GetTokenOrStartTransaction(const std::string& path, + TransactionToken** token) { + *token = nullptr; + return OkStatus(); + } + + /// \brief Return transaction for `path` or nullptr in `token` + virtual tsl::Status GetTransactionForPath(const std::string& path, + TransactionToken** token) { + *token = nullptr; + return OkStatus(); + } + + /// \brief Decode transaction to human readable string. + virtual std::string DecodeTransaction(const TransactionToken* token); + + /// \brief Set File System Configuration Options + virtual Status SetOption(const string& key, const string& value) { + return errors::Unimplemented("SetOption"); + } + + /// \brief Set File System Configuration Option + virtual tsl::Status SetOption(const std::string& name, + const std::vector& values) { + return errors::Unimplemented("SetOption"); + } + + /// \brief Set File System Configuration Option + virtual tsl::Status SetOption(const std::string& name, + const std::vector& values) { + return errors::Unimplemented("SetOption"); + } + + /// \brief Set File System Configuration Option + virtual tsl::Status SetOption(const std::string& name, + const std::vector& values) { + return errors::Unimplemented("SetOption"); + } + + /// \brief Set File System ACL checker. + /// + /// No checks are enforced if a FileAcl is never set. + virtual tsl::Status SetFileAcl(std::shared_ptr file_acl) { + return errors::Unimplemented("SetFileAcl"); + } + + FileSystem() {} + + virtual ~FileSystem() = default; +}; +/// This macro adds forwarding methods from FileSystem class to +/// used class since name hiding will prevent these to be accessed from +/// derived classes and would require all use locations to migrate to +/// Transactional API. This is an interim solution until ModularFileSystem class +/// becomes a singleton. +// TODO(sami): Remove this macro when filesystem plugins migration is complete. 
+#define TF_USE_FILESYSTEM_METHODS_WITH_NO_TRANSACTION_SUPPORT \ + using FileSystem::NewRandomAccessFile; \ + using FileSystem::NewWritableFile; \ + using FileSystem::NewAppendableFile; \ + using FileSystem::NewReadOnlyMemoryRegionFromFile; \ + using FileSystem::FileExists; \ + using FileSystem::GetChildren; \ + using FileSystem::GetMatchingPaths; \ + using FileSystem::Stat; \ + using FileSystem::DeleteFile; \ + using FileSystem::RecursivelyCreateDir; \ + using FileSystem::DeleteDir; \ + using FileSystem::DeleteRecursively; \ + using FileSystem::GetFileSize; \ + using FileSystem::RenameFile; \ + using FileSystem::CopyFile; \ + using FileSystem::IsDirectory; \ + using FileSystem::FlushCaches + +/// A Wrapper class for Transactional FileSystem support. +/// This provides means to make use of the transactions with minimal code change +/// Any operations that are done through this interface will be through the +/// transaction created at the time of construction of this instance. +/// See FileSystem documentation for method descriptions. +/// This class simply forwards all calls to wrapped filesystem either with given +/// transaction token or with token used in its construction. This allows doing +/// transactional filesystem access with minimal code change. +class WrappedFileSystem : public FileSystem { + public: + TF_USE_FILESYSTEM_METHODS_WITH_NO_TRANSACTION_SUPPORT; + + tsl::Status NewRandomAccessFile( + const std::string& fname, TransactionToken* token, + std::unique_ptr* result) override { + return fs_->NewRandomAccessFile(fname, (token ? token : token_), result); + } + + tsl::Status NewWritableFile(const std::string& fname, TransactionToken* token, + std::unique_ptr* result) override { + return fs_->NewWritableFile(fname, (token ? token : token_), result); + } + + tsl::Status NewAppendableFile( + const std::string& fname, TransactionToken* token, + std::unique_ptr* result) override { + return fs_->NewAppendableFile(fname, (token ? 
token : token_), result); + } + + tsl::Status NewReadOnlyMemoryRegionFromFile( + const std::string& fname, TransactionToken* token, + std::unique_ptr* result) override { + return fs_->NewReadOnlyMemoryRegionFromFile(fname, (token ? token : token_), + result); + } + + tsl::Status FileExists(const std::string& fname, + TransactionToken* token) override { + return fs_->FileExists(fname, (token ? token : token_)); + } + + bool FilesExist(const std::vector& files, TransactionToken* token, + std::vector* status) override { + return fs_->FilesExist(files, (token ? token : token_), status); + } + + tsl::Status GetChildren(const std::string& dir, TransactionToken* token, + std::vector* result) override { + return fs_->GetChildren(dir, (token ? token : token_), result); + } + + tsl::Status GetMatchingPaths(const std::string& pattern, + TransactionToken* token, + std::vector* results) override { + return fs_->GetMatchingPaths(pattern, (token ? token : token_), results); + } + + bool Match(const std::string& filename, const std::string& pattern) override { + return fs_->Match(filename, pattern); + } + + tsl::Status Stat(const std::string& fname, TransactionToken* token, + FileStatistics* stat) override { + return fs_->Stat(fname, (token ? token : token_), stat); + } + + tsl::Status DeleteFile(const std::string& fname, + TransactionToken* token) override { + return fs_->DeleteFile(fname, (token ? token : token_)); + } + + tsl::Status CreateDir(const std::string& dirname, + TransactionToken* token) override { + return fs_->CreateDir(dirname, (token ? token : token_)); + } + + tsl::Status RecursivelyCreateDir(const std::string& dirname, + TransactionToken* token) override { + return fs_->RecursivelyCreateDir(dirname, (token ? token : token_)); + } + + tsl::Status DeleteDir(const std::string& dirname, + TransactionToken* token) override { + return fs_->DeleteDir(dirname, (token ? 
token : token_)); + } + + tsl::Status DeleteRecursively(const std::string& dirname, + TransactionToken* token, + int64_t* undeleted_files, + int64_t* undeleted_dirs) override { + return fs_->DeleteRecursively(dirname, (token ? token : token_), + undeleted_files, undeleted_dirs); + } + + tsl::Status GetFileSize(const std::string& fname, TransactionToken* token, + uint64* file_size) override { + return fs_->GetFileSize(fname, (token ? token : token_), file_size); + } + + tsl::Status RenameFile(const std::string& src, const std::string& target, + TransactionToken* token) override { + return fs_->RenameFile(src, target, (token ? token : token_)); + } + + tsl::Status CopyFile(const std::string& src, const std::string& target, + TransactionToken* token) override { + return fs_->CopyFile(src, target, (token ? token : token_)); + } + + std::string TranslateName(const std::string& name) const override { + return fs_->TranslateName(name); + } + + tsl::Status IsDirectory(const std::string& fname, + TransactionToken* token) override { + return fs_->IsDirectory(fname, (token ? token : token_)); + } + + Status HasAtomicMove(const std::string& path, + bool* has_atomic_move) override { + return fs_->HasAtomicMove(path, has_atomic_move); + } + + void FlushCaches(TransactionToken* token) override { + return fs_->FlushCaches((token ? token : token_)); + } + + char Separator() const override { return fs_->Separator(); } + + StringPiece Basename(StringPiece path) const override { + return fs_->Basename(path); + } + + tsl::Status StartTransaction(TransactionToken** token) override { + return fs_->StartTransaction(token); + } + + tsl::Status AddToTransaction(const std::string& path, + TransactionToken* token) override { + return fs_->AddToTransaction(path, (token ? 
token : token_)); + } + + tsl::Status EndTransaction(TransactionToken* token) override { + return fs_->EndTransaction(token); + } + + tsl::Status GetTransactionForPath(const std::string& path, + TransactionToken** token) override { + return fs_->GetTransactionForPath(path, token); + } + + tsl::Status GetTokenOrStartTransaction(const std::string& path, + TransactionToken** token) override { + return fs_->GetTokenOrStartTransaction(path, token); + } + + std::string DecodeTransaction(const TransactionToken* token) override { + return fs_->DecodeTransaction((token ? token : token_)); + } + + WrappedFileSystem(FileSystem* file_system, TransactionToken* token) + : fs_(file_system), token_(token) {} + + ~WrappedFileSystem() override = default; + + private: + FileSystem* fs_; + TransactionToken* token_; +}; + +/// A file abstraction for randomly reading the contents of a file. +class RandomAccessFile { + public: + RandomAccessFile() {} + virtual ~RandomAccessFile() = default; + + /// \brief Returns the name of the file. + /// + /// This is an optional operation that may not be implemented by every + /// filesystem. + virtual tsl::Status Name(StringPiece* result) const { + return errors::Unimplemented("This filesystem does not support Name()"); + } + + /// \brief Reads up to `n` bytes from the file starting at `offset`. + /// + /// `scratch[0..n-1]` may be written by this routine. Sets `*result` + /// to the data that was read (including if fewer than `n` bytes were + /// successfully read). May set `*result` to point at data in + /// `scratch[0..n-1]`, so `scratch[0..n-1]` must be live when + /// `*result` is used. + /// + /// On OK returned status: `n` bytes have been stored in `*result`. + /// On non-OK returned status: `[0..n]` bytes have been stored in `*result`. + /// + /// Returns `OUT_OF_RANGE` if fewer than n bytes were stored in `*result` + /// because of EOF. + /// + /// Safe for concurrent use by multiple threads. 
+ virtual tsl::Status Read(uint64 offset, size_t n, StringPiece* result, + char* scratch) const = 0; + +#if defined(TF_CORD_SUPPORT) + /// \brief Read up to `n` bytes from the file starting at `offset`. + virtual tsl::Status Read(uint64 offset, size_t n, absl::Cord* cord) const { + return errors::Unimplemented( + "Read(uint64, size_t, absl::Cord*) is not " + "implemented"); + } +#endif + + private: + RandomAccessFile(const RandomAccessFile&) = delete; + void operator=(const RandomAccessFile&) = delete; +}; + +/// \brief A file abstraction for sequential writing. +/// +/// The implementation must provide buffering since callers may append +/// small fragments at a time to the file. +class WritableFile { + public: + WritableFile() {} + virtual ~WritableFile() = default; + + /// \brief Append 'data' to the file. + virtual tsl::Status Append(StringPiece data) = 0; + +#if defined(TF_CORD_SUPPORT) + // \brief Append 'data' to the file. + virtual tsl::Status Append(const absl::Cord& cord) { + for (StringPiece chunk : cord.Chunks()) { + TF_RETURN_IF_ERROR(Append(chunk)); + } + return OkStatus(); + } +#endif + + /// \brief Close the file. + /// + /// Flush() and de-allocate resources associated with this file + /// + /// Typical return codes (not guaranteed to be exhaustive): + /// * OK + /// * Other codes, as returned from Flush() + virtual tsl::Status Close() = 0; + + /// \brief Flushes the file and optionally syncs contents to filesystem. + /// + /// This should flush any local buffers whose contents have not been + /// delivered to the filesystem. + /// + /// If the process terminates after a successful flush, the contents + /// may still be persisted, since the underlying filesystem may + /// eventually flush the contents. If the OS or machine crashes + /// after a successful flush, the contents may or may not be + /// persisted, depending on the implementation. + virtual tsl::Status Flush() = 0; + + // \brief Returns the name of the file. 
+ /// + /// This is an optional operation that may not be implemented by every + /// filesystem. + virtual tsl::Status Name(StringPiece* result) const { + return errors::Unimplemented("This filesystem does not support Name()"); + } + + /// \brief Syncs contents of file to filesystem. + /// + /// This waits for confirmation from the filesystem that the contents + /// of the file have been persisted to the filesystem; if the OS + /// or machine crashes after a successful Sync, the contents should + /// be properly saved. + virtual tsl::Status Sync() = 0; + + /// \brief Retrieves the current write position in the file, or -1 on + /// error. + /// + /// This is an optional operation, subclasses may choose to return + /// errors::Unimplemented. + virtual tsl::Status Tell(int64_t* position) { + *position = -1; + return errors::Unimplemented("This filesystem does not support Tell()"); + } + + private: + WritableFile(const WritableFile&) = delete; + void operator=(const WritableFile&) = delete; +}; + +/// \brief A readonly memmapped file abstraction. +/// +/// The implementation must guarantee that all memory is accessible when the +/// object exists, independently from the Env that created it. +class ReadOnlyMemoryRegion { + public: + ReadOnlyMemoryRegion() {} + virtual ~ReadOnlyMemoryRegion() = default; + + /// \brief Returns a pointer to the memory region. + virtual const void* data() = 0; + + /// \brief Returns the length of the memory region in bytes. + virtual uint64 length() = 0; +}; + +/// \brief A registry for file system implementations. +/// +/// Filenames are specified as an URI, which is of the form +/// [scheme://]. +/// File system implementations are registered using the REGISTER_FILE_SYSTEM +/// macro, providing the 'scheme' as the key. 
+///
+/// There are two `Register` methods: one using `Factory` for legacy filesystems
+/// (deprecated mechanism of subclassing `FileSystem` and using
+/// `REGISTER_FILE_SYSTEM` macro), and one using `std::unique_ptr<FileSystem>`
+/// for the new modular approach.
+///
+/// Note that the new API expects a pointer to `ModularFileSystem` but this is
+/// not checked as there should be exactly one caller to the API and doing the
+/// check results in a circular dependency between `BUILD` targets.
+///
+/// Plan is to completely remove the filesystem registration from `Env` and
+/// incorporate it into `ModularFileSystem` class (which will be renamed to be
+/// the only `FileSystem` class and marked as `final`). But this will happen at
+/// a later time, after we convert all filesystems to the new API.
+///
+/// TODO(b/139060984): After all filesystems are converted, remove old
+/// registration and update comment.
+class FileSystemRegistry {
+ public:
+  /// Legacy factory producing a heap-allocated FileSystem.
+  /// NOTE(review): the template argument was lost in extraction and has been
+  /// restored from the legacy REGISTER_FILE_SYSTEM contract described above;
+  /// confirm against the upstream header.
+  using Factory = std::function<FileSystem*()>;
+
+  virtual ~FileSystemRegistry() = default;
+  /// Registers `factory` for `scheme` (legacy registration path).
+  virtual tsl::Status Register(const std::string& scheme, Factory factory) = 0;
+  /// Registers an already-constructed `filesystem` for `scheme`, taking
+  /// ownership (modular registration path).
+  virtual tsl::Status Register(const std::string& scheme,
+                               std::unique_ptr<FileSystem> filesystem) = 0;
+  /// Returns the filesystem registered for `scheme`.
+  /// NOTE(review): presumably nullptr when no filesystem is registered for
+  /// `scheme` — confirm with implementations.
+  virtual FileSystem* Lookup(const std::string& scheme) = 0;
+  /// Populates `*schemes` with all registered scheme names.
+  virtual tsl::Status GetRegisteredFileSystemSchemes(
+      std::vector<std::string>* schemes) = 0;
+};
+
+/// \brief An abstraction for enforcing ACL checks in FileSystem.
+class FileAcl { + public: + virtual absl::Status CheckAccess(std::string_view path) = 0; + virtual ~FileAcl() = default; +}; + +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_FILE_SYSTEM_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/file_system_helper.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/file_system_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..5cc2cdc5a1e898eac8637b4e9518fb8c00e984ed --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/file_system_helper.h @@ -0,0 +1,64 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_FILE_SYSTEM_HELPER_H_ +#define TENSORFLOW_TSL_PLATFORM_FILE_SYSTEM_HELPER_H_ + +#include +#include + +#include "tsl/platform/env.h" +#include "tsl/platform/status.h" +#include "tsl/platform/statusor.h" + +namespace tsl { + +class FileSystem; +class Env; + +namespace internal { + +// Given a pattern, stores in 'results' the set of paths (in the given file +// system) that match that pattern. +// +// This helper may be used by implementations of FileSystem::GetMatchingPaths() +// in order to provide parallel scanning of subdirectories (except on iOS). 
+// +// Arguments: +// fs: may not be null and will be used to identify directories and list +// their contents. +// env: may not be null and will be used to check if a match has been found. +// pattern: see FileSystem::GetMatchingPaths() for details. +// results: will be cleared and may not be null. +// +// Returns an error status if any call to 'fs' failed. +Status GetMatchingPaths(FileSystem* fs, Env* env, const string& pattern, + std::vector* results); + +// Given a file path, determines whether the file exists. This helper simplifies +// the use of Env::FileExists. +// +// Arguments: +// env: may not be null. +// fname: the file path to look up +// +// Returns true if the file exists, false if it does not exist, or an error +// Status. +StatusOr FileExists(Env* env, const string& fname); + +} // namespace internal +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_FILE_SYSTEM_HELPER_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/fingerprint.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/fingerprint.h new file mode 100644 index 0000000000000000000000000000000000000000..bb961fd89c174225ee1967a70ade127332cdc4b4 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/fingerprint.h @@ -0,0 +1,127 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_FINGERPRINT_H_ +#define TENSORFLOW_TSL_PLATFORM_FINGERPRINT_H_ + +#include "tsl/platform/platform.h" +#include "tsl/platform/stringpiece.h" +#include "tsl/platform/types.h" + +#if TSL_IS_IN_OSS +#define USE_OSS_FARMHASH +#endif // TSL_IS_IN_OSS + +#ifdef USE_OSS_FARMHASH +#include +#else +#include "util/hash/farmhash_fingerprint.h" +#endif + +namespace tsl { + +struct Fprint128 { + uint64_t low64; + uint64_t high64; +}; + +inline bool operator==(const Fprint128& lhs, const Fprint128& rhs) { + return lhs.low64 == rhs.low64 && lhs.high64 == rhs.high64; +} + +struct Fprint128Hasher { + size_t operator()(const Fprint128& v) const { + // Low64 should be sufficiently mixed to allow use of it as a Hash. + return static_cast(v.low64); + } +}; + +namespace internal { +// Mixes some of the bits that got propagated to the high bits back into the +// low bits. +inline uint64_t ShiftMix(const uint64_t val) { return val ^ (val >> 47); } +} // namespace internal + +// This concatenates two 64-bit fingerprints. It is a convenience function to +// get a fingerprint for a combination of already fingerprinted components. For +// example this code is used to concatenate the hashes from each of the features +// on sparse crosses. +// +// One shouldn't expect FingerprintCat64(Fingerprint64(x), Fingerprint64(y)) +// to indicate anything about FingerprintCat64(StrCat(x, y)). This operation +// is not commutative. +// +// From a security standpoint, we don't encourage this pattern to be used +// for everything as it is vulnerable to length-extension attacks and it +// is easier to compute multicollisions. 
+inline uint64_t FingerprintCat64(const uint64_t fp1, const uint64_t fp2) {
+  // 64-bit multiplicative mixing constant used for all rounds below.
+  static const uint64_t kMul = 0xc6a4a7935bd1e995ULL;
+  uint64_t result = fp1 ^ kMul;
+  // Fold fp2 in, spreading its bits via ShiftMix before and after multiply.
+  result ^= internal::ShiftMix(fp2 * kMul) * kMul;
+  result *= kMul;
+  // Two final mixing rounds; the last intentionally omits the multiply.
+  result = internal::ShiftMix(result) * kMul;
+  result = internal::ShiftMix(result);
+  return result;
+}
+
+// This is a portable fingerprint interface for strings that will never change.
+// However, it is not suitable for cryptography.
+inline uint64_t Fingerprint64(const tsl::StringPiece s) {
+#ifdef USE_OSS_FARMHASH
+  // OSS builds call the open-source farmhash library directly.
+  return ::util::Fingerprint64(s.data(), s.size());
+#else
+  // Fingerprint op depends on the fact that Fingerprint64() is implemented by
+  // Farmhash. If the implementation ever changes, Fingerprint op should be
+  // modified to keep using Farmhash.
+  // LINT.IfChange
+  return farmhash::Fingerprint64(s.data(), s.size());
+  // LINT.ThenChange(//tensorflow/core/kernels/fingerprint_op.cc)
+#endif
+}
+
+// 32-bit variant of Fingerprint64 above (same properties and caveats apply).
+inline uint32_t Fingerprint32(const tsl::StringPiece s) {
+#ifdef USE_OSS_FARMHASH
+  return ::util::Fingerprint32(s.data(), s.size());
+#else
+  return farmhash::Fingerprint32(s.data(), s.size());
+#endif
+}
+
+// 128-bit variant of Fingerprint64 above (same properties and caveats apply).
+inline Fprint128 Fingerprint128(const tsl::StringPiece s) { +#ifdef USE_OSS_FARMHASH + const auto fingerprint = ::util::Fingerprint128(s.data(), s.size()); + return {::util::Uint128Low64(fingerprint), + ::util::Uint128High64(fingerprint)}; +#else + const auto fingerprint = farmhash::Fingerprint128(s.data(), s.size()); + return {absl::Uint128Low64(fingerprint), absl::Uint128High64(fingerprint)}; +#endif +} + +inline Fprint128 FingerprintCat128(const Fprint128& a, const Fprint128& b) { + return {FingerprintCat64(a.low64, b.low64), + FingerprintCat64(a.high64, b.high64)}; +} + +inline Fprint128 FingerprintCat128(const Fprint128& a, const uint64_t b) { + auto x = FingerprintCat64(a.low64, b); + return {x, FingerprintCat64(a.high64, x)}; +} + +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_FINGERPRINT_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/gif.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/gif.h new file mode 100644 index 0000000000000000000000000000000000000000..865b6f201e66fe23fe33acd5f5e47a68325f8a56 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/gif.h @@ -0,0 +1,21 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_GIF_H_ +#define TENSORFLOW_TSL_PLATFORM_GIF_H_ + +#include "gif_lib.h" // from @gif + +#endif // TENSORFLOW_TSL_PLATFORM_GIF_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/hash.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/hash.h new file mode 100644 index 0000000000000000000000000000000000000000..d8d676a72d3b040f93aff5a51e528e69129434a8 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/hash.h @@ -0,0 +1,135 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +// Simple hash functions used for internal data structures + +#ifndef TENSORFLOW_TSL_PLATFORM_HASH_H_ +#define TENSORFLOW_TSL_PLATFORM_HASH_H_ + +#include +#include + +#include +#include + +#include "tsl/platform/stringpiece.h" +#include "tsl/platform/types.h" + +namespace tsl { + +extern uint32 Hash32(const char* data, size_t n, uint32 seed); +extern uint64 Hash64(const char* data, size_t n, uint64 seed); + +inline uint64 Hash64(const char* data, size_t n) { + return Hash64(data, n, 0xDECAFCAFFE); +} + +inline uint64 Hash64(const char* data) { return Hash64(data, ::strlen(data)); } + +inline uint64 Hash64(const std::string& str) { + return Hash64(str.data(), str.size()); +} + +inline uint64 Hash64(const tstring& str) { + return Hash64(str.data(), str.size()); +} + +inline uint64 Hash64Combine(uint64 a, uint64 b) { + return a ^ (b + 0x9e3779b97f4a7800ULL + (a << 10) + (a >> 4)); +} + +// Combine two hashes in an order-independent way. This operation should be +// associative and compute the same hash for a collection of elements +// independent of traversal order. Note that it is better to combine hashes +// symmetrically with addition rather than XOR, since (x^x) == 0 but (x+x) != 0. +inline uint64 Hash64CombineUnordered(uint64 a, uint64 b) { return a + b; } + +// Hash functor suitable for use with power-of-two sized hashtables. Use +// instead of std::hash. +// +// In particular, tsl::hash is not the identity function for pointers. +// This is important for power-of-two sized hashtables like FlatMap and FlatSet, +// because otherwise they waste the majority of their hash buckets. +// +// The second type argument is only used for SFNIAE below. 
+template +struct hash { + size_t operator()(const T& t) const { return std::hash()(t); } +}; + +template +struct hash::value>::type> { + size_t operator()(T value) const { + // This works around a defect in the std::hash C++ spec that isn't fixed in + // (at least) gcc 4.8.4: + // http://www.open-std.org/jtc1/sc22/wg21/docs/lwg-defects.html#2148 + // + // We should be able to remove this and use the default + // tsl::hash() once we stop building with GCC versions old + // enough to not have this defect fixed. + return std::hash()(static_cast(value)); + } +}; + +template +struct hash { + size_t operator()(const T* t) const { + // Hash pointers as integers, but bring more entropy to the lower bits. + size_t k = static_cast(reinterpret_cast(t)); + return k + (k >> 6); + } +}; + +template <> +struct hash { + size_t operator()(const string& s) const { + return static_cast(Hash64(s)); + } +}; + +template <> +struct hash { + size_t operator()(const tstring& s) const { + return static_cast(Hash64(s.data(), s.size())); + } +}; + +template <> +struct hash { + size_t operator()(StringPiece sp) const { + return static_cast(Hash64(sp.data(), sp.size())); + } +}; +using StringPieceHasher = ::tsl::hash; + +template +struct hash> { + size_t operator()(const std::pair& p) const { + return Hash64Combine(hash()(p.first), hash()(p.second)); + } +}; + +} // namespace tsl + +namespace std { +template <> +struct hash { + size_t operator()(const tsl::tstring& s) const { + return static_cast(tsl::Hash64(s.data(), s.size())); + } +}; +} // namespace std + +#endif // TENSORFLOW_TSL_PLATFORM_HASH_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/host_info.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/host_info.h new file mode 100644 index 0000000000000000000000000000000000000000..630f9424525e0484fdf07b56738040be93805480 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/host_info.h @@ 
-0,0 +1,58 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_HOST_INFO_H_ +#define TENSORFLOW_TSL_PLATFORM_HOST_INFO_H_ + +#include + +#include "tsl/platform/types.h" + +namespace tsl { +namespace port { + +// Statistical data of IO operations performed by the job. +struct IOStatistics { + struct Distribution { + uint64_t count = 0; + double mean = 0.0; + double std_dev = 0.0; + }; + // Distribution of round trip IO latency in microseconds. + Distribution roundtrip_latency_usec; + // Distribution of data received by IO reads in bytes. + Distribution response_bytes; +}; + +// Return the hostname of the machine on which this process is running. +string Hostname(); + +// Return the job name as a string if it exists, otherwise return an empty +// string. +string JobName(); + +// Returns the Borg job UID as an int64_t if it exists. Otherwise return -1. +int64_t JobUid(); + +// Returns the Borg task ID as an int64_t if it exists. Otherwise return -1. +int64_t TaskId(); + +// Retrieves the host file read statistics. 
+IOStatistics GetIOStatistics(); + +} // namespace port +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_HOST_INFO_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/human_readable_json.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/human_readable_json.h new file mode 100644 index 0000000000000000000000000000000000000000..b8e81df81f1829341eab005e3542533b90d88547 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/human_readable_json.h @@ -0,0 +1,45 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_HUMAN_READABLE_JSON_H_ +#define TENSORFLOW_TSL_PLATFORM_HUMAN_READABLE_JSON_H_ + +#include "tsl/platform/protobuf.h" +#include "tsl/platform/status.h" + +namespace tsl { + +// Converts a proto to a JSON-like string that's meant to be human-readable +// but still machine-parseable. +// +// This string may not be strictly JSON-compliant, but it must be parseable by +// HumanReadableJSONToProto. +// +// When ignore_accuracy_loss = true, this function may ignore JavaScript +// accuracy loss with large integers. 
+Status ProtoToHumanReadableJson(const protobuf::Message& proto, string* result, + bool ignore_accuracy_loss); +Status ProtoToHumanReadableJson(const protobuf::MessageLite& proto, + string* result, bool ignore_accuracy_loss); + +// Converts a string produced by ProtoToHumanReadableJSON to a protobuf. Not +// guaranteed to work for general JSON. +Status HumanReadableJsonToProto(const string& str, protobuf::Message* proto); +Status HumanReadableJsonToProto(const string& str, + protobuf::MessageLite* proto); + +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_HUMAN_READABLE_JSON_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/init_main.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/init_main.h new file mode 100644 index 0000000000000000000000000000000000000000..c02c1e8d1db2ba776c570e4e243a858df004ff3f --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/init_main.h @@ -0,0 +1,27 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_INIT_MAIN_H_ +#define TENSORFLOW_TSL_PLATFORM_INIT_MAIN_H_ + +namespace tsl { +namespace port { + +void InitMain(const char* usage, int* argc, char*** argv); + +} // namespace port +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_INIT_MAIN_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/jpeg.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/jpeg.h new file mode 100644 index 0000000000000000000000000000000000000000..a7b640db03943f205f5d8e5c37931cead464f2f8 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/jpeg.h @@ -0,0 +1,29 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_JPEG_H_ +#define TENSORFLOW_TSL_PLATFORM_JPEG_H_ + +#include +#include +#include +#include + +extern "C" { +#include "jerror.h" // from @libjpeg_turbo // IWYU pragma: export +#include "jpeglib.h" // from @libjpeg_turbo // IWYU pragma: export +} + +#endif // TENSORFLOW_TSL_PLATFORM_JPEG_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/load_library.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/load_library.h new file mode 100644 index 0000000000000000000000000000000000000000..5a42f2a3439fd00877e9e02625fea3c5e544f093 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/load_library.h @@ -0,0 +1,37 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_LOAD_LIBRARY_H_ +#define TENSORFLOW_TSL_PLATFORM_LOAD_LIBRARY_H_ + +#include + +#include "absl/status/status.h" + +namespace tsl { + +namespace internal { + +absl::Status LoadDynamicLibrary(const char* library_filename, void** handle); +absl::Status GetSymbolFromLibrary(void* handle, const char* symbol_name, + void** symbol); +std::string FormatLibraryFileName(const std::string& name, + const std::string& version); + +} // namespace internal + +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_LOAD_LIBRARY_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/logging.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/logging.h new file mode 100644 index 0000000000000000000000000000000000000000..f60bea4a3d65fbdfcbba6c0a3ece3ceb3079e4ed --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/logging.h @@ -0,0 +1,29 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_LOGGING_H_ +#define TENSORFLOW_TSL_PLATFORM_LOGGING_H_ + +#include "tsl/platform/platform.h" + +#if defined(PLATFORM_GOOGLE) || defined(PLATFORM_GOOGLE_ANDROID) || \ + defined(PLATFORM_GOOGLE_IOS) || defined(GOOGLE_LOGGING) || \ + defined(__EMSCRIPTEN__) || defined(PLATFORM_CHROMIUMOS) +#include "tsl/platform/google/logging.h" // IWYU pragma: export +#else +#include "tsl/platform/default/logging.h" // IWYU pragma: export +#endif + +#endif // TENSORFLOW_TSL_PLATFORM_LOGGING_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/macros.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/macros.h new file mode 100644 index 0000000000000000000000000000000000000000..cb91c4ff64e8477ff949c4062bdfe0142bd861b2 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/macros.h @@ -0,0 +1,162 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_MACROS_H_ +#define TENSORFLOW_TSL_PLATFORM_MACROS_H_ + +// Compiler attributes +#if (defined(__GNUC__) || defined(__APPLE__)) && !defined(SWIG) +// Compiler supports GCC-style attributes +#define TF_ATTRIBUTE_NORETURN __attribute__((noreturn)) +#define TF_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline)) +#define TF_ATTRIBUTE_NOINLINE __attribute__((noinline)) +#define TF_ATTRIBUTE_UNUSED __attribute__((unused)) +#define TF_ATTRIBUTE_COLD __attribute__((cold)) +#define TF_ATTRIBUTE_WEAK __attribute__((weak)) +#define TF_PACKED __attribute__((packed)) +#define TF_MUST_USE_RESULT __attribute__((warn_unused_result)) +#define TF_PRINTF_ATTRIBUTE(string_index, first_to_check) \ + __attribute__((__format__(__printf__, string_index, first_to_check))) +#define TF_SCANF_ATTRIBUTE(string_index, first_to_check) \ + __attribute__((__format__(__scanf__, string_index, first_to_check))) +#elif defined(_MSC_VER) +// Non-GCC equivalents +#define TF_ATTRIBUTE_NORETURN __declspec(noreturn) +#define TF_ATTRIBUTE_ALWAYS_INLINE __forceinline +#define TF_ATTRIBUTE_NOINLINE +#define TF_ATTRIBUTE_UNUSED +#define TF_ATTRIBUTE_COLD +#define TF_ATTRIBUTE_WEAK +#define TF_MUST_USE_RESULT +#define TF_PACKED +#define TF_PRINTF_ATTRIBUTE(string_index, first_to_check) +#define TF_SCANF_ATTRIBUTE(string_index, first_to_check) +#else +// Non-GCC equivalents +#define TF_ATTRIBUTE_NORETURN +#define TF_ATTRIBUTE_ALWAYS_INLINE +#define TF_ATTRIBUTE_NOINLINE +#define TF_ATTRIBUTE_UNUSED +#define TF_ATTRIBUTE_COLD +#define TF_ATTRIBUTE_WEAK +#define TF_MUST_USE_RESULT +#define TF_PACKED +#define TF_PRINTF_ATTRIBUTE(string_index, first_to_check) +#define TF_SCANF_ATTRIBUTE(string_index, first_to_check) +#endif + +// Control visibility outside .so +#if defined(_WIN32) +#ifdef TF_COMPILE_LIBRARY +#define TF_EXPORT __declspec(dllexport) +#else +#define TF_EXPORT __declspec(dllimport) 
+#endif // TF_COMPILE_LIBRARY +#else +#define TF_EXPORT __attribute__((visibility("default"))) +#endif // _WIN32 + +#ifdef __has_builtin +#define TF_HAS_BUILTIN(x) __has_builtin(x) +#else +#define TF_HAS_BUILTIN(x) 0 +#endif + +// C++11-style attributes (N2761) +#if defined(__has_cpp_attribute) +// Safely checks if an attribute is supported. Equivalent to +// ABSL_HAVE_CPP_ATTRIBUTE. +#define TF_HAS_CPP_ATTRIBUTE(n) __has_cpp_attribute(n) +#else +#define TF_HAS_CPP_ATTRIBUTE(n) 0 +#endif + +// [[clang::annotate("x")]] allows attaching custom strings (e.g. "x") to +// declarations (variables, functions, fields, etc.) for use by tools. They are +// represented in the Clang AST (as AnnotateAttr nodes) and in LLVM IR, but not +// in final output. +#if TF_HAS_CPP_ATTRIBUTE(clang::annotate) +#define TF_ATTRIBUTE_ANNOTATE(str) [[clang::annotate(str)]] +#else +#define TF_ATTRIBUTE_ANNOTATE(str) +#endif + +// A variable declaration annotated with the `TF_CONST_INIT` attribute will +// not compile (on supported platforms) unless the variable has a constant +// initializer. +#if TF_HAS_CPP_ATTRIBUTE(clang::require_constant_initialization) +#define TF_CONST_INIT [[clang::require_constant_initialization]] +#else +#define TF_CONST_INIT +#endif + +// Compilers can be told that a certain branch is not likely to be taken +// (for instance, a CHECK failure), and use that information in static +// analysis. Giving it this information can help it optimize for the +// common case in the absence of better information (ie. +// -fprofile-arcs). +#if TF_HAS_BUILTIN(__builtin_expect) || (defined(__GNUC__) && __GNUC__ >= 3) +#define TF_PREDICT_FALSE(x) (__builtin_expect(x, 0)) +#define TF_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1)) +#else +#define TF_PREDICT_FALSE(x) (x) +#define TF_PREDICT_TRUE(x) (x) +#endif + +// DEPRECATED: directly use the macro implementation instead. 
+// A macro to disallow the copy constructor and operator= functions +// This is usually placed in the private: declarations for a class. +#define TF_DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&) = delete; \ + void operator=(const TypeName&) = delete + +// The TF_ARRAYSIZE(arr) macro returns the # of elements in an array arr. +// +// The expression TF_ARRAYSIZE(a) is a compile-time constant of type +// size_t. +#define TF_ARRAYSIZE(a) \ + ((sizeof(a) / sizeof(*(a))) / \ + static_cast(!(sizeof(a) % sizeof(*(a))))) + +#if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L || \ + (defined(_MSC_VER) && _MSC_VER >= 1900) +// Define this to 1 if the code is compiled in C++11 mode; leave it +// undefined otherwise. Do NOT define it to 0 -- that causes +// '#ifdef LANG_CXX11' to behave differently from '#if LANG_CXX11'. +#define LANG_CXX11 1 +#endif + +#if defined(__clang__) && defined(LANG_CXX11) && defined(__has_warning) +#if __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough") +#define TF_FALLTHROUGH_INTENDED [[clang::fallthrough]] // NOLINT +#endif +#endif + +#ifndef TF_FALLTHROUGH_INTENDED +#define TF_FALLTHROUGH_INTENDED \ + do { \ + } while (0) +#endif + +namespace tsl { +namespace internal { +template +void remove_unused_variable_compiler_warning(const T&){}; +} // namespace internal +} // namespace tsl +#define TF_UNUSED_VARIABLE(x) \ + tensorflow::internal::remove_unused_variable_compiler_warning(x) + +#endif // TENSORFLOW_TSL_PLATFORM_MACROS_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/mem.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/mem.h new file mode 100644 index 0000000000000000000000000000000000000000..0f32727f0f753df54d54b3c33b2d83a27102e3b0 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/mem.h @@ -0,0 +1,86 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_TSL_PLATFORM_MEM_H_
+#define TENSORFLOW_TSL_PLATFORM_MEM_H_
+
+// TODO(cwhipkey): remove this when callers use annotations directly.
+#include "tsl/platform/dynamic_annotations.h"
+#include "tsl/platform/platform.h"
+#include "tsl/platform/types.h"
+
+namespace tsl {
+namespace port {
+
+// Aligned allocation/deallocation. `minimum_alignment` must be a power of 2
+// and a multiple of sizeof(void*).
+void* AlignedMalloc(size_t size, int minimum_alignment);
+void AlignedFree(void* aligned_memory);
+
+// Unaligned allocation interface mirroring malloc/realloc/free; memory
+// obtained from Malloc/Realloc must be released with Free.
+void* Malloc(size_t size);
+void* Realloc(void* ptr, size_t size);
+void Free(void* ptr);
+
+// Tries to release num_bytes of free memory back to the operating
+// system for reuse.  Use this routine with caution -- to get this
+// memory back may require faulting pages back in by the OS, and
+// that may be slow.
+//
+// Currently, if a malloc implementation does not support this
+// routine, this routine is a no-op.
+void MallocExtension_ReleaseToSystem(std::size_t num_bytes);
+
+// Returns the actual number N of bytes reserved by the malloc for the
+// pointer p.  This number may be equal to or greater than the number
+// of bytes requested when p was allocated.
+//
+// This routine is just useful for statistics collection.  The
+// client must *not* read or write from the extra bytes that are
+// indicated by this call.
+//
+// Example, suppose the client gets memory by calling
+//    p = malloc(10)
+// and GetAllocatedSize(p) may return 16.  The client must only use the
+// first 10 bytes p[0..9], and not attempt to read or write p[10..15].
+//
+// Currently, if a malloc implementation does not support this
+// routine, this routine returns 0.
+std::size_t MallocExtension_GetAllocatedSize(const void* p);
+
+// Host memory totals; fields are in bytes (see AvailableRam below), with
+// INT64_MAX meaning "unknown" per the GetMemoryInfo contract.
+struct MemoryInfo {
+  int64_t total = 0;
+  int64_t free = 0;
+};
+
+struct MemoryBandwidthInfo {
+  int64_t bw_used = 0;  // memory bandwidth used across all CPU (in MBs/second)
+};
+
+// Retrieves the host memory information. If any of the fields in the returned
+// MemoryInfo structure is INT64_MAX, it means such information is not
+// available.
+MemoryInfo GetMemoryInfo();
+
+// Retrieves the host memory bandwidth information. If any field in the returned
+// structure is INT64_MAX, it means such information is not available.
+MemoryBandwidthInfo GetMemoryBandwidthInfo();
+
+// Returns the amount of RAM available in bytes, or INT64_MAX if unknown.
+static inline int64_t AvailableRam() { return GetMemoryInfo().free; }
+
+}  // namespace port
+}  // namespace tsl
+
+#endif  // TENSORFLOW_TSL_PLATFORM_MEM_H_
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/ml_dtypes.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/ml_dtypes.h
new file mode 100644
index 0000000000000000000000000000000000000000..c25efc2f865b7061b182c5a4869fe352e08252a6
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/ml_dtypes.h
@@ -0,0 +1,35 @@
+/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_TSL_PLATFORM_ML_DTYPES_H_
+#define TENSORFLOW_TSL_PLATFORM_ML_DTYPES_H_
+
+#include "ml_dtypes/include/float8.h"  // from @ml_dtypes
+#include "ml_dtypes/include/int4.h"    // from @ml_dtypes
+
+namespace tsl {
+// Re-export the ml_dtypes low-precision types under the tsl namespace so TSL
+// code can refer to them as tsl::float8_*, tsl::int4, etc.
+using float8_e4m3fn = ml_dtypes::float8_e4m3fn;
+using float8_e4m3fnuz = ml_dtypes::float8_e4m3fnuz;
+using float8_e4m3b11fnuz = ml_dtypes::float8_e4m3b11fnuz;
+using float8_e4m3b11 = float8_e4m3b11fnuz;  // Deprecated: old name for
+                                            // backward-compatibility only.
+using float8_e5m2 = ml_dtypes::float8_e5m2;
+using float8_e5m2fnuz = ml_dtypes::float8_e5m2fnuz;
+
+using int4 = ml_dtypes::int4;
+using uint4 = ml_dtypes::uint4;
+}  // namespace tsl
+
+#endif  // TENSORFLOW_TSL_PLATFORM_ML_DTYPES_H_
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/mutex.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/mutex.h
new file mode 100644
index 0000000000000000000000000000000000000000..b0e542878f79978ca192d3c87c2cf9cebe88bd7f
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/mutex.h
@@ -0,0 +1,332 @@
+/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_MUTEX_H_ +#define TENSORFLOW_TSL_PLATFORM_MUTEX_H_ + +#include // NOLINT +// for std::try_to_lock_t and std::cv_status +#include // NOLINT +#include // NOLINT + +#include "tsl/platform/platform.h" +#include "tsl/platform/thread_annotations.h" +#include "tsl/platform/types.h" + +// Include appropriate platform-dependent implementation details of mutex etc. +#if defined(PLATFORM_GOOGLE) +#include "tsl/platform/google/mutex_data.h" +#elif defined(PLATFORM_POSIX) || defined(PLATFORM_POSIX_ANDROID) || \ + defined(PLATFORM_GOOGLE_ANDROID) || defined(PLATFORM_POSIX_IOS) || \ + defined(PLATFORM_GOOGLE_IOS) || defined(PLATFORM_WINDOWS) +#include "tsl/platform/default/mutex_data.h" +#else +#error Define the appropriate PLATFORM_ macro for this platform +#endif + +namespace tsl { + +enum ConditionResult { kCond_Timeout, kCond_MaybeNotified }; +enum LinkerInitialized { LINKER_INITIALIZED }; + +class condition_variable; +class Condition; + +// Mimic std::mutex + C++17's shared_mutex, adding a LinkerInitialized +// constructor interface. This type is as fast as mutex, but is also a shared +// lock, and provides conditional critical sections (via Await()), as an +// alternative to condition variables. +class TF_LOCKABLE mutex { + public: + mutex(); + // The default implementation of the underlying mutex is safe to use after + // the linker initialization to zero. 
+ explicit constexpr mutex(LinkerInitialized x) + : +#if defined(PLATFORM_GOOGLE) + mu_(absl::kConstInit) +#else + mu_() +#endif + { + } + + void lock() TF_EXCLUSIVE_LOCK_FUNCTION(); + bool try_lock() TF_EXCLUSIVE_TRYLOCK_FUNCTION(true); + void unlock() TF_UNLOCK_FUNCTION(); + + void lock_shared() TF_SHARED_LOCK_FUNCTION(); + bool try_lock_shared() TF_SHARED_TRYLOCK_FUNCTION(true); + void unlock_shared() TF_UNLOCK_FUNCTION(); + + // ------- + // Conditional critical sections. + // These represent an alternative to condition variables that is easier to + // use. The predicate must be encapsulated in a function (via Condition), + // but there is no need to use a while-loop, and no need to signal the + // condition. Example: suppose "mu" protects "counter"; we wish one thread + // to wait until counter is decremented to zero by another thread. + // // Predicate expressed as a function: + // static bool IntIsZero(int* pi) { return *pi == 0; } + // + // // Waiter: + // mu.lock(); + // mu.Await(Condition(&IntIsZero, &counter)); // no loop needed + // // lock is held and counter==0... + // mu.unlock(); + // + // // Decrementer: + // mu.lock(); + // counter--; + // mu.unlock(); // no need to signal; mutex will check condition + // + // A mutex may be used with condition variables and conditional critical + // sections at the same time. Conditional critical sections are easier to + // use, but if there are multiple conditions that are simultaneously false, + // condition variables may be faster. + + // Unlock *this and wait until cond.Eval() is true, then atomically reacquire + // *this in the same mode in which it was previously held and return. + void Await(const Condition& cond); + + // Unlock *this and wait until either cond.Eval is true, or abs_deadline_ns + // has been reached, then atomically reacquire *this in the same mode in + // which it was previously held, and return whether cond.Eval() is true. + // See tsl/tsl/platform/env_time.h for the time interface. 
+ bool AwaitWithDeadline(const Condition& cond, uint64 abs_deadline_ns); + // ------- + + private: + friend class condition_variable; + internal::MuData mu_; +}; + +// A Condition represents a predicate on state protected by a mutex. The +// function must have no side-effects on that state. When passed to +// mutex::Await(), the function will be called with the mutex held. It may be +// called: +// - any number of times; +// - by any thread using the mutex; and/or +// - with the mutex held in any mode (read or write). +// If you must use a lambda, prefix the lambda with +, and capture no variables. +// For example: Condition(+[](int *pi)->bool { return *pi == 0; }, &i) +class Condition { + public: + template + Condition(bool (*func)(T* arg), T* arg); // Value is (*func)(arg) + template + Condition(T* obj, bool (T::*method)()); // Value is obj->*method() + template + Condition(T* obj, bool (T::*method)() const); // Value is obj->*method() + explicit Condition(const bool* flag); // Value is *flag + + // Return the value of the predicate represented by this Condition. + bool Eval() const { return (*this->eval_)(this); } + + private: + bool (*eval_)(const Condition*); // CallFunction, CallMethod, or, ReturnBool + bool (*function_)(void*); // predicate of form (*function_)(arg_) + bool (Condition::*method_)(); // predicate of form arg_->method_() + void* arg_; + Condition(); + // The following functions can be pointed to by the eval_ field. + template + static bool CallFunction(const Condition* cond); // call function_ + template + static bool CallMethod(const Condition* cond); // call method_ + static bool ReturnBool(const Condition* cond); // access *(bool *)arg_ +}; + +// Mimic a subset of the std::unique_lock functionality. 
+class TF_SCOPED_LOCKABLE mutex_lock {
+ public:
+  typedef ::tsl::mutex mutex_type;
+
+  // Acquires `mu` exclusively for the lifetime of this object.
+  explicit mutex_lock(mutex_type& mu) TF_EXCLUSIVE_LOCK_FUNCTION(mu)
+      : mu_(&mu) {
+    mu_->lock();
+  }
+
+  // Try-lock variant: on failure mu_ is null and operator bool() is false.
+  mutex_lock(mutex_type& mu, std::try_to_lock_t) TF_EXCLUSIVE_LOCK_FUNCTION(mu)
+      : mu_(&mu) {
+    if (!mu.try_lock()) {
+      mu_ = nullptr;
+    }
+  }
+
+  // Manually nulls out the source to prevent double-free.
+  // (std::move does not null the source pointer by default.)
+  mutex_lock(mutex_lock&& ml) noexcept TF_EXCLUSIVE_LOCK_FUNCTION(ml.mu_)
+      : mu_(ml.mu_) {
+    ml.mu_ = nullptr;
+  }
+  ~mutex_lock() TF_UNLOCK_FUNCTION() {
+    if (mu_ != nullptr) {
+      mu_->unlock();
+    }
+  }
+  mutex_type* mutex() { return mu_; }
+
+  explicit operator bool() const { return mu_ != nullptr; }
+
+ private:
+  mutex_type* mu_;
+};
+
+// Catch bug where variable name is omitted, e.g. mutex_lock (mu);
+#define mutex_lock(x) static_assert(0, "mutex_lock_decl_missing_var_name");
+
+// Mimic a subset of the std::shared_lock functionality.
+// Name chosen to minimize conflicts with the tf_shared_lock macro, below.
+class TF_SCOPED_LOCKABLE tf_shared_lock {
+ public:
+  typedef ::tsl::mutex mutex_type;
+
+  // Acquires `mu` in shared (reader) mode for the lifetime of this object.
+  explicit tf_shared_lock(mutex_type& mu) TF_SHARED_LOCK_FUNCTION(mu)
+      : mu_(&mu) {
+    mu_->lock_shared();
+  }
+
+  // Try-lock variant: on failure mu_ is null and operator bool() is false.
+  tf_shared_lock(mutex_type& mu, std::try_to_lock_t) TF_SHARED_LOCK_FUNCTION(mu)
+      : mu_(&mu) {
+    if (!mu.try_lock_shared()) {
+      mu_ = nullptr;
+    }
+  }
+
+  // Manually nulls out the source to prevent double-free.
+  // (std::move does not null the source pointer by default.)
+  tf_shared_lock(tf_shared_lock&& ml) noexcept TF_SHARED_LOCK_FUNCTION(ml.mu_)
+      : mu_(ml.mu_) {
+    ml.mu_ = nullptr;
+  }
+  ~tf_shared_lock() TF_UNLOCK_FUNCTION() {
+    if (mu_ != nullptr) {
+      mu_->unlock_shared();
+    }
+  }
+  mutex_type* mutex() { return mu_; }
+
+  explicit operator bool() const { return mu_ != nullptr; }
+
+ private:
+  mutex_type* mu_;
+};
+
+// Catch bug where variable name is omitted, e.g. tf_shared_lock (mu);
+#define tf_shared_lock(x) \
+  static_assert(0, "tf_shared_lock_decl_missing_var_name");
+
+// Mimic std::condition_variable.
+class condition_variable {
+ public:
+  condition_variable();
+
+  // Blocks until notified; `lock` must be held by the caller.
+  void wait(mutex_lock& lock);
+
+  // NOTE(review): template parameter lists in this class were stripped during
+  // extraction; restored from the parameter usage below.
+  template <class Predicate>
+  void wait(mutex_lock& lock, Predicate stop_waiting) {
+    while (!stop_waiting()) {
+      wait(lock);
+    }
+  }
+
+  template <class Rep, class Period>
+  std::cv_status wait_for(mutex_lock& lock,
+                          std::chrono::duration<Rep, Period> dur);
+  void notify_one();
+  void notify_all();
+
+ private:
+  friend ConditionResult WaitForMilliseconds(mutex_lock* mu,
+                                             condition_variable* cv,
+                                             int64_t ms);
+  internal::CVData cv_;  // platform-specific condition-variable representation
+};
+
+// Like "cv->wait(*mu)", except that it only waits for up to "ms" milliseconds.
+//
+// Returns kCond_Timeout if the timeout expired without this
+// thread noticing a signal on the condition variable.  Otherwise may
+// return either kCond_Timeout or kCond_MaybeNotified
+inline ConditionResult WaitForMilliseconds(mutex_lock* mu,
+                                           condition_variable* cv,
+                                           int64_t ms) {
+  std::cv_status s = cv->wait_for(*mu, std::chrono::milliseconds(ms));
+  return (s == std::cv_status::timeout) ? kCond_Timeout : kCond_MaybeNotified;
+}
+
+// ------------------------------------------------------------
+// Implementation details follow.  Clients should ignore them.
+ +// private static +template +inline bool Condition::CallFunction(const Condition* cond) { + bool (*fn)(T*) = reinterpret_cast(cond->function_); + return (*fn)(static_cast(cond->arg_)); +} + +template +inline Condition::Condition(bool (*func)(T*), T* arg) + : eval_(&CallFunction), + function_(reinterpret_cast(func)), + method_(nullptr), + arg_(const_cast(static_cast(arg))) {} + +// private static +template +inline bool Condition::CallMethod(const Condition* cond) { + bool (T::*m)() = reinterpret_cast(cond->method_); + return (static_cast(cond->arg_)->*m)(); +} + +template +inline Condition::Condition(T* obj, bool (T::*method)()) + : eval_(&CallMethod), + function_(nullptr), + method_(reinterpret_cast(method)), + arg_(const_cast(static_cast(obj))) {} + +template +inline Condition::Condition(T* obj, bool (T::*method)() const) + : eval_(&CallMethod), + function_(nullptr), + method_(reinterpret_cast(method)), + arg_(const_cast(static_cast(obj))) {} + +// private static +inline bool Condition::ReturnBool(const Condition* cond) { + return *static_cast(cond->arg_); +} + +inline Condition::Condition(const bool* flag) + : eval_(&ReturnBool), + function_(nullptr), + method_(nullptr), + arg_(const_cast(static_cast(flag))) {} + +} // namespace tsl + +// Include appropriate platform-dependent implementation details of mutex etc. 
+#if defined(PLATFORM_GOOGLE) +#include "tsl/platform/google/mutex.h" +#elif defined(PLATFORM_POSIX) || defined(PLATFORM_POSIX_ANDROID) || \ + defined(PLATFORM_GOOGLE_ANDROID) || defined(PLATFORM_POSIX_IOS) || \ + defined(PLATFORM_GOOGLE_IOS) || defined(PLATFORM_WINDOWS) +#include "tsl/platform/default/mutex.h" +#else +#error Define the appropriate PLATFORM_ macro for this platform +#endif + +#endif // TENSORFLOW_TSL_PLATFORM_MUTEX_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/net.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/net.h new file mode 100644 index 0000000000000000000000000000000000000000..8f08e922a92704284543513245541bdcbc0f6c98 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/net.h @@ -0,0 +1,27 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_NET_H_ +#define TENSORFLOW_TSL_PLATFORM_NET_H_ + +namespace tsl { +namespace internal { + +int PickUnusedPortOrDie(); + +} // namespace internal +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_NET_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/notification.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/notification.h new file mode 100644 index 0000000000000000000000000000000000000000..80e5b388d2a93d7906256aecdd4ea9252fc3bd56 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/notification.h @@ -0,0 +1,40 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_NOTIFICATION_H_ +#define TENSORFLOW_TSL_PLATFORM_NOTIFICATION_H_ + +#include // NOLINT +#include // NOLINT +#include +#include // NOLINT + +#include "absl/synchronization/notification.h" +#include "absl/time/time.h" + +namespace tsl { + +using absl::Notification; + +// TODO(ddunleavy): remove this method and replace uses of `tsl::Notification` +// with `absl::Notification`. 
+inline bool WaitForNotificationWithTimeout(Notification* n, + int64_t timeout_in_us) { + return n->WaitForNotificationWithTimeout(absl::Microseconds(timeout_in_us)); +} + +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_NOTIFICATION_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/null_file_system.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/null_file_system.h new file mode 100644 index 0000000000000000000000000000000000000000..77b8142ee357b7338d618d9c607ee8c72d933b20 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/null_file_system.h @@ -0,0 +1,111 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_NULL_FILE_SYSTEM_H_ +#define TENSORFLOW_TSL_PLATFORM_NULL_FILE_SYSTEM_H_ + +#include +#include +#include + +#include "tsl/platform/env.h" +#include "tsl/platform/file_system.h" +#include "tsl/platform/file_system_helper.h" + +namespace tsl { + +// START_SKIP_DOXYGEN + +#ifndef SWIG +// Degenerate file system that provides no implementations. 
+class NullFileSystem : public FileSystem { + public: + NullFileSystem() {} + + ~NullFileSystem() override = default; + + TF_USE_FILESYSTEM_METHODS_WITH_NO_TRANSACTION_SUPPORT; + + Status NewRandomAccessFile( + const string& fname, TransactionToken* token, + std::unique_ptr* result) override { + return errors::Unimplemented("NewRandomAccessFile unimplemented"); + } + + Status NewWritableFile(const string& fname, TransactionToken* token, + std::unique_ptr* result) override { + return errors::Unimplemented("NewWritableFile unimplemented"); + } + + Status NewAppendableFile(const string& fname, TransactionToken* token, + std::unique_ptr* result) override { + return errors::Unimplemented("NewAppendableFile unimplemented"); + } + + Status NewReadOnlyMemoryRegionFromFile( + const string& fname, TransactionToken* token, + std::unique_ptr* result) override { + return errors::Unimplemented( + "NewReadOnlyMemoryRegionFromFile unimplemented"); + } + + Status FileExists(const string& fname, TransactionToken* token) override { + return errors::Unimplemented("FileExists unimplemented"); + } + + Status GetChildren(const string& dir, TransactionToken* token, + std::vector* result) override { + return errors::Unimplemented("GetChildren unimplemented"); + } + + Status GetMatchingPaths(const string& pattern, TransactionToken* token, + std::vector* results) override { + return internal::GetMatchingPaths(this, Env::Default(), pattern, results); + } + + Status DeleteFile(const string& fname, TransactionToken* token) override { + return errors::Unimplemented("DeleteFile unimplemented"); + } + + Status CreateDir(const string& dirname, TransactionToken* token) override { + return errors::Unimplemented("CreateDir unimplemented"); + } + + Status DeleteDir(const string& dirname, TransactionToken* token) override { + return errors::Unimplemented("DeleteDir unimplemented"); + } + + Status GetFileSize(const string& fname, TransactionToken* token, + uint64* file_size) override { + return 
errors::Unimplemented("GetFileSize unimplemented"); + } + + Status RenameFile(const string& src, const string& target, + TransactionToken* token) override { + return errors::Unimplemented("RenameFile unimplemented"); + } + + Status Stat(const string& fname, TransactionToken* token, + FileStatistics* stat) override { + return errors::Unimplemented("Stat unimplemented"); + } +}; +#endif + +// END_SKIP_DOXYGEN + +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_NULL_FILE_SYSTEM_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/numa.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/numa.h new file mode 100644 index 0000000000000000000000000000000000000000..997d03d49743825a474e4a7dbaa6344abcf479a2 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/numa.h @@ -0,0 +1,62 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_NUMA_H_ +#define TENSORFLOW_TSL_PLATFORM_NUMA_H_ + +#include "tsl/platform/platform.h" +#include "tsl/platform/types.h" + +namespace tsl { +namespace port { + +// Returns true iff NUMA functions are supported. +bool NUMAEnabled(); + +// Returns the number of NUMA nodes present with respect to CPU operations. 
+// Typically this will be the number of sockets where some RAM has greater
+// affinity with one socket than another.
+int NUMANumNodes();
+
+// Sentinel meaning "no NUMA affinity" for the functions below.
+static const int kNUMANoAffinity = -1;
+
+// If possible sets affinity of the current thread to the specified NUMA node.
+// If node == kNUMANoAffinity removes affinity to any particular node.
+void NUMASetThreadNodeAffinity(int node);
+
+// Returns NUMA node affinity of the current thread, kNUMANoAffinity if none.
+int NUMAGetThreadNodeAffinity();
+
+// Like AlignedMalloc, but allocates memory with affinity to the specified NUMA
+// node.
+//
+// Notes:
+// 1. node must be >= 0 and < NUMANumNodes.
+// 2. minimum_alignment must be a factor of system page size, the memory
+//    returned will be page-aligned.
+// 3. This function is likely significantly slower than AlignedMalloc
+//    and should not be used for lots of small allocations.  It makes more
+//    sense as a backing allocator for BFCAllocator, PoolAllocator, or similar.
+void* NUMAMalloc(int node, size_t size, int minimum_alignment);
+
+// Memory allocated by NUMAMalloc must be freed via NUMAFree.
+void NUMAFree(void* ptr, size_t size);
+
+// Returns NUMA node affinity of memory address, kNUMANoAffinity if none.
+int NUMAGetMemAffinity(const void* ptr);
+
+}  // namespace port
+}  // namespace tsl
+#endif  // TENSORFLOW_TSL_PLATFORM_NUMA_H_
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/numbers.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/numbers.h
new file mode 100644
index 0000000000000000000000000000000000000000..ca480a04e0d5a998b102cd3e3d8946ab47cc72a7
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/numbers.h
@@ -0,0 +1,180 @@
+/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_NUMBERS_H_ +#define TENSORFLOW_TSL_PLATFORM_NUMBERS_H_ + +#include +#include + +#include "tsl/platform/stringpiece.h" +#include "tsl/platform/types.h" + +namespace tsl { +namespace strings { + +// ---------------------------------------------------------------------- +// FastIntToBufferLeft() +// These are intended for speed. +// +// All functions take the output buffer as an arg. FastInt() uses +// at most 22 bytes, FastTime() uses exactly 30 bytes. They all +// return a pointer to the beginning of the output, which is the same as +// the beginning of the input buffer. +// +// NOTE: In 64-bit land, sizeof(time_t) is 8, so it is possible +// to pass to FastTimeToBuffer() a time whose year cannot be +// represented in 4 digits. In this case, the output buffer +// will contain the string "Invalid:" +// ---------------------------------------------------------------------- + +// Previously documented minimums -- the buffers provided must be at least this +// long, though these numbers are subject to change: +// Int32, UInt32: 12 bytes +// Int64, UInt64, Int, Uint: 22 bytes +// Time: 30 bytes +// Use kFastToBufferSize rather than hardcoding constants. 
+static const int kFastToBufferSize = 32; + +// ---------------------------------------------------------------------- +// FastInt32ToBufferLeft() +// FastUInt32ToBufferLeft() +// FastInt64ToBufferLeft() +// FastUInt64ToBufferLeft() +// +// These functions convert their numeric argument to an ASCII +// representation of the numeric value in base 10, with the +// representation being left-aligned in the buffer. The caller is +// responsible for ensuring that the buffer has enough space to hold +// the output. The buffer should typically be at least kFastToBufferSize +// bytes. +// +// Returns the number of characters written. +// ---------------------------------------------------------------------- + +size_t FastInt32ToBufferLeft(int32_t i, char* buffer); // at least 12 bytes +size_t FastUInt32ToBufferLeft(uint32_t i, char* buffer); // at least 12 bytes +size_t FastInt64ToBufferLeft(int64_t i, char* buffer); // at least 22 bytes +size_t FastUInt64ToBufferLeft(uint64_t i, char* buffer); // at least 22 bytes + +// Required buffer size for DoubleToBuffer is kFastToBufferSize. +// Required buffer size for FloatToBuffer is kFastToBufferSize. +size_t DoubleToBuffer(double value, char* buffer); +size_t FloatToBuffer(float value, char* buffer); + +// Convert a 64-bit fingerprint value to an ASCII representation. +std::string FpToString(Fprint fp); + +// Attempt to parse a fingerprint in the form encoded by FpToString. If +// successful, stores the fingerprint in *fp and returns true. Otherwise, +// returns false. +bool StringToFp(const std::string& s, Fprint* fp); + +// Convert a 64-bit fingerprint value to an ASCII representation that +// is terminated by a '\0'. +// Buf must point to an array of at least kFastToBufferSize characters +StringPiece Uint64ToHexString(uint64_t v, char* buf); + +// Attempt to parse a uint64 in the form encoded by FastUint64ToHexString. If +// successful, stores the value in *v and returns true. Otherwise, +// returns false. 
+bool HexStringToUint64(const StringPiece& s, uint64_t* result); + +// Convert strings to 32bit integer values. +// Leading and trailing spaces are allowed. +// Return false with overflow or invalid input. +bool safe_strto32(StringPiece str, int32_t* value); + +// Convert strings to unsigned 32bit integer values. +// Leading and trailing spaces are allowed. +// Return false with overflow or invalid input. +bool safe_strtou32(StringPiece str, uint32_t* value); + +// Convert strings to 64bit integer values. +// Leading and trailing spaces are allowed. +// Return false with overflow or invalid input. +bool safe_strto64(StringPiece str, int64_t* value); + +// Convert strings to unsigned 64bit integer values. +// Leading and trailing spaces are allowed. +// Return false with overflow or invalid input. +bool safe_strtou64(StringPiece str, uint64_t* value); + +// Convert strings to floating point values. +// Leading and trailing spaces are allowed. +// Values may be rounded on over- and underflow. +// Returns false on invalid input or if `strlen(value) >= kFastToBufferSize`. +bool safe_strtof(StringPiece str, float* value); + +// Convert strings to double precision floating point values. +// Leading and trailing spaces are allowed. +// Values may be rounded on over- and underflow. +// Returns false on invalid input or if `strlen(value) >= kFastToBufferSize`. 
+bool safe_strtod(StringPiece str, double* value); + +inline bool ProtoParseNumeric(StringPiece s, int32_t* value) { + return safe_strto32(s, value); +} + +inline bool ProtoParseNumeric(StringPiece s, uint32_t* value) { + return safe_strtou32(s, value); +} + +inline bool ProtoParseNumeric(StringPiece s, int64_t* value) { + return safe_strto64(s, value); +} + +inline bool ProtoParseNumeric(StringPiece s, uint64_t* value) { + return safe_strtou64(s, value); +} + +inline bool ProtoParseNumeric(StringPiece s, float* value) { + return safe_strtof(s, value); +} + +inline bool ProtoParseNumeric(StringPiece s, double* value) { + return safe_strtod(s, value); +} + +// Convert strings to number of type T. +// Leading and trailing spaces are allowed. +// Values may be rounded on over- and underflow. +template +bool SafeStringToNumeric(StringPiece s, T* value) { + return ProtoParseNumeric(s, value); +} + +// Converts from an int64 to a human readable string representing the +// same number, using decimal powers. e.g. 1200000 -> "1.20M". +std::string HumanReadableNum(int64_t value); + +// Converts from an int64 representing a number of bytes to a +// human readable string representing the same number. +// e.g. 12345678 -> "11.77MiB". +std::string HumanReadableNumBytes(int64_t num_bytes); + +// Converts a time interval as double to a human readable +// string. 
For example: +// 0.001 -> "1 ms" +// 10.0 -> "10 s" +// 933120.0 -> "10.8 days" +// 39420000.0 -> "1.25 years" +// -10 -> "-10 s" +std::string HumanReadableElapsedTime(double seconds); + +} // namespace strings +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_NUMBERS_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/path.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/path.h new file mode 100644 index 0000000000000000000000000000000000000000..451addc60b465ca62caa71a977c35507e64c4be4 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/path.h @@ -0,0 +1,132 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_PATH_H_ +#define TENSORFLOW_TSL_PLATFORM_PATH_H_ + +#include + +#include "tsl/platform/stringpiece.h" +#include "tsl/platform/types.h" + +namespace tsl { +namespace io { +namespace internal { +std::string JoinPathImpl(std::initializer_list paths); +} + +// Utility routines for processing filenames + +#ifndef SWIG // variadic templates +// Join multiple paths together, without introducing unnecessary path +// separators. 
+// For example: +// +// Arguments | JoinPath +// ---------------------------+---------- +// '/foo', 'bar' | /foo/bar +// '/foo/', 'bar' | /foo/bar +// '/foo', '/bar' | /foo/bar +// +// Usage: +// string path = io::JoinPath("/mydir", filename); +// string path = io::JoinPath(FLAGS_test_srcdir, filename); +// string path = io::JoinPath("/full", "path", "to", "filename"); +template +std::string JoinPath(const T&... args) { + return internal::JoinPathImpl({args...}); +} +#endif /* SWIG */ + +// Return true if path is absolute. +bool IsAbsolutePath(tsl::StringPiece path); + +// Returns the part of the path before the final "/". If there is a single +// leading "/" in the path, the result will be the leading "/". If there is +// no "/" in the path, the result is the empty prefix of the input. +tsl::StringPiece Dirname(tsl::StringPiece path); + +// Returns the part of the path after the final "/". If there is no +// "/" in the path, the result is the same as the input. +tsl::StringPiece Basename(tsl::StringPiece path); + +// Returns the part of the basename of path after the final ".". If +// there is no "." in the basename, the result is empty. +tsl::StringPiece Extension(tsl::StringPiece path); + +// Returns the part of the basename of path before the final ".". If +// there is no "." in the basename, the result is empty. +tsl::StringPiece BasenamePrefix(tsl::StringPiece path); + +// Returns the largest common subpath of `paths`. +// +// For example, for "/alpha/beta/gamma" and "/alpha/beta/ga" returns +// "/alpha/beta/". For "/alpha/beta/gamma" and "/alpha/beta/gamma" returns +// "/alpha/beta/". +// +// Does not perform any path normalization. +std::string CommonPathPrefix(absl::Span paths); + +// Collapse duplicate "/"s, resolve ".." and "." path elements, remove +// trailing "/". +// +// NOTE: This respects relative vs. 
absolute paths, but does not +// invoke any system calls (getcwd(2)) in order to resolve relative +// paths with respect to the actual working directory. That is, this is purely +// string manipulation, completely independent of process state. +std::string CleanPath(tsl::StringPiece path); + +// Populates the scheme, host, and path from a URI. scheme, host, and path are +// guaranteed by this function to point into the contents of uri, even if +// empty. +// +// Corner cases: +// - If the URI is invalid, scheme and host are set to empty strings and the +// passed string is assumed to be a path +// - If the URI omits the path (e.g. file://host), then the path is left empty. +void ParseURI(tsl::StringPiece uri, tsl::StringPiece* scheme, + tsl::StringPiece* host, tsl::StringPiece* path); + +// Creates a URI from a scheme, host, and path. If the scheme is empty, we just +// return the path. +std::string CreateURI(tsl::StringPiece scheme, tsl::StringPiece host, + tsl::StringPiece path); + +// Creates a temporary file name with an extension. +std::string GetTempFilename(const std::string& extension); + +// Returns whether the test workspace directory is known. If it's known and dir +// != nullptr then sets *dir to that. +// +// The test workspace directory is known to be TEST_SRCDIR/TEST_WORKSPACE if +// both the TEST_SRCDIR and TEST_WORKSPACE environment variables are set. +bool GetTestWorkspaceDir(std::string* dir); + +// Returns whether the TEST_UNDECLARED_OUTPUTS_DIR environment variable is set. +// If it's set and dir != nullptr then sets *dir to that. +bool GetTestUndeclaredOutputsDir(std::string* dir); + +// Resolves paths to help tests find files in their workspace or output +// directory. Returns whether the path can be resolved. If it can be then sets +// resolved_path to that. +// +// Currently the TEST_WORKSPACE and the TEST_UNDECLARED_OUTPUTS_DIR prefixes can +// be resolved. 
+bool ResolveTestPrefixes(tsl::StringPiece path, std::string& resolved_path); + +} // namespace io +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_PATH_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/prefetch.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/prefetch.h new file mode 100644 index 0000000000000000000000000000000000000000..d883529c6c3486b7c32e7459edf6a2239acdee50 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/prefetch.h @@ -0,0 +1,45 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_PREFETCH_H_ +#define TENSORFLOW_TSL_PLATFORM_PREFETCH_H_ + +#include "absl/base/prefetch.h" + +namespace tsl { +namespace port { + +// Prefetching support. +// Deprecated. Prefer to call absl::Prefetch* directly. 
+ +enum PrefetchHint { + PREFETCH_HINT_T0 = 3, // Temporal locality + PREFETCH_HINT_NTA = 0 // No temporal locality +}; + +template +void prefetch(const void* x) { + absl::PrefetchToLocalCache(x); +} + +template <> +inline void prefetch(const void* x) { + absl::PrefetchToLocalCacheNta(x); +} + +} // namespace port +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_PREFETCH_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/protobuf.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/protobuf.h new file mode 100644 index 0000000000000000000000000000000000000000..d5ce7e0837d1f1a190426382ce1e22529074f807 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/protobuf.h @@ -0,0 +1,130 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_PROTOBUF_H_ +#define TENSORFLOW_TSL_PLATFORM_PROTOBUF_H_ + +#include + +#include "tsl/platform/platform.h" +#include "tsl/platform/types.h" + +// Import whatever namespace protobuf comes from into the +// ::tsl::protobuf namespace. +// +// TensorFlow code should use the ::tensorflow::protobuf namespace to +// refer to all protobuf APIs. 
+ +#include "google/protobuf/descriptor.pb.h" // IWYU pragma:export +#include "google/protobuf/arena.h" // IWYU pragma:export +#include "google/protobuf/descriptor.h" // IWYU pragma:export +#include "google/protobuf/dynamic_message.h" // IWYU pragma:export +#include "google/protobuf/io/coded_stream.h" // IWYU pragma:export +#include "google/protobuf/io/tokenizer.h" // IWYU pragma:export +#include "google/protobuf/io/zero_copy_stream.h" // IWYU pragma:export +#include "google/protobuf/io/zero_copy_stream_impl_lite.h" // IWYU pragma:export +#include "google/protobuf/map.h" // IWYU pragma:export +#include "google/protobuf/message.h" // IWYU pragma:export +#include "google/protobuf/repeated_field.h" // IWYU pragma:export +#include "google/protobuf/repeated_ptr_field.h" // IWYU pragma:export +#include "google/protobuf/text_format.h" // IWYU pragma:export +#include "google/protobuf/util/field_comparator.h" // IWYU pragma:export +#include "google/protobuf/util/json_util.h" // IWYU pragma:export +#include "google/protobuf/util/message_differencer.h" // IWYU pragma:export +#include "google/protobuf/util/type_resolver_util.h" // IWYU pragma:export + +#if !TSL_IS_IN_OSS +#define TENSORFLOW_PROTOBUF_USES_CORD 1 +#endif // TSL_IS_IN_OSS + +namespace tsl { + +namespace protobuf = ::google::protobuf; +using protobuf_int64 = int64_t; +using protobuf_uint64 = uint64_t; +extern const char* kProtobufInt64Typename; +extern const char* kProtobufUint64Typename; + +// Parses a protocol buffer contained in a string in the binary wire format. +// Returns true on success. Note: Unlike protobuf's builtin ParseFromString, +// this function has no size restrictions on the total size of the encoded +// protocol buffer. 
+bool ParseProtoUnlimited(protobuf::MessageLite* proto, + const std::string& serialized); +bool ParseProtoUnlimited(protobuf::MessageLite* proto, const void* serialized, + size_t size); +inline bool ParseProtoUnlimited(protobuf::MessageLite* proto, + const tstring& serialized) { + return ParseProtoUnlimited(proto, serialized.data(), serialized.size()); +} + +// Returns the string value for the value of a string or bytes protobuf field. +inline const std::string& ProtobufStringToString(const std::string& s) { + return s; +} + +// Set `*dest` to `*src`. Swapping is allowed, as `*src` does not need to be +// preserved. +inline void SetProtobufStringSwapAllowed(std::string* src, std::string* dest) { + *dest = std::move(*src); +} + +#if defined(TENSORFLOW_PROTOBUF_USES_CORD) +// These versions of ProtobufStringToString and SetProtobufString get used by +// tools/proto_text's generated code. They have the same name as the versions +// in tsl/platform/protobuf.h, so the generation code doesn't need to determine +// if the type is Cord or string at generation time. +inline std::string ProtobufStringToString(const absl::Cord& s) { + return std::string(s); +} +inline void SetProtobufStringSwapAllowed(std::string* src, absl::Cord* dest) { + dest->CopyFrom(*src); +} +#endif // defined(TENSORFLOW_PROTOBUF_USES_CORD) + +inline bool SerializeToTString(const protobuf::MessageLite& proto, + tstring* output) { + size_t size = proto.ByteSizeLong(); + output->resize_uninitialized(size); + return proto.SerializeWithCachedSizesToArray( + reinterpret_cast(output->data())); +} + +inline bool ParseFromTString(const tstring& input, + protobuf::MessageLite* proto) { + return proto->ParseFromArray(input.data(), static_cast(input.size())); +} + +// Analogue to StringOutputStream for tstring.
+class TStringOutputStream : public protobuf::io::ZeroCopyOutputStream { + public: + explicit TStringOutputStream(tstring* target); + ~TStringOutputStream() override = default; + + TStringOutputStream(const TStringOutputStream&) = delete; + void operator=(const TStringOutputStream&) = delete; + + bool Next(void** data, int* size) override; + void BackUp(int count) override; + int64_t ByteCount() const override; + + private: + static constexpr int kMinimumSize = 16; + + tstring* target_; +}; +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_PROTOBUF_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/ram_file_system.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/ram_file_system.h new file mode 100644 index 0000000000000000000000000000000000000000..1b51653b716c3ec85a61e64ae164d6249b22f252 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/ram_file_system.h @@ -0,0 +1,358 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_RAM_FILE_SYSTEM_H_ +#define TENSORFLOW_TSL_PLATFORM_RAM_FILE_SYSTEM_H_ + +// Implementation of an in-memory TF filesystem for simple prototyping (e.g. +// via Colab). 
The TPU TF server does not have local filesystem access, which +// makes it difficult to provide Colab tutorials: users must have GCS access +// and sign-in in order to try out an example. +// +// Files are implemented on top of std::string. Directories, as with GCS or S3, +// are implicit based on the existence of child files. Multiple files may +// reference a single FS location, though no thread-safety guarantees are +// provided. + +#include + +#include "absl/strings/match.h" +#include "tsl/platform/env.h" +#include "tsl/platform/file_system.h" +#include "tsl/platform/mutex.h" +#include "tsl/platform/stringpiece.h" +#include "tsl/platform/types.h" + +#ifdef PLATFORM_WINDOWS +#undef DeleteFile +#undef CopyFile +#undef TranslateName +#endif + +namespace tsl { + +class RamRandomAccessFile : public RandomAccessFile, public WritableFile { + public: + RamRandomAccessFile(std::string name, std::shared_ptr cord) + : name_(name), data_(cord) {} + ~RamRandomAccessFile() override {} + + Status Name(StringPiece* result) const override { + *result = name_; + return OkStatus(); + } + + Status Read(uint64 offset, size_t n, StringPiece* result, + char* scratch) const override { + if (offset >= data_->size()) { + return errors::OutOfRange(""); + } + + uint64 left = std::min(static_cast(n), data_->size() - offset); + auto start = data_->begin() + offset; + auto end = data_->begin() + offset + left; + + std::copy(start, end, scratch); + *result = StringPiece(scratch, left); + + // In case of a partial read, we must still fill `result`, but also return + // OutOfRange. 
+ if (left < n) { + return errors::OutOfRange(""); + } + return OkStatus(); + } + + Status Append(StringPiece data) override { + data_->append(data.data(), data.size()); + return OkStatus(); + } + +#if defined(TF_CORD_SUPPORT) + Status Append(const absl::Cord& cord) override { + data_->append(cord.char_begin(), cord.char_end()); + return OkStatus(); + } +#endif + + Status Close() override { return OkStatus(); } + Status Flush() override { return OkStatus(); } + Status Sync() override { return OkStatus(); } + + Status Tell(int64_t* position) override { + *position = -1; + return errors::Unimplemented("This filesystem does not support Tell()"); + } + + private: + RamRandomAccessFile(const RamRandomAccessFile&) = delete; + void operator=(const RamRandomAccessFile&) = delete; + std::string name_; + std::shared_ptr data_; +}; + +class RamFileSystem : public FileSystem { + public: + TF_USE_FILESYSTEM_METHODS_WITH_NO_TRANSACTION_SUPPORT; + + Status NewRandomAccessFile( + const std::string& fname_, TransactionToken* token, + std::unique_ptr* result) override { + mutex_lock m(mu_); + auto fname = StripRamFsPrefix(fname_); + + if (fs_.find(fname) == fs_.end()) { + return errors::NotFound(""); + } + if (fs_[fname] == nullptr) { + return errors::InvalidArgument(fname_, " is a directory."); + } + *result = std::unique_ptr( + new RamRandomAccessFile(fname, fs_[fname])); + return OkStatus(); + } + + Status NewWritableFile(const std::string& fname_, TransactionToken* token, + std::unique_ptr* result) override { + mutex_lock m(mu_); + auto fname = StripRamFsPrefix(fname_); + + if (fs_.find(fname) == fs_.end()) { + fs_[fname] = std::make_shared(); + } + if (fs_[fname] == nullptr) { + return errors::InvalidArgument(fname_, " is a directory."); + } + *result = std::unique_ptr( + new RamRandomAccessFile(fname, fs_[fname])); + return OkStatus(); + } + + Status NewAppendableFile(const std::string& fname_, TransactionToken* token, + std::unique_ptr* result) override { + mutex_lock m(mu_); 
+ auto fname = StripRamFsPrefix(fname_); + + if (fs_.find(fname) == fs_.end()) { + fs_[fname] = std::make_shared(); + } + if (fs_[fname] == nullptr) { + return errors::InvalidArgument(fname_, " is a directory."); + } + *result = std::unique_ptr( + new RamRandomAccessFile(fname, fs_[fname])); + return OkStatus(); + } + + Status NewReadOnlyMemoryRegionFromFile( + const std::string& fname, TransactionToken* token, + std::unique_ptr* result) override { + return errors::Unimplemented(""); + } + + Status FileExists(const std::string& fname_, + TransactionToken* token) override { + FileStatistics stat; + auto fname = StripRamFsPrefix(fname_); + + return Stat(fname, token, &stat); + } + + Status GetChildren(const std::string& dir_, TransactionToken* token, + std::vector* result) override { + mutex_lock m(mu_); + auto dir = StripRamFsPrefix(dir_); + + auto it = fs_.lower_bound(dir); + while (it != fs_.end() && StartsWith(it->first, dir)) { + auto filename = StripPrefix(StripPrefix(it->first, dir), "/"); + // It is not either (a) the parent directory itself or (b) a subdirectory + if (!filename.empty() && filename.find("/") == std::string::npos) { + result->push_back(filename); + } + ++it; + } + + return OkStatus(); + } + + Status GetMatchingPaths(const std::string& pattern_, TransactionToken* token, + std::vector* results) override { + mutex_lock m(mu_); + auto pattern = StripRamFsPrefix(pattern_); + + Env* env = Env::Default(); + for (auto it = fs_.begin(); it != fs_.end(); ++it) { + if (env->MatchPath(it->first, pattern)) { + results->push_back("ram://" + it->first); + } + } + return OkStatus(); + } + + Status Stat(const std::string& fname_, TransactionToken* token, + FileStatistics* stat) override { + mutex_lock m(mu_); + auto fname = StripRamFsPrefix(fname_); + + auto it = fs_.lower_bound(fname); + if (it == fs_.end() || !StartsWith(it->first, fname)) { + return errors::NotFound(""); + } + + if (it->first == fname && it->second != nullptr) { + stat->is_directory = 
false; + stat->length = fs_[fname]->size(); + stat->mtime_nsec = 0; + return OkStatus(); + } + + stat->is_directory = true; + stat->length = 0; + stat->mtime_nsec = 0; + return OkStatus(); + } + + Status DeleteFile(const std::string& fname_, + TransactionToken* token) override { + mutex_lock m(mu_); + auto fname = StripRamFsPrefix(fname_); + + if (fs_.find(fname) != fs_.end()) { + fs_.erase(fname); + return OkStatus(); + } + + return errors::NotFound(""); + } + + Status CreateDir(const std::string& dirname_, + TransactionToken* token) override { + mutex_lock m(mu_); + auto dirname = StripRamFsPrefix(dirname_); + + auto it = fs_.find(dirname); + if (it != fs_.end() && it->second != nullptr) { + return errors::AlreadyExists( + "cannot create directory with same name as an existing file"); + } + + fs_[dirname] = nullptr; + return OkStatus(); + } + + Status RecursivelyCreateDir(const std::string& dirname_, + TransactionToken* token) override { + auto dirname = StripRamFsPrefix(dirname_); + + std::vector dirs = StrSplit(dirname, "/"); + Status last_status; + std::string dir = dirs[0]; + last_status = CreateDir(dir, token); + + for (int i = 1; i < dirs.size(); ++i) { + dir = dir + "/" + dirs[i]; + last_status = CreateDir(dir, token); + } + return last_status; + } + + Status DeleteDir(const std::string& dirname_, + TransactionToken* token) override { + mutex_lock m(mu_); + auto dirname = StripRamFsPrefix(dirname_); + + auto it = fs_.find(dirname); + if (it == fs_.end()) { + return errors::NotFound(""); + } + if (it->second != nullptr) { + return errors::InvalidArgument("Not a directory"); + } + fs_.erase(dirname); + + return OkStatus(); + } + + Status GetFileSize(const std::string& fname_, TransactionToken* token, + uint64* file_size) override { + mutex_lock m(mu_); + auto fname = StripRamFsPrefix(fname_); + + if (fs_.find(fname) != fs_.end()) { + if (fs_[fname] == nullptr) { + return errors::InvalidArgument("Not a file"); + } + *file_size = fs_[fname]->size(); + return 
OkStatus(); + } + return errors::NotFound(""); + } + + Status RenameFile(const std::string& src_, const std::string& target_, + TransactionToken* token) override { + mutex_lock m(mu_); + auto src = StripRamFsPrefix(src_); + auto target = StripRamFsPrefix(target_); + + if (fs_.find(src) != fs_.end()) { + fs_[target] = fs_[src]; + fs_.erase(fs_.find(src)); + return OkStatus(); + } + return errors::NotFound(""); + } + + RamFileSystem() {} + ~RamFileSystem() override {} + + private: + mutex mu_; + std::map> fs_; + + std::vector StrSplit(std::string s, std::string delim) { + std::vector ret; + size_t curr_pos = 0; + while ((curr_pos = s.find(delim)) != std::string::npos) { + ret.push_back(s.substr(0, curr_pos)); + s.erase(0, curr_pos + delim.size()); + } + ret.push_back(s); + return ret; + } + + bool StartsWith(std::string s, std::string prefix) { + return absl::StartsWith(s, prefix); + } + + string StripPrefix(std::string s, std::string prefix) { + if (absl::StartsWith(s, prefix)) { + return s.erase(0, prefix.size()); + } + return s; + } + + string StripRamFsPrefix(std::string name) { + std::string s = StripPrefix(name, "ram://"); + if (*(s.rbegin()) == '/') { + s.pop_back(); + } + return s; + } +}; + +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_RAM_FILE_SYSTEM_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/random.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/random.h new file mode 100644 index 0000000000000000000000000000000000000000..7e385387cf54f9e9242bf0b85e7ea52200f77d3a --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/random.h @@ -0,0 +1,38 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_RANDOM_H_ +#define TENSORFLOW_TSL_PLATFORM_RANDOM_H_ + +#include "tsl/platform/types.h" + +namespace tsl { +namespace random { + +// Return a 64-bit random value. Different sequences are generated +// in different processes. +uint64 New64(); + +// Same as previous method, but uses a different RNG for each thread. +uint64 ThreadLocalNew64(); + +// Return a 64-bit random value. Uses +// std::mersenne_twister_engine::default_seed as seed value. +uint64 New64DefaultSeed(); + +} // namespace random +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_RANDOM_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/raw_coding.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/raw_coding.h new file mode 100644 index 0000000000000000000000000000000000000000..f12c1d18ef789541d5aa5f6ac4840d7ef04fb694 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/raw_coding.h @@ -0,0 +1,72 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_RAW_CODING_H_ +#define TENSORFLOW_TSL_PLATFORM_RAW_CODING_H_ + +#include + +#include "tsl/platform/byte_order.h" +#include "tsl/platform/types.h" + +namespace tsl { +namespace core { + +// Lower-level versions of Get... that read directly from a character buffer +// without any bounds checking. + +inline uint16 DecodeFixed16(const char* ptr) { + if (port::kLittleEndian) { + // Load the raw bytes + uint16 result; + memcpy(&result, ptr, sizeof(result)); // gcc optimizes this to a plain load + return result; + } else { + return ((static_cast(static_cast(ptr[0]))) | + (static_cast(static_cast(ptr[1])) << 8)); + } +} + +inline uint32 DecodeFixed32(const char* ptr) { + if (port::kLittleEndian) { + // Load the raw bytes + uint32 result; + memcpy(&result, ptr, sizeof(result)); // gcc optimizes this to a plain load + return result; + } else { + return ((static_cast(static_cast(ptr[0]))) | + (static_cast(static_cast(ptr[1])) << 8) | + (static_cast(static_cast(ptr[2])) << 16) | + (static_cast(static_cast(ptr[3])) << 24)); + } +} + +inline uint64 DecodeFixed64(const char* ptr) { + if (port::kLittleEndian) { + // Load the raw bytes + uint64 result; + memcpy(&result, ptr, sizeof(result)); // gcc optimizes this to a plain load + return result; + } else { + uint64 lo = DecodeFixed32(ptr); + uint64 hi = DecodeFixed32(ptr + 4); + return (hi << 32) | lo; + } +} + +} // namespace core +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_RAW_CODING_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/resource_loader.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/resource_loader.h new file mode 100644 index 0000000000000000000000000000000000000000..047de4d9e59e15bb3befe6cb2215cf4e19ae326b --- /dev/null +++ 
b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/resource_loader.h @@ -0,0 +1,32 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// Small helper library to access "data" dependencies defined in BUILD files. +// Requires the relative paths starting from tensorflow/... +// For example, to get this file, a user would call: +// GetDataDependencyFilepath("tensorflow/core/platform/resource_loader.h") + +#ifndef TENSORFLOW_TSL_PLATFORM_RESOURCE_LOADER_H_ +#define TENSORFLOW_TSL_PLATFORM_RESOURCE_LOADER_H_ + +#include + +namespace tsl { + +std::string GetDataDependencyFilepath(const std::string& relative_path); + +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_RESOURCE_LOADER_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/retrying_file_system.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/retrying_file_system.h new file mode 100644 index 0000000000000000000000000000000000000000..591423b4fe3ec7690182dfcc400278d1460939dc --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/retrying_file_system.h @@ -0,0 +1,302 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_RETRYING_FILE_SYSTEM_H_ +#define TENSORFLOW_TSL_PLATFORM_RETRYING_FILE_SYSTEM_H_ + +#include +#include +#include + +#include "tsl/platform/env.h" +#include "tsl/platform/errors.h" +#include "tsl/platform/file_system.h" +#include "tsl/platform/random.h" +#include "tsl/platform/retrying_utils.h" +#include "tsl/platform/status.h" + +namespace tsl { + +/// A wrapper to add retry logic to another file system. +template +class RetryingFileSystem : public FileSystem { + public: + RetryingFileSystem(std::unique_ptr base_file_system, + const RetryConfig& retry_config) + : base_file_system_(std::move(base_file_system)), + retry_config_(retry_config) {} + + TF_USE_FILESYSTEM_METHODS_WITH_NO_TRANSACTION_SUPPORT; + + Status NewRandomAccessFile( + const string& filename, TransactionToken* token, + std::unique_ptr* result) override; + + Status NewWritableFile(const string& filename, TransactionToken* token, + std::unique_ptr* result) override; + + Status NewAppendableFile(const string& filename, TransactionToken* token, + std::unique_ptr* result) override; + + Status NewReadOnlyMemoryRegionFromFile( + const string& filename, TransactionToken* token, + std::unique_ptr* result) override; + + Status FileExists(const string& fname, TransactionToken* token) override { + return RetryingUtils::CallWithRetries( + [this, &fname, token]() { + return base_file_system_->FileExists(fname, token); + }, + retry_config_); + } + + Status GetChildren(const string& dir, TransactionToken* 
token, + std::vector* result) override { + return RetryingUtils::CallWithRetries( + [this, &dir, result, token]() { + return base_file_system_->GetChildren(dir, token, result); + }, + retry_config_); + } + + Status GetMatchingPaths(const string& pattern, TransactionToken* token, + std::vector* result) override { + return RetryingUtils::CallWithRetries( + [this, &pattern, result, token]() { + return base_file_system_->GetMatchingPaths(pattern, token, result); + }, + retry_config_); + } + + Status Stat(const string& fname, TransactionToken* token, + FileStatistics* stat) override { + return RetryingUtils::CallWithRetries( + [this, &fname, stat, token]() { + return base_file_system_->Stat(fname, token, stat); + }, + retry_config_); + } + + Status DeleteFile(const string& fname, TransactionToken* token) override { + return RetryingUtils::DeleteWithRetries( + [this, &fname, token]() { + return base_file_system_->DeleteFile(fname, token); + }, + retry_config_); + } + + Status CreateDir(const string& dirname, TransactionToken* token) override { + return RetryingUtils::CallWithRetries( + [this, &dirname, token]() { + return base_file_system_->CreateDir(dirname, token); + }, + retry_config_); + } + + Status DeleteDir(const string& dirname, TransactionToken* token) override { + return RetryingUtils::DeleteWithRetries( + [this, &dirname, token]() { + return base_file_system_->DeleteDir(dirname, token); + }, + retry_config_); + } + + Status GetFileSize(const string& fname, TransactionToken* token, + uint64* file_size) override { + return RetryingUtils::CallWithRetries( + [this, &fname, file_size, token]() { + return base_file_system_->GetFileSize(fname, token, file_size); + }, + retry_config_); + } + + Status RenameFile(const string& src, const string& target, + TransactionToken* token) override { + return RetryingUtils::CallWithRetries( + [this, &src, &target, token]() { + return base_file_system_->RenameFile(src, target, token); + }, + retry_config_); + } + + Status 
IsDirectory(const string& dirname, TransactionToken* token) override { + return RetryingUtils::CallWithRetries( + [this, &dirname, token]() { + return base_file_system_->IsDirectory(dirname, token); + }, + retry_config_); + } + + Status HasAtomicMove(const string& path, bool* has_atomic_move) override { + // this method does not need to be retried + return base_file_system_->HasAtomicMove(path, has_atomic_move); + } + + Status DeleteRecursively(const string& dirname, TransactionToken* token, + int64_t* undeleted_files, + int64_t* undeleted_dirs) override { + return RetryingUtils::DeleteWithRetries( + [this, &dirname, token, undeleted_files, undeleted_dirs]() { + return base_file_system_->DeleteRecursively( + dirname, token, undeleted_files, undeleted_dirs); + }, + retry_config_); + } + + void FlushCaches(TransactionToken* token) override { + base_file_system_->FlushCaches(token); + } + + Underlying* underlying() const { return base_file_system_.get(); } + + private: + std::unique_ptr base_file_system_; + const RetryConfig retry_config_; + + RetryingFileSystem(const RetryingFileSystem&) = delete; + void operator=(const RetryingFileSystem&) = delete; +}; + +namespace retrying_internals { + +class RetryingRandomAccessFile : public RandomAccessFile { + public: + RetryingRandomAccessFile(std::unique_ptr base_file, + const RetryConfig& retry_config) + : base_file_(std::move(base_file)), retry_config_(retry_config) {} + + Status Name(StringPiece* result) const override { + return base_file_->Name(result); + } + + Status Read(uint64 offset, size_t n, StringPiece* result, + char* scratch) const override { + return RetryingUtils::CallWithRetries( + [this, offset, n, result, scratch]() { + return base_file_->Read(offset, n, result, scratch); + }, + retry_config_); + } + + private: + std::unique_ptr base_file_; + const RetryConfig retry_config_; +}; + +class RetryingWritableFile : public WritableFile { + public: + RetryingWritableFile(std::unique_ptr base_file, + const 
RetryConfig& retry_config) + : base_file_(std::move(base_file)), retry_config_(retry_config) {} + + ~RetryingWritableFile() override { + // Makes sure the retrying version of Close() is called in the destructor. + Close().IgnoreError(); + } + + Status Append(StringPiece data) override { + return RetryingUtils::CallWithRetries( + [this, &data]() { return base_file_->Append(data); }, retry_config_); + } + Status Close() override { + return RetryingUtils::CallWithRetries( + [this]() { return base_file_->Close(); }, retry_config_); + } + Status Flush() override { + return RetryingUtils::CallWithRetries( + [this]() { return base_file_->Flush(); }, retry_config_); + } + Status Name(StringPiece* result) const override { + return base_file_->Name(result); + } + Status Sync() override { + return RetryingUtils::CallWithRetries( + [this]() { return base_file_->Sync(); }, retry_config_); + } + Status Tell(int64_t* position) override { + return RetryingUtils::CallWithRetries( + [this, &position]() { return base_file_->Tell(position); }, + retry_config_); + } + + private: + std::unique_ptr base_file_; + const RetryConfig retry_config_; +}; + +} // namespace retrying_internals + +template +Status RetryingFileSystem::NewRandomAccessFile( + const string& filename, TransactionToken* token, + std::unique_ptr* result) { + std::unique_ptr base_file; + TF_RETURN_IF_ERROR(RetryingUtils::CallWithRetries( + [this, &filename, &base_file, token]() { + return base_file_system_->NewRandomAccessFile(filename, token, + &base_file); + }, + retry_config_)); + result->reset(new retrying_internals::RetryingRandomAccessFile( + std::move(base_file), retry_config_)); + return OkStatus(); +} + +template +Status RetryingFileSystem::NewWritableFile( + const string& filename, TransactionToken* token, + std::unique_ptr* result) { + std::unique_ptr base_file; + TF_RETURN_IF_ERROR(RetryingUtils::CallWithRetries( + [this, &filename, &base_file, token]() { + return base_file_system_->NewWritableFile(filename, 
token, &base_file); + }, + retry_config_)); + result->reset(new retrying_internals::RetryingWritableFile( + std::move(base_file), retry_config_)); + return OkStatus(); +} + +template +Status RetryingFileSystem::NewAppendableFile( + const string& filename, TransactionToken* token, + std::unique_ptr* result) { + std::unique_ptr base_file; + TF_RETURN_IF_ERROR(RetryingUtils::CallWithRetries( + [this, &filename, &base_file, token]() { + return base_file_system_->NewAppendableFile(filename, token, + &base_file); + }, + retry_config_)); + result->reset(new retrying_internals::RetryingWritableFile( + std::move(base_file), retry_config_)); + return OkStatus(); +} + +template +Status RetryingFileSystem::NewReadOnlyMemoryRegionFromFile( + const string& filename, TransactionToken* token, + std::unique_ptr* result) { + return RetryingUtils::CallWithRetries( + [this, &filename, result, token]() { + return base_file_system_->NewReadOnlyMemoryRegionFromFile( + filename, token, result); + }, + retry_config_); +} + +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_RETRYING_FILE_SYSTEM_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/rocm_rocdl_path.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/rocm_rocdl_path.h new file mode 100644 index 0000000000000000000000000000000000000000..7432a6566d717abf33d5c47c4479e87083ab2296 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/rocm_rocdl_path.h @@ -0,0 +1,32 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_ROCM_ROCDL_PATH_H_ +#define TENSORFLOW_TSL_PLATFORM_ROCM_ROCDL_PATH_H_ + +#include "tsl/platform/types.h" + +namespace tsl { + +// Returns the root directory of the ROCM SDK, which contains sub-folders such +// as bin, lib, and rocdl. +string RocmRoot(); + +// Returns the directory that contains ROCm-Device-Libs files in the ROCm SDK. +string RocdlRoot(); + +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_ROCM_ROCDL_PATH_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/scanner.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/scanner.h new file mode 100644 index 0000000000000000000000000000000000000000..2a53d57320cbe56ba6cd4027e457f4e7e89a9ffb --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/scanner.h @@ -0,0 +1,247 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_SCANNER_H_ +#define TENSORFLOW_TSL_PLATFORM_SCANNER_H_ + +#include + +#include "tsl/platform/macros.h" +#include "tsl/platform/str_util.h" +#include "tsl/platform/stringpiece.h" + +namespace tsl { +namespace strings { + +// Scanner provides simplified string parsing, in which a string is parsed as a +// series of scanning calls (e.g. One, Any, Many, OneLiteral, Eos), and then +// finally GetResult is called. If GetResult returns true, then it also returns +// the remaining characters and any captured substring. +// +// The range to capture can be controlled with RestartCapture and StopCapture; +// by default, all processed characters are captured. +class Scanner { + public: + // Classes of characters. Each enum name is to be read as the union of the + // parts - e.g., class LETTER_DIGIT means the class includes all letters and + // all digits. + // + // LETTER means ascii letter a-zA-Z. + // DIGIT means ascii digit: 0-9. + enum CharClass { + // NOTE: When adding a new CharClass, update the AllCharClasses ScannerTest + // in scanner_test.cc + ALL, + DIGIT, + LETTER, + LETTER_DIGIT, + LETTER_DIGIT_DASH_UNDERSCORE, + LETTER_DIGIT_DASH_DOT_SLASH, // SLASH is / only, not backslash + LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE, // SLASH is / only, not backslash + LETTER_DIGIT_DOT, + LETTER_DIGIT_DOT_PLUS_MINUS, + LETTER_DIGIT_DOT_UNDERSCORE, + LETTER_DIGIT_UNDERSCORE, + LOWERLETTER, + LOWERLETTER_DIGIT, + LOWERLETTER_DIGIT_UNDERSCORE, + NON_ZERO_DIGIT, + SPACE, + UPPERLETTER, + RANGLE, + }; + + explicit Scanner(StringPiece source) : cur_(source) { RestartCapture(); } + + // Consume the next character of the given class from input. If the next + // character is not in the class, then GetResult will ultimately return false. 
+ Scanner& One(CharClass clz) { + if (cur_.empty() || !Matches(clz, cur_[0])) { + return Error(); + } + cur_.remove_prefix(1); + return *this; + } + + // Consume the next s.size() characters of the input, if they match . If + // they don't match , this is a no-op. + Scanner& ZeroOrOneLiteral(StringPiece s) { + str_util::ConsumePrefix(&cur_, s); + return *this; + } + + // Consume the next s.size() characters of the input, if they match . If + // they don't match , then GetResult will ultimately return false. + Scanner& OneLiteral(StringPiece s) { + if (!str_util::ConsumePrefix(&cur_, s)) { + error_ = true; + } + return *this; + } + + // Consume characters from the input as long as they match . Zero + // characters is still considered a match, so it will never cause GetResult to + // return false. + Scanner& Any(CharClass clz) { + while (!cur_.empty() && Matches(clz, cur_[0])) { + cur_.remove_prefix(1); + } + return *this; + } + + // Shorthand for One(clz).Any(clz). + Scanner& Many(CharClass clz) { return One(clz).Any(clz); } + + // Reset the capture start point. + // + // Later, when GetResult is called and if it returns true, the capture + // returned will start at the position at the time this was called. + Scanner& RestartCapture() { + capture_start_ = cur_.data(); + capture_end_ = nullptr; + return *this; + } + + // Stop capturing input. + // + // Later, when GetResult is called and if it returns true, the capture + // returned will end at the position at the time this was called. + Scanner& StopCapture() { + capture_end_ = cur_.data(); + return *this; + } + + // If not at the input of input, then GetResult will ultimately return false. + Scanner& Eos() { + if (!cur_.empty()) error_ = true; + return *this; + } + + // Shorthand for Any(SPACE). + Scanner& AnySpace() { return Any(SPACE); } + + // This scans input until is reached. is NOT consumed. 
+ Scanner& ScanUntil(char end_ch) { + ScanUntilImpl(end_ch, false); + return *this; + } + + // This scans input until is reached. is NOT consumed. + // Backslash escape sequences are skipped. + // Used for implementing quoted string scanning. + Scanner& ScanEscapedUntil(char end_ch) { + ScanUntilImpl(end_ch, true); + return *this; + } + + // Return the next character that will be scanned, or if there + // are no more characters to scan. + // Note that if a scan operation has failed (so GetResult() returns false), + // then the value of Peek may or may not have advanced since the scan + // operation that failed. + char Peek(char default_value = '\0') const { + return cur_.empty() ? default_value : cur_[0]; + } + + // Returns false if there are no remaining characters to consume. + int empty() const { return cur_.empty(); } + + // Returns true if the input string successfully matched. When true is + // returned, the remaining string is returned in and the captured + // string returned in , if non-NULL. 
+ bool GetResult(StringPiece* remaining = nullptr, + StringPiece* capture = nullptr); + + private: + void ScanUntilImpl(char end_ch, bool escaped); + + Scanner& Error() { + error_ = true; + return *this; + } + + static bool IsLetter(char ch) { + return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z'); + } + + static bool IsLowerLetter(char ch) { return ch >= 'a' && ch <= 'z'; } + + static bool IsDigit(char ch) { return ch >= '0' && ch <= '9'; } + + static bool IsSpace(char ch) { + return (ch == ' ' || ch == '\t' || ch == '\n' || ch == '\v' || ch == '\f' || + ch == '\r'); + } + + static bool Matches(CharClass clz, char ch) { + switch (clz) { + case ALL: + return true; + case DIGIT: + return IsDigit(ch); + case LETTER: + return IsLetter(ch); + case LETTER_DIGIT: + return IsLetter(ch) || IsDigit(ch); + case LETTER_DIGIT_DASH_UNDERSCORE: + return (IsLetter(ch) || IsDigit(ch) || ch == '-' || ch == '_'); + case LETTER_DIGIT_DASH_DOT_SLASH: + return IsLetter(ch) || IsDigit(ch) || ch == '-' || ch == '.' || + ch == '/'; + case LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE: + return (IsLetter(ch) || IsDigit(ch) || ch == '-' || ch == '.' || + ch == '/' || ch == '_'); + case LETTER_DIGIT_DOT: + return IsLetter(ch) || IsDigit(ch) || ch == '.'; + case LETTER_DIGIT_DOT_PLUS_MINUS: + return IsLetter(ch) || IsDigit(ch) || ch == '+' || ch == '-' || + ch == '.'; + case LETTER_DIGIT_DOT_UNDERSCORE: + return IsLetter(ch) || IsDigit(ch) || ch == '.' 
|| ch == '_'; + case LETTER_DIGIT_UNDERSCORE: + return IsLetter(ch) || IsDigit(ch) || ch == '_'; + case LOWERLETTER: + return ch >= 'a' && ch <= 'z'; + case LOWERLETTER_DIGIT: + return IsLowerLetter(ch) || IsDigit(ch); + case LOWERLETTER_DIGIT_UNDERSCORE: + return IsLowerLetter(ch) || IsDigit(ch) || ch == '_'; + case NON_ZERO_DIGIT: + return IsDigit(ch) && ch != '0'; + case SPACE: + return IsSpace(ch); + case UPPERLETTER: + return ch >= 'A' && ch <= 'Z'; + case RANGLE: + return ch == '>'; + } + return false; + } + + StringPiece cur_; + const char* capture_start_ = nullptr; + const char* capture_end_ = nullptr; + bool error_ = false; + + friend class ScannerTest; + + Scanner(const Scanner&) = delete; + void operator=(const Scanner&) = delete; +}; + +} // namespace strings +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_SCANNER_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/setround.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/setround.h new file mode 100644 index 0000000000000000000000000000000000000000..adfc3fd2ee29fa1bf43a04e2d115218ff4553935 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/setround.h @@ -0,0 +1,55 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_SETROUND_H_ +#define TENSORFLOW_TSL_PLATFORM_SETROUND_H_ + +#if defined(__ANDROID_API__) && (__ANDROID_API__ < 21) +// The header is broken pre-API 21 for several NDK releases. +#define TF_BROKEN_CFENV +#endif + +#if defined(TF_BROKEN_CFENV) +#include // NOLINT +#else +#include // NOLINT +#endif + +#include "tsl/platform/macros.h" + +namespace tsl { +namespace port { + +// While this class is active, floating point rounding mode is set to the given +// mode. The mode can be one of the modes defined in , i.e. FE_DOWNWARD, +// FE_TONEAREST, FE_TOWARDZERO, or FE_UPWARD. The destructor restores the +// original rounding mode if it could be determined. If the original rounding +// mode could not be determined, the destructor sets it to FE_TONEAREST. +class ScopedSetRound { + public: + ScopedSetRound(int mode); + ~ScopedSetRound(); + + private: + int original_mode_; + + ScopedSetRound(const ScopedSetRound&) = delete; + void operator=(const ScopedSetRound&) = delete; +}; + +} // namespace port +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_SETROUND_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/snappy.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/snappy.h new file mode 100644 index 0000000000000000000000000000000000000000..151b4a9bce74df6fdcb8cb0b4f5f294e4609caa9 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/snappy.h @@ -0,0 +1,54 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_SNAPPY_H_ +#define TENSORFLOW_TSL_PLATFORM_SNAPPY_H_ + +#include "tsl/platform/types.h" + +#if !defined(PLATFORM_WINDOWS) +#include +namespace tsl { +using ::iovec; // NOLINT(misc-unused-using-decls) +} // namespace tsl +#else +namespace tsl { +struct iovec { + void* iov_base; + size_t iov_len; +}; +} // namespace tsl +#endif + +namespace tsl { +namespace port { + +// Snappy compression/decompression support +bool Snappy_Compress(const char* input, size_t length, string* output); + +bool Snappy_CompressFromIOVec(const struct iovec* iov, + size_t uncompressed_length, string* output); + +bool Snappy_GetUncompressedLength(const char* input, size_t length, + size_t* result); +bool Snappy_Uncompress(const char* input, size_t length, char* output); + +bool Snappy_UncompressToIOVec(const char* compressed, size_t compressed_length, + const struct iovec* iov, size_t iov_cnt); + +} // namespace port +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_SNAPPY_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/stacktrace.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/stacktrace.h new file mode 100644 index 0000000000000000000000000000000000000000..4a00a6830b383ed7e1cc03414f62dd0f0d1f8006 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/stacktrace.h @@ -0,0 +1,34 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_STACKTRACE_H_ +#define TENSORFLOW_TSL_PLATFORM_STACKTRACE_H_ + +#include "tsl/platform/platform.h" // IWYU pragma: export + +// Include appropriate platform-dependent implementation. +#if defined(PLATFORM_GOOGLE) +#include "tsl/platform/google/stacktrace.h" // IWYU pragma: export +#elif defined(PLATFORM_POSIX) || defined(PLATFORM_POSIX_ANDROID) || \ + defined(PLATFORM_GOOGLE_ANDROID) || defined(PLATFORM_POSIX_IOS) || \ + defined(PLATFORM_GOOGLE_IOS) +#include "tsl/platform/default/stacktrace.h" // IWYU pragma: export +#elif defined(PLATFORM_WINDOWS) +#include "tsl/platform/windows/stacktrace.h" // IWYU pragma: export +#else +#error Define the appropriate PLATFORM_ macro for this platform +#endif + +#endif // TENSORFLOW_TSL_PLATFORM_STACKTRACE_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/status_to_from_proto.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/status_to_from_proto.h new file mode 100644 index 0000000000000000000000000000000000000000..6abbe78dc0ef69cef4295ee3983c5841100876b8 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/status_to_from_proto.h @@ -0,0 +1,43 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_TSL_PLATFORM_STATUS_TO_FROM_PROTO_H_ +#define TENSORFLOW_TSL_PLATFORM_STATUS_TO_FROM_PROTO_H_ + +#include "tsl/platform/status.h" +#include "tsl/protobuf/status.pb.h" + +namespace tsl { + +// TODO(b/250921378): Merge this file with `status.h` once we figure out how to +// fix the following error with the MacOS build: +// +// ImportError: +// dlopen(/org_tensorflow/tensorflow/python/platform/_pywrap_tf2.so, 2): +// Symbol not found: tensorflow11StatusProtoC1EPN6protobuf5ArenaEb + +// Converts a `Status` to a `StatusProto`. +tensorflow::StatusProto StatusToProto(const Status& s); + +#if defined(PLATFORM_GOOGLE) +// Constructs a `Status` from a `StatusProto`. 
+Status StatusFromProto( + const tensorflow::StatusProto& proto, + absl::SourceLocation loc = absl::SourceLocation::current()); +#else +Status StatusFromProto(const tensorflow::StatusProto& proto); +#endif +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_STATUS_TO_FROM_PROTO_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/statusor.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/statusor.h new file mode 100644 index 0000000000000000000000000000000000000000..0db4e733112c8c6b03ffd01938f3aee8d7eb8edb --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/statusor.h @@ -0,0 +1,104 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// StatusOr is the union of a Status object and a T object. StatusOr models +// the concept of an object that is either a value, or an error Status +// explaining why such a value is not present. To this end, StatusOr does not +// allow its Status value to be Status::OK. +// +// The primary use-case for StatusOr is as the return value of a +// function which may fail. 
+// +// Example client usage for a StatusOr, where T is not a pointer: +// +// StatusOr result = DoBigCalculationThatCouldFail(); +// if (result.ok()) { +// float answer = result.value(); +// printf("Big calculation yielded: %f", answer); +// } else { +// LOG(ERROR) << result.status(); +// } +// +// Example client usage for a StatusOr: +// +// StatusOr result = FooFactory::MakeNewFoo(arg); +// if (result.ok()) { +// std::unique_ptr foo(result.value()); +// foo->DoSomethingCool(); +// } else { +// LOG(ERROR) << result.status(); +// } +// +// Example client usage for a StatusOr>: +// +// StatusOr> result = FooFactory::MakeNewFoo(arg); +// if (result.ok()) { +// std::unique_ptr foo = std::move(result.value()); +// foo->DoSomethingCool(); +// } else { +// LOG(ERROR) << result.status(); +// } +// +// Example factory implementation returning StatusOr: +// +// StatusOr FooFactory::MakeNewFoo(int arg) { +// if (arg <= 0) { +// return tsl::InvalidArgument("Arg must be positive"); +// } else { +// return new Foo(arg); +// } +// } +// +// Note that the assignment operators require that destroying the currently +// stored value cannot invalidate the argument; in other words, the argument +// cannot be an alias for the current value, or anything owned by the current +// value. +#ifndef TENSORFLOW_TSL_PLATFORM_STATUSOR_H_ +#define TENSORFLOW_TSL_PLATFORM_STATUSOR_H_ + +#include "absl/base/attributes.h" +#include "absl/status/statusor.h" +#include "tsl/platform/errors.h" +#include "tsl/platform/macros.h" +#include "tsl/platform/platform.h" +#include "tsl/platform/status.h" + +// Include appropriate platform-dependent `TF_ASSIGN_OR_RETURN`. 
+#if defined(PLATFORM_GOOGLE) +#include "tsl/platform/google/statusor.h" // IWYU pragma: export +#else +#include "tsl/platform/default/statusor.h" // IWYU pragma: export +#endif + +namespace tsl { + +using absl::StatusOr; + +} // namespace tsl + +#define TF_ASSERT_OK_AND_ASSIGN(lhs, rexpr) \ + TF_ASSERT_OK_AND_ASSIGN_IMPL( \ + TF_STATUS_MACROS_CONCAT_NAME(_status_or_value, __COUNTER__), lhs, \ + rexpr); + +#define TF_ASSERT_OK_AND_ASSIGN_IMPL(statusor, lhs, rexpr) \ + auto statusor = (rexpr); \ + ASSERT_TRUE(statusor.status().ok()) << statusor.status(); \ + lhs = std::move(statusor).value() + +#define TF_STATUS_MACROS_CONCAT_NAME(x, y) TF_STATUS_MACROS_CONCAT_IMPL(x, y) +#define TF_STATUS_MACROS_CONCAT_IMPL(x, y) x##y + +#endif // TENSORFLOW_TSL_PLATFORM_STATUSOR_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/str_util.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/str_util.h new file mode 100644 index 0000000000000000000000000000000000000000..4a4ed04e23fbc73ed740bc2954d6187fd501072e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/str_util.h @@ -0,0 +1,188 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_STR_UTIL_H_ +#define TENSORFLOW_TSL_PLATFORM_STR_UTIL_H_ + +#include +#include +#include + +#include "absl/strings/str_join.h" +#include "absl/strings/str_split.h" +#include "tsl/platform/macros.h" +#include "tsl/platform/stringpiece.h" +#include "tsl/platform/types.h" + +// Basic string utility routines +namespace tsl { +namespace str_util { + +// Returns a version of 'src' where unprintable characters have been +// escaped using C-style escape sequences. +std::string CEscape(StringPiece src); + +// Copies "source" to "dest", rewriting C-style escape sequences -- +// '\n', '\r', '\\', '\ooo', etc -- to their ASCII equivalents. +// +// Errors: Sets the description of the first encountered error in +// 'error'. To disable error reporting, set 'error' to NULL. +// +// NOTE: Does not support \u or \U! +bool CUnescape(StringPiece source, std::string* dest, std::string* error); + +// Removes any trailing whitespace from "*s". +void StripTrailingWhitespace(std::string* s); + +// Removes leading ascii_isspace() characters. +// Returns number of characters removed. +size_t RemoveLeadingWhitespace(StringPiece* text); + +// Removes trailing ascii_isspace() characters. +// Returns number of characters removed. +size_t RemoveTrailingWhitespace(StringPiece* text); + +// Removes leading and trailing ascii_isspace() chars. +// Returns number of chars removed. +size_t RemoveWhitespaceContext(StringPiece* text); + +// Consume a leading positive integer value. If any digits were +// found, store the value of the leading unsigned number in "*val", +// advance "*s" past the consumed number, and return true. If +// overflow occurred, returns false. Otherwise, returns false. +bool ConsumeLeadingDigits(StringPiece* s, uint64_t* val); + +// Consume a leading token composed of non-whitespace characters only. 
+// If *s starts with a non-zero number of non-whitespace characters, store +// them in *val, advance *s past them, and return true. Else return false. +bool ConsumeNonWhitespace(StringPiece* s, StringPiece* val); + +// If "*s" starts with "expected", consume it and return true. +// Otherwise, return false. +bool ConsumePrefix(StringPiece* s, StringPiece expected); + +// If "*s" ends with "expected", remove it and return true. +// Otherwise, return false. +bool ConsumeSuffix(StringPiece* s, StringPiece expected); + +// If "s" starts with "expected", return a view into "s" after "expected" but +// keep "s" unchanged. +// Otherwise, return the original "s". +TF_MUST_USE_RESULT StringPiece StripPrefix(StringPiece s, StringPiece expected); + +// If "s" ends with "expected", return a view into "s" until "expected" but +// keep "s" unchanged. +// Otherwise, return the original "s". +TF_MUST_USE_RESULT StringPiece StripSuffix(StringPiece s, StringPiece expected); + +// Return lower-cased version of s. +std::string Lowercase(StringPiece s); + +// Return upper-cased version of s. +std::string Uppercase(StringPiece s); + +// Capitalize first character of each word in "*s". "delimiters" is a +// set of characters that can be used as word boundaries. +void TitlecaseString(std::string* s, StringPiece delimiters); + +// Replaces the first occurrence (if replace_all is false) or all occurrences +// (if replace_all is true) of oldsub in s with newsub. 
+std::string StringReplace(StringPiece s, StringPiece oldsub, StringPiece newsub, + bool replace_all); + +// Join functionality +template +std::string Join(const T& s, const char* sep) { + return absl::StrJoin(s, sep); +} + +// A variant of Join where for each element of "s", f(&dest_string, elem) +// is invoked (f is often constructed with a lambda of the form: +// [](string* result, ElemType elem) +template +std::string Join(const T& s, const char* sep, Formatter f) { + return absl::StrJoin(s, sep, f); +} + +struct AllowEmpty { + bool operator()(StringPiece sp) const { return true; } +}; +struct SkipEmpty { + bool operator()(StringPiece sp) const { return !sp.empty(); } +}; +struct SkipWhitespace { + bool operator()(StringPiece sp) const { + return !absl::StripTrailingAsciiWhitespace(sp).empty(); + } +}; + +// Split strings using any of the supplied delimiters. For example: +// Split("a,b.c,d", ".,") would return {"a", "b", "c", "d"}. +inline std::vector Split(StringPiece text, StringPiece delims) { + return text.empty() ? std::vector() + : absl::StrSplit(text, absl::ByAnyChar(delims)); +} + +template +std::vector Split(StringPiece text, StringPiece delims, Predicate p) { + return text.empty() ? std::vector() + : absl::StrSplit(text, absl::ByAnyChar(delims), p); +} + +inline std::vector Split(StringPiece text, char delim) { + return text.empty() ? std::vector() : absl::StrSplit(text, delim); +} + +template +std::vector Split(StringPiece text, char delim, Predicate p) { + return text.empty() ? std::vector() : absl::StrSplit(text, delim, p); +} + +// StartsWith() +// +// Returns whether a given string `text` begins with `prefix`. +bool StartsWith(StringPiece text, StringPiece prefix); + +// EndsWith() +// +// Returns whether a given string `text` ends with `suffix`. +bool EndsWith(StringPiece text, StringPiece suffix); + +// StrContains() +// +// Returns whether a given string `haystack` contains the substring `needle`. 
+bool StrContains(StringPiece haystack, StringPiece needle); + +// Returns the length of the given null-terminated byte string 'str'. +// Returns 'string_max_len' if the null character was not found in the first +// 'string_max_len' bytes of 'str'. +size_t Strnlen(const char* str, const size_t string_max_len); + +// ----- NON STANDARD, TF SPECIFIC METHOD ----- +// Converts "^2ILoveYou!" to "i_love_you_". More specifically: +// - converts all non-alphanumeric characters to underscores +// - replaces each occurrence of a capital letter (except the very +// first character and if there is already an '_' before it) with '_' +// followed by this letter in lower case +// - Skips leading non-alpha characters +// This method is useful for producing strings matching "[a-z][a-z0-9_]*" +// as required by OpDef.ArgDef.name. The resulting string is either empty or +// matches this regex. +std::string ArgDefCase(StringPiece s); + +} // namespace str_util +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_STR_UTIL_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/stringpiece.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/stringpiece.h new file mode 100644 index 0000000000000000000000000000000000000000..dbad424b211746057b1cb40aa5371d57d6d088b1 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/stringpiece.h @@ -0,0 +1,37 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// StringPiece is a simple structure containing a pointer into some external +// storage and a size. The user of a StringPiece must ensure that the slice +// is not used after the corresponding external storage has been +// deallocated. +// +// Multiple threads can invoke const methods on a StringPiece without +// external synchronization, but if any of the threads may call a +// non-const method, all threads accessing the same StringPiece must use +// external synchronization. + +#ifndef TENSORFLOW_TSL_PLATFORM_STRINGPIECE_H_ +#define TENSORFLOW_TSL_PLATFORM_STRINGPIECE_H_ + +#include "absl/strings/string_view.h" // IWYU pragma: export + +namespace tsl { + +using StringPiece = absl::string_view; + +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_STRINGPIECE_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/stringprintf.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/stringprintf.h new file mode 100644 index 0000000000000000000000000000000000000000..92bc6fc771967ee4612917869f6e4336fc7c83b6 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/stringprintf.h @@ -0,0 +1,52 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +// Printf variants that place their output in a C++ string. +// +// Usage: +// string result = strings::Printf("%d %s\n", 10, "hello"); +// strings::Appendf(&result, "%d %s\n", 20, "there"); + +#ifndef TENSORFLOW_TSL_PLATFORM_STRINGPRINTF_H_ +#define TENSORFLOW_TSL_PLATFORM_STRINGPRINTF_H_ + +#include + +#include + +#include "tsl/platform/macros.h" +#include "tsl/platform/types.h" + +namespace tsl { +namespace strings { + +// Return a C++ string +std::string Printf(const char* format, ...) + // Tell the compiler to do printf format string checking. + TF_PRINTF_ATTRIBUTE(1, 2); + +// Append result to a supplied string +void Appendf(std::string* dst, const char* format, ...) + // Tell the compiler to do printf format string checking. + TF_PRINTF_ATTRIBUTE(2, 3); + +// Lower-level routine that takes a va_list and appends to a specified +// string. All other routines are just convenience wrappers around it. +void Appendv(std::string* dst, const char* format, va_list ap); + +} // namespace strings +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_STRINGPRINTF_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/subprocess.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/subprocess.h new file mode 100644 index 0000000000000000000000000000000000000000..0fe905c2f8687c9b3a3e49aebe4d397d6d7dbf35 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/subprocess.h @@ -0,0 +1,73 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_SUBPROCESS_H_ +#define TENSORFLOW_TSL_PLATFORM_SUBPROCESS_H_ + +#include +#include + +#include "tsl/platform/types.h" + +namespace tsl { + +// Channel identifiers. +enum Channel { + CHAN_STDIN = 0, + CHAN_STDOUT = 1, + CHAN_STDERR = 2, +}; + +// Specify how a channel is handled. +enum ChannelAction { + // Close the file descriptor when the process starts. + // This is the default behavior. + ACTION_CLOSE, + + // Make a pipe to the channel. It is used in the Communicate() method to + // transfer data between the parent and child processes. + ACTION_PIPE, + + // Duplicate the parent's file descriptor. Useful if stdout/stderr should + // go to the same place that the parent writes it. + ACTION_DUPPARENT, +}; + +// Supports spawning and killing child processes. +class SubProcess; + +// Returns an object that represents a child process that will be +// launched with the given command-line arguments `argv`. The process +// must be explicitly started by calling the Start() method on the +// returned object. 
+std::unique_ptr CreateSubProcess(const std::vector& argv); + +} // namespace tsl + +#include "tsl/platform/platform.h" + +#if defined(PLATFORM_GOOGLE) +#include "tsl/platform/google/subprocess.h" +#elif defined(PLATFORM_POSIX) || defined(PLATFORM_POSIX_ANDROID) || \ + defined(PLATFORM_GOOGLE_ANDROID) || defined(PLATFORM_POSIX_IOS) || \ + defined(PLATFORM_GOOGLE_IOS) +#include "tsl/platform/default/subprocess.h" // IWYU pragma: export +#elif defined(PLATFORM_WINDOWS) +#include "tsl/platform/windows/subprocess.h" // IWYU pragma: export +#else +#error Define the appropriate PLATFORM_ macro for this platform +#endif + +#endif // TENSORFLOW_TSL_PLATFORM_SUBPROCESS_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/tensor_float_32_utils.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/tensor_float_32_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..d956340c30330971e37e8e14664120f12a632a74 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/tensor_float_32_utils.h @@ -0,0 +1,29 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_TENSOR_FLOAT_32_UTILS_H_ +#define TENSORFLOW_TSL_PLATFORM_TENSOR_FLOAT_32_UTILS_H_ + +namespace tsl { + +// NOTE: The usage of this function is only supported through the Tensorflow +// framework. +void enable_tensor_float_32_execution(bool enabled); + +bool tensor_float_32_execution_enabled(); + +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_TENSOR_FLOAT_32_UTILS_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/test.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/test.h new file mode 100644 index 0000000000000000000000000000000000000000..313bfe5f0ea3dd45748061046f354629556097e3 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/test.h @@ -0,0 +1,95 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_TEST_H_ +#define TENSORFLOW_TSL_PLATFORM_TEST_H_ + +#include +#include +#include + +#include // IWYU pragma: export +#include "tsl/platform/macros.h" +#include "tsl/platform/platform.h" +#include "tsl/platform/types.h" + +// Includes gmock.h and enables the use of gmock matchers in tensorflow tests. +// +// Test including this header can use the macros EXPECT_THAT(...) 
and +// ASSERT_THAT(...) in combination with gmock matchers. +// Example: +// std::vector vec = Foo(); +// EXPECT_THAT(vec, ::testing::ElementsAre(1,2,3)); +// EXPECT_THAT(vec, ::testing::UnorderedElementsAre(2,3,1)); +// +// For more details on gmock matchers see: +// https://github.com/google/googletest/blob/master/googlemock/docs/CheatSheet.md#matchers +// +// The advantages of using gmock matchers instead of self defined matchers are +// better error messages, more maintainable tests and more test coverage. +#if !defined(PLATFORM_GOOGLE) && !defined(PLATFORM_GOOGLE_ANDROID) && \ + !defined(PLATFORM_CHROMIUMOS) +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#endif +#include // IWYU pragma: export + +#define DISABLED_ON_GPU_ROCM(X) X +#if TENSORFLOW_USE_ROCM +#undef DISABLED_ON_GPU_ROCM +#define DISABLED_ON_GPU_ROCM(X) DISABLED_##X +#endif // TENSORFLOW_USE_ROCM + +namespace tsl { +namespace testing { + +// Return a temporary directory suitable for temporary testing files. +// +// Where possible, consider using Env::LocalTempFilename over this function. +std::string TmpDir(); + +// Returns the path to TensorFlow in the directory containing data +// dependencies. +// +// A better alternative would be making use if +// tensorflow/tsl/platform/resource_loader.h:GetDataDependencyFilepath. That +// function should do the right thing both within and outside of tests allowing +// avoiding test specific APIs. +std::string TensorFlowSrcRoot(); + +// Returns the path to XLA in the directory containing data +// dependencies. +std::string XlaSrcRoot(); + +// Returns the path to TSL in the directory containing data +// dependencies. +std::string TslSrcRoot(); + +// Return a random number generator seed to use in randomized tests. +// Returns the same value for the lifetime of the process. +int RandomSeed(); + +// Returns an unused port number, for use in multi-process testing. +// NOTE: This function is not thread-safe. 
+int PickUnusedPortOrDie(); + +// Constant which is false internally and true in open source. +inline constexpr bool kIsOpenSource = TSL_IS_IN_OSS; + +} // namespace testing +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_TEST_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/thread_annotations.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/thread_annotations.h new file mode 100644 index 0000000000000000000000000000000000000000..4f9604d6b695ce099e9e12207cfdffcbf4340348 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/thread_annotations.h @@ -0,0 +1,165 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// This header file contains the macro definitions for thread safety +// annotations that allow the developers to document the locking policies +// of their multi-threaded code. The annotations can also help program +// analysis tools to identify potential thread safety issues. +// +// The primary documentation on these annotations is external: +// http://clang.llvm.org/docs/ThreadSafetyAnalysis.html +// +// The annotations are implemented using compiler attributes. +// Using the macros defined here instead of the raw attributes allows +// for portability and future compatibility. 
+// +// When referring to mutexes in the arguments of the attributes, you should +// use variable names or more complex expressions (e.g. my_object->mutex_) +// that evaluate to a concrete mutex object whenever possible. If the mutex +// you want to refer to is not in scope, you may use a member pointer +// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object. +// + +#ifndef TENSORFLOW_TSL_PLATFORM_THREAD_ANNOTATIONS_H_ +#define TENSORFLOW_TSL_PLATFORM_THREAD_ANNOTATIONS_H_ + +// IWYU pragma: private, include "tsl/platform/thread_annotations.h" +// IWYU pragma: friend third_party/tensorflow/tsl/platform/thread_annotations.h + +#if defined(__clang__) && (!defined(SWIG)) +#define TF_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(x) __attribute__((x)) +#else +#define TF_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(x) // no-op +#endif + +// Document if a shared variable/field needs to be protected by a mutex. +// TF_GUARDED_BY allows the user to specify a particular mutex that should be +// held when accessing the annotated variable. GUARDED_VAR indicates that +// a shared variable is guarded by some unspecified mutex, for use in rare +// cases where a valid mutex expression cannot be specified. +#define TF_GUARDED_BY(x) TF_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(guarded_by(x)) +#define GUARDED_VAR // no-op + +// Document if the memory location pointed to by a pointer should be guarded +// by a mutex when dereferencing the pointer. PT_GUARDED_VAR is analogous to +// GUARDED_VAR. Note that a pointer variable to a shared memory location +// could itself be a shared variable. 
For example, if a shared global pointer +// q, which is guarded by mu1, points to a shared memory location that is +// guarded by mu2, q should be annotated as follows: +// int *q TF_GUARDED_BY(mu1) TF_PT_GUARDED_BY(mu2); +#define TF_PT_GUARDED_BY(x) \ + TF_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(pt_guarded_by(x)) +#define TF_PT_GUARDED_VAR // no-op + +// Document the acquisition order between locks that can be held +// simultaneously by a thread. For any two locks that need to be annotated +// to establish an acquisition order, only one of them needs the annotation. +// (i.e. You don't have to annotate both locks with both TF_ACQUIRED_AFTER +// and TF_ACQUIRED_BEFORE.) +#define TF_ACQUIRED_AFTER(...) \ + TF_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(acquired_after(__VA_ARGS__)) + +#define TF_ACQUIRED_BEFORE(...) \ + TF_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(acquired_before(__VA_ARGS__)) + +#define TF_ACQUIRE(...) \ + TF_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(acquire_capability(__VA_ARGS__)) + +#define TF_ACQUIRE_SHARED(...) \ + TF_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE( \ + acquire_shared_capability(__VA_ARGS__)) + +#define TF_RELEASE(...) \ + TF_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(release_capability(__VA_ARGS__)) + +// Document a function that expects a mutex to be held prior to entry. +// The mutex is expected to be held both on entry to and exit from the +// function. +#define TF_EXCLUSIVE_LOCKS_REQUIRED(...) \ + TF_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(exclusive_locks_required(__VA_ARGS__)) + +#define TF_SHARED_LOCKS_REQUIRED(...) \ + TF_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(shared_locks_required(__VA_ARGS__)) + +// Document the locks acquired in the body of the function. These locks +// cannot be held when calling this function (for instance, when the +// mutex implementation is non-reentrant). +#define TF_LOCKS_EXCLUDED(...) \ + TF_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(locks_excluded(__VA_ARGS__)) + +// Document a function that returns a mutex without acquiring it. 
For example, +// a public getter method that returns a pointer to a private mutex should +// be annotated with TF_LOCK_RETURNED. +#define TF_LOCK_RETURNED(x) \ + TF_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(lock_returned(x)) + +// Document if a class/type is a lockable type (such as the Mutex class). +#define TF_LOCKABLE TF_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(lockable) + +// Document if a class does RAII locking (such as the MutexLock class). +// The constructor should use LOCK_FUNCTION to specify the mutex that is +// acquired, and the destructor should use TF_UNLOCK_FUNCTION with no arguments; +// the analysis will assume that the destructor unlocks whatever the +// constructor locked. +#define TF_SCOPED_LOCKABLE \ + TF_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(scoped_lockable) + +// Document functions that acquire a lock in the body of a function, and do +// not release it. +#define TF_EXCLUSIVE_LOCK_FUNCTION(...) \ + TF_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(exclusive_lock_function(__VA_ARGS__)) + +#define TF_SHARED_LOCK_FUNCTION(...) \ + TF_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(shared_lock_function(__VA_ARGS__)) + +// Document functions that expect a lock to be held on entry to the function, +// and release it in the body of the function. +#define TF_UNLOCK_FUNCTION(...) \ + TF_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(unlock_function(__VA_ARGS__)) + +// Document functions that try to acquire a lock, and return success or failure +// (or a non-boolean value that can be interpreted as a boolean). +// The first argument should be true for functions that return true on success, +// or false for functions that return false on success. The second argument +// specifies the mutex that is locked on success. If unspecified, it is assumed +// to be 'this'. +#define TF_EXCLUSIVE_TRYLOCK_FUNCTION(...) \ + TF_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE( \ + exclusive_trylock_function(__VA_ARGS__)) + +#define TF_SHARED_TRYLOCK_FUNCTION(...) 
\ + TF_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(shared_trylock_function(__VA_ARGS__)) + +// Document functions that dynamically check to see if a lock is held, and fail +// if it is not held. +#define TF_ASSERT_EXCLUSIVE_LOCK(...) \ + TF_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(assert_exclusive_lock(__VA_ARGS__)) + +#define TF_ASSERT_SHARED_LOCK(...) \ + TF_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(assert_shared_lock(__VA_ARGS__)) + +// Turns off thread safety checking within the body of a particular function. +// This is used as an escape hatch for cases where either (a) the function +// is correct, but the locking is more complicated than the analyzer can handle, +// or (b) the function contains race conditions that are known to be benign. +#define TF_NO_THREAD_SAFETY_ANALYSIS \ + TF_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(no_thread_safety_analysis) + +// TF_TS_UNCHECKED should be placed around lock expressions that are not valid +// C++ syntax, but which are present for documentation purposes. These +// annotations will be ignored by the analysis. +#define TF_TS_UNCHECKED(x) "" + +#endif // TENSORFLOW_TSL_PLATFORM_THREAD_ANNOTATIONS_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/threadpool.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/threadpool.h new file mode 100644 index 0000000000000000000000000000000000000000..df650f6eccfd4cb3983543dc4a23349c3499e376 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/threadpool.h @@ -0,0 +1,245 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_THREADPOOL_H_ +#define TENSORFLOW_TSL_PLATFORM_THREADPOOL_H_ + +#include +#include + +#include "absl/types/optional.h" +#include "tsl/platform/env.h" +#include "tsl/platform/macros.h" +#include "tsl/platform/threadpool_interface.h" +#include "tsl/platform/types.h" + +namespace Eigen { +class Allocator; +class ThreadPoolInterface; +struct ThreadPoolDevice; + +template +class ThreadPoolTempl; +} // namespace Eigen + +namespace tsl { +namespace thread { + +struct EigenEnvironment; + +class ThreadPool { + public: + // Scheduling strategies for ParallelFor. The strategy governs how the given + // units of work are distributed among the available threads in the + // threadpool. + enum class SchedulingStrategy { + // The Adaptive scheduling strategy adaptively chooses the shard sizes based + // on the cost of each unit of work, and the cost model of the underlying + // threadpool device. + // + // The 'cost_per_unit' is an estimate of the number of CPU cycles (or + // nanoseconds if not CPU-bound) to complete a unit of work. Overestimating + // creates too many shards and CPU time will be dominated by per-shard + // overhead, such as Context creation. Underestimating may not fully make + // use of the specified parallelism, and may also cause inefficiencies due + // to load balancing issues and stragglers. + kAdaptive, + // The Fixed Block Size scheduling strategy shards the given units of work + // into shards of fixed size. 
In case the total number of units is not + // evenly divisible by 'block_size', at most one of the shards may be of + // smaller size. The exact number of shards may be found by a call to + // NumShardsUsedByFixedBlockSizeScheduling. + // + // Each shard may be executed on a different thread in parallel, depending + // on the number of threads available in the pool. Note that when there + // aren't enough threads in the pool to achieve full parallelism, function + // calls will be automatically queued. + kFixedBlockSize + }; + + // Contains additional parameters for either the Adaptive or the Fixed Block + // Size scheduling strategy. + class SchedulingParams { + public: + explicit SchedulingParams(SchedulingStrategy strategy, + absl::optional cost_per_unit, + absl::optional block_size) + : strategy_(strategy), + cost_per_unit_(cost_per_unit), + block_size_(block_size) {} + + SchedulingStrategy strategy() const { return strategy_; } + absl::optional cost_per_unit() const { return cost_per_unit_; } + absl::optional block_size() const { return block_size_; } + + private: + // The underlying Scheduling Strategy for which this instance contains + // additional parameters. + SchedulingStrategy strategy_; + + // The estimated cost per unit of work in number of CPU cycles (or + // nanoseconds if not CPU-bound). Only applicable for Adaptive scheduling + // strategy. + absl::optional cost_per_unit_; + + // The block size of each shard. Only applicable for Fixed Block Size + // scheduling strategy. + absl::optional block_size_; + }; + + // Constructs a pool that contains "num_threads" threads with specified + // "name". env->StartThread() is used to create individual threads with the + // given ThreadOptions. If "low_latency_hint" is true the thread pool + // implementation may use it as a hint that lower latency is preferred at the + // cost of higher CPU usage, e.g. by letting one or more idle threads spin + // wait. 
Conversely, if the threadpool is used to schedule high-latency + // operations like I/O the hint should be set to false. + // + // REQUIRES: num_threads > 0 + ThreadPool(Env* env, const ThreadOptions& thread_options, + const std::string& name, int num_threads, bool low_latency_hint, + Eigen::Allocator* allocator = nullptr); + + // Constructs a pool for low-latency ops that contains "num_threads" threads + // with specified "name". env->StartThread() is used to create individual + // threads. + // REQUIRES: num_threads > 0 + ThreadPool(Env* env, const std::string& name, int num_threads); + + // Constructs a pool for low-latency ops that contains "num_threads" threads + // with specified "name". env->StartThread() is used to create individual + // threads with the given ThreadOptions. + // REQUIRES: num_threads > 0 + ThreadPool(Env* env, const ThreadOptions& thread_options, + const std::string& name, int num_threads); + + // Constructs a pool that wraps around the thread::ThreadPoolInterface + // instance provided by the caller. Caller retains ownership of + // `user_threadpool` and must ensure its lifetime is longer than the + // ThreadPool instance. + explicit ThreadPool(thread::ThreadPoolInterface* user_threadpool); + + // Waits until all scheduled work has finished and then destroy the + // set of threads. + ~ThreadPool(); + + // Schedules fn() for execution in the pool of threads. + void Schedule(std::function fn); + + void SetStealPartitions( + const std::vector>& partitions); + + void ScheduleWithHint(std::function fn, int start, int limit); + + // Returns the number of shards used by ParallelForFixedBlockSizeScheduling + // with these parameters. + int NumShardsUsedByFixedBlockSizeScheduling(const int64_t total, + const int64_t block_size); + + // Returns the number of threads spawned by calling TransformRangeConcurrently + // with these parameters. + // Deprecated. Use NumShardsUsedByFixedBlockSizeScheduling. 
+ int NumShardsUsedByTransformRangeConcurrently(const int64_t block_size, + const int64_t total); + + // ParallelFor shards the "total" units of work assuming each unit of work + // having roughly "cost_per_unit" cost, in cycles. Each unit of work is + // indexed 0, 1, ..., total - 1. Each shard contains 1 or more units of work + // and the total cost of each shard is roughly the same. + // + // "cost_per_unit" is an estimate of the number of CPU cycles (or nanoseconds + // if not CPU-bound) to complete a unit of work. Overestimating creates too + // many shards and CPU time will be dominated by per-shard overhead, such as + // Context creation. Underestimating may not fully make use of the specified + // parallelism, and may also cause inefficiencies due to load balancing + // issues and stragglers. + void ParallelFor(int64_t total, int64_t cost_per_unit, + const std::function& fn); + + // Similar to ParallelFor above, but takes the specified scheduling strategy + // into account. + void ParallelFor(int64_t total, const SchedulingParams& scheduling_params, + const std::function& fn); + + // Same as ParallelFor with Fixed Block Size scheduling strategy. + // Deprecated. Prefer ParallelFor with a SchedulingStrategy argument. + void TransformRangeConcurrently( + const int64_t block_size, const int64_t total, + const std::function& fn); + + // Shards the "total" units of work. For more details, see "ParallelFor". + // + // The function is passed a thread_id between 0 and NumThreads() *inclusive*. + // This is because some work can happen on the caller thread while the threads + // in the pool are also being used. + // + // The caller can allocate NumThreads() + 1 separate buffers for each thread. + // Each thread can safely write to the buffer given by its id without + // synchronization. However, the worker fn may be called multiple times + // sequentially with the same id. 
+ // + // At most NumThreads() unique ids will actually be used, and only a few may + // be used for small workloads. If each buffer is expensive, the buffers + // should be stored in an array initially filled with null, and a buffer + // should be allocated by fn the first time that the id is used. + void ParallelForWithWorkerId( + int64_t total, int64_t cost_per_unit, + const std::function& fn); + + // Similar to ParallelForWithWorkerId above, but takes the specified + // scheduling strategy into account. + void ParallelForWithWorkerId( + int64_t total, const SchedulingParams& scheduling_params, + const std::function& fn); + + // Returns the number of threads in the pool. + int NumThreads() const; + + // Returns current thread id between 0 and NumThreads() - 1, if called from a + // thread in the pool. Returns -1 otherwise. + int CurrentThreadId() const; + + // If ThreadPool implementation is compatible with Eigen::ThreadPoolInterface, + // returns a non-null pointer. The caller does not own the object the returned + // pointer points to, and should not attempt to delete. + Eigen::ThreadPoolInterface* AsEigenThreadPool() const; + + private: + // Divides the work represented by the range [0, total) into k shards. + // Calls fn(i*block_size, (i+1)*block_size) from the ith shard (0 <= i < k). + // Each shard may be executed on a different thread in parallel, depending on + // the number of threads available in the pool. + // When (i+1)*block_size > total, fn(i*block_size, total) is called instead. + // Here, k = NumShardsUsedByFixedBlockSizeScheduling(total, block_size). + // Requires 0 < block_size <= total. + void ParallelForFixedBlockSizeScheduling( + const int64_t total, const int64_t block_size, + const std::function& fn); + + // underlying_threadpool_ is the user_threadpool if user_threadpool is + // provided in the constructor. Otherwise it is the eigen_threadpool_. 
+ Eigen::ThreadPoolInterface* underlying_threadpool_; + // eigen_threadpool_ is instantiated and owned by thread::ThreadPool if + // user_threadpool is not in the constructor. + std::unique_ptr> eigen_threadpool_; + std::unique_ptr threadpool_device_; + ThreadPool(const ThreadPool&) = delete; + void operator=(const ThreadPool&) = delete; +}; + +} // namespace thread +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_THREADPOOL_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/threadpool_interface.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/threadpool_interface.h new file mode 100644 index 0000000000000000000000000000000000000000..0dac04d5e7293d890ccd5dd37ce29286c1cf9b96 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/threadpool_interface.h @@ -0,0 +1,31 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_THREADPOOL_INTERFACE_H_ +#define TENSORFLOW_TSL_PLATFORM_THREADPOOL_INTERFACE_H_ + +#include "unsupported/Eigen/CXX11/ThreadPool" // from @eigen_archive +#include "tsl/platform/mutex.h" +#include "tsl/platform/types.h" + +namespace tsl { +namespace thread { + +class ThreadPoolInterface : public Eigen::ThreadPoolInterface {}; + +} // namespace thread +} // namespace tsl + +#endif // TENSORFLOW_TSL_PLATFORM_THREADPOOL_INTERFACE_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/tracing.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/tracing.h new file mode 100644 index 0000000000000000000000000000000000000000..90678dc5f60ada2f61a0931f5b93e874ca9907ed --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/tracing.h @@ -0,0 +1,150 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_TRACING_H_ +#define TENSORFLOW_TSL_PLATFORM_TRACING_H_ + +// Tracing interface + +#include + +#include "tsl/platform/macros.h" +#include "tsl/platform/platform.h" +#include "tsl/platform/stringpiece.h" +#include "tsl/platform/types.h" + +namespace tsl { +namespace tracing { + +// This enumeration contains the identifiers of all TensorFlow CPU profiler +// events. It must be kept in sync with the code in GetEventCategoryName(). +enum struct EventCategory : unsigned { + kScheduleClosure = 0, + kRunClosure = 1, + kCompute = 2, + kNumCategories = 3 // sentinel - keep last +}; +constexpr unsigned GetNumEventCategories() { + return static_cast(EventCategory::kNumCategories); +} +const char* GetEventCategoryName(EventCategory); + +// Interface for CPU profiler events. +class EventCollector { + public: + virtual ~EventCollector() {} + virtual void RecordEvent(uint64 arg) const = 0; + virtual void StartRegion(uint64 arg) const = 0; + virtual void StopRegion() const = 0; + + // Annotates the current thread with a name. + static void SetCurrentThreadName(const char* name); + // Returns whether event collection is enabled. + static bool IsEnabled(); + + private: + friend void SetEventCollector(EventCategory, const EventCollector*); + friend const EventCollector* GetEventCollector(EventCategory); + + static std::array instances_; +}; +// Set the callback for RecordEvent and ScopedRegion of category. +// Not thread safe. Only call while EventCollector::IsEnabled returns false. +void SetEventCollector(EventCategory category, const EventCollector* collector); + +// Returns the callback for RecordEvent and ScopedRegion of category if +// EventCollector::IsEnabled(), otherwise returns null. 
+inline const EventCollector* GetEventCollector(EventCategory category) { + if (EventCollector::IsEnabled()) { + return EventCollector::instances_[static_cast(category)]; + } + return nullptr; +} + +// Returns a unique id to pass to RecordEvent/ScopedRegion. Never returns zero. +uint64 GetUniqueArg(); + +// Returns an id for name to pass to RecordEvent/ScopedRegion. +uint64 GetArgForName(StringPiece name); + +// Records an atomic event through the currently registered EventCollector. +inline void RecordEvent(EventCategory category, uint64 arg) { + if (auto collector = GetEventCollector(category)) { + collector->RecordEvent(arg); + } +} + +// Records an event for the duration of the instance lifetime through the +// currently registered EventCollector. +class ScopedRegion { + public: + ScopedRegion(ScopedRegion&& other) noexcept // Move-constructible. + : collector_(other.collector_) { + other.collector_ = nullptr; + } + + ScopedRegion(EventCategory category, uint64 arg) + : collector_(GetEventCollector(category)) { + if (collector_) { + collector_->StartRegion(arg); + } + } + + // Same as ScopedRegion(category, GetUniqueArg()), but faster if + // EventCollector::IsEnabled() returns false. + explicit ScopedRegion(EventCategory category) + : collector_(GetEventCollector(category)) { + if (collector_) { + collector_->StartRegion(GetUniqueArg()); + } + } + + // Same as ScopedRegion(category, GetArgForName(name)), but faster if + // EventCollector::IsEnabled() returns false. 
+ ScopedRegion(EventCategory category, StringPiece name) + : collector_(GetEventCollector(category)) { + if (collector_) { + collector_->StartRegion(GetArgForName(name)); + } + } + + ~ScopedRegion() { + if (collector_) { + collector_->StopRegion(); + } + } + + bool IsEnabled() const { return collector_ != nullptr; } + + private: + ScopedRegion(const ScopedRegion&) = delete; + void operator=(const ScopedRegion&) = delete; + + const EventCollector* collector_; +}; + +// Return the pathname of the directory where we are writing log files. +const char* GetLogDir(); + +} // namespace tracing +} // namespace tsl + +#if defined(PLATFORM_GOOGLE) +#include "tsl/platform/google/tracing_impl.h" +#else +#include "tsl/platform/default/tracing_impl.h" +#endif + +#endif // TENSORFLOW_TSL_PLATFORM_TRACING_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/types.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/types.h new file mode 100644 index 0000000000000000000000000000000000000000..b480feff5b1cdeec47f2e9a57131c625a9a925e4 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/types.h @@ -0,0 +1,74 @@ +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_TYPES_H_ +#define TENSORFLOW_TSL_PLATFORM_TYPES_H_ + +#include + +#include "tsl/platform/bfloat16.h" +#include "tsl/platform/ml_dtypes.h" // IWYU pragma: export +#include "tsl/platform/platform.h" +#include "tsl/platform/tstring.h" + +// Include appropriate platform-dependent implementations +#if defined(PLATFORM_GOOGLE) || defined(GOOGLE_INTEGRAL_TYPES) +#include "tsl/platform/google/integral_types.h" // IWYU pragma: export +#elif defined(PLATFORM_POSIX) || defined(PLATFORM_POSIX_ANDROID) || \ + defined(PLATFORM_GOOGLE_ANDROID) || defined(PLATFORM_POSIX_IOS) || \ + defined(PLATFORM_GOOGLE_IOS) || defined(PLATFORM_WINDOWS) +#include "tsl/platform/default/integral_types.h" // IWYU pragma: export +#else +#error Define the appropriate PLATFORM_ macro for this platform +#endif + +namespace tsl { + +// Alias tsl::string to std::string. +using std::string; + +static const uint4 kuint4max = static_cast(0x0F); +static const uint8 kuint8max = static_cast(0xFF); +static const uint16 kuint16max = static_cast(0xFFFF); +static const uint32 kuint32max = static_cast(0xFFFFFFFF); +static const uint64 kuint64max = static_cast(0xFFFFFFFFFFFFFFFFull); +static const int8_t kint8min = static_cast(~0x7F); +static const int8_t kint8max = static_cast(0x7F); +static const int4 kint4min = static_cast(0x08); +static const int4 kint4max = static_cast(0x07); +static const int16_t kint16min = static_cast(~0x7FFF); +static const int16_t kint16max = static_cast(0x7FFF); +static const int32_t kint32min = static_cast(~0x7FFFFFFF); +static const int32_t kint32max = static_cast(0x7FFFFFFF); +static const int64_t kint64min = static_cast(~0x7FFFFFFFFFFFFFFFll); +static const int64_t kint64max = static_cast(0x7FFFFFFFFFFFFFFFll); + +// A typedef for a uint64 used as a short fingerprint. 
+using Fprint = uint64; + +} // namespace tsl + +// Alias namespace ::stream_executor as ::tensorflow::se. +namespace stream_executor {} +namespace tensorflow { +namespace se = ::stream_executor; +} // namespace tensorflow + +#if defined(PLATFORM_WINDOWS) +#include +typedef std::ptrdiff_t ssize_t; +#endif + +#endif // TENSORFLOW_TSL_PLATFORM_TYPES_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/unbounded_work_queue.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/unbounded_work_queue.h new file mode 100644 index 0000000000000000000000000000000000000000..6432daf7d02b6367a516e2b37fc8e924b51c32b7 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/platform/unbounded_work_queue.h @@ -0,0 +1,34 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_TSL_PLATFORM_UNBOUNDED_WORK_QUEUE_H_ +#define TENSORFLOW_TSL_PLATFORM_UNBOUNDED_WORK_QUEUE_H_ + +#include "tsl/platform/platform.h" + +// An `UnboundedWorkQueue` feeds potentially-blocking work into a thread-pool +// whose size automatically increases with demand. 
+ +#if defined(PLATFORM_GOOGLE) +#include "tsl/platform/google/unbounded_work_queue.h" // IWYU pragma: export +#elif defined(PLATFORM_POSIX) || defined(PLATFORM_POSIX_ANDROID) || \ + defined(PLATFORM_GOOGLE_ANDROID) || defined(PLATFORM_POSIX_IOS) || \ + defined(PLATFORM_GOOGLE_IOS) || defined(PLATFORM_WINDOWS) +#include "tsl/platform/default/unbounded_work_queue.h" // IWYU pragma: export +#else +#error Define the appropriate PLATFORM_ macro for this platform +#endif + +#endif // TENSORFLOW_TSL_PLATFORM_UNBOUNDED_WORK_QUEUE_H_ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/bfc_memory_map.proto b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/bfc_memory_map.proto new file mode 100644 index 0000000000000000000000000000000000000000..bca45cbf3f13190c26ff9bae909b091d0d0577b4 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/bfc_memory_map.proto @@ -0,0 +1,47 @@ +syntax = "proto3"; + +package tensorflow; + +option go_package = "github.com/google/tsl/tsl/go/protobuf/for_core_protos_go_proto"; + +// Some of the data from AllocatorStats +message MemAllocatorStats { + int64 num_allocs = 1; + int64 bytes_in_use = 2; + int64 peak_bytes_in_use = 3; + int64 largest_alloc_size = 4; + float fragmentation_metric = 5; +} + +message MemChunk { + uint64 address = 1; + int64 size = 2; + int64 requested_size = 3; + int32 bin = 4; + string op_name = 5; + uint64 freed_at_count = 6; + uint64 action_count = 7; + bool in_use = 8; + uint64 step_id = 9; +} + +message BinSummary { + int32 bin = 1; + int64 total_bytes_in_use = 2; + int64 total_bytes_in_bin = 3; + int64 total_chunks_in_use = 4; + int64 total_chunks_in_bin = 5; +} + +message SnapShot { + uint64 action_count = 1; + int64 size = 2; +} + +message MemoryDump { + string allocator_name = 1; + repeated BinSummary bin_summary = 2; + repeated MemChunk chunk = 3; + repeated SnapShot snap_shot = 4; + MemAllocatorStats stats = 5; +} diff 
--git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/coordination_service.grpc.pb.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/coordination_service.grpc.pb.h new file mode 100644 index 0000000000000000000000000000000000000000..cf8d4fbdded33cf181f0678ed0054fdebed21267 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/coordination_service.grpc.pb.h @@ -0,0 +1,3416 @@ +// Generated by the gRPC C++ plugin. +// If you make any local change, they will be lost. +// source: tsl/protobuf/coordination_service.proto +#ifndef GRPC_tsl_2fprotobuf_2fcoordination_5fservice_2eproto__INCLUDED +#define GRPC_tsl_2fprotobuf_2fcoordination_5fservice_2eproto__INCLUDED + +#include "tsl/protobuf/coordination_service.pb.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace tensorflow { + + +namespace grpc { + +// Coordination Service defines a TensorFlow service that controls and +// coordinates distributed execution in a cluster of multiple tasks. +// +// The service keeps track of the cluster configuration and the state of cluster +// members or the leader depending on the role of the current task. The +// distributed runtime leverages this service to coordinate and perform cluster +// initialization, check the healthiness of tasks, and propagate error +// messages to the cluster. +class CoordinationService final { + public: + static constexpr char const* service_full_name() { + return "tensorflow.CoordinationService"; + } + class StubInterface { + public: + virtual ~StubInterface() {} + // Register task to coordination service so that the service starts to track + // liveness of the task. RPC blocks and returns only when it registers to + // the service successfully, or error happens in the registering process. 
+ virtual ::grpc::Status RegisterTask(::grpc::ClientContext* context, const ::tensorflow::RegisterTaskRequest& request, ::tensorflow::RegisterTaskResponse* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::RegisterTaskResponse>> AsyncRegisterTask(::grpc::ClientContext* context, const ::tensorflow::RegisterTaskRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::RegisterTaskResponse>>(AsyncRegisterTaskRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::RegisterTaskResponse>> PrepareAsyncRegisterTask(::grpc::ClientContext* context, const ::tensorflow::RegisterTaskRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::RegisterTaskResponse>>(PrepareAsyncRegisterTaskRaw(context, request, cq)); + } + // [AUTOMATION]: Internal rpc option goes here. + // Heartbeat message from task to coordination service. Heartbeat is sent from + // a task to refresh its timestamp on leader to avoid it becoming stale. + // RPC responds immediately after refreshing the timestamp on leader. 
+ virtual ::grpc::Status Heartbeat(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest& request, ::tensorflow::HeartbeatResponse* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::HeartbeatResponse>> AsyncHeartbeat(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::HeartbeatResponse>>(AsyncHeartbeatRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::HeartbeatResponse>> PrepareAsyncHeartbeat(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::HeartbeatResponse>>(PrepareAsyncHeartbeatRaw(context, request, cq)); + } + // [AUTOMATION]: Internal rpc option goes here. + // Wait for all tasks in the cluster to be up and running. The RPC request + // only gets responded when all tasks have registered, or some error occurs. 
+ virtual ::grpc::Status WaitForAllTasks(::grpc::ClientContext* context, const ::tensorflow::WaitForAllTasksRequest& request, ::tensorflow::WaitForAllTasksResponse* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::WaitForAllTasksResponse>> AsyncWaitForAllTasks(::grpc::ClientContext* context, const ::tensorflow::WaitForAllTasksRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::WaitForAllTasksResponse>>(AsyncWaitForAllTasksRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::WaitForAllTasksResponse>> PrepareAsyncWaitForAllTasks(::grpc::ClientContext* context, const ::tensorflow::WaitForAllTasksRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::WaitForAllTasksResponse>>(PrepareAsyncWaitForAllTasksRaw(context, request, cq)); + } + // Disconnects task from the service. If `shutdown_barrier_timeout_in_ms` is + // specified in the config, blocks until all tasks reach the barrier before + // disconnecting together. If the barrier times out, tasks at the barrier will + // still disconnect, while an error is reported to tasks that did not reach + // the barrier on time. 
+ virtual ::grpc::Status ShutdownTask(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest& request, ::tensorflow::ShutdownTaskResponse* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ShutdownTaskResponse>> AsyncShutdownTask(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ShutdownTaskResponse>>(AsyncShutdownTaskRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ShutdownTaskResponse>> PrepareAsyncShutdownTask(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ShutdownTaskResponse>>(PrepareAsyncShutdownTaskRaw(context, request, cq)); + } + // [AUTOMATION]: Internal rpc option goes here. + // Disconnects task from the service if it is in an ERROR state, thereby + // allowing it to reconnect via RegisterTask() in the future. 
+ virtual ::grpc::Status ResetTask(::grpc::ClientContext* context, const ::tensorflow::ResetTaskRequest& request, ::tensorflow::ResetTaskResponse* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ResetTaskResponse>> AsyncResetTask(::grpc::ClientContext* context, const ::tensorflow::ResetTaskRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ResetTaskResponse>>(AsyncResetTaskRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ResetTaskResponse>> PrepareAsyncResetTask(::grpc::ClientContext* context, const ::tensorflow::ResetTaskRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ResetTaskResponse>>(PrepareAsyncResetTaskRaw(context, request, cq)); + } + // Report error to the task. RPC sets the receiving instance of coordination + // service agent to error state permanently. + // TODO(b/195990880): Consider splitting this into a different RPC service. 
+ virtual ::grpc::Status ReportErrorToTask(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToTaskRequest& request, ::tensorflow::ReportErrorToTaskResponse* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ReportErrorToTaskResponse>> AsyncReportErrorToTask(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToTaskRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ReportErrorToTaskResponse>>(AsyncReportErrorToTaskRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ReportErrorToTaskResponse>> PrepareAsyncReportErrorToTask(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToTaskRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ReportErrorToTaskResponse>>(PrepareAsyncReportErrorToTaskRaw(context, request, cq)); + } + // Report task error to coordination service. RPC sets the service-side task + // state to error, and propagate the error to other tasks in the cluster. 
+ virtual ::grpc::Status ReportErrorToService(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToServiceRequest& request, ::tensorflow::ReportErrorToServiceResponse* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ReportErrorToServiceResponse>> AsyncReportErrorToService(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToServiceRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ReportErrorToServiceResponse>>(AsyncReportErrorToServiceRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ReportErrorToServiceResponse>> PrepareAsyncReportErrorToService(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToServiceRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ReportErrorToServiceResponse>>(PrepareAsyncReportErrorToServiceRaw(context, request, cq)); + } + // Get the state of a remote task. Specifically, RPC returns a + // CoordinatedTaskState, and if the task is in an error status, returns a + // non-OK error code, non-empty error message and error payload. 
+ virtual ::grpc::Status GetTaskState(::grpc::ClientContext* context, const ::tensorflow::GetTaskStateRequest& request, ::tensorflow::GetTaskStateResponse* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetTaskStateResponse>> AsyncGetTaskState(::grpc::ClientContext* context, const ::tensorflow::GetTaskStateRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetTaskStateResponse>>(AsyncGetTaskStateRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetTaskStateResponse>> PrepareAsyncGetTaskState(::grpc::ClientContext* context, const ::tensorflow::GetTaskStateRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetTaskStateResponse>>(PrepareAsyncGetTaskStateRaw(context, request, cq)); + } + // Insert configuration key-value that will be accessible to all cluster + // tasks. The key can be formatted as Unix file path with hierarchy. The + // coordination service key-value store should only be used for cluster + // configuration data. 
+ virtual ::grpc::Status InsertKeyValue(::grpc::ClientContext* context, const ::tensorflow::InsertKeyValueRequest& request, ::tensorflow::InsertKeyValueResponse* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::InsertKeyValueResponse>> AsyncInsertKeyValue(::grpc::ClientContext* context, const ::tensorflow::InsertKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::InsertKeyValueResponse>>(AsyncInsertKeyValueRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::InsertKeyValueResponse>> PrepareAsyncInsertKeyValue(::grpc::ClientContext* context, const ::tensorflow::InsertKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::InsertKeyValueResponse>>(PrepareAsyncInsertKeyValueRaw(context, request, cq)); + } + // [AUTOMATION]: Internal rpc option goes here. + // Get configuration key-value. The request blocks until the key-value data + // becomes available (i.e., set by a task in the cluster). 
+ virtual ::grpc::Status GetKeyValue(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest& request, ::tensorflow::GetKeyValueResponse* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetKeyValueResponse>> AsyncGetKeyValue(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetKeyValueResponse>>(AsyncGetKeyValueRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetKeyValueResponse>> PrepareAsyncGetKeyValue(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetKeyValueResponse>>(PrepareAsyncGetKeyValueRaw(context, request, cq)); + } + // [AUTOMATION]: Internal rpc option goes here. + // Get configuration key-value. The request does not block, but returns an + // error if the requested key does not exist. 
+ virtual ::grpc::Status TryGetKeyValue(::grpc::ClientContext* context, const ::tensorflow::TryGetKeyValueRequest& request, ::tensorflow::TryGetKeyValueResponse* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::TryGetKeyValueResponse>> AsyncTryGetKeyValue(::grpc::ClientContext* context, const ::tensorflow::TryGetKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::TryGetKeyValueResponse>>(AsyncTryGetKeyValueRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::TryGetKeyValueResponse>> PrepareAsyncTryGetKeyValue(::grpc::ClientContext* context, const ::tensorflow::TryGetKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::TryGetKeyValueResponse>>(PrepareAsyncTryGetKeyValueRaw(context, request, cq)); + } + // Same as GetKeyValue, but returns all values that have keys which are + // prefixed with the directory key. 
+ virtual ::grpc::Status GetKeyValueDir(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest& request, ::tensorflow::GetKeyValueDirResponse* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetKeyValueDirResponse>> AsyncGetKeyValueDir(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetKeyValueDirResponse>>(AsyncGetKeyValueDirRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetKeyValueDirResponse>> PrepareAsyncGetKeyValueDir(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetKeyValueDirResponse>>(PrepareAsyncGetKeyValueDirRaw(context, request, cq)); + } + // [AUTOMATION]: Internal rpc option goes here. + // Delete configuration key-value. If is_directory is set in request, + // recursively clean up all key-values under the path specified by `key`. 
+ virtual ::grpc::Status DeleteKeyValue(::grpc::ClientContext* context, const ::tensorflow::DeleteKeyValueRequest& request, ::tensorflow::DeleteKeyValueResponse* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::DeleteKeyValueResponse>> AsyncDeleteKeyValue(::grpc::ClientContext* context, const ::tensorflow::DeleteKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::DeleteKeyValueResponse>>(AsyncDeleteKeyValueRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::DeleteKeyValueResponse>> PrepareAsyncDeleteKeyValue(::grpc::ClientContext* context, const ::tensorflow::DeleteKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::DeleteKeyValueResponse>>(PrepareAsyncDeleteKeyValueRaw(context, request, cq)); + } + // Blocks until all (or a subset of) tasks are at the barrier or the barrier + // fails. + // + // `barrier_id` should be unique across barriers. Once the barrier has passed + // or failed, subsequent calls will not block, and immediately respond with + // the previous response. + // + // The first WaitAtBarrier() call received by the service for a particular + // barrier id is special in that it determines the barrier deadline based on + // timeout duration. + // However, if subsequent calls by different agents specify a different set of + // `tasks` for the same `barrier_id`, the barrier will fail instantly. + // + // If no tasks are specified (default), the barrier will block for all the + // connected tasks. + // + // Possible service errors: + // - DeadlineExceeded: Timed out waiting for specified tasks at the barrier. + // Deadline is determined by the server timestamp when it receives the + // first WaitAtBarrier() + timeout duration. 
+ // - Cancelled: One of the tasks called CancelBarrier(). + // - Aborted: Service is shutting down. + // - Internal: Any participating task is in ERROR state. + // - InvalidArgument: (1) Conflicting tasks specified by different agents + // for the same barrier, (2) one of the participating tasks is not in + // the cluster, or (3) task making the request is not included in the + // list of participating tasks. + virtual ::grpc::Status Barrier(::grpc::ClientContext* context, const ::tensorflow::BarrierRequest& request, ::tensorflow::BarrierResponse* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::BarrierResponse>> AsyncBarrier(::grpc::ClientContext* context, const ::tensorflow::BarrierRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::BarrierResponse>>(AsyncBarrierRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::BarrierResponse>> PrepareAsyncBarrier(::grpc::ClientContext* context, const ::tensorflow::BarrierRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::BarrierResponse>>(PrepareAsyncBarrierRaw(context, request, cq)); + } + // [AUTOMATION]: Internal rpc option goes here. + // Aborts the barrier if it is ongoing. + // Current and future WaitAtBarrier() calls with the same id will return a + // CANCELLED error status. + // Possible service errors: + // - FailedPrecondition: Barrier has already been passed. 
+ virtual ::grpc::Status CancelBarrier(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest& request, ::tensorflow::CancelBarrierResponse* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::CancelBarrierResponse>> AsyncCancelBarrier(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::CancelBarrierResponse>>(AsyncCancelBarrierRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::CancelBarrierResponse>> PrepareAsyncCancelBarrier(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::CancelBarrierResponse>>(PrepareAsyncCancelBarrierRaw(context, request, cq)); + } + class experimental_async_interface { + public: + virtual ~experimental_async_interface() {} + // Register task to coordination service so that the service starts to track + // liveness of the task. RPC blocks and returns only when it registers to + // the service successfully, or error happens in the registering process. 
+ virtual void RegisterTask(::grpc::ClientContext* context, const ::tensorflow::RegisterTaskRequest* request, ::tensorflow::RegisterTaskResponse* response, std::function) = 0; + virtual void RegisterTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::RegisterTaskResponse* response, std::function) = 0; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void RegisterTask(::grpc::ClientContext* context, const ::tensorflow::RegisterTaskRequest* request, ::tensorflow::RegisterTaskResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void RegisterTask(::grpc::ClientContext* context, const ::tensorflow::RegisterTaskRequest* request, ::tensorflow::RegisterTaskResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void RegisterTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::RegisterTaskResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void RegisterTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::RegisterTaskResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + // [AUTOMATION]: Internal rpc option goes here. + // Heartbeat message from task to coordination service. Heartbeat is sent from + // a task to refresh its timestamp on leader to avoid it becoming stale. + // RPC responds immediately after refreshing the timestamp on leader. 
+ virtual void Heartbeat(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest* request, ::tensorflow::HeartbeatResponse* response, std::function) = 0; + virtual void Heartbeat(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::HeartbeatResponse* response, std::function) = 0; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void Heartbeat(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest* request, ::tensorflow::HeartbeatResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void Heartbeat(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest* request, ::tensorflow::HeartbeatResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void Heartbeat(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::HeartbeatResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void Heartbeat(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::HeartbeatResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + // [AUTOMATION]: Internal rpc option goes here. + // Wait for all tasks in the cluster to be up and running. The RPC request + // only gets responded when all tasks have registered, or some error occurs. 
+ virtual void WaitForAllTasks(::grpc::ClientContext* context, const ::tensorflow::WaitForAllTasksRequest* request, ::tensorflow::WaitForAllTasksResponse* response, std::function) = 0; + virtual void WaitForAllTasks(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::WaitForAllTasksResponse* response, std::function) = 0; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void WaitForAllTasks(::grpc::ClientContext* context, const ::tensorflow::WaitForAllTasksRequest* request, ::tensorflow::WaitForAllTasksResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void WaitForAllTasks(::grpc::ClientContext* context, const ::tensorflow::WaitForAllTasksRequest* request, ::tensorflow::WaitForAllTasksResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void WaitForAllTasks(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::WaitForAllTasksResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void WaitForAllTasks(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::WaitForAllTasksResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + // Disconnects task from the service. If `shutdown_barrier_timeout_in_ms` is + // specified in the config, blocks until all tasks reach the barrier before + // disconnecting together. If the barrier times out, tasks at the barrier will + // still disconnect, while an error is reported to tasks that did not reach + // the barrier on time. 
+ virtual void ShutdownTask(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest* request, ::tensorflow::ShutdownTaskResponse* response, std::function) = 0; + virtual void ShutdownTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ShutdownTaskResponse* response, std::function) = 0; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void ShutdownTask(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest* request, ::tensorflow::ShutdownTaskResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void ShutdownTask(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest* request, ::tensorflow::ShutdownTaskResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void ShutdownTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ShutdownTaskResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void ShutdownTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ShutdownTaskResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + // [AUTOMATION]: Internal rpc option goes here. + // Disconnects task from the service if it is in an ERROR state, thereby + // allowing it to reconnect via RegisterTask() in the future. 
+ virtual void ResetTask(::grpc::ClientContext* context, const ::tensorflow::ResetTaskRequest* request, ::tensorflow::ResetTaskResponse* response, std::function) = 0; + virtual void ResetTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ResetTaskResponse* response, std::function) = 0; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void ResetTask(::grpc::ClientContext* context, const ::tensorflow::ResetTaskRequest* request, ::tensorflow::ResetTaskResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void ResetTask(::grpc::ClientContext* context, const ::tensorflow::ResetTaskRequest* request, ::tensorflow::ResetTaskResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void ResetTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ResetTaskResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void ResetTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ResetTaskResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + // Report error to the task. RPC sets the receiving instance of coordination + // service agent to error state permanently. + // TODO(b/195990880): Consider splitting this into a different RPC service. 
+ virtual void ReportErrorToTask(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToTaskRequest* request, ::tensorflow::ReportErrorToTaskResponse* response, std::function) = 0; + virtual void ReportErrorToTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ReportErrorToTaskResponse* response, std::function) = 0; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void ReportErrorToTask(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToTaskRequest* request, ::tensorflow::ReportErrorToTaskResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void ReportErrorToTask(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToTaskRequest* request, ::tensorflow::ReportErrorToTaskResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void ReportErrorToTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ReportErrorToTaskResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void ReportErrorToTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ReportErrorToTaskResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + // Report task error to coordination service. RPC sets the service-side task + // state to error, and propagate the error to other tasks in the cluster. 
+ virtual void ReportErrorToService(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToServiceRequest* request, ::tensorflow::ReportErrorToServiceResponse* response, std::function) = 0; + virtual void ReportErrorToService(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ReportErrorToServiceResponse* response, std::function) = 0; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void ReportErrorToService(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToServiceRequest* request, ::tensorflow::ReportErrorToServiceResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void ReportErrorToService(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToServiceRequest* request, ::tensorflow::ReportErrorToServiceResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void ReportErrorToService(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ReportErrorToServiceResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void ReportErrorToService(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ReportErrorToServiceResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + // Get the state of a remote task. Specifically, RPC returns a + // CoordinatedTaskState, and if the task is in an error status, returns a + // non-OK error code, non-empty error message and error payload. 
+ virtual void GetTaskState(::grpc::ClientContext* context, const ::tensorflow::GetTaskStateRequest* request, ::tensorflow::GetTaskStateResponse* response, std::function) = 0; + virtual void GetTaskState(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetTaskStateResponse* response, std::function) = 0; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void GetTaskState(::grpc::ClientContext* context, const ::tensorflow::GetTaskStateRequest* request, ::tensorflow::GetTaskStateResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void GetTaskState(::grpc::ClientContext* context, const ::tensorflow::GetTaskStateRequest* request, ::tensorflow::GetTaskStateResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void GetTaskState(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetTaskStateResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void GetTaskState(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetTaskStateResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + // Insert configuration key-value that will be accessible to all cluster + // tasks. The key can be formatted as Unix file path with hierarchy. The + // coordination service key-value store should only be used for cluster + // configuration data. 
+ virtual void InsertKeyValue(::grpc::ClientContext* context, const ::tensorflow::InsertKeyValueRequest* request, ::tensorflow::InsertKeyValueResponse* response, std::function) = 0; + virtual void InsertKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::InsertKeyValueResponse* response, std::function) = 0; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void InsertKeyValue(::grpc::ClientContext* context, const ::tensorflow::InsertKeyValueRequest* request, ::tensorflow::InsertKeyValueResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void InsertKeyValue(::grpc::ClientContext* context, const ::tensorflow::InsertKeyValueRequest* request, ::tensorflow::InsertKeyValueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void InsertKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::InsertKeyValueResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void InsertKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::InsertKeyValueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + // [AUTOMATION]: Internal rpc option goes here. + // Get configuration key-value. The request blocks until the key-value data + // becomes available (i.e., set by a task in the cluster). 
+ virtual void GetKeyValue(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest* request, ::tensorflow::GetKeyValueResponse* response, std::function) = 0; + virtual void GetKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetKeyValueResponse* response, std::function) = 0; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void GetKeyValue(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest* request, ::tensorflow::GetKeyValueResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void GetKeyValue(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest* request, ::tensorflow::GetKeyValueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void GetKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetKeyValueResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void GetKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetKeyValueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + // [AUTOMATION]: Internal rpc option goes here. + // Get configuration key-value. The request does not block, but returns an + // error if the requested key does not exist. 
+ virtual void TryGetKeyValue(::grpc::ClientContext* context, const ::tensorflow::TryGetKeyValueRequest* request, ::tensorflow::TryGetKeyValueResponse* response, std::function) = 0; + virtual void TryGetKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::TryGetKeyValueResponse* response, std::function) = 0; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void TryGetKeyValue(::grpc::ClientContext* context, const ::tensorflow::TryGetKeyValueRequest* request, ::tensorflow::TryGetKeyValueResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void TryGetKeyValue(::grpc::ClientContext* context, const ::tensorflow::TryGetKeyValueRequest* request, ::tensorflow::TryGetKeyValueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void TryGetKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::TryGetKeyValueResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void TryGetKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::TryGetKeyValueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + // Same as GetKeyValue, but returns all values that have keys which are + // prefixed with the directory key. 
+ virtual void GetKeyValueDir(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest* request, ::tensorflow::GetKeyValueDirResponse* response, std::function) = 0; + virtual void GetKeyValueDir(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetKeyValueDirResponse* response, std::function) = 0; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void GetKeyValueDir(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest* request, ::tensorflow::GetKeyValueDirResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void GetKeyValueDir(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest* request, ::tensorflow::GetKeyValueDirResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void GetKeyValueDir(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetKeyValueDirResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void GetKeyValueDir(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetKeyValueDirResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + // [AUTOMATION]: Internal rpc option goes here. + // Delete configuration key-value. If is_directory is set in request, + // recursively clean up all key-values under the path specified by `key`. 
+ virtual void DeleteKeyValue(::grpc::ClientContext* context, const ::tensorflow::DeleteKeyValueRequest* request, ::tensorflow::DeleteKeyValueResponse* response, std::function) = 0; + virtual void DeleteKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::DeleteKeyValueResponse* response, std::function) = 0; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void DeleteKeyValue(::grpc::ClientContext* context, const ::tensorflow::DeleteKeyValueRequest* request, ::tensorflow::DeleteKeyValueResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void DeleteKeyValue(::grpc::ClientContext* context, const ::tensorflow::DeleteKeyValueRequest* request, ::tensorflow::DeleteKeyValueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void DeleteKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::DeleteKeyValueResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void DeleteKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::DeleteKeyValueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + // Blocks until all (or a subset of) tasks are at the barrier or the barrier + // fails. + // + // `barrier_id` should be unique across barriers. Once the barrier has passed + // or failed, subsequent calls will not block, and immediately respond with + // the previous response. + // + // The first WaitAtBarrier() call received by the service for a particular + // barrier id is special in that it determines the barrier deadline based on + // timeout duration. + // However, if subsequent calls by different agents specify a different set of + // `tasks` for the same `barrier_id`, the barrier will fail instantly. 
+ // + // If no tasks are specified (default), the barrier will block for all the + // connected tasks. + // + // Possible service errors: + // - DeadlineExceeded: Timed out waiting for specified tasks at the barrier. + // Deadline is determined by the server timestamp when it receives the + // first WaitAtBarrier() + timeout duration. + // - Cancelled: One of the tasks called CancelBarrier(). + // - Aborted: Service is shutting down. + // - Internal: Any participating task is in ERROR state. + // - InvalidArgument: (1) Conflicting tasks specified by different agents + // for the same barrier, (2) one of the participating tasks is not in + // the cluster, or (3) task making the request is not included in the + // list of participating tasks. + virtual void Barrier(::grpc::ClientContext* context, const ::tensorflow::BarrierRequest* request, ::tensorflow::BarrierResponse* response, std::function) = 0; + virtual void Barrier(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::BarrierResponse* response, std::function) = 0; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void Barrier(::grpc::ClientContext* context, const ::tensorflow::BarrierRequest* request, ::tensorflow::BarrierResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void Barrier(::grpc::ClientContext* context, const ::tensorflow::BarrierRequest* request, ::tensorflow::BarrierResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void Barrier(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::BarrierResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void Barrier(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::BarrierResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + // [AUTOMATION]: Internal rpc option goes here. 
+ // Aborts the barrier if it is ongoing. + // Current and future WaitAtBarrier() calls with the same id will return a + // CANCELLED error status. + // Possible service errors: + // - FailedPrecondition: Barrier has already been passed. + virtual void CancelBarrier(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest* request, ::tensorflow::CancelBarrierResponse* response, std::function) = 0; + virtual void CancelBarrier(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::CancelBarrierResponse* response, std::function) = 0; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void CancelBarrier(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest* request, ::tensorflow::CancelBarrierResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void CancelBarrier(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest* request, ::tensorflow::CancelBarrierResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual void CancelBarrier(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::CancelBarrierResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + #else + virtual void CancelBarrier(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::CancelBarrierResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + #endif + }; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + typedef class experimental_async_interface async_interface; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + async_interface* async() { return experimental_async(); } + #endif + virtual class experimental_async_interface* experimental_async() { return nullptr; } + private: + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::RegisterTaskResponse>* AsyncRegisterTaskRaw(::grpc::ClientContext* context, const 
::tensorflow::RegisterTaskRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::RegisterTaskResponse>* PrepareAsyncRegisterTaskRaw(::grpc::ClientContext* context, const ::tensorflow::RegisterTaskRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::HeartbeatResponse>* AsyncHeartbeatRaw(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::HeartbeatResponse>* PrepareAsyncHeartbeatRaw(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::WaitForAllTasksResponse>* AsyncWaitForAllTasksRaw(::grpc::ClientContext* context, const ::tensorflow::WaitForAllTasksRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::WaitForAllTasksResponse>* PrepareAsyncWaitForAllTasksRaw(::grpc::ClientContext* context, const ::tensorflow::WaitForAllTasksRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ShutdownTaskResponse>* AsyncShutdownTaskRaw(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ShutdownTaskResponse>* PrepareAsyncShutdownTaskRaw(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ResetTaskResponse>* AsyncResetTaskRaw(::grpc::ClientContext* context, const ::tensorflow::ResetTaskRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< 
::tensorflow::ResetTaskResponse>* PrepareAsyncResetTaskRaw(::grpc::ClientContext* context, const ::tensorflow::ResetTaskRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ReportErrorToTaskResponse>* AsyncReportErrorToTaskRaw(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToTaskRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ReportErrorToTaskResponse>* PrepareAsyncReportErrorToTaskRaw(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToTaskRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ReportErrorToServiceResponse>* AsyncReportErrorToServiceRaw(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToServiceRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ReportErrorToServiceResponse>* PrepareAsyncReportErrorToServiceRaw(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToServiceRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetTaskStateResponse>* AsyncGetTaskStateRaw(::grpc::ClientContext* context, const ::tensorflow::GetTaskStateRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetTaskStateResponse>* PrepareAsyncGetTaskStateRaw(::grpc::ClientContext* context, const ::tensorflow::GetTaskStateRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::InsertKeyValueResponse>* AsyncInsertKeyValueRaw(::grpc::ClientContext* context, const ::tensorflow::InsertKeyValueRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::InsertKeyValueResponse>* 
PrepareAsyncInsertKeyValueRaw(::grpc::ClientContext* context, const ::tensorflow::InsertKeyValueRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetKeyValueResponse>* AsyncGetKeyValueRaw(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetKeyValueResponse>* PrepareAsyncGetKeyValueRaw(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::TryGetKeyValueResponse>* AsyncTryGetKeyValueRaw(::grpc::ClientContext* context, const ::tensorflow::TryGetKeyValueRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::TryGetKeyValueResponse>* PrepareAsyncTryGetKeyValueRaw(::grpc::ClientContext* context, const ::tensorflow::TryGetKeyValueRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetKeyValueDirResponse>* AsyncGetKeyValueDirRaw(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::GetKeyValueDirResponse>* PrepareAsyncGetKeyValueDirRaw(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::DeleteKeyValueResponse>* AsyncDeleteKeyValueRaw(::grpc::ClientContext* context, const ::tensorflow::DeleteKeyValueRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::DeleteKeyValueResponse>* PrepareAsyncDeleteKeyValueRaw(::grpc::ClientContext* context, const ::tensorflow::DeleteKeyValueRequest& request, 
::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::BarrierResponse>* AsyncBarrierRaw(::grpc::ClientContext* context, const ::tensorflow::BarrierRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::BarrierResponse>* PrepareAsyncBarrierRaw(::grpc::ClientContext* context, const ::tensorflow::BarrierRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::CancelBarrierResponse>* AsyncCancelBarrierRaw(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::CancelBarrierResponse>* PrepareAsyncCancelBarrierRaw(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest& request, ::grpc::CompletionQueue* cq) = 0; + }; + class Stub final : public StubInterface { + public: + Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel); + ::grpc::Status RegisterTask(::grpc::ClientContext* context, const ::tensorflow::RegisterTaskRequest& request, ::tensorflow::RegisterTaskResponse* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::RegisterTaskResponse>> AsyncRegisterTask(::grpc::ClientContext* context, const ::tensorflow::RegisterTaskRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::RegisterTaskResponse>>(AsyncRegisterTaskRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::RegisterTaskResponse>> PrepareAsyncRegisterTask(::grpc::ClientContext* context, const ::tensorflow::RegisterTaskRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::RegisterTaskResponse>>(PrepareAsyncRegisterTaskRaw(context, request, cq)); + } + ::grpc::Status 
Heartbeat(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest& request, ::tensorflow::HeartbeatResponse* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::HeartbeatResponse>> AsyncHeartbeat(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::HeartbeatResponse>>(AsyncHeartbeatRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::HeartbeatResponse>> PrepareAsyncHeartbeat(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::HeartbeatResponse>>(PrepareAsyncHeartbeatRaw(context, request, cq)); + } + ::grpc::Status WaitForAllTasks(::grpc::ClientContext* context, const ::tensorflow::WaitForAllTasksRequest& request, ::tensorflow::WaitForAllTasksResponse* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::WaitForAllTasksResponse>> AsyncWaitForAllTasks(::grpc::ClientContext* context, const ::tensorflow::WaitForAllTasksRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::WaitForAllTasksResponse>>(AsyncWaitForAllTasksRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::WaitForAllTasksResponse>> PrepareAsyncWaitForAllTasks(::grpc::ClientContext* context, const ::tensorflow::WaitForAllTasksRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::WaitForAllTasksResponse>>(PrepareAsyncWaitForAllTasksRaw(context, request, cq)); + } + ::grpc::Status ShutdownTask(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest& request, ::tensorflow::ShutdownTaskResponse* response) override; + 
std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::ShutdownTaskResponse>> AsyncShutdownTask(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::ShutdownTaskResponse>>(AsyncShutdownTaskRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::ShutdownTaskResponse>> PrepareAsyncShutdownTask(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::ShutdownTaskResponse>>(PrepareAsyncShutdownTaskRaw(context, request, cq)); + } + ::grpc::Status ResetTask(::grpc::ClientContext* context, const ::tensorflow::ResetTaskRequest& request, ::tensorflow::ResetTaskResponse* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::ResetTaskResponse>> AsyncResetTask(::grpc::ClientContext* context, const ::tensorflow::ResetTaskRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::ResetTaskResponse>>(AsyncResetTaskRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::ResetTaskResponse>> PrepareAsyncResetTask(::grpc::ClientContext* context, const ::tensorflow::ResetTaskRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::ResetTaskResponse>>(PrepareAsyncResetTaskRaw(context, request, cq)); + } + ::grpc::Status ReportErrorToTask(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToTaskRequest& request, ::tensorflow::ReportErrorToTaskResponse* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::ReportErrorToTaskResponse>> AsyncReportErrorToTask(::grpc::ClientContext* context, const 
::tensorflow::ReportErrorToTaskRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::ReportErrorToTaskResponse>>(AsyncReportErrorToTaskRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::ReportErrorToTaskResponse>> PrepareAsyncReportErrorToTask(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToTaskRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::ReportErrorToTaskResponse>>(PrepareAsyncReportErrorToTaskRaw(context, request, cq)); + } + ::grpc::Status ReportErrorToService(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToServiceRequest& request, ::tensorflow::ReportErrorToServiceResponse* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::ReportErrorToServiceResponse>> AsyncReportErrorToService(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToServiceRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::ReportErrorToServiceResponse>>(AsyncReportErrorToServiceRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::ReportErrorToServiceResponse>> PrepareAsyncReportErrorToService(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToServiceRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::ReportErrorToServiceResponse>>(PrepareAsyncReportErrorToServiceRaw(context, request, cq)); + } + ::grpc::Status GetTaskState(::grpc::ClientContext* context, const ::tensorflow::GetTaskStateRequest& request, ::tensorflow::GetTaskStateResponse* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::GetTaskStateResponse>> AsyncGetTaskState(::grpc::ClientContext* context, const 
::tensorflow::GetTaskStateRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::GetTaskStateResponse>>(AsyncGetTaskStateRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::GetTaskStateResponse>> PrepareAsyncGetTaskState(::grpc::ClientContext* context, const ::tensorflow::GetTaskStateRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::GetTaskStateResponse>>(PrepareAsyncGetTaskStateRaw(context, request, cq)); + } + ::grpc::Status InsertKeyValue(::grpc::ClientContext* context, const ::tensorflow::InsertKeyValueRequest& request, ::tensorflow::InsertKeyValueResponse* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::InsertKeyValueResponse>> AsyncInsertKeyValue(::grpc::ClientContext* context, const ::tensorflow::InsertKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::InsertKeyValueResponse>>(AsyncInsertKeyValueRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::InsertKeyValueResponse>> PrepareAsyncInsertKeyValue(::grpc::ClientContext* context, const ::tensorflow::InsertKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::InsertKeyValueResponse>>(PrepareAsyncInsertKeyValueRaw(context, request, cq)); + } + ::grpc::Status GetKeyValue(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest& request, ::tensorflow::GetKeyValueResponse* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::GetKeyValueResponse>> AsyncGetKeyValue(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< 
::tensorflow::GetKeyValueResponse>>(AsyncGetKeyValueRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::GetKeyValueResponse>> PrepareAsyncGetKeyValue(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::GetKeyValueResponse>>(PrepareAsyncGetKeyValueRaw(context, request, cq)); + } + ::grpc::Status TryGetKeyValue(::grpc::ClientContext* context, const ::tensorflow::TryGetKeyValueRequest& request, ::tensorflow::TryGetKeyValueResponse* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::TryGetKeyValueResponse>> AsyncTryGetKeyValue(::grpc::ClientContext* context, const ::tensorflow::TryGetKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::TryGetKeyValueResponse>>(AsyncTryGetKeyValueRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::TryGetKeyValueResponse>> PrepareAsyncTryGetKeyValue(::grpc::ClientContext* context, const ::tensorflow::TryGetKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::TryGetKeyValueResponse>>(PrepareAsyncTryGetKeyValueRaw(context, request, cq)); + } + ::grpc::Status GetKeyValueDir(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest& request, ::tensorflow::GetKeyValueDirResponse* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::GetKeyValueDirResponse>> AsyncGetKeyValueDir(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::GetKeyValueDirResponse>>(AsyncGetKeyValueDirRaw(context, request, cq)); + } + std::unique_ptr< 
::grpc::ClientAsyncResponseReader< ::tensorflow::GetKeyValueDirResponse>> PrepareAsyncGetKeyValueDir(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::GetKeyValueDirResponse>>(PrepareAsyncGetKeyValueDirRaw(context, request, cq)); + } + ::grpc::Status DeleteKeyValue(::grpc::ClientContext* context, const ::tensorflow::DeleteKeyValueRequest& request, ::tensorflow::DeleteKeyValueResponse* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::DeleteKeyValueResponse>> AsyncDeleteKeyValue(::grpc::ClientContext* context, const ::tensorflow::DeleteKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::DeleteKeyValueResponse>>(AsyncDeleteKeyValueRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::DeleteKeyValueResponse>> PrepareAsyncDeleteKeyValue(::grpc::ClientContext* context, const ::tensorflow::DeleteKeyValueRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::DeleteKeyValueResponse>>(PrepareAsyncDeleteKeyValueRaw(context, request, cq)); + } + ::grpc::Status Barrier(::grpc::ClientContext* context, const ::tensorflow::BarrierRequest& request, ::tensorflow::BarrierResponse* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::BarrierResponse>> AsyncBarrier(::grpc::ClientContext* context, const ::tensorflow::BarrierRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::BarrierResponse>>(AsyncBarrierRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::BarrierResponse>> PrepareAsyncBarrier(::grpc::ClientContext* context, const ::tensorflow::BarrierRequest& request, 
::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::BarrierResponse>>(PrepareAsyncBarrierRaw(context, request, cq)); + } + ::grpc::Status CancelBarrier(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest& request, ::tensorflow::CancelBarrierResponse* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::CancelBarrierResponse>> AsyncCancelBarrier(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::CancelBarrierResponse>>(AsyncCancelBarrierRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::CancelBarrierResponse>> PrepareAsyncCancelBarrier(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::CancelBarrierResponse>>(PrepareAsyncCancelBarrierRaw(context, request, cq)); + } + class experimental_async final : + public StubInterface::experimental_async_interface { + public: + void RegisterTask(::grpc::ClientContext* context, const ::tensorflow::RegisterTaskRequest* request, ::tensorflow::RegisterTaskResponse* response, std::function) override; + void RegisterTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::RegisterTaskResponse* response, std::function) override; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void RegisterTask(::grpc::ClientContext* context, const ::tensorflow::RegisterTaskRequest* request, ::tensorflow::RegisterTaskResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void RegisterTask(::grpc::ClientContext* context, const ::tensorflow::RegisterTaskRequest* request, ::tensorflow::RegisterTaskResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + 
#endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void RegisterTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::RegisterTaskResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void RegisterTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::RegisterTaskResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + void Heartbeat(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest* request, ::tensorflow::HeartbeatResponse* response, std::function) override; + void Heartbeat(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::HeartbeatResponse* response, std::function) override; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void Heartbeat(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest* request, ::tensorflow::HeartbeatResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void Heartbeat(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest* request, ::tensorflow::HeartbeatResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void Heartbeat(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::HeartbeatResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void Heartbeat(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::HeartbeatResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + void WaitForAllTasks(::grpc::ClientContext* context, const ::tensorflow::WaitForAllTasksRequest* request, ::tensorflow::WaitForAllTasksResponse* response, std::function) override; + void WaitForAllTasks(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::WaitForAllTasksResponse* response, std::function) override; + #ifdef 
GRPC_CALLBACK_API_NONEXPERIMENTAL + void WaitForAllTasks(::grpc::ClientContext* context, const ::tensorflow::WaitForAllTasksRequest* request, ::tensorflow::WaitForAllTasksResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void WaitForAllTasks(::grpc::ClientContext* context, const ::tensorflow::WaitForAllTasksRequest* request, ::tensorflow::WaitForAllTasksResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void WaitForAllTasks(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::WaitForAllTasksResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void WaitForAllTasks(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::WaitForAllTasksResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + void ShutdownTask(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest* request, ::tensorflow::ShutdownTaskResponse* response, std::function) override; + void ShutdownTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ShutdownTaskResponse* response, std::function) override; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void ShutdownTask(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest* request, ::tensorflow::ShutdownTaskResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void ShutdownTask(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest* request, ::tensorflow::ShutdownTaskResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void ShutdownTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ShutdownTaskResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void ShutdownTask(::grpc::ClientContext* context, 
const ::grpc::ByteBuffer* request, ::tensorflow::ShutdownTaskResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + void ResetTask(::grpc::ClientContext* context, const ::tensorflow::ResetTaskRequest* request, ::tensorflow::ResetTaskResponse* response, std::function) override; + void ResetTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ResetTaskResponse* response, std::function) override; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void ResetTask(::grpc::ClientContext* context, const ::tensorflow::ResetTaskRequest* request, ::tensorflow::ResetTaskResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void ResetTask(::grpc::ClientContext* context, const ::tensorflow::ResetTaskRequest* request, ::tensorflow::ResetTaskResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void ResetTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ResetTaskResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void ResetTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ResetTaskResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + void ReportErrorToTask(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToTaskRequest* request, ::tensorflow::ReportErrorToTaskResponse* response, std::function) override; + void ReportErrorToTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ReportErrorToTaskResponse* response, std::function) override; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void ReportErrorToTask(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToTaskRequest* request, ::tensorflow::ReportErrorToTaskResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void ReportErrorToTask(::grpc::ClientContext* 
context, const ::tensorflow::ReportErrorToTaskRequest* request, ::tensorflow::ReportErrorToTaskResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void ReportErrorToTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ReportErrorToTaskResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void ReportErrorToTask(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ReportErrorToTaskResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + void ReportErrorToService(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToServiceRequest* request, ::tensorflow::ReportErrorToServiceResponse* response, std::function) override; + void ReportErrorToService(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ReportErrorToServiceResponse* response, std::function) override; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void ReportErrorToService(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToServiceRequest* request, ::tensorflow::ReportErrorToServiceResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void ReportErrorToService(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToServiceRequest* request, ::tensorflow::ReportErrorToServiceResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void ReportErrorToService(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ReportErrorToServiceResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void ReportErrorToService(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ReportErrorToServiceResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + void 
GetTaskState(::grpc::ClientContext* context, const ::tensorflow::GetTaskStateRequest* request, ::tensorflow::GetTaskStateResponse* response, std::function) override; + void GetTaskState(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetTaskStateResponse* response, std::function) override; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void GetTaskState(::grpc::ClientContext* context, const ::tensorflow::GetTaskStateRequest* request, ::tensorflow::GetTaskStateResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void GetTaskState(::grpc::ClientContext* context, const ::tensorflow::GetTaskStateRequest* request, ::tensorflow::GetTaskStateResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void GetTaskState(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetTaskStateResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void GetTaskState(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetTaskStateResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + void InsertKeyValue(::grpc::ClientContext* context, const ::tensorflow::InsertKeyValueRequest* request, ::tensorflow::InsertKeyValueResponse* response, std::function) override; + void InsertKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::InsertKeyValueResponse* response, std::function) override; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void InsertKeyValue(::grpc::ClientContext* context, const ::tensorflow::InsertKeyValueRequest* request, ::tensorflow::InsertKeyValueResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void InsertKeyValue(::grpc::ClientContext* context, const ::tensorflow::InsertKeyValueRequest* request, ::tensorflow::InsertKeyValueResponse* response, 
::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void InsertKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::InsertKeyValueResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void InsertKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::InsertKeyValueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + void GetKeyValue(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest* request, ::tensorflow::GetKeyValueResponse* response, std::function) override; + void GetKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetKeyValueResponse* response, std::function) override; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void GetKeyValue(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest* request, ::tensorflow::GetKeyValueResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void GetKeyValue(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest* request, ::tensorflow::GetKeyValueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void GetKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetKeyValueResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void GetKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetKeyValueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + void TryGetKeyValue(::grpc::ClientContext* context, const ::tensorflow::TryGetKeyValueRequest* request, ::tensorflow::TryGetKeyValueResponse* response, std::function) override; + void TryGetKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, 
::tensorflow::TryGetKeyValueResponse* response, std::function) override; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void TryGetKeyValue(::grpc::ClientContext* context, const ::tensorflow::TryGetKeyValueRequest* request, ::tensorflow::TryGetKeyValueResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void TryGetKeyValue(::grpc::ClientContext* context, const ::tensorflow::TryGetKeyValueRequest* request, ::tensorflow::TryGetKeyValueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void TryGetKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::TryGetKeyValueResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void TryGetKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::TryGetKeyValueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + void GetKeyValueDir(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest* request, ::tensorflow::GetKeyValueDirResponse* response, std::function) override; + void GetKeyValueDir(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetKeyValueDirResponse* response, std::function) override; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void GetKeyValueDir(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest* request, ::tensorflow::GetKeyValueDirResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void GetKeyValueDir(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest* request, ::tensorflow::GetKeyValueDirResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void GetKeyValueDir(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetKeyValueDirResponse* response, 
::grpc::ClientUnaryReactor* reactor) override; + #else + void GetKeyValueDir(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::GetKeyValueDirResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + void DeleteKeyValue(::grpc::ClientContext* context, const ::tensorflow::DeleteKeyValueRequest* request, ::tensorflow::DeleteKeyValueResponse* response, std::function) override; + void DeleteKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::DeleteKeyValueResponse* response, std::function) override; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void DeleteKeyValue(::grpc::ClientContext* context, const ::tensorflow::DeleteKeyValueRequest* request, ::tensorflow::DeleteKeyValueResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void DeleteKeyValue(::grpc::ClientContext* context, const ::tensorflow::DeleteKeyValueRequest* request, ::tensorflow::DeleteKeyValueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void DeleteKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::DeleteKeyValueResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void DeleteKeyValue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::DeleteKeyValueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + void Barrier(::grpc::ClientContext* context, const ::tensorflow::BarrierRequest* request, ::tensorflow::BarrierResponse* response, std::function) override; + void Barrier(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::BarrierResponse* response, std::function) override; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void Barrier(::grpc::ClientContext* context, const ::tensorflow::BarrierRequest* request, ::tensorflow::BarrierResponse* 
response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void Barrier(::grpc::ClientContext* context, const ::tensorflow::BarrierRequest* request, ::tensorflow::BarrierResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void Barrier(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::BarrierResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void Barrier(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::BarrierResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + void CancelBarrier(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest* request, ::tensorflow::CancelBarrierResponse* response, std::function) override; + void CancelBarrier(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::CancelBarrierResponse* response, std::function) override; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void CancelBarrier(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest* request, ::tensorflow::CancelBarrierResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void CancelBarrier(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest* request, ::tensorflow::CancelBarrierResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + void CancelBarrier(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::CancelBarrierResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + #else + void CancelBarrier(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::CancelBarrierResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + #endif + private: + friend class Stub; + explicit experimental_async(Stub* 
stub): stub_(stub) { } + Stub* stub() { return stub_; } + Stub* stub_; + }; + class experimental_async_interface* experimental_async() override { return &async_stub_; } + + private: + std::shared_ptr< ::grpc::ChannelInterface> channel_; + class experimental_async async_stub_{this}; + ::grpc::ClientAsyncResponseReader< ::tensorflow::RegisterTaskResponse>* AsyncRegisterTaskRaw(::grpc::ClientContext* context, const ::tensorflow::RegisterTaskRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::RegisterTaskResponse>* PrepareAsyncRegisterTaskRaw(::grpc::ClientContext* context, const ::tensorflow::RegisterTaskRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::HeartbeatResponse>* AsyncHeartbeatRaw(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::HeartbeatResponse>* PrepareAsyncHeartbeatRaw(::grpc::ClientContext* context, const ::tensorflow::HeartbeatRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::WaitForAllTasksResponse>* AsyncWaitForAllTasksRaw(::grpc::ClientContext* context, const ::tensorflow::WaitForAllTasksRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::WaitForAllTasksResponse>* PrepareAsyncWaitForAllTasksRaw(::grpc::ClientContext* context, const ::tensorflow::WaitForAllTasksRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::ShutdownTaskResponse>* AsyncShutdownTaskRaw(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::ShutdownTaskResponse>* PrepareAsyncShutdownTaskRaw(::grpc::ClientContext* context, const ::tensorflow::ShutdownTaskRequest& request, 
::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::ResetTaskResponse>* AsyncResetTaskRaw(::grpc::ClientContext* context, const ::tensorflow::ResetTaskRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::ResetTaskResponse>* PrepareAsyncResetTaskRaw(::grpc::ClientContext* context, const ::tensorflow::ResetTaskRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::ReportErrorToTaskResponse>* AsyncReportErrorToTaskRaw(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToTaskRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::ReportErrorToTaskResponse>* PrepareAsyncReportErrorToTaskRaw(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToTaskRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::ReportErrorToServiceResponse>* AsyncReportErrorToServiceRaw(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToServiceRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::ReportErrorToServiceResponse>* PrepareAsyncReportErrorToServiceRaw(::grpc::ClientContext* context, const ::tensorflow::ReportErrorToServiceRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::GetTaskStateResponse>* AsyncGetTaskStateRaw(::grpc::ClientContext* context, const ::tensorflow::GetTaskStateRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::GetTaskStateResponse>* PrepareAsyncGetTaskStateRaw(::grpc::ClientContext* context, const ::tensorflow::GetTaskStateRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::InsertKeyValueResponse>* AsyncInsertKeyValueRaw(::grpc::ClientContext* context, const 
::tensorflow::InsertKeyValueRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::InsertKeyValueResponse>* PrepareAsyncInsertKeyValueRaw(::grpc::ClientContext* context, const ::tensorflow::InsertKeyValueRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::GetKeyValueResponse>* AsyncGetKeyValueRaw(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::GetKeyValueResponse>* PrepareAsyncGetKeyValueRaw(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::TryGetKeyValueResponse>* AsyncTryGetKeyValueRaw(::grpc::ClientContext* context, const ::tensorflow::TryGetKeyValueRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::TryGetKeyValueResponse>* PrepareAsyncTryGetKeyValueRaw(::grpc::ClientContext* context, const ::tensorflow::TryGetKeyValueRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::GetKeyValueDirResponse>* AsyncGetKeyValueDirRaw(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::GetKeyValueDirResponse>* PrepareAsyncGetKeyValueDirRaw(::grpc::ClientContext* context, const ::tensorflow::GetKeyValueDirRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::DeleteKeyValueResponse>* AsyncDeleteKeyValueRaw(::grpc::ClientContext* context, const ::tensorflow::DeleteKeyValueRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::DeleteKeyValueResponse>* PrepareAsyncDeleteKeyValueRaw(::grpc::ClientContext* 
context, const ::tensorflow::DeleteKeyValueRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::BarrierResponse>* AsyncBarrierRaw(::grpc::ClientContext* context, const ::tensorflow::BarrierRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::BarrierResponse>* PrepareAsyncBarrierRaw(::grpc::ClientContext* context, const ::tensorflow::BarrierRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::CancelBarrierResponse>* AsyncCancelBarrierRaw(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::tensorflow::CancelBarrierResponse>* PrepareAsyncCancelBarrierRaw(::grpc::ClientContext* context, const ::tensorflow::CancelBarrierRequest& request, ::grpc::CompletionQueue* cq) override; + const ::grpc::internal::RpcMethod rpcmethod_RegisterTask_; + const ::grpc::internal::RpcMethod rpcmethod_Heartbeat_; + const ::grpc::internal::RpcMethod rpcmethod_WaitForAllTasks_; + const ::grpc::internal::RpcMethod rpcmethod_ShutdownTask_; + const ::grpc::internal::RpcMethod rpcmethod_ResetTask_; + const ::grpc::internal::RpcMethod rpcmethod_ReportErrorToTask_; + const ::grpc::internal::RpcMethod rpcmethod_ReportErrorToService_; + const ::grpc::internal::RpcMethod rpcmethod_GetTaskState_; + const ::grpc::internal::RpcMethod rpcmethod_InsertKeyValue_; + const ::grpc::internal::RpcMethod rpcmethod_GetKeyValue_; + const ::grpc::internal::RpcMethod rpcmethod_TryGetKeyValue_; + const ::grpc::internal::RpcMethod rpcmethod_GetKeyValueDir_; + const ::grpc::internal::RpcMethod rpcmethod_DeleteKeyValue_; + const ::grpc::internal::RpcMethod rpcmethod_Barrier_; + const ::grpc::internal::RpcMethod rpcmethod_CancelBarrier_; + }; + static std::unique_ptr NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const 
::grpc::StubOptions& options = ::grpc::StubOptions()); + + class Service : public ::grpc::Service { + public: + Service(); + virtual ~Service(); + // Register task to coordination service so that the service starts to track + // liveness of the task. RPC blocks and returns only when it registers to + // the service successfully, or error happens in the registering process. + virtual ::grpc::Status RegisterTask(::grpc::ServerContext* context, const ::tensorflow::RegisterTaskRequest* request, ::tensorflow::RegisterTaskResponse* response); + // [AUTOMATION]: Internal rpc option goes here. + // Heartbeat message from task to coordination service. Heartbeat is sent from + // a task to refresh its timestamp on leader to avoid it becoming stale. + // RPC responds immediately after refreshing the timestamp on leader. + virtual ::grpc::Status Heartbeat(::grpc::ServerContext* context, const ::tensorflow::HeartbeatRequest* request, ::tensorflow::HeartbeatResponse* response); + // [AUTOMATION]: Internal rpc option goes here. + // Wait for all tasks in the cluster to be up and running. The RPC request + // only gets responded when all tasks have registered, or some error occurs. + virtual ::grpc::Status WaitForAllTasks(::grpc::ServerContext* context, const ::tensorflow::WaitForAllTasksRequest* request, ::tensorflow::WaitForAllTasksResponse* response); + // Disconnects task from the service. If `shutdown_barrier_timeout_in_ms` is + // specified in the config, blocks until all tasks reach the barrier before + // disconnecting together. If the barrier times out, tasks at the barrier will + // still disconnect, while an error is reported to tasks that did not reach + // the barrier on time. + virtual ::grpc::Status ShutdownTask(::grpc::ServerContext* context, const ::tensorflow::ShutdownTaskRequest* request, ::tensorflow::ShutdownTaskResponse* response); + // [AUTOMATION]: Internal rpc option goes here. 
+ // Disconnects task from the service if it is in an ERROR state, thereby + // allowing it to reconnect via RegisterTask() in the future. + virtual ::grpc::Status ResetTask(::grpc::ServerContext* context, const ::tensorflow::ResetTaskRequest* request, ::tensorflow::ResetTaskResponse* response); + // Report error to the task. RPC sets the receiving instance of coordination + // service agent to error state permanently. + // TODO(b/195990880): Consider splitting this into a different RPC service. + virtual ::grpc::Status ReportErrorToTask(::grpc::ServerContext* context, const ::tensorflow::ReportErrorToTaskRequest* request, ::tensorflow::ReportErrorToTaskResponse* response); + // Report task error to coordination service. RPC sets the service-side task + // state to error, and propagate the error to other tasks in the cluster. + virtual ::grpc::Status ReportErrorToService(::grpc::ServerContext* context, const ::tensorflow::ReportErrorToServiceRequest* request, ::tensorflow::ReportErrorToServiceResponse* response); + // Get the state of a remote task. Specifically, RPC returns a + // CoordinatedTaskState, and if the task is in an error status, returns a + // non-OK error code, non-empty error message and error payload. + virtual ::grpc::Status GetTaskState(::grpc::ServerContext* context, const ::tensorflow::GetTaskStateRequest* request, ::tensorflow::GetTaskStateResponse* response); + // Insert configuration key-value that will be accessible to all cluster + // tasks. The key can be formatted as Unix file path with hierarchy. The + // coordination service key-value store should only be used for cluster + // configuration data. + virtual ::grpc::Status InsertKeyValue(::grpc::ServerContext* context, const ::tensorflow::InsertKeyValueRequest* request, ::tensorflow::InsertKeyValueResponse* response); + // [AUTOMATION]: Internal rpc option goes here. + // Get configuration key-value. 
The request blocks until the key-value data + // becomes available (i.e., set by a task in the cluster). + virtual ::grpc::Status GetKeyValue(::grpc::ServerContext* context, const ::tensorflow::GetKeyValueRequest* request, ::tensorflow::GetKeyValueResponse* response); + // [AUTOMATION]: Internal rpc option goes here. + // Get configuration key-value. The request does not block, but returns an + // error if the requested key does not exist. + virtual ::grpc::Status TryGetKeyValue(::grpc::ServerContext* context, const ::tensorflow::TryGetKeyValueRequest* request, ::tensorflow::TryGetKeyValueResponse* response); + // Same as GetKeyValue, but returns all values that have keys which are + // prefixed with the directory key. + virtual ::grpc::Status GetKeyValueDir(::grpc::ServerContext* context, const ::tensorflow::GetKeyValueDirRequest* request, ::tensorflow::GetKeyValueDirResponse* response); + // [AUTOMATION]: Internal rpc option goes here. + // Delete configuration key-value. If is_directory is set in request, + // recursively clean up all key-values under the path specified by `key`. + virtual ::grpc::Status DeleteKeyValue(::grpc::ServerContext* context, const ::tensorflow::DeleteKeyValueRequest* request, ::tensorflow::DeleteKeyValueResponse* response); + // Blocks until all (or a subset of) tasks are at the barrier or the barrier + // fails. + // + // `barrier_id` should be unique across barriers. Once the barrier has passed + // or failed, subsequent calls will not block, and immediately respond with + // the previous response. + // + // The first WaitAtBarrier() call received by the service for a particular + // barrier id is special in that it determines the barrier deadline based on + // timeout duration. + // However, if subsequent calls by different agents specify a different set of + // `tasks` for the same `barrier_id`, the barrier will fail instantly. + // + // If no tasks are specified (default), the barrier will block for all the + // connected tasks. 
+ // + // Possible service errors: + // - DeadlineExceeded: Timed out waiting for specified tasks at the barrier. + // Deadline is determined by the server timestamp when it receives the + // first WaitAtBarrier() + timeout duration. + // - Cancelled: One of the tasks called CancelBarrier(). + // - Aborted: Service is shutting down. + // - Internal: Any participating task is in ERROR state. + // - InvalidArgument: (1) Conflicting tasks specified by different agents + // for the same barrier, (2) one of the participating tasks is not in + // the cluster, or (3) task making the request is not included in the + // list of participating tasks. + virtual ::grpc::Status Barrier(::grpc::ServerContext* context, const ::tensorflow::BarrierRequest* request, ::tensorflow::BarrierResponse* response); + // [AUTOMATION]: Internal rpc option goes here. + // Aborts the barrier if it is ongoing. + // Current and future WaitAtBarrier() calls with the same id will return a + // CANCELLED error status. + // Possible service errors: + // - FailedPrecondition: Barrier has already been passed. 
+ virtual ::grpc::Status CancelBarrier(::grpc::ServerContext* context, const ::tensorflow::CancelBarrierRequest* request, ::tensorflow::CancelBarrierResponse* response); + }; + template + class WithAsyncMethod_RegisterTask : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_RegisterTask() { + ::grpc::Service::MarkMethodAsync(0); + } + ~WithAsyncMethod_RegisterTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status RegisterTask(::grpc::ServerContext* /*context*/, const ::tensorflow::RegisterTaskRequest* /*request*/, ::tensorflow::RegisterTaskResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestRegisterTask(::grpc::ServerContext* context, ::tensorflow::RegisterTaskRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::RegisterTaskResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(0, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_Heartbeat : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_Heartbeat() { + ::grpc::Service::MarkMethodAsync(1); + } + ~WithAsyncMethod_Heartbeat() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Heartbeat(::grpc::ServerContext* /*context*/, const ::tensorflow::HeartbeatRequest* /*request*/, ::tensorflow::HeartbeatResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestHeartbeat(::grpc::ServerContext* context, ::tensorflow::HeartbeatRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::HeartbeatResponse>* 
response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(1, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_WaitForAllTasks : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_WaitForAllTasks() { + ::grpc::Service::MarkMethodAsync(2); + } + ~WithAsyncMethod_WaitForAllTasks() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status WaitForAllTasks(::grpc::ServerContext* /*context*/, const ::tensorflow::WaitForAllTasksRequest* /*request*/, ::tensorflow::WaitForAllTasksResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestWaitForAllTasks(::grpc::ServerContext* context, ::tensorflow::WaitForAllTasksRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::WaitForAllTasksResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(2, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_ShutdownTask : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_ShutdownTask() { + ::grpc::Service::MarkMethodAsync(3); + } + ~WithAsyncMethod_ShutdownTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ShutdownTask(::grpc::ServerContext* /*context*/, const ::tensorflow::ShutdownTaskRequest* /*request*/, ::tensorflow::ShutdownTaskResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestShutdownTask(::grpc::ServerContext* context, 
::tensorflow::ShutdownTaskRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::ShutdownTaskResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(3, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_ResetTask : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_ResetTask() { + ::grpc::Service::MarkMethodAsync(4); + } + ~WithAsyncMethod_ResetTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ResetTask(::grpc::ServerContext* /*context*/, const ::tensorflow::ResetTaskRequest* /*request*/, ::tensorflow::ResetTaskResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestResetTask(::grpc::ServerContext* context, ::tensorflow::ResetTaskRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::ResetTaskResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(4, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_ReportErrorToTask : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_ReportErrorToTask() { + ::grpc::Service::MarkMethodAsync(5); + } + ~WithAsyncMethod_ReportErrorToTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ReportErrorToTask(::grpc::ServerContext* /*context*/, const ::tensorflow::ReportErrorToTaskRequest* /*request*/, ::tensorflow::ReportErrorToTaskResponse* /*response*/) override { + abort(); + return 
::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestReportErrorToTask(::grpc::ServerContext* context, ::tensorflow::ReportErrorToTaskRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::ReportErrorToTaskResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(5, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_ReportErrorToService : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_ReportErrorToService() { + ::grpc::Service::MarkMethodAsync(6); + } + ~WithAsyncMethod_ReportErrorToService() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ReportErrorToService(::grpc::ServerContext* /*context*/, const ::tensorflow::ReportErrorToServiceRequest* /*request*/, ::tensorflow::ReportErrorToServiceResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestReportErrorToService(::grpc::ServerContext* context, ::tensorflow::ReportErrorToServiceRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::ReportErrorToServiceResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(6, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_GetTaskState : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_GetTaskState() { + ::grpc::Service::MarkMethodAsync(7); + } + ~WithAsyncMethod_GetTaskState() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status 
GetTaskState(::grpc::ServerContext* /*context*/, const ::tensorflow::GetTaskStateRequest* /*request*/, ::tensorflow::GetTaskStateResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestGetTaskState(::grpc::ServerContext* context, ::tensorflow::GetTaskStateRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::GetTaskStateResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(7, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_InsertKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_InsertKeyValue() { + ::grpc::Service::MarkMethodAsync(8); + } + ~WithAsyncMethod_InsertKeyValue() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status InsertKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::InsertKeyValueRequest* /*request*/, ::tensorflow::InsertKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestInsertKeyValue(::grpc::ServerContext* context, ::tensorflow::InsertKeyValueRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::InsertKeyValueResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(8, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_GetKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_GetKeyValue() { + ::grpc::Service::MarkMethodAsync(9); + } + ~WithAsyncMethod_GetKeyValue() override { + 
BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status GetKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::GetKeyValueRequest* /*request*/, ::tensorflow::GetKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestGetKeyValue(::grpc::ServerContext* context, ::tensorflow::GetKeyValueRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::GetKeyValueResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(9, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_TryGetKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_TryGetKeyValue() { + ::grpc::Service::MarkMethodAsync(10); + } + ~WithAsyncMethod_TryGetKeyValue() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status TryGetKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::TryGetKeyValueRequest* /*request*/, ::tensorflow::TryGetKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestTryGetKeyValue(::grpc::ServerContext* context, ::tensorflow::TryGetKeyValueRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::TryGetKeyValueResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(10, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_GetKeyValueDir : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_GetKeyValueDir() 
{ + ::grpc::Service::MarkMethodAsync(11); + } + ~WithAsyncMethod_GetKeyValueDir() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status GetKeyValueDir(::grpc::ServerContext* /*context*/, const ::tensorflow::GetKeyValueDirRequest* /*request*/, ::tensorflow::GetKeyValueDirResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestGetKeyValueDir(::grpc::ServerContext* context, ::tensorflow::GetKeyValueDirRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::GetKeyValueDirResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(11, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_DeleteKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_DeleteKeyValue() { + ::grpc::Service::MarkMethodAsync(12); + } + ~WithAsyncMethod_DeleteKeyValue() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status DeleteKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::DeleteKeyValueRequest* /*request*/, ::tensorflow::DeleteKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestDeleteKeyValue(::grpc::ServerContext* context, ::tensorflow::DeleteKeyValueRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::DeleteKeyValueResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(12, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_Barrier : public BaseClass { + private: + void 
BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_Barrier() { + ::grpc::Service::MarkMethodAsync(13); + } + ~WithAsyncMethod_Barrier() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Barrier(::grpc::ServerContext* /*context*/, const ::tensorflow::BarrierRequest* /*request*/, ::tensorflow::BarrierResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestBarrier(::grpc::ServerContext* context, ::tensorflow::BarrierRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::BarrierResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(13, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_CancelBarrier : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_CancelBarrier() { + ::grpc::Service::MarkMethodAsync(14); + } + ~WithAsyncMethod_CancelBarrier() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status CancelBarrier(::grpc::ServerContext* /*context*/, const ::tensorflow::CancelBarrierRequest* /*request*/, ::tensorflow::CancelBarrierResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestCancelBarrier(::grpc::ServerContext* context, ::tensorflow::CancelBarrierRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::CancelBarrierResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(14, context, request, response, new_call_cq, notification_cq, tag); + } + }; + typedef WithAsyncMethod_RegisterTask 
> > > > > > > > > > > > > > AsyncService; + template + class ExperimentalWithCallbackMethod_RegisterTask : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_RegisterTask() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). + #endif + MarkMethodCallback(0, + new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::RegisterTaskRequest, ::tensorflow::RegisterTaskResponse>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::tensorflow::RegisterTaskRequest* request, ::tensorflow::RegisterTaskResponse* response) { return this->RegisterTask(context, request, response); }));} + void SetMessageAllocatorFor_RegisterTask( + ::grpc::experimental::MessageAllocator< ::tensorflow::RegisterTaskRequest, ::tensorflow::RegisterTaskResponse>* allocator) { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(0); + #else + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(0); + #endif + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::RegisterTaskRequest, ::tensorflow::RegisterTaskResponse>*>(handler) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_RegisterTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status RegisterTask(::grpc::ServerContext* /*context*/, const ::tensorflow::RegisterTaskRequest* /*request*/, ::tensorflow::RegisterTaskResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* RegisterTask( + ::grpc::CallbackServerContext* /*context*/, 
const ::tensorflow::RegisterTaskRequest* /*request*/, ::tensorflow::RegisterTaskResponse* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* RegisterTask( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::RegisterTaskRequest* /*request*/, ::tensorflow::RegisterTaskResponse* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithCallbackMethod_Heartbeat : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_Heartbeat() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). + #endif + MarkMethodCallback(1, + new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::HeartbeatRequest, ::tensorflow::HeartbeatResponse>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::tensorflow::HeartbeatRequest* request, ::tensorflow::HeartbeatResponse* response) { return this->Heartbeat(context, request, response); }));} + void SetMessageAllocatorFor_Heartbeat( + ::grpc::experimental::MessageAllocator< ::tensorflow::HeartbeatRequest, ::tensorflow::HeartbeatResponse>* allocator) { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(1); + #else + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(1); + #endif + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::HeartbeatRequest, ::tensorflow::HeartbeatResponse>*>(handler) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_Heartbeat() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Heartbeat(::grpc::ServerContext* /*context*/, const ::tensorflow::HeartbeatRequest* 
/*request*/, ::tensorflow::HeartbeatResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* Heartbeat( + ::grpc::CallbackServerContext* /*context*/, const ::tensorflow::HeartbeatRequest* /*request*/, ::tensorflow::HeartbeatResponse* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* Heartbeat( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::HeartbeatRequest* /*request*/, ::tensorflow::HeartbeatResponse* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithCallbackMethod_WaitForAllTasks : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_WaitForAllTasks() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). + #endif + MarkMethodCallback(2, + new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::WaitForAllTasksRequest, ::tensorflow::WaitForAllTasksResponse>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::tensorflow::WaitForAllTasksRequest* request, ::tensorflow::WaitForAllTasksResponse* response) { return this->WaitForAllTasks(context, request, response); }));} + void SetMessageAllocatorFor_WaitForAllTasks( + ::grpc::experimental::MessageAllocator< ::tensorflow::WaitForAllTasksRequest, ::tensorflow::WaitForAllTasksResponse>* allocator) { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(2); + #else + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(2); + #endif + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::WaitForAllTasksRequest, 
::tensorflow::WaitForAllTasksResponse>*>(handler) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_WaitForAllTasks() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status WaitForAllTasks(::grpc::ServerContext* /*context*/, const ::tensorflow::WaitForAllTasksRequest* /*request*/, ::tensorflow::WaitForAllTasksResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* WaitForAllTasks( + ::grpc::CallbackServerContext* /*context*/, const ::tensorflow::WaitForAllTasksRequest* /*request*/, ::tensorflow::WaitForAllTasksResponse* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* WaitForAllTasks( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::WaitForAllTasksRequest* /*request*/, ::tensorflow::WaitForAllTasksResponse* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithCallbackMethod_ShutdownTask : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_ShutdownTask() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). 
+ #endif + MarkMethodCallback(3, + new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::ShutdownTaskRequest, ::tensorflow::ShutdownTaskResponse>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::tensorflow::ShutdownTaskRequest* request, ::tensorflow::ShutdownTaskResponse* response) { return this->ShutdownTask(context, request, response); }));} + void SetMessageAllocatorFor_ShutdownTask( + ::grpc::experimental::MessageAllocator< ::tensorflow::ShutdownTaskRequest, ::tensorflow::ShutdownTaskResponse>* allocator) { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(3); + #else + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(3); + #endif + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::ShutdownTaskRequest, ::tensorflow::ShutdownTaskResponse>*>(handler) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_ShutdownTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ShutdownTask(::grpc::ServerContext* /*context*/, const ::tensorflow::ShutdownTaskRequest* /*request*/, ::tensorflow::ShutdownTaskResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* ShutdownTask( + ::grpc::CallbackServerContext* /*context*/, const ::tensorflow::ShutdownTaskRequest* /*request*/, ::tensorflow::ShutdownTaskResponse* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* ShutdownTask( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::ShutdownTaskRequest* /*request*/, ::tensorflow::ShutdownTaskResponse* /*response*/) + #endif + { return nullptr; } + 
}; + template + class ExperimentalWithCallbackMethod_ResetTask : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_ResetTask() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). + #endif + MarkMethodCallback(4, + new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::ResetTaskRequest, ::tensorflow::ResetTaskResponse>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::tensorflow::ResetTaskRequest* request, ::tensorflow::ResetTaskResponse* response) { return this->ResetTask(context, request, response); }));} + void SetMessageAllocatorFor_ResetTask( + ::grpc::experimental::MessageAllocator< ::tensorflow::ResetTaskRequest, ::tensorflow::ResetTaskResponse>* allocator) { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(4); + #else + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(4); + #endif + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::ResetTaskRequest, ::tensorflow::ResetTaskResponse>*>(handler) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_ResetTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ResetTask(::grpc::ServerContext* /*context*/, const ::tensorflow::ResetTaskRequest* /*request*/, ::tensorflow::ResetTaskResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* ResetTask( + ::grpc::CallbackServerContext* /*context*/, const ::tensorflow::ResetTaskRequest* /*request*/, ::tensorflow::ResetTaskResponse* 
/*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* ResetTask( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::ResetTaskRequest* /*request*/, ::tensorflow::ResetTaskResponse* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithCallbackMethod_ReportErrorToTask : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_ReportErrorToTask() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). + #endif + MarkMethodCallback(5, + new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::ReportErrorToTaskRequest, ::tensorflow::ReportErrorToTaskResponse>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::tensorflow::ReportErrorToTaskRequest* request, ::tensorflow::ReportErrorToTaskResponse* response) { return this->ReportErrorToTask(context, request, response); }));} + void SetMessageAllocatorFor_ReportErrorToTask( + ::grpc::experimental::MessageAllocator< ::tensorflow::ReportErrorToTaskRequest, ::tensorflow::ReportErrorToTaskResponse>* allocator) { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(5); + #else + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(5); + #endif + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::ReportErrorToTaskRequest, ::tensorflow::ReportErrorToTaskResponse>*>(handler) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_ReportErrorToTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ReportErrorToTask(::grpc::ServerContext* /*context*/, const 
::tensorflow::ReportErrorToTaskRequest* /*request*/, ::tensorflow::ReportErrorToTaskResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* ReportErrorToTask( + ::grpc::CallbackServerContext* /*context*/, const ::tensorflow::ReportErrorToTaskRequest* /*request*/, ::tensorflow::ReportErrorToTaskResponse* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* ReportErrorToTask( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::ReportErrorToTaskRequest* /*request*/, ::tensorflow::ReportErrorToTaskResponse* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithCallbackMethod_ReportErrorToService : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_ReportErrorToService() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). 
+ #endif + MarkMethodCallback(6, + new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::ReportErrorToServiceRequest, ::tensorflow::ReportErrorToServiceResponse>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::tensorflow::ReportErrorToServiceRequest* request, ::tensorflow::ReportErrorToServiceResponse* response) { return this->ReportErrorToService(context, request, response); }));} + void SetMessageAllocatorFor_ReportErrorToService( + ::grpc::experimental::MessageAllocator< ::tensorflow::ReportErrorToServiceRequest, ::tensorflow::ReportErrorToServiceResponse>* allocator) { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(6); + #else + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(6); + #endif + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::ReportErrorToServiceRequest, ::tensorflow::ReportErrorToServiceResponse>*>(handler) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_ReportErrorToService() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ReportErrorToService(::grpc::ServerContext* /*context*/, const ::tensorflow::ReportErrorToServiceRequest* /*request*/, ::tensorflow::ReportErrorToServiceResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* ReportErrorToService( + ::grpc::CallbackServerContext* /*context*/, const ::tensorflow::ReportErrorToServiceRequest* /*request*/, ::tensorflow::ReportErrorToServiceResponse* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* ReportErrorToService( + ::grpc::experimental::CallbackServerContext* 
/*context*/, const ::tensorflow::ReportErrorToServiceRequest* /*request*/, ::tensorflow::ReportErrorToServiceResponse* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithCallbackMethod_GetTaskState : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_GetTaskState() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). + #endif + MarkMethodCallback(7, + new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::GetTaskStateRequest, ::tensorflow::GetTaskStateResponse>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::tensorflow::GetTaskStateRequest* request, ::tensorflow::GetTaskStateResponse* response) { return this->GetTaskState(context, request, response); }));} + void SetMessageAllocatorFor_GetTaskState( + ::grpc::experimental::MessageAllocator< ::tensorflow::GetTaskStateRequest, ::tensorflow::GetTaskStateResponse>* allocator) { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(7); + #else + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(7); + #endif + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::GetTaskStateRequest, ::tensorflow::GetTaskStateResponse>*>(handler) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_GetTaskState() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status GetTaskState(::grpc::ServerContext* /*context*/, const ::tensorflow::GetTaskStateRequest* /*request*/, ::tensorflow::GetTaskStateResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef 
GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* GetTaskState( + ::grpc::CallbackServerContext* /*context*/, const ::tensorflow::GetTaskStateRequest* /*request*/, ::tensorflow::GetTaskStateResponse* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* GetTaskState( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::GetTaskStateRequest* /*request*/, ::tensorflow::GetTaskStateResponse* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithCallbackMethod_InsertKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_InsertKeyValue() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). + #endif + MarkMethodCallback(8, + new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::InsertKeyValueRequest, ::tensorflow::InsertKeyValueResponse>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::tensorflow::InsertKeyValueRequest* request, ::tensorflow::InsertKeyValueResponse* response) { return this->InsertKeyValue(context, request, response); }));} + void SetMessageAllocatorFor_InsertKeyValue( + ::grpc::experimental::MessageAllocator< ::tensorflow::InsertKeyValueRequest, ::tensorflow::InsertKeyValueResponse>* allocator) { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(8); + #else + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(8); + #endif + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::InsertKeyValueRequest, ::tensorflow::InsertKeyValueResponse>*>(handler) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_InsertKeyValue() override { + 
BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status InsertKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::InsertKeyValueRequest* /*request*/, ::tensorflow::InsertKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* InsertKeyValue( + ::grpc::CallbackServerContext* /*context*/, const ::tensorflow::InsertKeyValueRequest* /*request*/, ::tensorflow::InsertKeyValueResponse* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* InsertKeyValue( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::InsertKeyValueRequest* /*request*/, ::tensorflow::InsertKeyValueResponse* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithCallbackMethod_GetKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_GetKeyValue() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). 
+ #endif + MarkMethodCallback(9, + new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::GetKeyValueRequest, ::tensorflow::GetKeyValueResponse>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::tensorflow::GetKeyValueRequest* request, ::tensorflow::GetKeyValueResponse* response) { return this->GetKeyValue(context, request, response); }));} + void SetMessageAllocatorFor_GetKeyValue( + ::grpc::experimental::MessageAllocator< ::tensorflow::GetKeyValueRequest, ::tensorflow::GetKeyValueResponse>* allocator) { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(9); + #else + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(9); + #endif + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::GetKeyValueRequest, ::tensorflow::GetKeyValueResponse>*>(handler) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_GetKeyValue() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status GetKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::GetKeyValueRequest* /*request*/, ::tensorflow::GetKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* GetKeyValue( + ::grpc::CallbackServerContext* /*context*/, const ::tensorflow::GetKeyValueRequest* /*request*/, ::tensorflow::GetKeyValueResponse* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* GetKeyValue( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::GetKeyValueRequest* /*request*/, ::tensorflow::GetKeyValueResponse* /*response*/) + #endif + { return nullptr; } + }; + template + 
class ExperimentalWithCallbackMethod_TryGetKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_TryGetKeyValue() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). + #endif + MarkMethodCallback(10, + new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::TryGetKeyValueRequest, ::tensorflow::TryGetKeyValueResponse>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::tensorflow::TryGetKeyValueRequest* request, ::tensorflow::TryGetKeyValueResponse* response) { return this->TryGetKeyValue(context, request, response); }));} + void SetMessageAllocatorFor_TryGetKeyValue( + ::grpc::experimental::MessageAllocator< ::tensorflow::TryGetKeyValueRequest, ::tensorflow::TryGetKeyValueResponse>* allocator) { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(10); + #else + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(10); + #endif + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::TryGetKeyValueRequest, ::tensorflow::TryGetKeyValueResponse>*>(handler) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_TryGetKeyValue() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status TryGetKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::TryGetKeyValueRequest* /*request*/, ::tensorflow::TryGetKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* TryGetKeyValue( + ::grpc::CallbackServerContext* /*context*/, const 
::tensorflow::TryGetKeyValueRequest* /*request*/, ::tensorflow::TryGetKeyValueResponse* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* TryGetKeyValue( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::TryGetKeyValueRequest* /*request*/, ::tensorflow::TryGetKeyValueResponse* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithCallbackMethod_GetKeyValueDir : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_GetKeyValueDir() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). + #endif + MarkMethodCallback(11, + new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::GetKeyValueDirRequest, ::tensorflow::GetKeyValueDirResponse>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::tensorflow::GetKeyValueDirRequest* request, ::tensorflow::GetKeyValueDirResponse* response) { return this->GetKeyValueDir(context, request, response); }));} + void SetMessageAllocatorFor_GetKeyValueDir( + ::grpc::experimental::MessageAllocator< ::tensorflow::GetKeyValueDirRequest, ::tensorflow::GetKeyValueDirResponse>* allocator) { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(11); + #else + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(11); + #endif + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::GetKeyValueDirRequest, ::tensorflow::GetKeyValueDirResponse>*>(handler) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_GetKeyValueDir() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status 
GetKeyValueDir(::grpc::ServerContext* /*context*/, const ::tensorflow::GetKeyValueDirRequest* /*request*/, ::tensorflow::GetKeyValueDirResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* GetKeyValueDir( + ::grpc::CallbackServerContext* /*context*/, const ::tensorflow::GetKeyValueDirRequest* /*request*/, ::tensorflow::GetKeyValueDirResponse* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* GetKeyValueDir( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::GetKeyValueDirRequest* /*request*/, ::tensorflow::GetKeyValueDirResponse* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithCallbackMethod_DeleteKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_DeleteKeyValue() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). 
+ #endif + MarkMethodCallback(12, + new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::DeleteKeyValueRequest, ::tensorflow::DeleteKeyValueResponse>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::tensorflow::DeleteKeyValueRequest* request, ::tensorflow::DeleteKeyValueResponse* response) { return this->DeleteKeyValue(context, request, response); }));} + void SetMessageAllocatorFor_DeleteKeyValue( + ::grpc::experimental::MessageAllocator< ::tensorflow::DeleteKeyValueRequest, ::tensorflow::DeleteKeyValueResponse>* allocator) { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(12); + #else + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(12); + #endif + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::DeleteKeyValueRequest, ::tensorflow::DeleteKeyValueResponse>*>(handler) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_DeleteKeyValue() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status DeleteKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::DeleteKeyValueRequest* /*request*/, ::tensorflow::DeleteKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* DeleteKeyValue( + ::grpc::CallbackServerContext* /*context*/, const ::tensorflow::DeleteKeyValueRequest* /*request*/, ::tensorflow::DeleteKeyValueResponse* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* DeleteKeyValue( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::DeleteKeyValueRequest* /*request*/, ::tensorflow::DeleteKeyValueResponse* 
/*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithCallbackMethod_Barrier : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_Barrier() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). + #endif + MarkMethodCallback(13, + new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::BarrierRequest, ::tensorflow::BarrierResponse>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::tensorflow::BarrierRequest* request, ::tensorflow::BarrierResponse* response) { return this->Barrier(context, request, response); }));} + void SetMessageAllocatorFor_Barrier( + ::grpc::experimental::MessageAllocator< ::tensorflow::BarrierRequest, ::tensorflow::BarrierResponse>* allocator) { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(13); + #else + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(13); + #endif + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::BarrierRequest, ::tensorflow::BarrierResponse>*>(handler) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_Barrier() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Barrier(::grpc::ServerContext* /*context*/, const ::tensorflow::BarrierRequest* /*request*/, ::tensorflow::BarrierResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* Barrier( + ::grpc::CallbackServerContext* /*context*/, const ::tensorflow::BarrierRequest* /*request*/, 
::tensorflow::BarrierResponse* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* Barrier( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::BarrierRequest* /*request*/, ::tensorflow::BarrierResponse* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithCallbackMethod_CancelBarrier : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_CancelBarrier() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). + #endif + MarkMethodCallback(14, + new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::CancelBarrierRequest, ::tensorflow::CancelBarrierResponse>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::tensorflow::CancelBarrierRequest* request, ::tensorflow::CancelBarrierResponse* response) { return this->CancelBarrier(context, request, response); }));} + void SetMessageAllocatorFor_CancelBarrier( + ::grpc::experimental::MessageAllocator< ::tensorflow::CancelBarrierRequest, ::tensorflow::CancelBarrierResponse>* allocator) { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(14); + #else + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(14); + #endif + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::CancelBarrierRequest, ::tensorflow::CancelBarrierResponse>*>(handler) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_CancelBarrier() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status CancelBarrier(::grpc::ServerContext* /*context*/, const ::tensorflow::CancelBarrierRequest* /*request*/, 
::tensorflow::CancelBarrierResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* CancelBarrier( + ::grpc::CallbackServerContext* /*context*/, const ::tensorflow::CancelBarrierRequest* /*request*/, ::tensorflow::CancelBarrierResponse* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* CancelBarrier( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::CancelBarrierRequest* /*request*/, ::tensorflow::CancelBarrierResponse* /*response*/) + #endif + { return nullptr; } + }; + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + typedef ExperimentalWithCallbackMethod_RegisterTask > > > > > > > > > > > > > > CallbackService; + #endif + + typedef ExperimentalWithCallbackMethod_RegisterTask > > > > > > > > > > > > > > ExperimentalCallbackService; + template + class WithGenericMethod_RegisterTask : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_RegisterTask() { + ::grpc::Service::MarkMethodGeneric(0); + } + ~WithGenericMethod_RegisterTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status RegisterTask(::grpc::ServerContext* /*context*/, const ::tensorflow::RegisterTaskRequest* /*request*/, ::tensorflow::RegisterTaskResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_Heartbeat : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_Heartbeat() { + ::grpc::Service::MarkMethodGeneric(1); + } + ~WithGenericMethod_Heartbeat() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status 
Heartbeat(::grpc::ServerContext* /*context*/, const ::tensorflow::HeartbeatRequest* /*request*/, ::tensorflow::HeartbeatResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_WaitForAllTasks : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_WaitForAllTasks() { + ::grpc::Service::MarkMethodGeneric(2); + } + ~WithGenericMethod_WaitForAllTasks() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status WaitForAllTasks(::grpc::ServerContext* /*context*/, const ::tensorflow::WaitForAllTasksRequest* /*request*/, ::tensorflow::WaitForAllTasksResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_ShutdownTask : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_ShutdownTask() { + ::grpc::Service::MarkMethodGeneric(3); + } + ~WithGenericMethod_ShutdownTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ShutdownTask(::grpc::ServerContext* /*context*/, const ::tensorflow::ShutdownTaskRequest* /*request*/, ::tensorflow::ShutdownTaskResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_ResetTask : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_ResetTask() { + ::grpc::Service::MarkMethodGeneric(4); + } + ~WithGenericMethod_ResetTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ResetTask(::grpc::ServerContext* 
/*context*/, const ::tensorflow::ResetTaskRequest* /*request*/, ::tensorflow::ResetTaskResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_ReportErrorToTask : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_ReportErrorToTask() { + ::grpc::Service::MarkMethodGeneric(5); + } + ~WithGenericMethod_ReportErrorToTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ReportErrorToTask(::grpc::ServerContext* /*context*/, const ::tensorflow::ReportErrorToTaskRequest* /*request*/, ::tensorflow::ReportErrorToTaskResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_ReportErrorToService : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_ReportErrorToService() { + ::grpc::Service::MarkMethodGeneric(6); + } + ~WithGenericMethod_ReportErrorToService() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ReportErrorToService(::grpc::ServerContext* /*context*/, const ::tensorflow::ReportErrorToServiceRequest* /*request*/, ::tensorflow::ReportErrorToServiceResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_GetTaskState : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_GetTaskState() { + ::grpc::Service::MarkMethodGeneric(7); + } + ~WithGenericMethod_GetTaskState() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status 
GetTaskState(::grpc::ServerContext* /*context*/, const ::tensorflow::GetTaskStateRequest* /*request*/, ::tensorflow::GetTaskStateResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_InsertKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_InsertKeyValue() { + ::grpc::Service::MarkMethodGeneric(8); + } + ~WithGenericMethod_InsertKeyValue() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status InsertKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::InsertKeyValueRequest* /*request*/, ::tensorflow::InsertKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_GetKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_GetKeyValue() { + ::grpc::Service::MarkMethodGeneric(9); + } + ~WithGenericMethod_GetKeyValue() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status GetKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::GetKeyValueRequest* /*request*/, ::tensorflow::GetKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_TryGetKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_TryGetKeyValue() { + ::grpc::Service::MarkMethodGeneric(10); + } + ~WithGenericMethod_TryGetKeyValue() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status 
TryGetKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::TryGetKeyValueRequest* /*request*/, ::tensorflow::TryGetKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_GetKeyValueDir : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_GetKeyValueDir() { + ::grpc::Service::MarkMethodGeneric(11); + } + ~WithGenericMethod_GetKeyValueDir() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status GetKeyValueDir(::grpc::ServerContext* /*context*/, const ::tensorflow::GetKeyValueDirRequest* /*request*/, ::tensorflow::GetKeyValueDirResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_DeleteKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_DeleteKeyValue() { + ::grpc::Service::MarkMethodGeneric(12); + } + ~WithGenericMethod_DeleteKeyValue() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status DeleteKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::DeleteKeyValueRequest* /*request*/, ::tensorflow::DeleteKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_Barrier : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_Barrier() { + ::grpc::Service::MarkMethodGeneric(13); + } + ~WithGenericMethod_Barrier() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status 
Barrier(::grpc::ServerContext* /*context*/, const ::tensorflow::BarrierRequest* /*request*/, ::tensorflow::BarrierResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_CancelBarrier : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_CancelBarrier() { + ::grpc::Service::MarkMethodGeneric(14); + } + ~WithGenericMethod_CancelBarrier() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status CancelBarrier(::grpc::ServerContext* /*context*/, const ::tensorflow::CancelBarrierRequest* /*request*/, ::tensorflow::CancelBarrierResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithRawMethod_RegisterTask : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_RegisterTask() { + ::grpc::Service::MarkMethodRaw(0); + } + ~WithRawMethod_RegisterTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status RegisterTask(::grpc::ServerContext* /*context*/, const ::tensorflow::RegisterTaskRequest* /*request*/, ::tensorflow::RegisterTaskResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestRegisterTask(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(0, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_Heartbeat : public BaseClass { + private: + void 
BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_Heartbeat() { + ::grpc::Service::MarkMethodRaw(1); + } + ~WithRawMethod_Heartbeat() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Heartbeat(::grpc::ServerContext* /*context*/, const ::tensorflow::HeartbeatRequest* /*request*/, ::tensorflow::HeartbeatResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestHeartbeat(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(1, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_WaitForAllTasks : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_WaitForAllTasks() { + ::grpc::Service::MarkMethodRaw(2); + } + ~WithRawMethod_WaitForAllTasks() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status WaitForAllTasks(::grpc::ServerContext* /*context*/, const ::tensorflow::WaitForAllTasksRequest* /*request*/, ::tensorflow::WaitForAllTasksResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestWaitForAllTasks(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(2, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_ShutdownTask : public BaseClass { + private: + void 
BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_ShutdownTask() { + ::grpc::Service::MarkMethodRaw(3); + } + ~WithRawMethod_ShutdownTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ShutdownTask(::grpc::ServerContext* /*context*/, const ::tensorflow::ShutdownTaskRequest* /*request*/, ::tensorflow::ShutdownTaskResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestShutdownTask(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(3, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_ResetTask : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_ResetTask() { + ::grpc::Service::MarkMethodRaw(4); + } + ~WithRawMethod_ResetTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ResetTask(::grpc::ServerContext* /*context*/, const ::tensorflow::ResetTaskRequest* /*request*/, ::tensorflow::ResetTaskResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestResetTask(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(4, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_ReportErrorToTask : public BaseClass { + private: + void 
BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_ReportErrorToTask() { + ::grpc::Service::MarkMethodRaw(5); + } + ~WithRawMethod_ReportErrorToTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ReportErrorToTask(::grpc::ServerContext* /*context*/, const ::tensorflow::ReportErrorToTaskRequest* /*request*/, ::tensorflow::ReportErrorToTaskResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestReportErrorToTask(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(5, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_ReportErrorToService : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_ReportErrorToService() { + ::grpc::Service::MarkMethodRaw(6); + } + ~WithRawMethod_ReportErrorToService() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ReportErrorToService(::grpc::ServerContext* /*context*/, const ::tensorflow::ReportErrorToServiceRequest* /*request*/, ::tensorflow::ReportErrorToServiceResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestReportErrorToService(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(6, context, request, response, new_call_cq, notification_cq, tag); + } + }; + 
template + class WithRawMethod_GetTaskState : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_GetTaskState() { + ::grpc::Service::MarkMethodRaw(7); + } + ~WithRawMethod_GetTaskState() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status GetTaskState(::grpc::ServerContext* /*context*/, const ::tensorflow::GetTaskStateRequest* /*request*/, ::tensorflow::GetTaskStateResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestGetTaskState(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(7, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_InsertKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_InsertKeyValue() { + ::grpc::Service::MarkMethodRaw(8); + } + ~WithRawMethod_InsertKeyValue() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status InsertKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::InsertKeyValueRequest* /*request*/, ::tensorflow::InsertKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestInsertKeyValue(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(8, context, request, response, new_call_cq, notification_cq, tag); 
+ } + }; + template + class WithRawMethod_GetKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_GetKeyValue() { + ::grpc::Service::MarkMethodRaw(9); + } + ~WithRawMethod_GetKeyValue() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status GetKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::GetKeyValueRequest* /*request*/, ::tensorflow::GetKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestGetKeyValue(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(9, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_TryGetKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_TryGetKeyValue() { + ::grpc::Service::MarkMethodRaw(10); + } + ~WithRawMethod_TryGetKeyValue() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status TryGetKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::TryGetKeyValueRequest* /*request*/, ::tensorflow::TryGetKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestTryGetKeyValue(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(10, context, request, response, new_call_cq, notification_cq, 
tag); + } + }; + template + class WithRawMethod_GetKeyValueDir : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_GetKeyValueDir() { + ::grpc::Service::MarkMethodRaw(11); + } + ~WithRawMethod_GetKeyValueDir() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status GetKeyValueDir(::grpc::ServerContext* /*context*/, const ::tensorflow::GetKeyValueDirRequest* /*request*/, ::tensorflow::GetKeyValueDirResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestGetKeyValueDir(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(11, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_DeleteKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_DeleteKeyValue() { + ::grpc::Service::MarkMethodRaw(12); + } + ~WithRawMethod_DeleteKeyValue() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status DeleteKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::DeleteKeyValueRequest* /*request*/, ::tensorflow::DeleteKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestDeleteKeyValue(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(12, context, request, response, 
new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_Barrier : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_Barrier() { + ::grpc::Service::MarkMethodRaw(13); + } + ~WithRawMethod_Barrier() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Barrier(::grpc::ServerContext* /*context*/, const ::tensorflow::BarrierRequest* /*request*/, ::tensorflow::BarrierResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestBarrier(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(13, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_CancelBarrier : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_CancelBarrier() { + ::grpc::Service::MarkMethodRaw(14); + } + ~WithRawMethod_CancelBarrier() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status CancelBarrier(::grpc::ServerContext* /*context*/, const ::tensorflow::CancelBarrierRequest* /*request*/, ::tensorflow::CancelBarrierResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestCancelBarrier(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(14, context, request, response, new_call_cq, 
notification_cq, tag); + } + }; + template + class ExperimentalWithRawCallbackMethod_RegisterTask : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_RegisterTask() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). + #endif + MarkMethodRawCallback(0, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->RegisterTask(context, request, response); })); + } + ~ExperimentalWithRawCallbackMethod_RegisterTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status RegisterTask(::grpc::ServerContext* /*context*/, const ::tensorflow::RegisterTaskRequest* /*request*/, ::tensorflow::RegisterTaskResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* RegisterTask( + ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* RegisterTask( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithRawCallbackMethod_Heartbeat : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_Heartbeat() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). 
+ #endif + MarkMethodRawCallback(1, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->Heartbeat(context, request, response); })); + } + ~ExperimentalWithRawCallbackMethod_Heartbeat() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Heartbeat(::grpc::ServerContext* /*context*/, const ::tensorflow::HeartbeatRequest* /*request*/, ::tensorflow::HeartbeatResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* Heartbeat( + ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* Heartbeat( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithRawCallbackMethod_WaitForAllTasks : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_WaitForAllTasks() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). 
+ #endif + MarkMethodRawCallback(2, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->WaitForAllTasks(context, request, response); })); + } + ~ExperimentalWithRawCallbackMethod_WaitForAllTasks() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status WaitForAllTasks(::grpc::ServerContext* /*context*/, const ::tensorflow::WaitForAllTasksRequest* /*request*/, ::tensorflow::WaitForAllTasksResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* WaitForAllTasks( + ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* WaitForAllTasks( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithRawCallbackMethod_ShutdownTask : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_ShutdownTask() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). 
+ #endif + MarkMethodRawCallback(3, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->ShutdownTask(context, request, response); })); + } + ~ExperimentalWithRawCallbackMethod_ShutdownTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ShutdownTask(::grpc::ServerContext* /*context*/, const ::tensorflow::ShutdownTaskRequest* /*request*/, ::tensorflow::ShutdownTaskResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* ShutdownTask( + ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* ShutdownTask( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithRawCallbackMethod_ResetTask : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_ResetTask() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). 
+ #endif + MarkMethodRawCallback(4, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->ResetTask(context, request, response); })); + } + ~ExperimentalWithRawCallbackMethod_ResetTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ResetTask(::grpc::ServerContext* /*context*/, const ::tensorflow::ResetTaskRequest* /*request*/, ::tensorflow::ResetTaskResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* ResetTask( + ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* ResetTask( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithRawCallbackMethod_ReportErrorToTask : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_ReportErrorToTask() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). 
+ #endif + MarkMethodRawCallback(5, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->ReportErrorToTask(context, request, response); })); + } + ~ExperimentalWithRawCallbackMethod_ReportErrorToTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ReportErrorToTask(::grpc::ServerContext* /*context*/, const ::tensorflow::ReportErrorToTaskRequest* /*request*/, ::tensorflow::ReportErrorToTaskResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* ReportErrorToTask( + ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* ReportErrorToTask( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithRawCallbackMethod_ReportErrorToService : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_ReportErrorToService() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). 
+ #endif + MarkMethodRawCallback(6, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->ReportErrorToService(context, request, response); })); + } + ~ExperimentalWithRawCallbackMethod_ReportErrorToService() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ReportErrorToService(::grpc::ServerContext* /*context*/, const ::tensorflow::ReportErrorToServiceRequest* /*request*/, ::tensorflow::ReportErrorToServiceResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* ReportErrorToService( + ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* ReportErrorToService( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithRawCallbackMethod_GetTaskState : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_GetTaskState() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). 
+ #endif + MarkMethodRawCallback(7, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->GetTaskState(context, request, response); })); + } + ~ExperimentalWithRawCallbackMethod_GetTaskState() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status GetTaskState(::grpc::ServerContext* /*context*/, const ::tensorflow::GetTaskStateRequest* /*request*/, ::tensorflow::GetTaskStateResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* GetTaskState( + ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* GetTaskState( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithRawCallbackMethod_InsertKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_InsertKeyValue() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). 
+ #endif + MarkMethodRawCallback(8, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->InsertKeyValue(context, request, response); })); + } + ~ExperimentalWithRawCallbackMethod_InsertKeyValue() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status InsertKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::InsertKeyValueRequest* /*request*/, ::tensorflow::InsertKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* InsertKeyValue( + ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* InsertKeyValue( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithRawCallbackMethod_GetKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_GetKeyValue() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). 
+ #endif + MarkMethodRawCallback(9, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->GetKeyValue(context, request, response); })); + } + ~ExperimentalWithRawCallbackMethod_GetKeyValue() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status GetKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::GetKeyValueRequest* /*request*/, ::tensorflow::GetKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* GetKeyValue( + ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* GetKeyValue( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithRawCallbackMethod_TryGetKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_TryGetKeyValue() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). 
+ #endif + MarkMethodRawCallback(10, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->TryGetKeyValue(context, request, response); })); + } + ~ExperimentalWithRawCallbackMethod_TryGetKeyValue() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status TryGetKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::TryGetKeyValueRequest* /*request*/, ::tensorflow::TryGetKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* TryGetKeyValue( + ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* TryGetKeyValue( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithRawCallbackMethod_GetKeyValueDir : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_GetKeyValueDir() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). 
+ #endif + MarkMethodRawCallback(11, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->GetKeyValueDir(context, request, response); })); + } + ~ExperimentalWithRawCallbackMethod_GetKeyValueDir() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status GetKeyValueDir(::grpc::ServerContext* /*context*/, const ::tensorflow::GetKeyValueDirRequest* /*request*/, ::tensorflow::GetKeyValueDirResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* GetKeyValueDir( + ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* GetKeyValueDir( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithRawCallbackMethod_DeleteKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_DeleteKeyValue() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). 
+ #endif + MarkMethodRawCallback(12, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->DeleteKeyValue(context, request, response); })); + } + ~ExperimentalWithRawCallbackMethod_DeleteKeyValue() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status DeleteKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::DeleteKeyValueRequest* /*request*/, ::tensorflow::DeleteKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* DeleteKeyValue( + ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* DeleteKeyValue( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithRawCallbackMethod_Barrier : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_Barrier() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). 
+ #endif + MarkMethodRawCallback(13, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->Barrier(context, request, response); })); + } + ~ExperimentalWithRawCallbackMethod_Barrier() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Barrier(::grpc::ServerContext* /*context*/, const ::tensorflow::BarrierRequest* /*request*/, ::tensorflow::BarrierResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* Barrier( + ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* Barrier( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #endif + { return nullptr; } + }; + template + class ExperimentalWithRawCallbackMethod_CancelBarrier : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_CancelBarrier() { + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::Service:: + #else + ::grpc::Service::experimental(). 
+ #endif + MarkMethodRawCallback(14, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this]( + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + ::grpc::CallbackServerContext* + #else + ::grpc::experimental::CallbackServerContext* + #endif + context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->CancelBarrier(context, request, response); })); + } + ~ExperimentalWithRawCallbackMethod_CancelBarrier() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status CancelBarrier(::grpc::ServerContext* /*context*/, const ::tensorflow::CancelBarrierRequest* /*request*/, ::tensorflow::CancelBarrierResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + virtual ::grpc::ServerUnaryReactor* CancelBarrier( + ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #else + virtual ::grpc::experimental::ServerUnaryReactor* CancelBarrier( + ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) + #endif + { return nullptr; } + }; + template + class WithStreamedUnaryMethod_RegisterTask : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_RegisterTask() { + ::grpc::Service::MarkMethodStreamed(0, + new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::RegisterTaskRequest, ::tensorflow::RegisterTaskResponse>(std::bind(&WithStreamedUnaryMethod_RegisterTask::StreamedRegisterTask, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_RegisterTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status 
RegisterTask(::grpc::ServerContext* /*context*/, const ::tensorflow::RegisterTaskRequest* /*request*/, ::tensorflow::RegisterTaskResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedRegisterTask(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::tensorflow::RegisterTaskRequest,::tensorflow::RegisterTaskResponse>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_Heartbeat : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_Heartbeat() { + ::grpc::Service::MarkMethodStreamed(1, + new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::HeartbeatRequest, ::tensorflow::HeartbeatResponse>(std::bind(&WithStreamedUnaryMethod_Heartbeat::StreamedHeartbeat, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_Heartbeat() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status Heartbeat(::grpc::ServerContext* /*context*/, const ::tensorflow::HeartbeatRequest* /*request*/, ::tensorflow::HeartbeatResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedHeartbeat(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::tensorflow::HeartbeatRequest,::tensorflow::HeartbeatResponse>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_WaitForAllTasks : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_WaitForAllTasks() { + ::grpc::Service::MarkMethodStreamed(2, + new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::WaitForAllTasksRequest, 
::tensorflow::WaitForAllTasksResponse>(std::bind(&WithStreamedUnaryMethod_WaitForAllTasks::StreamedWaitForAllTasks, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_WaitForAllTasks() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status WaitForAllTasks(::grpc::ServerContext* /*context*/, const ::tensorflow::WaitForAllTasksRequest* /*request*/, ::tensorflow::WaitForAllTasksResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedWaitForAllTasks(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::tensorflow::WaitForAllTasksRequest,::tensorflow::WaitForAllTasksResponse>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_ShutdownTask : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_ShutdownTask() { + ::grpc::Service::MarkMethodStreamed(3, + new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::ShutdownTaskRequest, ::tensorflow::ShutdownTaskResponse>(std::bind(&WithStreamedUnaryMethod_ShutdownTask::StreamedShutdownTask, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_ShutdownTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status ShutdownTask(::grpc::ServerContext* /*context*/, const ::tensorflow::ShutdownTaskRequest* /*request*/, ::tensorflow::ShutdownTaskResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedShutdownTask(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< 
::tensorflow::ShutdownTaskRequest,::tensorflow::ShutdownTaskResponse>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_ResetTask : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_ResetTask() { + ::grpc::Service::MarkMethodStreamed(4, + new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::ResetTaskRequest, ::tensorflow::ResetTaskResponse>(std::bind(&WithStreamedUnaryMethod_ResetTask::StreamedResetTask, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_ResetTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status ResetTask(::grpc::ServerContext* /*context*/, const ::tensorflow::ResetTaskRequest* /*request*/, ::tensorflow::ResetTaskResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedResetTask(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::tensorflow::ResetTaskRequest,::tensorflow::ResetTaskResponse>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_ReportErrorToTask : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_ReportErrorToTask() { + ::grpc::Service::MarkMethodStreamed(5, + new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::ReportErrorToTaskRequest, ::tensorflow::ReportErrorToTaskResponse>(std::bind(&WithStreamedUnaryMethod_ReportErrorToTask::StreamedReportErrorToTask, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_ReportErrorToTask() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status ReportErrorToTask(::grpc::ServerContext* /*context*/, const 
::tensorflow::ReportErrorToTaskRequest* /*request*/, ::tensorflow::ReportErrorToTaskResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedReportErrorToTask(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::tensorflow::ReportErrorToTaskRequest,::tensorflow::ReportErrorToTaskResponse>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_ReportErrorToService : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_ReportErrorToService() { + ::grpc::Service::MarkMethodStreamed(6, + new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::ReportErrorToServiceRequest, ::tensorflow::ReportErrorToServiceResponse>(std::bind(&WithStreamedUnaryMethod_ReportErrorToService::StreamedReportErrorToService, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_ReportErrorToService() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status ReportErrorToService(::grpc::ServerContext* /*context*/, const ::tensorflow::ReportErrorToServiceRequest* /*request*/, ::tensorflow::ReportErrorToServiceResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedReportErrorToService(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::tensorflow::ReportErrorToServiceRequest,::tensorflow::ReportErrorToServiceResponse>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_GetTaskState : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_GetTaskState() { + 
::grpc::Service::MarkMethodStreamed(7, + new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::GetTaskStateRequest, ::tensorflow::GetTaskStateResponse>(std::bind(&WithStreamedUnaryMethod_GetTaskState::StreamedGetTaskState, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_GetTaskState() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status GetTaskState(::grpc::ServerContext* /*context*/, const ::tensorflow::GetTaskStateRequest* /*request*/, ::tensorflow::GetTaskStateResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedGetTaskState(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::tensorflow::GetTaskStateRequest,::tensorflow::GetTaskStateResponse>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_InsertKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_InsertKeyValue() { + ::grpc::Service::MarkMethodStreamed(8, + new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::InsertKeyValueRequest, ::tensorflow::InsertKeyValueResponse>(std::bind(&WithStreamedUnaryMethod_InsertKeyValue::StreamedInsertKeyValue, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_InsertKeyValue() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status InsertKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::InsertKeyValueRequest* /*request*/, ::tensorflow::InsertKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status 
StreamedInsertKeyValue(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::tensorflow::InsertKeyValueRequest,::tensorflow::InsertKeyValueResponse>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_GetKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_GetKeyValue() { + ::grpc::Service::MarkMethodStreamed(9, + new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::GetKeyValueRequest, ::tensorflow::GetKeyValueResponse>(std::bind(&WithStreamedUnaryMethod_GetKeyValue::StreamedGetKeyValue, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_GetKeyValue() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status GetKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::GetKeyValueRequest* /*request*/, ::tensorflow::GetKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedGetKeyValue(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::tensorflow::GetKeyValueRequest,::tensorflow::GetKeyValueResponse>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_TryGetKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_TryGetKeyValue() { + ::grpc::Service::MarkMethodStreamed(10, + new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::TryGetKeyValueRequest, ::tensorflow::TryGetKeyValueResponse>(std::bind(&WithStreamedUnaryMethod_TryGetKeyValue::StreamedTryGetKeyValue, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_TryGetKeyValue() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version 
of this method + ::grpc::Status TryGetKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::TryGetKeyValueRequest* /*request*/, ::tensorflow::TryGetKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedTryGetKeyValue(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::tensorflow::TryGetKeyValueRequest,::tensorflow::TryGetKeyValueResponse>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_GetKeyValueDir : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_GetKeyValueDir() { + ::grpc::Service::MarkMethodStreamed(11, + new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::GetKeyValueDirRequest, ::tensorflow::GetKeyValueDirResponse>(std::bind(&WithStreamedUnaryMethod_GetKeyValueDir::StreamedGetKeyValueDir, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_GetKeyValueDir() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status GetKeyValueDir(::grpc::ServerContext* /*context*/, const ::tensorflow::GetKeyValueDirRequest* /*request*/, ::tensorflow::GetKeyValueDirResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedGetKeyValueDir(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::tensorflow::GetKeyValueDirRequest,::tensorflow::GetKeyValueDirResponse>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_DeleteKeyValue : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_DeleteKeyValue() { + 
::grpc::Service::MarkMethodStreamed(12, + new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::DeleteKeyValueRequest, ::tensorflow::DeleteKeyValueResponse>(std::bind(&WithStreamedUnaryMethod_DeleteKeyValue::StreamedDeleteKeyValue, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_DeleteKeyValue() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status DeleteKeyValue(::grpc::ServerContext* /*context*/, const ::tensorflow::DeleteKeyValueRequest* /*request*/, ::tensorflow::DeleteKeyValueResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedDeleteKeyValue(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::tensorflow::DeleteKeyValueRequest,::tensorflow::DeleteKeyValueResponse>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_Barrier : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_Barrier() { + ::grpc::Service::MarkMethodStreamed(13, + new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::BarrierRequest, ::tensorflow::BarrierResponse>(std::bind(&WithStreamedUnaryMethod_Barrier::StreamedBarrier, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_Barrier() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status Barrier(::grpc::ServerContext* /*context*/, const ::tensorflow::BarrierRequest* /*request*/, ::tensorflow::BarrierResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedBarrier(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< 
::tensorflow::BarrierRequest,::tensorflow::BarrierResponse>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_CancelBarrier : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_CancelBarrier() { + ::grpc::Service::MarkMethodStreamed(14, + new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::CancelBarrierRequest, ::tensorflow::CancelBarrierResponse>(std::bind(&WithStreamedUnaryMethod_CancelBarrier::StreamedCancelBarrier, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_CancelBarrier() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status CancelBarrier(::grpc::ServerContext* /*context*/, const ::tensorflow::CancelBarrierRequest* /*request*/, ::tensorflow::CancelBarrierResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedCancelBarrier(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::tensorflow::CancelBarrierRequest,::tensorflow::CancelBarrierResponse>* server_unary_streamer) = 0; + }; + typedef WithStreamedUnaryMethod_RegisterTask > > > > > > > > > > > > > > StreamedUnaryService; + typedef Service SplitStreamedService; + typedef WithStreamedUnaryMethod_RegisterTask > > > > > > > > > > > > > > StreamedService; +}; + +} // namespace grpc + +} // namespace tensorflow + + +#endif // GRPC_tsl_2fprotobuf_2fcoordination_5fservice_2eproto__INCLUDED diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/histogram.proto b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/histogram.proto new file mode 100644 index 0000000000000000000000000000000000000000..2a5f6d9362a63b7254ed54bd150092cca0a65605 --- /dev/null +++ 
b/videochat2/lib/python3.10/site-packages/tensorflow/include/tsl/protobuf/histogram.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package tensorflow; + +option cc_enable_arenas = true; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/google/tsl/tsl/go/core/protobuf/summary_go_proto"; + +// Serialization format for histogram module in +// tsl/lib/histogram/histogram.h +message HistogramProto { + double min = 1; + double max = 2; + double num = 3; + double sum = 4; + double sum_squares = 5; + + // Parallel arrays encoding the bucket boundaries and the bucket values. + // bucket(i) is the count for the bucket i. The range for + // a bucket is: + // i == 0: -DBL_MAX .. bucket_limit(0) + // i != 0: bucket_limit(i-1) .. bucket_limit(i) + repeated double bucket_limit = 6 [packed = true]; + repeated double bucket = 7 [packed = true]; +}