Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- .venv/lib/python3.11/site-packages/ray/_raylet.so +3 -0
- .venv/lib/python3.11/site-packages/ray/core/generated/agent_manager_pb2_grpc.py +4 -0
- .venv/lib/python3.11/site-packages/ray/core/generated/core_worker_pb2.py +542 -0
- .venv/lib/python3.11/site-packages/ray/core/generated/export_actor_data_pb2_grpc.py +4 -0
- .venv/lib/python3.11/site-packages/ray/core/generated/reporter_pb2_grpc.py +259 -0
- .venv/lib/python3.11/site-packages/ray/core/generated/usage_pb2.py +111 -0
- .venv/lib/python3.11/site-packages/ray/core/generated/usage_pb2_grpc.py +4 -0
- .venv/lib/python3.11/site-packages/ray/core/src/__pycache__/__init__.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/core/src/plasma/__init__.py +0 -0
- .venv/lib/python3.11/site-packages/ray/core/src/plasma/__pycache__/__init__.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/core/src/ray/__init__.py +0 -0
- .venv/lib/python3.11/site-packages/ray/core/src/ray/__pycache__/__init__.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/core/src/ray/raylet/__init__.py +0 -0
- .venv/lib/python3.11/site-packages/ray/core/src/ray/raylet/__pycache__/__init__.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/dashboard/modules/metrics/__pycache__/install_and_start_prometheus.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/dashboard/modules/metrics/dashboards/__init__.py +0 -0
- .venv/lib/python3.11/site-packages/ray/dashboard/modules/metrics/dashboards/__pycache__/__init__.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/dashboard/modules/metrics/dashboards/__pycache__/common.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/dashboard/modules/metrics/dashboards/__pycache__/data_dashboard_panels.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/dashboard/modules/metrics/dashboards/__pycache__/default_dashboard_panels.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/dashboard/modules/metrics/dashboards/__pycache__/serve_dashboard_panels.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/dashboard/modules/metrics/dashboards/__pycache__/serve_deployment_dashboard_panels.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/dashboard/modules/metrics/dashboards/data_grafana_dashboard_base.json +147 -0
- .venv/lib/python3.11/site-packages/ray/dashboard/modules/metrics/dashboards/default_grafana_dashboard_base.json +142 -0
- .venv/lib/python3.11/site-packages/ray/util/client/__init__.py +302 -0
- .venv/lib/python3.11/site-packages/ray/util/client/__pycache__/__init__.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/util/client/__pycache__/api.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/util/client/__pycache__/client_app.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/util/client/__pycache__/client_pickler.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/util/client/__pycache__/common.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/util/client/__pycache__/dataclient.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/util/client/__pycache__/logsclient.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/util/client/__pycache__/options.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/util/client/__pycache__/ray_client_helpers.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/util/client/__pycache__/runtime_context.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/util/client/__pycache__/worker.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/util/client/client_pickler.py +178 -0
- .venv/lib/python3.11/site-packages/ray/util/client/logsclient.py +136 -0
- .venv/lib/python3.11/site-packages/ray/util/client/server/__init__.py +1 -0
- .venv/lib/python3.11/site-packages/ray/util/client/server/__main__.py +4 -0
- .venv/lib/python3.11/site-packages/ray/util/client/server/__pycache__/__init__.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/util/client/server/__pycache__/__main__.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/util/client/server/__pycache__/dataservicer.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/util/client/server/__pycache__/logservicer.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/util/client/server/__pycache__/proxier.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/util/client/server/__pycache__/server.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/util/client/server/__pycache__/server_pickler.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/util/client/server/__pycache__/server_stubs.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/ray/util/client/server/dataservicer.py +416 -0
.gitattributes
CHANGED
|
@@ -157,3 +157,4 @@ tuning-competition-baseline/.venv/lib/python3.11/site-packages/torch/_inductor/_
|
|
| 157 |
.venv/lib/python3.11/site-packages/numpy/ma/tests/__pycache__/test_extras.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
| 158 |
.venv/lib/python3.11/site-packages/ray/serve/_private/__pycache__/deployment_state.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
| 159 |
.venv/lib/python3.11/site-packages/xgrammar/xgrammar_bindings.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 157 |
.venv/lib/python3.11/site-packages/numpy/ma/tests/__pycache__/test_extras.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
| 158 |
.venv/lib/python3.11/site-packages/ray/serve/_private/__pycache__/deployment_state.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
| 159 |
.venv/lib/python3.11/site-packages/xgrammar/xgrammar_bindings.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 160 |
+
.venv/lib/python3.11/site-packages/ray/_raylet.so filter=lfs diff=lfs merge=lfs -text
|
.venv/lib/python3.11/site-packages/ray/_raylet.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e5516ec5efa37efb034ca0fa6c8403331a430101356bdf9829cded6351510037
|
| 3 |
+
size 35971224
|
.venv/lib/python3.11/site-packages/ray/core/generated/agent_manager_pb2_grpc.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
|
| 2 |
+
"""Client and server classes corresponding to protobuf-defined services."""
|
| 3 |
+
import grpc
|
| 4 |
+
|
.venv/lib/python3.11/site-packages/ray/core/generated/core_worker_pb2.py
ADDED
|
@@ -0,0 +1,542 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
| 3 |
+
# source: src/ray/protobuf/core_worker.proto
|
| 4 |
+
"""Generated protocol buffer code."""
|
| 5 |
+
from google.protobuf.internal import enum_type_wrapper
|
| 6 |
+
from google.protobuf import descriptor as _descriptor
|
| 7 |
+
from google.protobuf import descriptor_pool as _descriptor_pool
|
| 8 |
+
from google.protobuf import message as _message
|
| 9 |
+
from google.protobuf import reflection as _reflection
|
| 10 |
+
from google.protobuf import symbol_database as _symbol_database
|
| 11 |
+
# @@protoc_insertion_point(imports)
|
| 12 |
+
|
| 13 |
+
_sym_db = _symbol_database.Default()
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
from . import common_pb2 as src_dot_ray_dot_protobuf_dot_common__pb2
|
| 17 |
+
from . import pubsub_pb2 as src_dot_ray_dot_protobuf_dot_pubsub__pb2
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\"src/ray/protobuf/core_worker.proto\x12\x07ray.rpc\x1a\x1dsrc/ray/protobuf/common.proto\x1a\x1dsrc/ray/protobuf/pubsub.proto\"0\n\x0f\x41\x63tiveObjectIDs\x12\x1d\n\nobject_ids\x18\x01 \x03(\x0cR\tobjectIds\"\xfc\x05\n\x0b\x41\x63torHandle\x12\x19\n\x08\x61\x63tor_id\x18\x01 \x01(\x0cR\x07\x61\x63torId\x12\x19\n\x08owner_id\x18\x02 \x01(\x0cR\x07ownerId\x12\x35\n\rowner_address\x18\x03 \x01(\x0b\x32\x10.ray.rpc.AddressR\x0cownerAddress\x12&\n\x0f\x63reation_job_id\x18\x04 \x01(\x0cR\rcreationJobId\x12\x38\n\x0e\x61\x63tor_language\x18\x05 \x01(\x0e\x32\x11.ray.rpc.LanguageR\ractorLanguage\x12q\n\'actor_creation_task_function_descriptor\x18\x06 \x01(\x0b\x32\x1b.ray.rpc.FunctionDescriptorR#actorCreationTaskFunctionDescriptor\x12!\n\x0c\x61\x63tor_cursor\x18\x07 \x01(\x0cR\x0b\x61\x63torCursor\x12%\n\x0e\x65xtension_data\x18\x08 \x01(\x0cR\rextensionData\x12(\n\x10max_task_retries\x18\t \x01(\x03R\x0emaxTaskRetries\x12\x12\n\x04name\x18\n \x01(\tR\x04name\x12#\n\rray_namespace\x18\x0b \x01(\tR\x0crayNamespace\x12/\n\x14\x65xecute_out_of_order\x18\x0c \x01(\x08R\x11\x65xecuteOutOfOrder\x12*\n\x11max_pending_calls\x18\r \x01(\x05R\x0fmaxPendingCalls\x12,\n\x12\x65nable_task_events\x18\x0e \x01(\x08R\x10\x65nableTaskEvents\x12\x38\n\x06labels\x18\x0f \x03(\x0b\x32 .ray.rpc.ActorHandle.LabelsEntryR\x06labels\x1a\x39\n\x0bLabelsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\"\x93\x02\n\x0fPushTaskRequest\x12,\n\x12intended_worker_id\x18\x01 \x01(\x0cR\x10intendedWorkerId\x12.\n\ttask_spec\x18\x02 \x01(\x0b\x32\x11.ray.rpc.TaskSpecR\x08taskSpec\x12\'\n\x0fsequence_number\x18\x03 \x01(\x03R\x0esequenceNumber\x12\x33\n\x16\x63lient_processed_up_to\x18\x04 \x01(\x03R\x13\x63lientProcessedUpTo\x12\x44\n\x10resource_mapping\x18\x05 \x03(\x0b\x32\x19.ray.rpc.ResourceMapEntryR\x0fresourceMapping\"\x87\x05\n\rPushTaskReply\x12<\n\x0ereturn_objects\x18\x01 
\x03(\x0b\x32\x15.ray.rpc.ReturnObjectR\rreturnObjects\x12K\n\x16\x64ynamic_return_objects\x18\x02 \x03(\x0b\x32\x15.ray.rpc.ReturnObjectR\x14\x64ynamicReturnObjects\x12%\n\x0eworker_exiting\x18\x03 \x01(\x08R\rworkerExiting\x12\x42\n\rborrowed_refs\x18\x04 \x03(\x0b\x32\x1d.ray.rpc.ObjectReferenceCountR\x0c\x62orrowedRefs\x12,\n\x12is_retryable_error\x18\x05 \x01(\x08R\x10isRetryableError\x12\x30\n\x14is_application_error\x18\x06 \x01(\x08R\x12isApplicationError\x12?\n\x1cwas_cancelled_before_running\x18\x07 \x01(\x08R\x19wasCancelledBeforeRunning\x12+\n\x0f\x61\x63tor_repr_name\x18\x08 \x01(\tH\x00R\ractorReprName\x88\x01\x01\x12\x30\n\x14task_execution_error\x18\t \x01(\tR\x12taskExecutionError\x12l\n\x1estreaming_generator_return_ids\x18\n \x03(\x0b\x32\'.ray.rpc.StreamingGeneratorReturnIdInfoR\x1bstreamingGeneratorReturnIdsB\x12\n\x10_actor_repr_name\"g\n%DirectActorCallArgWaitCompleteRequest\x12,\n\x12intended_worker_id\x18\x01 \x01(\x0cR\x10intendedWorkerId\x12\x10\n\x03tag\x18\x02 \x01(\x03R\x03tag\"%\n#DirectActorCallArgWaitCompleteReply\"]\n\x16GetObjectStatusRequest\x12&\n\x0fowner_worker_id\x18\x01 \x01(\x0cR\rownerWorkerId\x12\x1b\n\tobject_id\x18\x02 \x01(\x0cR\x08objectId\"\x85\x01\n\tRayObject\x12\x12\n\x04\x64\x61ta\x18\x01 \x01(\x0cR\x04\x64\x61ta\x12\x1a\n\x08metadata\x18\x02 \x01(\x0cR\x08metadata\x12H\n\x13nested_inlined_refs\x18\x03 \x03(\x0b\x32\x18.ray.rpc.ObjectReferenceR\x11nestedInlinedRefs\"\xfc\x01\n\x14GetObjectStatusReply\x12\x42\n\x06status\x18\x01 \x01(\x0e\x32*.ray.rpc.GetObjectStatusReply.ObjectStatusR\x06status\x12*\n\x06object\x18\x02 \x01(\x0b\x32\x12.ray.rpc.RayObjectR\x06object\x12\x19\n\x08node_ids\x18\x03 \x03(\x0cR\x07nodeIds\x12\x1f\n\x0bobject_size\x18\x04 \x01(\x04R\nobjectSize\"8\n\x0cObjectStatus\x12\x0b\n\x07\x43REATED\x10\x00\x12\x10\n\x0cOUT_OF_SCOPE\x10\x01\x12\t\n\x05\x46REED\x10\x02\"h\n\x1dWaitForActorRefDeletedRequest\x12,\n\x12intended_worker_id\x18\x01 
\x01(\x0cR\x10intendedWorkerId\x12\x19\n\x08\x61\x63tor_id\x18\x02 \x01(\x0cR\x07\x61\x63torId\"\x1d\n\x1bWaitForActorRefDeletedReply\"\xc0\x01\n UpdateObjectLocationBatchRequest\x12,\n\x12intended_worker_id\x18\x01 \x01(\x0cR\x10intendedWorkerId\x12\x17\n\x07node_id\x18\x02 \x01(\x0cR\x06nodeId\x12U\n\x17object_location_updates\x18\x03 \x03(\x0b\x32\x1d.ray.rpc.ObjectLocationUpdateR\x15objectLocationUpdates\" \n\x1eUpdateObjectLocationBatchReply\"w\n\x1bObjectSpilledLocationUpdate\x12\x1f\n\x0bspilled_url\x18\x03 \x01(\tR\nspilledUrl\x12\x37\n\x18spilled_to_local_storage\x18\x04 \x01(\x08R\x15spilledToLocalStorage\"\xe6\x02\n\x14ObjectLocationUpdate\x12\x1b\n\tobject_id\x18\x01 \x01(\x0cR\x08objectId\x12^\n\x16plasma_location_update\x18\x02 \x01(\x0e\x32#.ray.rpc.ObjectPlasmaLocationUpdateH\x00R\x14plasmaLocationUpdate\x88\x01\x01\x12\x61\n\x17spilled_location_update\x18\x03 \x01(\x0b\x32$.ray.rpc.ObjectSpilledLocationUpdateH\x01R\x15spilledLocationUpdate\x88\x01\x01\x12&\n\x0cgenerator_id\x18\x04 \x01(\x0cH\x02R\x0bgeneratorId\x88\x01\x01\x42\x19\n\x17_plasma_location_updateB\x1a\n\x18_spilled_location_updateB\x0f\n\r_generator_id\"m\n\x1eGetObjectLocationsOwnerRequest\x12,\n\x12intended_worker_id\x18\x01 \x01(\x0cR\x10intendedWorkerId\x12\x1d\n\nobject_ids\x18\x02 \x03(\x0cR\tobjectIds\"|\n\x1cGetObjectLocationsOwnerReply\x12\\\n\x15object_location_infos\x18\x01 \x03(\x0b\x32(.ray.rpc.WorkerObjectLocationsPubMessageR\x13objectLocationInfos\"\x98\x01\n\x10KillActorRequest\x12*\n\x11intended_actor_id\x18\x01 \x01(\x0cR\x0fintendedActorId\x12\x1d\n\nforce_kill\x18\x02 \x01(\x08R\tforceKill\x12\x39\n\x0b\x64\x65\x61th_cause\x18\x03 \x01(\x0b\x32\x18.ray.rpc.ActorDeathCauseR\ndeathCause\"\x10\n\x0eKillActorReply\"\xa4\x01\n\x11\x43\x61ncelTaskRequest\x12(\n\x10intended_task_id\x18\x01 \x01(\x0cR\x0eintendedTaskId\x12\x1d\n\nforce_kill\x18\x02 \x01(\x08R\tforceKill\x12\x1c\n\trecursive\x18\x03 \x01(\x08R\trecursive\x12(\n\x10\x63\x61ller_worker_id\x18\x04 
\x01(\x0cR\x0e\x63\x61llerWorkerId\"t\n\x0f\x43\x61ncelTaskReply\x12\x34\n\x16requested_task_running\x18\x01 \x01(\x08R\x14requestedTaskRunning\x12+\n\x11\x61ttempt_succeeded\x18\x02 \x01(\x08R\x10\x61ttemptSucceeded\"\x80\x01\n\x17RemoteCancelTaskRequest\x12(\n\x10remote_object_id\x18\x01 \x01(\x0cR\x0eremoteObjectId\x12\x1d\n\nforce_kill\x18\x02 \x01(\x08R\tforceKill\x12\x1c\n\trecursive\x18\x03 \x01(\x08R\trecursive\"\x17\n\x15RemoteCancelTaskReply\"\xca\x01\n\x19GetCoreWorkerStatsRequest\x12,\n\x12intended_worker_id\x18\x01 \x01(\x0cR\x10intendedWorkerId\x12.\n\x13include_memory_info\x18\x02 \x01(\x08R\x11includeMemoryInfo\x12*\n\x11include_task_info\x18\x03 \x01(\x08R\x0fincludeTaskInfo\x12\x19\n\x05limit\x18\x04 \x01(\x03H\x00R\x05limit\x88\x01\x01\x42\x08\n\x06_limit\"\xf9\x01\n\x17GetCoreWorkerStatsReply\x12\x44\n\x11\x63ore_worker_stats\x18\x01 \x01(\x0b\x32\x18.ray.rpc.CoreWorkerStatsR\x0f\x63oreWorkerStats\x12M\n\x17owned_task_info_entries\x18\x02 \x03(\x0b\x32\x16.ray.rpc.TaskInfoEntryR\x14ownedTaskInfoEntries\x12(\n\x10running_task_ids\x18\x03 \x03(\x0cR\x0erunningTaskIds\x12\x1f\n\x0btasks_total\x18\x04 \x01(\x03R\ntasksTotal\"E\n\x0eLocalGCRequest\x12\x33\n\x16triggered_by_global_gc\x18\x01 \x01(\x08R\x13triggeredByGlobalGc\"\x0e\n\x0cLocalGCReply\"7\n\x18PlasmaObjectReadyRequest\x12\x1b\n\tobject_id\x18\x01 \x01(\x0cR\x08objectId\"\x18\n\x16PlasmaObjectReadyReply\"T\n\x14\x44\x65leteObjectsRequest\x12\x1d\n\nobject_ids\x18\x01 \x03(\x0cR\tobjectIds\x12\x1d\n\nlocal_only\x18\x02 \x01(\x08R\tlocalOnly\"\x14\n\x12\x44\x65leteObjectsReply\"\xa6\x01\n\x13SpillObjectsRequest\x12I\n\x14object_refs_to_spill\x18\x01 \x03(\x0b\x32\x18.ray.rpc.ObjectReferenceR\x11objectRefsToSpill\x12\x44\n\x0e\x64\x65lete_request\x18\x02 \x01(\x0b\x32\x1d.ray.rpc.DeleteObjectsRequestR\rdeleteRequest\"C\n\x11SpillObjectsReply\x12.\n\x13spilled_objects_url\x18\x01 \x03(\tR\x11spilledObjectsUrl\"\x81\x01\n\x1cRestoreSpilledObjectsRequest\x12.\n\x13spilled_objects_url\x18\x01 
\x03(\tR\x11spilledObjectsUrl\x12\x31\n\x15object_ids_to_restore\x18\x02 \x03(\x0cR\x12objectIdsToRestore\"N\n\x1aRestoreSpilledObjectsReply\x12\x30\n\x14\x62ytes_restored_total\x18\x01 \x01(\x03R\x12\x62ytesRestoredTotal\"M\n\x1b\x44\x65leteSpilledObjectsRequest\x12.\n\x13spilled_objects_url\x18\x01 \x03(\tR\x11spilledObjectsUrl\"\x1b\n\x19\x44\x65leteSpilledObjectsReply\",\n\x0b\x45xitRequest\x12\x1d\n\nforce_exit\x18\x01 \x01(\x08R\tforceExit\"%\n\tExitReply\x12\x18\n\x07success\x18\x01 \x01(\x08R\x07success\"\xe4\x01\n\x18\x41ssignObjectOwnerRequest\x12\x1b\n\tobject_id\x18\x01 \x01(\x0cR\x08objectId\x12\x1f\n\x0bobject_size\x18\x02 \x01(\x04R\nobjectSize\x12\x30\n\x14\x63ontained_object_ids\x18\x03 \x03(\x0cR\x12\x63ontainedObjectIds\x12;\n\x10\x62orrower_address\x18\x04 \x01(\x0b\x32\x10.ray.rpc.AddressR\x0f\x62orrowerAddress\x12\x1b\n\tcall_site\x18\x05 \x01(\tR\x08\x63\x61llSite\"\x18\n\x16\x41ssignObjectOwnerReply\"\x1f\n\x1dRayletNotifyGCSRestartRequest\"\x1d\n\x1bRayletNotifyGCSRestartReply\"\x18\n\x16NumPendingTasksRequest\"B\n\x14NumPendingTasksReply\x12*\n\x11num_pending_tasks\x18\x01 \x01(\x03R\x0fnumPendingTasks\"\x8c\x02\n!ReportGeneratorItemReturnsRequest\x12K\n\x16\x64ynamic_return_objects\x18\x01 \x03(\x0b\x32\x15.ray.rpc.ReturnObjectR\x14\x64ynamicReturnObjects\x12\x31\n\x0bworker_addr\x18\x02 \x01(\x0b\x32\x10.ray.rpc.AddressR\nworkerAddr\x12\x1d\n\nitem_index\x18\x03 \x01(\x03R\titemIndex\x12!\n\x0cgenerator_id\x18\x05 \x01(\x0cR\x0bgeneratorId\x12%\n\x0e\x61ttempt_number\x18\x06 \x01(\x04R\rattemptNumber\"\\\n\x1fReportGeneratorItemReturnsReply\x12\x39\n\x19total_num_object_consumed\x18\x01 \x01(\x03R\x16totalNumObjectConsumed\"\x99\x01\n\"RegisterMutableObjectReaderRequest\x12(\n\x10writer_object_id\x18\x01 \x01(\x0cR\x0ewriterObjectId\x12\x1f\n\x0bnum_readers\x18\x02 \x01(\x03R\nnumReaders\x12(\n\x10reader_object_id\x18\x03 \x01(\x0cR\x0ereaderObjectId\"\"\n 
RegisterMutableObjectReaderReply*4\n\x1aObjectPlasmaLocationUpdate\x12\t\n\x05\x41\x44\x44\x45\x44\x10\x00\x12\x0b\n\x07REMOVED\x10\x01\x32\xf7\x10\n\x11\x43oreWorkerService\x12\x66\n\x16RayletNotifyGCSRestart\x12&.ray.rpc.RayletNotifyGCSRestartRequest\x1a$.ray.rpc.RayletNotifyGCSRestartReply\x12<\n\x08PushTask\x12\x18.ray.rpc.PushTaskRequest\x1a\x16.ray.rpc.PushTaskReply\x12~\n\x1e\x44irectActorCallArgWaitComplete\x12..ray.rpc.DirectActorCallArgWaitCompleteRequest\x1a,.ray.rpc.DirectActorCallArgWaitCompleteReply\x12Q\n\x0fGetObjectStatus\x12\x1f.ray.rpc.GetObjectStatusRequest\x1a\x1d.ray.rpc.GetObjectStatusReply\x12\x66\n\x16WaitForActorRefDeleted\x12&.ray.rpc.WaitForActorRefDeletedRequest\x1a$.ray.rpc.WaitForActorRefDeletedReply\x12W\n\x11PubsubLongPolling\x12!.ray.rpc.PubsubLongPollingRequest\x1a\x1f.ray.rpc.PubsubLongPollingReply\x12r\n\x1aReportGeneratorItemReturns\x12*.ray.rpc.ReportGeneratorItemReturnsRequest\x1a(.ray.rpc.ReportGeneratorItemReturnsReply\x12Z\n\x12PubsubCommandBatch\x12\".ray.rpc.PubsubCommandBatchRequest\x1a .ray.rpc.PubsubCommandBatchReply\x12o\n\x19UpdateObjectLocationBatch\x12).ray.rpc.UpdateObjectLocationBatchRequest\x1a\'.ray.rpc.UpdateObjectLocationBatchReply\x12i\n\x17GetObjectLocationsOwner\x12\'.ray.rpc.GetObjectLocationsOwnerRequest\x1a%.ray.rpc.GetObjectLocationsOwnerReply\x12?\n\tKillActor\x12\x19.ray.rpc.KillActorRequest\x1a\x17.ray.rpc.KillActorReply\x12\x42\n\nCancelTask\x12\x1a.ray.rpc.CancelTaskRequest\x1a\x18.ray.rpc.CancelTaskReply\x12T\n\x10RemoteCancelTask\x12 .ray.rpc.RemoteCancelTaskRequest\x1a\x1e.ray.rpc.RemoteCancelTaskReply\x12Z\n\x12GetCoreWorkerStats\x12\".ray.rpc.GetCoreWorkerStatsRequest\x1a 
.ray.rpc.GetCoreWorkerStatsReply\x12\x39\n\x07LocalGC\x12\x17.ray.rpc.LocalGCRequest\x1a\x15.ray.rpc.LocalGCReply\x12K\n\rDeleteObjects\x12\x1d.ray.rpc.DeleteObjectsRequest\x1a\x1b.ray.rpc.DeleteObjectsReply\x12H\n\x0cSpillObjects\x12\x1c.ray.rpc.SpillObjectsRequest\x1a\x1a.ray.rpc.SpillObjectsReply\x12\x63\n\x15RestoreSpilledObjects\x12%.ray.rpc.RestoreSpilledObjectsRequest\x1a#.ray.rpc.RestoreSpilledObjectsReply\x12`\n\x14\x44\x65leteSpilledObjects\x12$.ray.rpc.DeleteSpilledObjectsRequest\x1a\".ray.rpc.DeleteSpilledObjectsReply\x12W\n\x11PlasmaObjectReady\x12!.ray.rpc.PlasmaObjectReadyRequest\x1a\x1f.ray.rpc.PlasmaObjectReadyReply\x12\x30\n\x04\x45xit\x12\x14.ray.rpc.ExitRequest\x1a\x12.ray.rpc.ExitReply\x12W\n\x11\x41ssignObjectOwner\x12!.ray.rpc.AssignObjectOwnerRequest\x1a\x1f.ray.rpc.AssignObjectOwnerReply\x12Q\n\x0fNumPendingTasks\x12\x1f.ray.rpc.NumPendingTasksRequest\x1a\x1d.ray.rpc.NumPendingTasksReply\x12u\n\x1bRegisterMutableObjectReader\x12+.ray.rpc.RegisterMutableObjectReaderRequest\x1a).ray.rpc.RegisterMutableObjectReaderReplyb\x06proto3')
|
| 21 |
+
|
| 22 |
+
_OBJECTPLASMALOCATIONUPDATE = DESCRIPTOR.enum_types_by_name['ObjectPlasmaLocationUpdate']
|
| 23 |
+
ObjectPlasmaLocationUpdate = enum_type_wrapper.EnumTypeWrapper(_OBJECTPLASMALOCATIONUPDATE)
|
| 24 |
+
ADDED = 0
|
| 25 |
+
REMOVED = 1
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
_ACTIVEOBJECTIDS = DESCRIPTOR.message_types_by_name['ActiveObjectIDs']
|
| 29 |
+
_ACTORHANDLE = DESCRIPTOR.message_types_by_name['ActorHandle']
|
| 30 |
+
_ACTORHANDLE_LABELSENTRY = _ACTORHANDLE.nested_types_by_name['LabelsEntry']
|
| 31 |
+
_PUSHTASKREQUEST = DESCRIPTOR.message_types_by_name['PushTaskRequest']
|
| 32 |
+
_PUSHTASKREPLY = DESCRIPTOR.message_types_by_name['PushTaskReply']
|
| 33 |
+
_DIRECTACTORCALLARGWAITCOMPLETEREQUEST = DESCRIPTOR.message_types_by_name['DirectActorCallArgWaitCompleteRequest']
|
| 34 |
+
_DIRECTACTORCALLARGWAITCOMPLETEREPLY = DESCRIPTOR.message_types_by_name['DirectActorCallArgWaitCompleteReply']
|
| 35 |
+
_GETOBJECTSTATUSREQUEST = DESCRIPTOR.message_types_by_name['GetObjectStatusRequest']
|
| 36 |
+
_RAYOBJECT = DESCRIPTOR.message_types_by_name['RayObject']
|
| 37 |
+
_GETOBJECTSTATUSREPLY = DESCRIPTOR.message_types_by_name['GetObjectStatusReply']
|
| 38 |
+
_WAITFORACTORREFDELETEDREQUEST = DESCRIPTOR.message_types_by_name['WaitForActorRefDeletedRequest']
|
| 39 |
+
_WAITFORACTORREFDELETEDREPLY = DESCRIPTOR.message_types_by_name['WaitForActorRefDeletedReply']
|
| 40 |
+
_UPDATEOBJECTLOCATIONBATCHREQUEST = DESCRIPTOR.message_types_by_name['UpdateObjectLocationBatchRequest']
|
| 41 |
+
_UPDATEOBJECTLOCATIONBATCHREPLY = DESCRIPTOR.message_types_by_name['UpdateObjectLocationBatchReply']
|
| 42 |
+
_OBJECTSPILLEDLOCATIONUPDATE = DESCRIPTOR.message_types_by_name['ObjectSpilledLocationUpdate']
|
| 43 |
+
_OBJECTLOCATIONUPDATE = DESCRIPTOR.message_types_by_name['ObjectLocationUpdate']
|
| 44 |
+
_GETOBJECTLOCATIONSOWNERREQUEST = DESCRIPTOR.message_types_by_name['GetObjectLocationsOwnerRequest']
|
| 45 |
+
_GETOBJECTLOCATIONSOWNERREPLY = DESCRIPTOR.message_types_by_name['GetObjectLocationsOwnerReply']
|
| 46 |
+
_KILLACTORREQUEST = DESCRIPTOR.message_types_by_name['KillActorRequest']
|
| 47 |
+
_KILLACTORREPLY = DESCRIPTOR.message_types_by_name['KillActorReply']
|
| 48 |
+
_CANCELTASKREQUEST = DESCRIPTOR.message_types_by_name['CancelTaskRequest']
|
| 49 |
+
_CANCELTASKREPLY = DESCRIPTOR.message_types_by_name['CancelTaskReply']
|
| 50 |
+
_REMOTECANCELTASKREQUEST = DESCRIPTOR.message_types_by_name['RemoteCancelTaskRequest']
|
| 51 |
+
_REMOTECANCELTASKREPLY = DESCRIPTOR.message_types_by_name['RemoteCancelTaskReply']
|
| 52 |
+
_GETCOREWORKERSTATSREQUEST = DESCRIPTOR.message_types_by_name['GetCoreWorkerStatsRequest']
|
| 53 |
+
_GETCOREWORKERSTATSREPLY = DESCRIPTOR.message_types_by_name['GetCoreWorkerStatsReply']
|
| 54 |
+
_LOCALGCREQUEST = DESCRIPTOR.message_types_by_name['LocalGCRequest']
|
| 55 |
+
_LOCALGCREPLY = DESCRIPTOR.message_types_by_name['LocalGCReply']
|
| 56 |
+
_PLASMAOBJECTREADYREQUEST = DESCRIPTOR.message_types_by_name['PlasmaObjectReadyRequest']
|
| 57 |
+
_PLASMAOBJECTREADYREPLY = DESCRIPTOR.message_types_by_name['PlasmaObjectReadyReply']
|
| 58 |
+
_DELETEOBJECTSREQUEST = DESCRIPTOR.message_types_by_name['DeleteObjectsRequest']
|
| 59 |
+
_DELETEOBJECTSREPLY = DESCRIPTOR.message_types_by_name['DeleteObjectsReply']
|
| 60 |
+
_SPILLOBJECTSREQUEST = DESCRIPTOR.message_types_by_name['SpillObjectsRequest']
|
| 61 |
+
_SPILLOBJECTSREPLY = DESCRIPTOR.message_types_by_name['SpillObjectsReply']
|
| 62 |
+
_RESTORESPILLEDOBJECTSREQUEST = DESCRIPTOR.message_types_by_name['RestoreSpilledObjectsRequest']
|
| 63 |
+
_RESTORESPILLEDOBJECTSREPLY = DESCRIPTOR.message_types_by_name['RestoreSpilledObjectsReply']
|
| 64 |
+
_DELETESPILLEDOBJECTSREQUEST = DESCRIPTOR.message_types_by_name['DeleteSpilledObjectsRequest']
|
| 65 |
+
_DELETESPILLEDOBJECTSREPLY = DESCRIPTOR.message_types_by_name['DeleteSpilledObjectsReply']
|
| 66 |
+
_EXITREQUEST = DESCRIPTOR.message_types_by_name['ExitRequest']
|
| 67 |
+
_EXITREPLY = DESCRIPTOR.message_types_by_name['ExitReply']
|
| 68 |
+
_ASSIGNOBJECTOWNERREQUEST = DESCRIPTOR.message_types_by_name['AssignObjectOwnerRequest']
|
| 69 |
+
_ASSIGNOBJECTOWNERREPLY = DESCRIPTOR.message_types_by_name['AssignObjectOwnerReply']
|
| 70 |
+
_RAYLETNOTIFYGCSRESTARTREQUEST = DESCRIPTOR.message_types_by_name['RayletNotifyGCSRestartRequest']
|
| 71 |
+
_RAYLETNOTIFYGCSRESTARTREPLY = DESCRIPTOR.message_types_by_name['RayletNotifyGCSRestartReply']
|
| 72 |
+
_NUMPENDINGTASKSREQUEST = DESCRIPTOR.message_types_by_name['NumPendingTasksRequest']
|
| 73 |
+
_NUMPENDINGTASKSREPLY = DESCRIPTOR.message_types_by_name['NumPendingTasksReply']
|
| 74 |
+
_REPORTGENERATORITEMRETURNSREQUEST = DESCRIPTOR.message_types_by_name['ReportGeneratorItemReturnsRequest']
|
| 75 |
+
_REPORTGENERATORITEMRETURNSREPLY = DESCRIPTOR.message_types_by_name['ReportGeneratorItemReturnsReply']
|
| 76 |
+
_REGISTERMUTABLEOBJECTREADERREQUEST = DESCRIPTOR.message_types_by_name['RegisterMutableObjectReaderRequest']
|
| 77 |
+
_REGISTERMUTABLEOBJECTREADERREPLY = DESCRIPTOR.message_types_by_name['RegisterMutableObjectReaderReply']
|
| 78 |
+
_GETOBJECTSTATUSREPLY_OBJECTSTATUS = _GETOBJECTSTATUSREPLY.enum_types_by_name['ObjectStatus']
|
| 79 |
+
ActiveObjectIDs = _reflection.GeneratedProtocolMessageType('ActiveObjectIDs', (_message.Message,), {
|
| 80 |
+
'DESCRIPTOR' : _ACTIVEOBJECTIDS,
|
| 81 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 82 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.ActiveObjectIDs)
|
| 83 |
+
})
|
| 84 |
+
_sym_db.RegisterMessage(ActiveObjectIDs)
|
| 85 |
+
|
| 86 |
+
ActorHandle = _reflection.GeneratedProtocolMessageType('ActorHandle', (_message.Message,), {
|
| 87 |
+
|
| 88 |
+
'LabelsEntry' : _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), {
|
| 89 |
+
'DESCRIPTOR' : _ACTORHANDLE_LABELSENTRY,
|
| 90 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 91 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.ActorHandle.LabelsEntry)
|
| 92 |
+
})
|
| 93 |
+
,
|
| 94 |
+
'DESCRIPTOR' : _ACTORHANDLE,
|
| 95 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 96 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.ActorHandle)
|
| 97 |
+
})
|
| 98 |
+
_sym_db.RegisterMessage(ActorHandle)
|
| 99 |
+
_sym_db.RegisterMessage(ActorHandle.LabelsEntry)
|
| 100 |
+
|
| 101 |
+
PushTaskRequest = _reflection.GeneratedProtocolMessageType('PushTaskRequest', (_message.Message,), {
|
| 102 |
+
'DESCRIPTOR' : _PUSHTASKREQUEST,
|
| 103 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 104 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.PushTaskRequest)
|
| 105 |
+
})
|
| 106 |
+
_sym_db.RegisterMessage(PushTaskRequest)
|
| 107 |
+
|
| 108 |
+
PushTaskReply = _reflection.GeneratedProtocolMessageType('PushTaskReply', (_message.Message,), {
|
| 109 |
+
'DESCRIPTOR' : _PUSHTASKREPLY,
|
| 110 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 111 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.PushTaskReply)
|
| 112 |
+
})
|
| 113 |
+
_sym_db.RegisterMessage(PushTaskReply)
|
| 114 |
+
|
| 115 |
+
DirectActorCallArgWaitCompleteRequest = _reflection.GeneratedProtocolMessageType('DirectActorCallArgWaitCompleteRequest', (_message.Message,), {
|
| 116 |
+
'DESCRIPTOR' : _DIRECTACTORCALLARGWAITCOMPLETEREQUEST,
|
| 117 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 118 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.DirectActorCallArgWaitCompleteRequest)
|
| 119 |
+
})
|
| 120 |
+
_sym_db.RegisterMessage(DirectActorCallArgWaitCompleteRequest)
|
| 121 |
+
|
| 122 |
+
DirectActorCallArgWaitCompleteReply = _reflection.GeneratedProtocolMessageType('DirectActorCallArgWaitCompleteReply', (_message.Message,), {
|
| 123 |
+
'DESCRIPTOR' : _DIRECTACTORCALLARGWAITCOMPLETEREPLY,
|
| 124 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 125 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.DirectActorCallArgWaitCompleteReply)
|
| 126 |
+
})
|
| 127 |
+
_sym_db.RegisterMessage(DirectActorCallArgWaitCompleteReply)
|
| 128 |
+
|
| 129 |
+
GetObjectStatusRequest = _reflection.GeneratedProtocolMessageType('GetObjectStatusRequest', (_message.Message,), {
|
| 130 |
+
'DESCRIPTOR' : _GETOBJECTSTATUSREQUEST,
|
| 131 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 132 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.GetObjectStatusRequest)
|
| 133 |
+
})
|
| 134 |
+
_sym_db.RegisterMessage(GetObjectStatusRequest)
|
| 135 |
+
|
| 136 |
+
RayObject = _reflection.GeneratedProtocolMessageType('RayObject', (_message.Message,), {
|
| 137 |
+
'DESCRIPTOR' : _RAYOBJECT,
|
| 138 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 139 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.RayObject)
|
| 140 |
+
})
|
| 141 |
+
_sym_db.RegisterMessage(RayObject)
|
| 142 |
+
|
| 143 |
+
GetObjectStatusReply = _reflection.GeneratedProtocolMessageType('GetObjectStatusReply', (_message.Message,), {
|
| 144 |
+
'DESCRIPTOR' : _GETOBJECTSTATUSREPLY,
|
| 145 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 146 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.GetObjectStatusReply)
|
| 147 |
+
})
|
| 148 |
+
_sym_db.RegisterMessage(GetObjectStatusReply)
|
| 149 |
+
|
| 150 |
+
WaitForActorRefDeletedRequest = _reflection.GeneratedProtocolMessageType('WaitForActorRefDeletedRequest', (_message.Message,), {
|
| 151 |
+
'DESCRIPTOR' : _WAITFORACTORREFDELETEDREQUEST,
|
| 152 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 153 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.WaitForActorRefDeletedRequest)
|
| 154 |
+
})
|
| 155 |
+
_sym_db.RegisterMessage(WaitForActorRefDeletedRequest)
|
| 156 |
+
|
| 157 |
+
WaitForActorRefDeletedReply = _reflection.GeneratedProtocolMessageType('WaitForActorRefDeletedReply', (_message.Message,), {
|
| 158 |
+
'DESCRIPTOR' : _WAITFORACTORREFDELETEDREPLY,
|
| 159 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 160 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.WaitForActorRefDeletedReply)
|
| 161 |
+
})
|
| 162 |
+
_sym_db.RegisterMessage(WaitForActorRefDeletedReply)
|
| 163 |
+
|
| 164 |
+
UpdateObjectLocationBatchRequest = _reflection.GeneratedProtocolMessageType('UpdateObjectLocationBatchRequest', (_message.Message,), {
|
| 165 |
+
'DESCRIPTOR' : _UPDATEOBJECTLOCATIONBATCHREQUEST,
|
| 166 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 167 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.UpdateObjectLocationBatchRequest)
|
| 168 |
+
})
|
| 169 |
+
_sym_db.RegisterMessage(UpdateObjectLocationBatchRequest)
|
| 170 |
+
|
| 171 |
+
UpdateObjectLocationBatchReply = _reflection.GeneratedProtocolMessageType('UpdateObjectLocationBatchReply', (_message.Message,), {
|
| 172 |
+
'DESCRIPTOR' : _UPDATEOBJECTLOCATIONBATCHREPLY,
|
| 173 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 174 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.UpdateObjectLocationBatchReply)
|
| 175 |
+
})
|
| 176 |
+
_sym_db.RegisterMessage(UpdateObjectLocationBatchReply)
|
| 177 |
+
|
| 178 |
+
ObjectSpilledLocationUpdate = _reflection.GeneratedProtocolMessageType('ObjectSpilledLocationUpdate', (_message.Message,), {
|
| 179 |
+
'DESCRIPTOR' : _OBJECTSPILLEDLOCATIONUPDATE,
|
| 180 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 181 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.ObjectSpilledLocationUpdate)
|
| 182 |
+
})
|
| 183 |
+
_sym_db.RegisterMessage(ObjectSpilledLocationUpdate)
|
| 184 |
+
|
| 185 |
+
ObjectLocationUpdate = _reflection.GeneratedProtocolMessageType('ObjectLocationUpdate', (_message.Message,), {
|
| 186 |
+
'DESCRIPTOR' : _OBJECTLOCATIONUPDATE,
|
| 187 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 188 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.ObjectLocationUpdate)
|
| 189 |
+
})
|
| 190 |
+
_sym_db.RegisterMessage(ObjectLocationUpdate)
|
| 191 |
+
|
| 192 |
+
GetObjectLocationsOwnerRequest = _reflection.GeneratedProtocolMessageType('GetObjectLocationsOwnerRequest', (_message.Message,), {
|
| 193 |
+
'DESCRIPTOR' : _GETOBJECTLOCATIONSOWNERREQUEST,
|
| 194 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 195 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.GetObjectLocationsOwnerRequest)
|
| 196 |
+
})
|
| 197 |
+
_sym_db.RegisterMessage(GetObjectLocationsOwnerRequest)
|
| 198 |
+
|
| 199 |
+
GetObjectLocationsOwnerReply = _reflection.GeneratedProtocolMessageType('GetObjectLocationsOwnerReply', (_message.Message,), {
|
| 200 |
+
'DESCRIPTOR' : _GETOBJECTLOCATIONSOWNERREPLY,
|
| 201 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 202 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.GetObjectLocationsOwnerReply)
|
| 203 |
+
})
|
| 204 |
+
_sym_db.RegisterMessage(GetObjectLocationsOwnerReply)
|
| 205 |
+
|
| 206 |
+
KillActorRequest = _reflection.GeneratedProtocolMessageType('KillActorRequest', (_message.Message,), {
|
| 207 |
+
'DESCRIPTOR' : _KILLACTORREQUEST,
|
| 208 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 209 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.KillActorRequest)
|
| 210 |
+
})
|
| 211 |
+
_sym_db.RegisterMessage(KillActorRequest)
|
| 212 |
+
|
| 213 |
+
KillActorReply = _reflection.GeneratedProtocolMessageType('KillActorReply', (_message.Message,), {
|
| 214 |
+
'DESCRIPTOR' : _KILLACTORREPLY,
|
| 215 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 216 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.KillActorReply)
|
| 217 |
+
})
|
| 218 |
+
_sym_db.RegisterMessage(KillActorReply)
|
| 219 |
+
|
| 220 |
+
CancelTaskRequest = _reflection.GeneratedProtocolMessageType('CancelTaskRequest', (_message.Message,), {
|
| 221 |
+
'DESCRIPTOR' : _CANCELTASKREQUEST,
|
| 222 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 223 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.CancelTaskRequest)
|
| 224 |
+
})
|
| 225 |
+
_sym_db.RegisterMessage(CancelTaskRequest)
|
| 226 |
+
|
| 227 |
+
CancelTaskReply = _reflection.GeneratedProtocolMessageType('CancelTaskReply', (_message.Message,), {
|
| 228 |
+
'DESCRIPTOR' : _CANCELTASKREPLY,
|
| 229 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 230 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.CancelTaskReply)
|
| 231 |
+
})
|
| 232 |
+
_sym_db.RegisterMessage(CancelTaskReply)
|
| 233 |
+
|
| 234 |
+
RemoteCancelTaskRequest = _reflection.GeneratedProtocolMessageType('RemoteCancelTaskRequest', (_message.Message,), {
|
| 235 |
+
'DESCRIPTOR' : _REMOTECANCELTASKREQUEST,
|
| 236 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 237 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.RemoteCancelTaskRequest)
|
| 238 |
+
})
|
| 239 |
+
_sym_db.RegisterMessage(RemoteCancelTaskRequest)
|
| 240 |
+
|
| 241 |
+
RemoteCancelTaskReply = _reflection.GeneratedProtocolMessageType('RemoteCancelTaskReply', (_message.Message,), {
|
| 242 |
+
'DESCRIPTOR' : _REMOTECANCELTASKREPLY,
|
| 243 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 244 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.RemoteCancelTaskReply)
|
| 245 |
+
})
|
| 246 |
+
_sym_db.RegisterMessage(RemoteCancelTaskReply)
|
| 247 |
+
|
| 248 |
+
GetCoreWorkerStatsRequest = _reflection.GeneratedProtocolMessageType('GetCoreWorkerStatsRequest', (_message.Message,), {
|
| 249 |
+
'DESCRIPTOR' : _GETCOREWORKERSTATSREQUEST,
|
| 250 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 251 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.GetCoreWorkerStatsRequest)
|
| 252 |
+
})
|
| 253 |
+
_sym_db.RegisterMessage(GetCoreWorkerStatsRequest)
|
| 254 |
+
|
| 255 |
+
GetCoreWorkerStatsReply = _reflection.GeneratedProtocolMessageType('GetCoreWorkerStatsReply', (_message.Message,), {
|
| 256 |
+
'DESCRIPTOR' : _GETCOREWORKERSTATSREPLY,
|
| 257 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 258 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.GetCoreWorkerStatsReply)
|
| 259 |
+
})
|
| 260 |
+
_sym_db.RegisterMessage(GetCoreWorkerStatsReply)
|
| 261 |
+
|
| 262 |
+
LocalGCRequest = _reflection.GeneratedProtocolMessageType('LocalGCRequest', (_message.Message,), {
|
| 263 |
+
'DESCRIPTOR' : _LOCALGCREQUEST,
|
| 264 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 265 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.LocalGCRequest)
|
| 266 |
+
})
|
| 267 |
+
_sym_db.RegisterMessage(LocalGCRequest)
|
| 268 |
+
|
| 269 |
+
LocalGCReply = _reflection.GeneratedProtocolMessageType('LocalGCReply', (_message.Message,), {
|
| 270 |
+
'DESCRIPTOR' : _LOCALGCREPLY,
|
| 271 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 272 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.LocalGCReply)
|
| 273 |
+
})
|
| 274 |
+
_sym_db.RegisterMessage(LocalGCReply)
|
| 275 |
+
|
| 276 |
+
PlasmaObjectReadyRequest = _reflection.GeneratedProtocolMessageType('PlasmaObjectReadyRequest', (_message.Message,), {
|
| 277 |
+
'DESCRIPTOR' : _PLASMAOBJECTREADYREQUEST,
|
| 278 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 279 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.PlasmaObjectReadyRequest)
|
| 280 |
+
})
|
| 281 |
+
_sym_db.RegisterMessage(PlasmaObjectReadyRequest)
|
| 282 |
+
|
| 283 |
+
PlasmaObjectReadyReply = _reflection.GeneratedProtocolMessageType('PlasmaObjectReadyReply', (_message.Message,), {
|
| 284 |
+
'DESCRIPTOR' : _PLASMAOBJECTREADYREPLY,
|
| 285 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 286 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.PlasmaObjectReadyReply)
|
| 287 |
+
})
|
| 288 |
+
_sym_db.RegisterMessage(PlasmaObjectReadyReply)
|
| 289 |
+
|
| 290 |
+
DeleteObjectsRequest = _reflection.GeneratedProtocolMessageType('DeleteObjectsRequest', (_message.Message,), {
|
| 291 |
+
'DESCRIPTOR' : _DELETEOBJECTSREQUEST,
|
| 292 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 293 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.DeleteObjectsRequest)
|
| 294 |
+
})
|
| 295 |
+
_sym_db.RegisterMessage(DeleteObjectsRequest)
|
| 296 |
+
|
| 297 |
+
DeleteObjectsReply = _reflection.GeneratedProtocolMessageType('DeleteObjectsReply', (_message.Message,), {
|
| 298 |
+
'DESCRIPTOR' : _DELETEOBJECTSREPLY,
|
| 299 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 300 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.DeleteObjectsReply)
|
| 301 |
+
})
|
| 302 |
+
_sym_db.RegisterMessage(DeleteObjectsReply)
|
| 303 |
+
|
| 304 |
+
SpillObjectsRequest = _reflection.GeneratedProtocolMessageType('SpillObjectsRequest', (_message.Message,), {
|
| 305 |
+
'DESCRIPTOR' : _SPILLOBJECTSREQUEST,
|
| 306 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 307 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.SpillObjectsRequest)
|
| 308 |
+
})
|
| 309 |
+
_sym_db.RegisterMessage(SpillObjectsRequest)
|
| 310 |
+
|
| 311 |
+
SpillObjectsReply = _reflection.GeneratedProtocolMessageType('SpillObjectsReply', (_message.Message,), {
|
| 312 |
+
'DESCRIPTOR' : _SPILLOBJECTSREPLY,
|
| 313 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 314 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.SpillObjectsReply)
|
| 315 |
+
})
|
| 316 |
+
_sym_db.RegisterMessage(SpillObjectsReply)
|
| 317 |
+
|
| 318 |
+
RestoreSpilledObjectsRequest = _reflection.GeneratedProtocolMessageType('RestoreSpilledObjectsRequest', (_message.Message,), {
|
| 319 |
+
'DESCRIPTOR' : _RESTORESPILLEDOBJECTSREQUEST,
|
| 320 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 321 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.RestoreSpilledObjectsRequest)
|
| 322 |
+
})
|
| 323 |
+
_sym_db.RegisterMessage(RestoreSpilledObjectsRequest)
|
| 324 |
+
|
| 325 |
+
RestoreSpilledObjectsReply = _reflection.GeneratedProtocolMessageType('RestoreSpilledObjectsReply', (_message.Message,), {
|
| 326 |
+
'DESCRIPTOR' : _RESTORESPILLEDOBJECTSREPLY,
|
| 327 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 328 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.RestoreSpilledObjectsReply)
|
| 329 |
+
})
|
| 330 |
+
_sym_db.RegisterMessage(RestoreSpilledObjectsReply)
|
| 331 |
+
|
| 332 |
+
DeleteSpilledObjectsRequest = _reflection.GeneratedProtocolMessageType('DeleteSpilledObjectsRequest', (_message.Message,), {
|
| 333 |
+
'DESCRIPTOR' : _DELETESPILLEDOBJECTSREQUEST,
|
| 334 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 335 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.DeleteSpilledObjectsRequest)
|
| 336 |
+
})
|
| 337 |
+
_sym_db.RegisterMessage(DeleteSpilledObjectsRequest)
|
| 338 |
+
|
| 339 |
+
DeleteSpilledObjectsReply = _reflection.GeneratedProtocolMessageType('DeleteSpilledObjectsReply', (_message.Message,), {
|
| 340 |
+
'DESCRIPTOR' : _DELETESPILLEDOBJECTSREPLY,
|
| 341 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 342 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.DeleteSpilledObjectsReply)
|
| 343 |
+
})
|
| 344 |
+
_sym_db.RegisterMessage(DeleteSpilledObjectsReply)
|
| 345 |
+
|
| 346 |
+
ExitRequest = _reflection.GeneratedProtocolMessageType('ExitRequest', (_message.Message,), {
|
| 347 |
+
'DESCRIPTOR' : _EXITREQUEST,
|
| 348 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 349 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.ExitRequest)
|
| 350 |
+
})
|
| 351 |
+
_sym_db.RegisterMessage(ExitRequest)
|
| 352 |
+
|
| 353 |
+
ExitReply = _reflection.GeneratedProtocolMessageType('ExitReply', (_message.Message,), {
|
| 354 |
+
'DESCRIPTOR' : _EXITREPLY,
|
| 355 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 356 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.ExitReply)
|
| 357 |
+
})
|
| 358 |
+
_sym_db.RegisterMessage(ExitReply)
|
| 359 |
+
|
| 360 |
+
AssignObjectOwnerRequest = _reflection.GeneratedProtocolMessageType('AssignObjectOwnerRequest', (_message.Message,), {
|
| 361 |
+
'DESCRIPTOR' : _ASSIGNOBJECTOWNERREQUEST,
|
| 362 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 363 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.AssignObjectOwnerRequest)
|
| 364 |
+
})
|
| 365 |
+
_sym_db.RegisterMessage(AssignObjectOwnerRequest)
|
| 366 |
+
|
| 367 |
+
AssignObjectOwnerReply = _reflection.GeneratedProtocolMessageType('AssignObjectOwnerReply', (_message.Message,), {
|
| 368 |
+
'DESCRIPTOR' : _ASSIGNOBJECTOWNERREPLY,
|
| 369 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 370 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.AssignObjectOwnerReply)
|
| 371 |
+
})
|
| 372 |
+
_sym_db.RegisterMessage(AssignObjectOwnerReply)
|
| 373 |
+
|
| 374 |
+
RayletNotifyGCSRestartRequest = _reflection.GeneratedProtocolMessageType('RayletNotifyGCSRestartRequest', (_message.Message,), {
|
| 375 |
+
'DESCRIPTOR' : _RAYLETNOTIFYGCSRESTARTREQUEST,
|
| 376 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 377 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.RayletNotifyGCSRestartRequest)
|
| 378 |
+
})
|
| 379 |
+
_sym_db.RegisterMessage(RayletNotifyGCSRestartRequest)
|
| 380 |
+
|
| 381 |
+
RayletNotifyGCSRestartReply = _reflection.GeneratedProtocolMessageType('RayletNotifyGCSRestartReply', (_message.Message,), {
|
| 382 |
+
'DESCRIPTOR' : _RAYLETNOTIFYGCSRESTARTREPLY,
|
| 383 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 384 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.RayletNotifyGCSRestartReply)
|
| 385 |
+
})
|
| 386 |
+
_sym_db.RegisterMessage(RayletNotifyGCSRestartReply)
|
| 387 |
+
|
| 388 |
+
NumPendingTasksRequest = _reflection.GeneratedProtocolMessageType('NumPendingTasksRequest', (_message.Message,), {
|
| 389 |
+
'DESCRIPTOR' : _NUMPENDINGTASKSREQUEST,
|
| 390 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 391 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.NumPendingTasksRequest)
|
| 392 |
+
})
|
| 393 |
+
_sym_db.RegisterMessage(NumPendingTasksRequest)
|
| 394 |
+
|
| 395 |
+
NumPendingTasksReply = _reflection.GeneratedProtocolMessageType('NumPendingTasksReply', (_message.Message,), {
|
| 396 |
+
'DESCRIPTOR' : _NUMPENDINGTASKSREPLY,
|
| 397 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 398 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.NumPendingTasksReply)
|
| 399 |
+
})
|
| 400 |
+
_sym_db.RegisterMessage(NumPendingTasksReply)
|
| 401 |
+
|
| 402 |
+
ReportGeneratorItemReturnsRequest = _reflection.GeneratedProtocolMessageType('ReportGeneratorItemReturnsRequest', (_message.Message,), {
|
| 403 |
+
'DESCRIPTOR' : _REPORTGENERATORITEMRETURNSREQUEST,
|
| 404 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 405 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.ReportGeneratorItemReturnsRequest)
|
| 406 |
+
})
|
| 407 |
+
_sym_db.RegisterMessage(ReportGeneratorItemReturnsRequest)
|
| 408 |
+
|
| 409 |
+
ReportGeneratorItemReturnsReply = _reflection.GeneratedProtocolMessageType('ReportGeneratorItemReturnsReply', (_message.Message,), {
|
| 410 |
+
'DESCRIPTOR' : _REPORTGENERATORITEMRETURNSREPLY,
|
| 411 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 412 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.ReportGeneratorItemReturnsReply)
|
| 413 |
+
})
|
| 414 |
+
_sym_db.RegisterMessage(ReportGeneratorItemReturnsReply)
|
| 415 |
+
|
| 416 |
+
RegisterMutableObjectReaderRequest = _reflection.GeneratedProtocolMessageType('RegisterMutableObjectReaderRequest', (_message.Message,), {
|
| 417 |
+
'DESCRIPTOR' : _REGISTERMUTABLEOBJECTREADERREQUEST,
|
| 418 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 419 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.RegisterMutableObjectReaderRequest)
|
| 420 |
+
})
|
| 421 |
+
_sym_db.RegisterMessage(RegisterMutableObjectReaderRequest)
|
| 422 |
+
|
| 423 |
+
RegisterMutableObjectReaderReply = _reflection.GeneratedProtocolMessageType('RegisterMutableObjectReaderReply', (_message.Message,), {
|
| 424 |
+
'DESCRIPTOR' : _REGISTERMUTABLEOBJECTREADERREPLY,
|
| 425 |
+
'__module__' : 'src.ray.protobuf.core_worker_pb2'
|
| 426 |
+
# @@protoc_insertion_point(class_scope:ray.rpc.RegisterMutableObjectReaderReply)
|
| 427 |
+
})
|
| 428 |
+
_sym_db.RegisterMessage(RegisterMutableObjectReaderReply)
|
| 429 |
+
|
| 430 |
+
_COREWORKERSERVICE = DESCRIPTOR.services_by_name['CoreWorkerService']
|
| 431 |
+
if _descriptor._USE_C_DESCRIPTORS == False:
|
| 432 |
+
|
| 433 |
+
DESCRIPTOR._options = None
|
| 434 |
+
_ACTORHANDLE_LABELSENTRY._options = None
|
| 435 |
+
_ACTORHANDLE_LABELSENTRY._serialized_options = b'8\001'
|
| 436 |
+
_OBJECTPLASMALOCATIONUPDATE._serialized_start=6533
|
| 437 |
+
_OBJECTPLASMALOCATIONUPDATE._serialized_end=6585
|
| 438 |
+
_ACTIVEOBJECTIDS._serialized_start=109
|
| 439 |
+
_ACTIVEOBJECTIDS._serialized_end=157
|
| 440 |
+
_ACTORHANDLE._serialized_start=160
|
| 441 |
+
_ACTORHANDLE._serialized_end=924
|
| 442 |
+
_ACTORHANDLE_LABELSENTRY._serialized_start=867
|
| 443 |
+
_ACTORHANDLE_LABELSENTRY._serialized_end=924
|
| 444 |
+
_PUSHTASKREQUEST._serialized_start=927
|
| 445 |
+
_PUSHTASKREQUEST._serialized_end=1202
|
| 446 |
+
_PUSHTASKREPLY._serialized_start=1205
|
| 447 |
+
_PUSHTASKREPLY._serialized_end=1852
|
| 448 |
+
_DIRECTACTORCALLARGWAITCOMPLETEREQUEST._serialized_start=1854
|
| 449 |
+
_DIRECTACTORCALLARGWAITCOMPLETEREQUEST._serialized_end=1957
|
| 450 |
+
_DIRECTACTORCALLARGWAITCOMPLETEREPLY._serialized_start=1959
|
| 451 |
+
_DIRECTACTORCALLARGWAITCOMPLETEREPLY._serialized_end=1996
|
| 452 |
+
_GETOBJECTSTATUSREQUEST._serialized_start=1998
|
| 453 |
+
_GETOBJECTSTATUSREQUEST._serialized_end=2091
|
| 454 |
+
_RAYOBJECT._serialized_start=2094
|
| 455 |
+
_RAYOBJECT._serialized_end=2227
|
| 456 |
+
_GETOBJECTSTATUSREPLY._serialized_start=2230
|
| 457 |
+
_GETOBJECTSTATUSREPLY._serialized_end=2482
|
| 458 |
+
_GETOBJECTSTATUSREPLY_OBJECTSTATUS._serialized_start=2426
|
| 459 |
+
_GETOBJECTSTATUSREPLY_OBJECTSTATUS._serialized_end=2482
|
| 460 |
+
_WAITFORACTORREFDELETEDREQUEST._serialized_start=2484
|
| 461 |
+
_WAITFORACTORREFDELETEDREQUEST._serialized_end=2588
|
| 462 |
+
_WAITFORACTORREFDELETEDREPLY._serialized_start=2590
|
| 463 |
+
_WAITFORACTORREFDELETEDREPLY._serialized_end=2619
|
| 464 |
+
_UPDATEOBJECTLOCATIONBATCHREQUEST._serialized_start=2622
|
| 465 |
+
_UPDATEOBJECTLOCATIONBATCHREQUEST._serialized_end=2814
|
| 466 |
+
_UPDATEOBJECTLOCATIONBATCHREPLY._serialized_start=2816
|
| 467 |
+
_UPDATEOBJECTLOCATIONBATCHREPLY._serialized_end=2848
|
| 468 |
+
_OBJECTSPILLEDLOCATIONUPDATE._serialized_start=2850
|
| 469 |
+
_OBJECTSPILLEDLOCATIONUPDATE._serialized_end=2969
|
| 470 |
+
_OBJECTLOCATIONUPDATE._serialized_start=2972
|
| 471 |
+
_OBJECTLOCATIONUPDATE._serialized_end=3330
|
| 472 |
+
_GETOBJECTLOCATIONSOWNERREQUEST._serialized_start=3332
|
| 473 |
+
_GETOBJECTLOCATIONSOWNERREQUEST._serialized_end=3441
|
| 474 |
+
_GETOBJECTLOCATIONSOWNERREPLY._serialized_start=3443
|
| 475 |
+
_GETOBJECTLOCATIONSOWNERREPLY._serialized_end=3567
|
| 476 |
+
_KILLACTORREQUEST._serialized_start=3570
|
| 477 |
+
_KILLACTORREQUEST._serialized_end=3722
|
| 478 |
+
_KILLACTORREPLY._serialized_start=3724
|
| 479 |
+
_KILLACTORREPLY._serialized_end=3740
|
| 480 |
+
_CANCELTASKREQUEST._serialized_start=3743
|
| 481 |
+
_CANCELTASKREQUEST._serialized_end=3907
|
| 482 |
+
_CANCELTASKREPLY._serialized_start=3909
|
| 483 |
+
_CANCELTASKREPLY._serialized_end=4025
|
| 484 |
+
_REMOTECANCELTASKREQUEST._serialized_start=4028
|
| 485 |
+
_REMOTECANCELTASKREQUEST._serialized_end=4156
|
| 486 |
+
_REMOTECANCELTASKREPLY._serialized_start=4158
|
| 487 |
+
_REMOTECANCELTASKREPLY._serialized_end=4181
|
| 488 |
+
_GETCOREWORKERSTATSREQUEST._serialized_start=4184
|
| 489 |
+
_GETCOREWORKERSTATSREQUEST._serialized_end=4386
|
| 490 |
+
_GETCOREWORKERSTATSREPLY._serialized_start=4389
|
| 491 |
+
_GETCOREWORKERSTATSREPLY._serialized_end=4638
|
| 492 |
+
_LOCALGCREQUEST._serialized_start=4640
|
| 493 |
+
_LOCALGCREQUEST._serialized_end=4709
|
| 494 |
+
_LOCALGCREPLY._serialized_start=4711
|
| 495 |
+
_LOCALGCREPLY._serialized_end=4725
|
| 496 |
+
_PLASMAOBJECTREADYREQUEST._serialized_start=4727
|
| 497 |
+
_PLASMAOBJECTREADYREQUEST._serialized_end=4782
|
| 498 |
+
_PLASMAOBJECTREADYREPLY._serialized_start=4784
|
| 499 |
+
_PLASMAOBJECTREADYREPLY._serialized_end=4808
|
| 500 |
+
_DELETEOBJECTSREQUEST._serialized_start=4810
|
| 501 |
+
_DELETEOBJECTSREQUEST._serialized_end=4894
|
| 502 |
+
_DELETEOBJECTSREPLY._serialized_start=4896
|
| 503 |
+
_DELETEOBJECTSREPLY._serialized_end=4916
|
| 504 |
+
_SPILLOBJECTSREQUEST._serialized_start=4919
|
| 505 |
+
_SPILLOBJECTSREQUEST._serialized_end=5085
|
| 506 |
+
_SPILLOBJECTSREPLY._serialized_start=5087
|
| 507 |
+
_SPILLOBJECTSREPLY._serialized_end=5154
|
| 508 |
+
_RESTORESPILLEDOBJECTSREQUEST._serialized_start=5157
|
| 509 |
+
_RESTORESPILLEDOBJECTSREQUEST._serialized_end=5286
|
| 510 |
+
_RESTORESPILLEDOBJECTSREPLY._serialized_start=5288
|
| 511 |
+
_RESTORESPILLEDOBJECTSREPLY._serialized_end=5366
|
| 512 |
+
_DELETESPILLEDOBJECTSREQUEST._serialized_start=5368
|
| 513 |
+
_DELETESPILLEDOBJECTSREQUEST._serialized_end=5445
|
| 514 |
+
_DELETESPILLEDOBJECTSREPLY._serialized_start=5447
|
| 515 |
+
_DELETESPILLEDOBJECTSREPLY._serialized_end=5474
|
| 516 |
+
_EXITREQUEST._serialized_start=5476
|
| 517 |
+
_EXITREQUEST._serialized_end=5520
|
| 518 |
+
_EXITREPLY._serialized_start=5522
|
| 519 |
+
_EXITREPLY._serialized_end=5559
|
| 520 |
+
_ASSIGNOBJECTOWNERREQUEST._serialized_start=5562
|
| 521 |
+
_ASSIGNOBJECTOWNERREQUEST._serialized_end=5790
|
| 522 |
+
_ASSIGNOBJECTOWNERREPLY._serialized_start=5792
|
| 523 |
+
_ASSIGNOBJECTOWNERREPLY._serialized_end=5816
|
| 524 |
+
_RAYLETNOTIFYGCSRESTARTREQUEST._serialized_start=5818
|
| 525 |
+
_RAYLETNOTIFYGCSRESTARTREQUEST._serialized_end=5849
|
| 526 |
+
_RAYLETNOTIFYGCSRESTARTREPLY._serialized_start=5851
|
| 527 |
+
_RAYLETNOTIFYGCSRESTARTREPLY._serialized_end=5880
|
| 528 |
+
_NUMPENDINGTASKSREQUEST._serialized_start=5882
|
| 529 |
+
_NUMPENDINGTASKSREQUEST._serialized_end=5906
|
| 530 |
+
_NUMPENDINGTASKSREPLY._serialized_start=5908
|
| 531 |
+
_NUMPENDINGTASKSREPLY._serialized_end=5974
|
| 532 |
+
_REPORTGENERATORITEMRETURNSREQUEST._serialized_start=5977
|
| 533 |
+
_REPORTGENERATORITEMRETURNSREQUEST._serialized_end=6245
|
| 534 |
+
_REPORTGENERATORITEMRETURNSREPLY._serialized_start=6247
|
| 535 |
+
_REPORTGENERATORITEMRETURNSREPLY._serialized_end=6339
|
| 536 |
+
_REGISTERMUTABLEOBJECTREADERREQUEST._serialized_start=6342
|
| 537 |
+
_REGISTERMUTABLEOBJECTREADERREQUEST._serialized_end=6495
|
| 538 |
+
_REGISTERMUTABLEOBJECTREADERREPLY._serialized_start=6497
|
| 539 |
+
_REGISTERMUTABLEOBJECTREADERREPLY._serialized_end=6531
|
| 540 |
+
_COREWORKERSERVICE._serialized_start=6588
|
| 541 |
+
_COREWORKERSERVICE._serialized_end=8755
|
| 542 |
+
# @@protoc_insertion_point(module_scope)
|
.venv/lib/python3.11/site-packages/ray/core/generated/export_actor_data_pb2_grpc.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
|
| 2 |
+
"""Client and server classes corresponding to protobuf-defined services."""
|
| 3 |
+
import grpc
|
| 4 |
+
|
.venv/lib/python3.11/site-packages/ray/core/generated/reporter_pb2_grpc.py
ADDED
|
@@ -0,0 +1,259 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
|
| 2 |
+
"""Client and server classes corresponding to protobuf-defined services."""
|
| 3 |
+
import grpc
|
| 4 |
+
|
| 5 |
+
from . import reporter_pb2 as src_dot_ray_dot_protobuf_dot_reporter__pb2
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class ReporterServiceStub(object):
|
| 9 |
+
"""Missing associated documentation comment in .proto file."""
|
| 10 |
+
|
| 11 |
+
def __init__(self, channel):
|
| 12 |
+
"""Constructor.
|
| 13 |
+
|
| 14 |
+
Args:
|
| 15 |
+
channel: A grpc.Channel.
|
| 16 |
+
"""
|
| 17 |
+
self.ReportOCMetrics = channel.unary_unary(
|
| 18 |
+
'/ray.rpc.ReporterService/ReportOCMetrics',
|
| 19 |
+
request_serializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.ReportOCMetricsRequest.SerializeToString,
|
| 20 |
+
response_deserializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.ReportOCMetricsReply.FromString,
|
| 21 |
+
)
|
| 22 |
+
self.GetTraceback = channel.unary_unary(
|
| 23 |
+
'/ray.rpc.ReporterService/GetTraceback',
|
| 24 |
+
request_serializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.GetTracebackRequest.SerializeToString,
|
| 25 |
+
response_deserializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.GetTracebackReply.FromString,
|
| 26 |
+
)
|
| 27 |
+
self.CpuProfiling = channel.unary_unary(
|
| 28 |
+
'/ray.rpc.ReporterService/CpuProfiling',
|
| 29 |
+
request_serializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.CpuProfilingRequest.SerializeToString,
|
| 30 |
+
response_deserializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.CpuProfilingReply.FromString,
|
| 31 |
+
)
|
| 32 |
+
self.MemoryProfiling = channel.unary_unary(
|
| 33 |
+
'/ray.rpc.ReporterService/MemoryProfiling',
|
| 34 |
+
request_serializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.MemoryProfilingRequest.SerializeToString,
|
| 35 |
+
response_deserializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.MemoryProfilingReply.FromString,
|
| 36 |
+
)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class ReporterServiceServicer(object):
|
| 40 |
+
"""Missing associated documentation comment in .proto file."""
|
| 41 |
+
|
| 42 |
+
def ReportOCMetrics(self, request, context):
|
| 43 |
+
"""Missing associated documentation comment in .proto file."""
|
| 44 |
+
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
| 45 |
+
context.set_details('Method not implemented!')
|
| 46 |
+
raise NotImplementedError('Method not implemented!')
|
| 47 |
+
|
| 48 |
+
def GetTraceback(self, request, context):
|
| 49 |
+
"""Missing associated documentation comment in .proto file."""
|
| 50 |
+
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
| 51 |
+
context.set_details('Method not implemented!')
|
| 52 |
+
raise NotImplementedError('Method not implemented!')
|
| 53 |
+
|
| 54 |
+
def CpuProfiling(self, request, context):
|
| 55 |
+
"""Missing associated documentation comment in .proto file."""
|
| 56 |
+
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
| 57 |
+
context.set_details('Method not implemented!')
|
| 58 |
+
raise NotImplementedError('Method not implemented!')
|
| 59 |
+
|
| 60 |
+
def MemoryProfiling(self, request, context):
|
| 61 |
+
"""Missing associated documentation comment in .proto file."""
|
| 62 |
+
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
| 63 |
+
context.set_details('Method not implemented!')
|
| 64 |
+
raise NotImplementedError('Method not implemented!')
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def add_ReporterServiceServicer_to_server(servicer, server):
|
| 68 |
+
rpc_method_handlers = {
|
| 69 |
+
'ReportOCMetrics': grpc.unary_unary_rpc_method_handler(
|
| 70 |
+
servicer.ReportOCMetrics,
|
| 71 |
+
request_deserializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.ReportOCMetricsRequest.FromString,
|
| 72 |
+
response_serializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.ReportOCMetricsReply.SerializeToString,
|
| 73 |
+
),
|
| 74 |
+
'GetTraceback': grpc.unary_unary_rpc_method_handler(
|
| 75 |
+
servicer.GetTraceback,
|
| 76 |
+
request_deserializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.GetTracebackRequest.FromString,
|
| 77 |
+
response_serializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.GetTracebackReply.SerializeToString,
|
| 78 |
+
),
|
| 79 |
+
'CpuProfiling': grpc.unary_unary_rpc_method_handler(
|
| 80 |
+
servicer.CpuProfiling,
|
| 81 |
+
request_deserializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.CpuProfilingRequest.FromString,
|
| 82 |
+
response_serializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.CpuProfilingReply.SerializeToString,
|
| 83 |
+
),
|
| 84 |
+
'MemoryProfiling': grpc.unary_unary_rpc_method_handler(
|
| 85 |
+
servicer.MemoryProfiling,
|
| 86 |
+
request_deserializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.MemoryProfilingRequest.FromString,
|
| 87 |
+
response_serializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.MemoryProfilingReply.SerializeToString,
|
| 88 |
+
),
|
| 89 |
+
}
|
| 90 |
+
generic_handler = grpc.method_handlers_generic_handler(
|
| 91 |
+
'ray.rpc.ReporterService', rpc_method_handlers)
|
| 92 |
+
server.add_generic_rpc_handlers((generic_handler,))
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
# This class is part of an EXPERIMENTAL API.
|
| 96 |
+
class ReporterService(object):
|
| 97 |
+
"""Missing associated documentation comment in .proto file."""
|
| 98 |
+
|
| 99 |
+
@staticmethod
|
| 100 |
+
def ReportOCMetrics(request,
|
| 101 |
+
target,
|
| 102 |
+
options=(),
|
| 103 |
+
channel_credentials=None,
|
| 104 |
+
call_credentials=None,
|
| 105 |
+
insecure=False,
|
| 106 |
+
compression=None,
|
| 107 |
+
wait_for_ready=None,
|
| 108 |
+
timeout=None,
|
| 109 |
+
metadata=None):
|
| 110 |
+
return grpc.experimental.unary_unary(request, target, '/ray.rpc.ReporterService/ReportOCMetrics',
|
| 111 |
+
src_dot_ray_dot_protobuf_dot_reporter__pb2.ReportOCMetricsRequest.SerializeToString,
|
| 112 |
+
src_dot_ray_dot_protobuf_dot_reporter__pb2.ReportOCMetricsReply.FromString,
|
| 113 |
+
options, channel_credentials,
|
| 114 |
+
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
| 115 |
+
|
| 116 |
+
@staticmethod
|
| 117 |
+
def GetTraceback(request,
|
| 118 |
+
target,
|
| 119 |
+
options=(),
|
| 120 |
+
channel_credentials=None,
|
| 121 |
+
call_credentials=None,
|
| 122 |
+
insecure=False,
|
| 123 |
+
compression=None,
|
| 124 |
+
wait_for_ready=None,
|
| 125 |
+
timeout=None,
|
| 126 |
+
metadata=None):
|
| 127 |
+
return grpc.experimental.unary_unary(request, target, '/ray.rpc.ReporterService/GetTraceback',
|
| 128 |
+
src_dot_ray_dot_protobuf_dot_reporter__pb2.GetTracebackRequest.SerializeToString,
|
| 129 |
+
src_dot_ray_dot_protobuf_dot_reporter__pb2.GetTracebackReply.FromString,
|
| 130 |
+
options, channel_credentials,
|
| 131 |
+
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
| 132 |
+
|
| 133 |
+
@staticmethod
|
| 134 |
+
def CpuProfiling(request,
|
| 135 |
+
target,
|
| 136 |
+
options=(),
|
| 137 |
+
channel_credentials=None,
|
| 138 |
+
call_credentials=None,
|
| 139 |
+
insecure=False,
|
| 140 |
+
compression=None,
|
| 141 |
+
wait_for_ready=None,
|
| 142 |
+
timeout=None,
|
| 143 |
+
metadata=None):
|
| 144 |
+
return grpc.experimental.unary_unary(request, target, '/ray.rpc.ReporterService/CpuProfiling',
|
| 145 |
+
src_dot_ray_dot_protobuf_dot_reporter__pb2.CpuProfilingRequest.SerializeToString,
|
| 146 |
+
src_dot_ray_dot_protobuf_dot_reporter__pb2.CpuProfilingReply.FromString,
|
| 147 |
+
options, channel_credentials,
|
| 148 |
+
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
| 149 |
+
|
| 150 |
+
@staticmethod
|
| 151 |
+
def MemoryProfiling(request,
|
| 152 |
+
target,
|
| 153 |
+
options=(),
|
| 154 |
+
channel_credentials=None,
|
| 155 |
+
call_credentials=None,
|
| 156 |
+
insecure=False,
|
| 157 |
+
compression=None,
|
| 158 |
+
wait_for_ready=None,
|
| 159 |
+
timeout=None,
|
| 160 |
+
metadata=None):
|
| 161 |
+
return grpc.experimental.unary_unary(request, target, '/ray.rpc.ReporterService/MemoryProfiling',
|
| 162 |
+
src_dot_ray_dot_protobuf_dot_reporter__pb2.MemoryProfilingRequest.SerializeToString,
|
| 163 |
+
src_dot_ray_dot_protobuf_dot_reporter__pb2.MemoryProfilingReply.FromString,
|
| 164 |
+
options, channel_credentials,
|
| 165 |
+
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
class LogServiceStub(object):
|
| 169 |
+
"""Missing associated documentation comment in .proto file."""
|
| 170 |
+
|
| 171 |
+
def __init__(self, channel):
|
| 172 |
+
"""Constructor.
|
| 173 |
+
|
| 174 |
+
Args:
|
| 175 |
+
channel: A grpc.Channel.
|
| 176 |
+
"""
|
| 177 |
+
self.ListLogs = channel.unary_unary(
|
| 178 |
+
'/ray.rpc.LogService/ListLogs',
|
| 179 |
+
request_serializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.ListLogsRequest.SerializeToString,
|
| 180 |
+
response_deserializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.ListLogsReply.FromString,
|
| 181 |
+
)
|
| 182 |
+
self.StreamLog = channel.unary_stream(
|
| 183 |
+
'/ray.rpc.LogService/StreamLog',
|
| 184 |
+
request_serializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.StreamLogRequest.SerializeToString,
|
| 185 |
+
response_deserializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.StreamLogReply.FromString,
|
| 186 |
+
)
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
class LogServiceServicer(object):
|
| 190 |
+
"""Missing associated documentation comment in .proto file."""
|
| 191 |
+
|
| 192 |
+
def ListLogs(self, request, context):
|
| 193 |
+
"""Missing associated documentation comment in .proto file."""
|
| 194 |
+
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
| 195 |
+
context.set_details('Method not implemented!')
|
| 196 |
+
raise NotImplementedError('Method not implemented!')
|
| 197 |
+
|
| 198 |
+
def StreamLog(self, request, context):
|
| 199 |
+
"""Missing associated documentation comment in .proto file."""
|
| 200 |
+
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
| 201 |
+
context.set_details('Method not implemented!')
|
| 202 |
+
raise NotImplementedError('Method not implemented!')
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
def add_LogServiceServicer_to_server(servicer, server):
|
| 206 |
+
rpc_method_handlers = {
|
| 207 |
+
'ListLogs': grpc.unary_unary_rpc_method_handler(
|
| 208 |
+
servicer.ListLogs,
|
| 209 |
+
request_deserializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.ListLogsRequest.FromString,
|
| 210 |
+
response_serializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.ListLogsReply.SerializeToString,
|
| 211 |
+
),
|
| 212 |
+
'StreamLog': grpc.unary_stream_rpc_method_handler(
|
| 213 |
+
servicer.StreamLog,
|
| 214 |
+
request_deserializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.StreamLogRequest.FromString,
|
| 215 |
+
response_serializer=src_dot_ray_dot_protobuf_dot_reporter__pb2.StreamLogReply.SerializeToString,
|
| 216 |
+
),
|
| 217 |
+
}
|
| 218 |
+
generic_handler = grpc.method_handlers_generic_handler(
|
| 219 |
+
'ray.rpc.LogService', rpc_method_handlers)
|
| 220 |
+
server.add_generic_rpc_handlers((generic_handler,))
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
# This class is part of an EXPERIMENTAL API.
|
| 224 |
+
class LogService(object):
|
| 225 |
+
"""Missing associated documentation comment in .proto file."""
|
| 226 |
+
|
| 227 |
+
@staticmethod
|
| 228 |
+
def ListLogs(request,
|
| 229 |
+
target,
|
| 230 |
+
options=(),
|
| 231 |
+
channel_credentials=None,
|
| 232 |
+
call_credentials=None,
|
| 233 |
+
insecure=False,
|
| 234 |
+
compression=None,
|
| 235 |
+
wait_for_ready=None,
|
| 236 |
+
timeout=None,
|
| 237 |
+
metadata=None):
|
| 238 |
+
return grpc.experimental.unary_unary(request, target, '/ray.rpc.LogService/ListLogs',
|
| 239 |
+
src_dot_ray_dot_protobuf_dot_reporter__pb2.ListLogsRequest.SerializeToString,
|
| 240 |
+
src_dot_ray_dot_protobuf_dot_reporter__pb2.ListLogsReply.FromString,
|
| 241 |
+
options, channel_credentials,
|
| 242 |
+
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
| 243 |
+
|
| 244 |
+
@staticmethod
|
| 245 |
+
def StreamLog(request,
|
| 246 |
+
target,
|
| 247 |
+
options=(),
|
| 248 |
+
channel_credentials=None,
|
| 249 |
+
call_credentials=None,
|
| 250 |
+
insecure=False,
|
| 251 |
+
compression=None,
|
| 252 |
+
wait_for_ready=None,
|
| 253 |
+
timeout=None,
|
| 254 |
+
metadata=None):
|
| 255 |
+
return grpc.experimental.unary_stream(request, target, '/ray.rpc.LogService/StreamLog',
|
| 256 |
+
src_dot_ray_dot_protobuf_dot_reporter__pb2.StreamLogRequest.SerializeToString,
|
| 257 |
+
src_dot_ray_dot_protobuf_dot_reporter__pb2.StreamLogReply.FromString,
|
| 258 |
+
options, channel_credentials,
|
| 259 |
+
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
.venv/lib/python3.11/site-packages/ray/core/generated/usage_pb2.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
| 3 |
+
# source: src/ray/protobuf/usage.proto
|
| 4 |
+
"""Generated protocol buffer code."""
|
| 5 |
+
from google.protobuf.internal import enum_type_wrapper
|
| 6 |
+
from google.protobuf import descriptor as _descriptor
|
| 7 |
+
from google.protobuf import descriptor_pool as _descriptor_pool
|
| 8 |
+
from google.protobuf import message as _message
|
| 9 |
+
from google.protobuf import reflection as _reflection
|
| 10 |
+
from google.protobuf import symbol_database as _symbol_database
|
| 11 |
+
# @@protoc_insertion_point(imports)
|
| 12 |
+
|
| 13 |
+
_sym_db = _symbol_database.Default()
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1csrc/ray/protobuf/usage.proto\x12\tray.usage*\x9b\x14\n\x06TagKey\x12\n\n\x06_TEST1\x10\x00\x12\n\n\x06_TEST2\x10\x01\x12\x13\n\x0fRLLIB_FRAMEWORK\x10\x02\x12\x13\n\x0fRLLIB_ALGORITHM\x10\x03\x12\x15\n\x11RLLIB_NUM_WORKERS\x10\x04\x12\x15\n\x11SERVE_API_VERSION\x10\x05\x12\x19\n\x15SERVE_NUM_DEPLOYMENTS\x10\x06\x12\x0f\n\x0bGCS_STORAGE\x10\x07\x12\x1d\n\x19SERVE_NUM_GPU_DEPLOYMENTS\x10\x08\x12\x16\n\x12SERVE_FASTAPI_USED\x10\t\x12\x19\n\x15SERVE_DAG_DRIVER_USED\x10\n\x12\x1b\n\x17SERVE_HTTP_ADAPTER_USED\x10\x0b\x12\x1b\n\x17SERVE_GRPC_INGRESS_USED\x10\x0c\x12\x1a\n\x16SERVE_REST_API_VERSION\x10\r\x12\x12\n\x0eSERVE_NUM_APPS\x10\x0e\x12*\n&SERVE_NUM_REPLICAS_LIGHTWEIGHT_UPDATED\x10\x0f\x12)\n%SERVE_USER_CONFIG_LIGHTWEIGHT_UPDATED\x10\x10\x12\x30\n,SERVE_AUTOSCALING_CONFIG_LIGHTWEIGHT_UPDATED\x10\x11\x12#\n\x1fSERVE_RAY_SERVE_HANDLE_API_USED\x10\x12\x12(\n$SERVE_RAY_SERVE_SYNC_HANDLE_API_USED\x10\x13\x12$\n SERVE_DEPLOYMENT_HANDLE_API_USED\x10\x14\x12\x32\n.SERVE_DEPLOYMENT_HANDLE_TO_OBJECT_REF_API_USED\x10\x15\x12\x1e\n\x1aSERVE_MULTIPLEXED_API_USED\x10\x16\x12\x19\n\x15SERVE_HTTP_PROXY_USED\x10\x17\x12\x19\n\x15SERVE_GRPC_PROXY_USED\x10\x18\x12\x19\n\x15SERVE_STATUS_API_USED\x10\x19\x12!\n\x1dSERVE_GET_APP_HANDLE_API_USED\x10\x1a\x12(\n$SERVE_GET_DEPLOYMENT_HANDLE_API_USED\x10\x1b\x12(\n$SERVE_APP_CONTAINER_RUNTIME_ENV_USED\x10\x1c\x12/\n+SERVE_DEPLOYMENT_CONTAINER_RUNTIME_ENV_USED\x10\x1d\x12\x1e\n\x1aSERVE_NUM_NODE_COMPACTIONS\x10\x1e\x12 \n\x1cSERVE_AUTO_NUM_REPLICAS_USED\x10\x1f\x12\x1e\n\x1a\x43ORE_STATE_API_LIST_ACTORS\x10\x64\x12\x1d\n\x19\x43ORE_STATE_API_LIST_TASKS\x10\x65\x12\x1c\n\x18\x43ORE_STATE_API_LIST_JOBS\x10\x66\x12\x1d\n\x19\x43ORE_STATE_API_LIST_NODES\x10g\x12(\n$CORE_STATE_API_LIST_PLACEMENT_GROUPS\x10h\x12\x1f\n\x1b\x43ORE_STATE_API_LIST_WORKERS\x10i\x12\x1f\n\x1b\x43ORE_STATE_API_LIST_OBJECTS\x10j\x12$\n 
CORE_STATE_API_LIST_RUNTIME_ENVS\x10k\x12&\n\"CORE_STATE_API_LIST_CLUSTER_EVENTS\x10l\x12\x1c\n\x18\x43ORE_STATE_API_LIST_LOGS\x10m\x12\x1a\n\x16\x43ORE_STATE_API_GET_LOG\x10n\x12\"\n\x1e\x43ORE_STATE_API_SUMMARIZE_TASKS\x10o\x12#\n\x1f\x43ORE_STATE_API_SUMMARIZE_ACTORS\x10p\x12$\n CORE_STATE_API_SUMMARIZE_OBJECTS\x10q\x12\x13\n\x0e\x44\x41SHBOARD_USED\x10\xc8\x01\x12)\n$DASHBOARD_METRICS_PROMETHEUS_ENABLED\x10\xc9\x01\x12&\n!DASHBOARD_METRICS_GRAFANA_ENABLED\x10\xca\x01\x12\x13\n\x0ePG_NUM_CREATED\x10\xac\x02\x12\x16\n\x11\x41\x43TOR_NUM_CREATED\x10\xad\x02\x12\x1e\n\x19WORKER_CRASH_SYSTEM_ERROR\x10\xae\x02\x12\x15\n\x10WORKER_CRASH_OOM\x10\xaf\x02\x12\x19\n\x14RAY_GET_TIMEOUT_ZERO\x10\xb0\x02\x12\x1d\n\x18NUM_ACTOR_CREATION_TASKS\x10\xb1\x02\x12\x14\n\x0fNUM_ACTOR_TASKS\x10\xb2\x02\x12\x15\n\x10NUM_NORMAL_TASKS\x10\xb3\x02\x12\x10\n\x0bNUM_DRIVERS\x10\xb4\x02\x12\"\n\x1d\x45XPERIMENTAL_STATE_API_IMPORT\x10\xb5\x02\x12\x17\n\x12\x41UTOSCALER_VERSION\x10\xb6\x02\x12\x15\n\x10\x44\x41TA_LOGICAL_OPS\x10\x90\x03\x12\x10\n\x0b\x41IR_TRAINER\x10\xf4\x03\x12\x12\n\rTUNE_SEARCHER\x10\xf5\x03\x12\x13\n\x0eTUNE_SCHEDULER\x10\xf6\x03\x12\x11\n\x0c\x41IR_ENV_VARS\x10\xf7\x03\x12%\n 
AIR_SETUP_WANDB_INTEGRATION_USED\x10\xf8\x03\x12&\n!AIR_SETUP_MLFLOW_INTEGRATION_USED\x10\xf9\x03\x12\x12\n\rAIR_CALLBACKS\x10\xfa\x03\x12\x1e\n\x19\x41IR_STORAGE_CONFIGURATION\x10\xfb\x03\x12\x13\n\x0e\x41IR_ENTRYPOINT\x10\xfc\x03\x12\x1b\n\x16TRAIN_TORCH_GET_DEVICE\x10\xfd\x03\x12\x1e\n\x19TRAIN_TORCH_PREPARE_MODEL\x10\xfe\x03\x12#\n\x1eTRAIN_TORCH_PREPARE_DATALOADER\x10\xff\x03\x12$\n\x1fTRAIN_LIGHTNING_PREPARE_TRAINER\x10\x80\x04\x12+\n&TRAIN_LIGHTNING_RAYTRAINREPORTCALLBACK\x10\x81\x04\x12#\n\x1eTRAIN_LIGHTNING_RAYDDPSTRATEGY\x10\x82\x04\x12$\n\x1fTRAIN_LIGHTNING_RAYFSDPSTRATEGY\x10\x83\x04\x12)\n$TRAIN_LIGHTNING_RAYDEEPSPEEDSTRATEGY\x10\x84\x04\x12,\n\'TRAIN_LIGHTNING_RAYLIGHTNINGENVIRONMENT\x10\x85\x04\x12\'\n\"TRAIN_TRANSFORMERS_PREPARE_TRAINER\x10\x86\x04\x12.\n)TRAIN_TRANSFORMERS_RAYTRAINREPORTCALLBACK\x10\x87\x04\x12\x1c\n\x17TRAIN_TORCH_GET_DEVICES\x10\x88\x04\x62\x06proto3')
|
| 19 |
+
|
| 20 |
+
_TAGKEY = DESCRIPTOR.enum_types_by_name['TagKey']
|
| 21 |
+
TagKey = enum_type_wrapper.EnumTypeWrapper(_TAGKEY)
|
| 22 |
+
_TEST1 = 0
|
| 23 |
+
_TEST2 = 1
|
| 24 |
+
RLLIB_FRAMEWORK = 2
|
| 25 |
+
RLLIB_ALGORITHM = 3
|
| 26 |
+
RLLIB_NUM_WORKERS = 4
|
| 27 |
+
SERVE_API_VERSION = 5
|
| 28 |
+
SERVE_NUM_DEPLOYMENTS = 6
|
| 29 |
+
GCS_STORAGE = 7
|
| 30 |
+
SERVE_NUM_GPU_DEPLOYMENTS = 8
|
| 31 |
+
SERVE_FASTAPI_USED = 9
|
| 32 |
+
SERVE_DAG_DRIVER_USED = 10
|
| 33 |
+
SERVE_HTTP_ADAPTER_USED = 11
|
| 34 |
+
SERVE_GRPC_INGRESS_USED = 12
|
| 35 |
+
SERVE_REST_API_VERSION = 13
|
| 36 |
+
SERVE_NUM_APPS = 14
|
| 37 |
+
SERVE_NUM_REPLICAS_LIGHTWEIGHT_UPDATED = 15
|
| 38 |
+
SERVE_USER_CONFIG_LIGHTWEIGHT_UPDATED = 16
|
| 39 |
+
SERVE_AUTOSCALING_CONFIG_LIGHTWEIGHT_UPDATED = 17
|
| 40 |
+
SERVE_RAY_SERVE_HANDLE_API_USED = 18
|
| 41 |
+
SERVE_RAY_SERVE_SYNC_HANDLE_API_USED = 19
|
| 42 |
+
SERVE_DEPLOYMENT_HANDLE_API_USED = 20
|
| 43 |
+
SERVE_DEPLOYMENT_HANDLE_TO_OBJECT_REF_API_USED = 21
|
| 44 |
+
SERVE_MULTIPLEXED_API_USED = 22
|
| 45 |
+
SERVE_HTTP_PROXY_USED = 23
|
| 46 |
+
SERVE_GRPC_PROXY_USED = 24
|
| 47 |
+
SERVE_STATUS_API_USED = 25
|
| 48 |
+
SERVE_GET_APP_HANDLE_API_USED = 26
|
| 49 |
+
SERVE_GET_DEPLOYMENT_HANDLE_API_USED = 27
|
| 50 |
+
SERVE_APP_CONTAINER_RUNTIME_ENV_USED = 28
|
| 51 |
+
SERVE_DEPLOYMENT_CONTAINER_RUNTIME_ENV_USED = 29
|
| 52 |
+
SERVE_NUM_NODE_COMPACTIONS = 30
|
| 53 |
+
SERVE_AUTO_NUM_REPLICAS_USED = 31
|
| 54 |
+
CORE_STATE_API_LIST_ACTORS = 100
|
| 55 |
+
CORE_STATE_API_LIST_TASKS = 101
|
| 56 |
+
CORE_STATE_API_LIST_JOBS = 102
|
| 57 |
+
CORE_STATE_API_LIST_NODES = 103
|
| 58 |
+
CORE_STATE_API_LIST_PLACEMENT_GROUPS = 104
|
| 59 |
+
CORE_STATE_API_LIST_WORKERS = 105
|
| 60 |
+
CORE_STATE_API_LIST_OBJECTS = 106
|
| 61 |
+
CORE_STATE_API_LIST_RUNTIME_ENVS = 107
|
| 62 |
+
CORE_STATE_API_LIST_CLUSTER_EVENTS = 108
|
| 63 |
+
CORE_STATE_API_LIST_LOGS = 109
|
| 64 |
+
CORE_STATE_API_GET_LOG = 110
|
| 65 |
+
CORE_STATE_API_SUMMARIZE_TASKS = 111
|
| 66 |
+
CORE_STATE_API_SUMMARIZE_ACTORS = 112
|
| 67 |
+
CORE_STATE_API_SUMMARIZE_OBJECTS = 113
|
| 68 |
+
DASHBOARD_USED = 200
|
| 69 |
+
DASHBOARD_METRICS_PROMETHEUS_ENABLED = 201
|
| 70 |
+
DASHBOARD_METRICS_GRAFANA_ENABLED = 202
|
| 71 |
+
PG_NUM_CREATED = 300
|
| 72 |
+
ACTOR_NUM_CREATED = 301
|
| 73 |
+
WORKER_CRASH_SYSTEM_ERROR = 302
|
| 74 |
+
WORKER_CRASH_OOM = 303
|
| 75 |
+
RAY_GET_TIMEOUT_ZERO = 304
|
| 76 |
+
NUM_ACTOR_CREATION_TASKS = 305
|
| 77 |
+
NUM_ACTOR_TASKS = 306
|
| 78 |
+
NUM_NORMAL_TASKS = 307
|
| 79 |
+
NUM_DRIVERS = 308
|
| 80 |
+
EXPERIMENTAL_STATE_API_IMPORT = 309
|
| 81 |
+
AUTOSCALER_VERSION = 310
|
| 82 |
+
DATA_LOGICAL_OPS = 400
|
| 83 |
+
AIR_TRAINER = 500
|
| 84 |
+
TUNE_SEARCHER = 501
|
| 85 |
+
TUNE_SCHEDULER = 502
|
| 86 |
+
AIR_ENV_VARS = 503
|
| 87 |
+
AIR_SETUP_WANDB_INTEGRATION_USED = 504
|
| 88 |
+
AIR_SETUP_MLFLOW_INTEGRATION_USED = 505
|
| 89 |
+
AIR_CALLBACKS = 506
|
| 90 |
+
AIR_STORAGE_CONFIGURATION = 507
|
| 91 |
+
AIR_ENTRYPOINT = 508
|
| 92 |
+
TRAIN_TORCH_GET_DEVICE = 509
|
| 93 |
+
TRAIN_TORCH_PREPARE_MODEL = 510
|
| 94 |
+
TRAIN_TORCH_PREPARE_DATALOADER = 511
|
| 95 |
+
TRAIN_LIGHTNING_PREPARE_TRAINER = 512
|
| 96 |
+
TRAIN_LIGHTNING_RAYTRAINREPORTCALLBACK = 513
|
| 97 |
+
TRAIN_LIGHTNING_RAYDDPSTRATEGY = 514
|
| 98 |
+
TRAIN_LIGHTNING_RAYFSDPSTRATEGY = 515
|
| 99 |
+
TRAIN_LIGHTNING_RAYDEEPSPEEDSTRATEGY = 516
|
| 100 |
+
TRAIN_LIGHTNING_RAYLIGHTNINGENVIRONMENT = 517
|
| 101 |
+
TRAIN_TRANSFORMERS_PREPARE_TRAINER = 518
|
| 102 |
+
TRAIN_TRANSFORMERS_RAYTRAINREPORTCALLBACK = 519
|
| 103 |
+
TRAIN_TORCH_GET_DEVICES = 520
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
if _descriptor._USE_C_DESCRIPTORS == False:
|
| 107 |
+
|
| 108 |
+
DESCRIPTOR._options = None
|
| 109 |
+
_TAGKEY._serialized_start=44
|
| 110 |
+
_TAGKEY._serialized_end=2631
|
| 111 |
+
# @@protoc_insertion_point(module_scope)
|
.venv/lib/python3.11/site-packages/ray/core/generated/usage_pb2_grpc.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
|
| 2 |
+
"""Client and server classes corresponding to protobuf-defined services."""
|
| 3 |
+
import grpc
|
| 4 |
+
|
.venv/lib/python3.11/site-packages/ray/core/src/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (185 Bytes). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/core/src/plasma/__init__.py
ADDED
|
File without changes
|
.venv/lib/python3.11/site-packages/ray/core/src/plasma/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (192 Bytes). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/core/src/ray/__init__.py
ADDED
|
File without changes
|
.venv/lib/python3.11/site-packages/ray/core/src/ray/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (189 Bytes). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/core/src/ray/raylet/__init__.py
ADDED
|
File without changes
|
.venv/lib/python3.11/site-packages/ray/core/src/ray/raylet/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (196 Bytes). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/dashboard/modules/metrics/__pycache__/install_and_start_prometheus.cpython-311.pyc
ADDED
|
Binary file (8.82 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/dashboard/modules/metrics/dashboards/__init__.py
ADDED
|
File without changes
|
.venv/lib/python3.11/site-packages/ray/dashboard/modules/metrics/dashboards/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (213 Bytes). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/dashboard/modules/metrics/dashboards/__pycache__/common.cpython-311.pyc
ADDED
|
Binary file (3.28 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/dashboard/modules/metrics/dashboards/__pycache__/data_dashboard_panels.cpython-311.pyc
ADDED
|
Binary file (13.6 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/dashboard/modules/metrics/dashboards/__pycache__/default_dashboard_panels.cpython-311.pyc
ADDED
|
Binary file (18.6 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/dashboard/modules/metrics/dashboards/__pycache__/serve_dashboard_panels.cpython-311.pyc
ADDED
|
Binary file (14.3 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/dashboard/modules/metrics/dashboards/__pycache__/serve_deployment_dashboard_panels.cpython-311.pyc
ADDED
|
Binary file (8.05 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/dashboard/modules/metrics/dashboards/data_grafana_dashboard_base.json
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"annotations": {
|
| 3 |
+
"list": [
|
| 4 |
+
{
|
| 5 |
+
"builtIn": 1,
|
| 6 |
+
"datasource": "-- Grafana --",
|
| 7 |
+
"enable": true,
|
| 8 |
+
"hide": true,
|
| 9 |
+
"iconColor": "rgba(0, 211, 255, 1)",
|
| 10 |
+
"name": "Annotations & Alerts",
|
| 11 |
+
"type": "dashboard"
|
| 12 |
+
}
|
| 13 |
+
]
|
| 14 |
+
},
|
| 15 |
+
"editable": true,
|
| 16 |
+
"gnetId": null,
|
| 17 |
+
"graphTooltip": 0,
|
| 18 |
+
"iteration": 1667344411089,
|
| 19 |
+
"links": [],
|
| 20 |
+
"panels": [],
|
| 21 |
+
"refresh": false,
|
| 22 |
+
"schemaVersion": 27,
|
| 23 |
+
"style": "dark",
|
| 24 |
+
"tags": [],
|
| 25 |
+
"templating": {
|
| 26 |
+
"list": [
|
| 27 |
+
{
|
| 28 |
+
"current": {
|
| 29 |
+
"selected": false
|
| 30 |
+
},
|
| 31 |
+
"description": "Filter queries of a specific Prometheus type.",
|
| 32 |
+
"hide": 2,
|
| 33 |
+
"includeAll": false,
|
| 34 |
+
"multi": false,
|
| 35 |
+
"name": "datasource",
|
| 36 |
+
"options": [],
|
| 37 |
+
"query": "prometheus",
|
| 38 |
+
"refresh": 1,
|
| 39 |
+
"regex": "",
|
| 40 |
+
"skipUrlSync": false,
|
| 41 |
+
"type": "datasource"
|
| 42 |
+
},
|
| 43 |
+
{
|
| 44 |
+
"allValue": ".+",
|
| 45 |
+
"current": {
|
| 46 |
+
"selected": false
|
| 47 |
+
},
|
| 48 |
+
"datasource": "${datasource}",
|
| 49 |
+
"definition": "label_values(ray_data_allocated_bytes{{{global_filters}}}, SessionName)",
|
| 50 |
+
"description": "Filter queries to specific ray sessions.",
|
| 51 |
+
"error": null,
|
| 52 |
+
"hide": 0,
|
| 53 |
+
"includeAll": true,
|
| 54 |
+
"label": null,
|
| 55 |
+
"multi": false,
|
| 56 |
+
"name": "SessionName",
|
| 57 |
+
"options": [],
|
| 58 |
+
"query": {
|
| 59 |
+
"query": "label_values(ray_data_allocated_bytes{{{global_filters}}}, SessionName)",
|
| 60 |
+
"refId": "StandardVariableQuery"
|
| 61 |
+
},
|
| 62 |
+
"refresh": 2,
|
| 63 |
+
"regex": "",
|
| 64 |
+
"skipUrlSync": false,
|
| 65 |
+
"sort": 2,
|
| 66 |
+
"tagValuesQuery": "",
|
| 67 |
+
"tags": [],
|
| 68 |
+
"tagsQuery": "",
|
| 69 |
+
"type": "query",
|
| 70 |
+
"useTags": false
|
| 71 |
+
},
|
| 72 |
+
{
|
| 73 |
+
"allValue": ".+",
|
| 74 |
+
"current": {
|
| 75 |
+
"selected": true,
|
| 76 |
+
"text": [
|
| 77 |
+
"All"
|
| 78 |
+
],
|
| 79 |
+
"value": [
|
| 80 |
+
"$__all"
|
| 81 |
+
]
|
| 82 |
+
},
|
| 83 |
+
"datasource": "${datasource}",
|
| 84 |
+
"definition": "label_values(ray_data_allocated_bytes{{{global_filters}}}, dataset)",
|
| 85 |
+
"description": null,
|
| 86 |
+
"error": null,
|
| 87 |
+
"hide": 0,
|
| 88 |
+
"includeAll": true,
|
| 89 |
+
"label": null,
|
| 90 |
+
"multi": true,
|
| 91 |
+
"name": "DatasetID",
|
| 92 |
+
"options": [],
|
| 93 |
+
"query": {
|
| 94 |
+
"query": "label_values(ray_data_allocated_bytes{{{global_filters}}}, dataset)",
|
| 95 |
+
"refId": "Prometheus-Dataset-Variable-Query"
|
| 96 |
+
},
|
| 97 |
+
"refresh": 2,
|
| 98 |
+
"regex": "",
|
| 99 |
+
"skipUrlSync": false,
|
| 100 |
+
"sort": 0,
|
| 101 |
+
"tagValuesQuery": "",
|
| 102 |
+
"tags": [],
|
| 103 |
+
"tagsQuery": "",
|
| 104 |
+
"type": "query",
|
| 105 |
+
"useTags": false
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"current": {
|
| 109 |
+
"selected": false
|
| 110 |
+
},
|
| 111 |
+
"datasource": "${datasource}",
|
| 112 |
+
"definition": "label_values(ray_node_network_receive_speed{{{global_filters}}}, ray_io_cluster)",
|
| 113 |
+
"description": "Filter queries to specific Ray clusters for KubeRay. When ingesting metrics across multiple ray clusters, the ray_io_cluster label should be set per cluster. For KubeRay users, this is done automaticaly with Prometheus PodMonitor.",
|
| 114 |
+
"error": null,
|
| 115 |
+
"hide": 0,
|
| 116 |
+
"includeAll": false,
|
| 117 |
+
"label": null,
|
| 118 |
+
"multi": false,
|
| 119 |
+
"name": "Cluster",
|
| 120 |
+
"options": [],
|
| 121 |
+
"query": {
|
| 122 |
+
"query": "label_values(ray_node_network_receive_speed{{{global_filters}}}, ray_io_cluster)",
|
| 123 |
+
"refId": "StandardVariableQuery"
|
| 124 |
+
},
|
| 125 |
+
"refresh": 2,
|
| 126 |
+
"regex": "",
|
| 127 |
+
"skipUrlSync": false,
|
| 128 |
+
"sort": 2,
|
| 129 |
+
"tagValuesQuery": "",
|
| 130 |
+
"tags": [],
|
| 131 |
+
"tagsQuery": "",
|
| 132 |
+
"type": "query",
|
| 133 |
+
"useTags": false
|
| 134 |
+
}
|
| 135 |
+
]
|
| 136 |
+
},
|
| 137 |
+
"rayMeta": ["excludesSystemRoutes"],
|
| 138 |
+
"time": {
|
| 139 |
+
"from": "now-30m",
|
| 140 |
+
"to": "now"
|
| 141 |
+
},
|
| 142 |
+
"timepicker": {},
|
| 143 |
+
"timezone": "",
|
| 144 |
+
"title": "Data Dashboard",
|
| 145 |
+
"uid": "rayDataDashboard",
|
| 146 |
+
"version": 1
|
| 147 |
+
}
|
.venv/lib/python3.11/site-packages/ray/dashboard/modules/metrics/dashboards/default_grafana_dashboard_base.json
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"annotations": {
|
| 3 |
+
"list": [
|
| 4 |
+
{
|
| 5 |
+
"builtIn": 1,
|
| 6 |
+
"datasource": "-- Grafana --",
|
| 7 |
+
"enable": true,
|
| 8 |
+
"hide": true,
|
| 9 |
+
"iconColor": "rgba(0, 211, 255, 1)",
|
| 10 |
+
"name": "Annotations & Alerts",
|
| 11 |
+
"type": "dashboard"
|
| 12 |
+
}
|
| 13 |
+
]
|
| 14 |
+
},
|
| 15 |
+
"editable": true,
|
| 16 |
+
"gnetId": null,
|
| 17 |
+
"graphTooltip": 0,
|
| 18 |
+
"iteration": 1667344411089,
|
| 19 |
+
"links": [],
|
| 20 |
+
"panels": [],
|
| 21 |
+
"refresh": false,
|
| 22 |
+
"schemaVersion": 27,
|
| 23 |
+
"style": "dark",
|
| 24 |
+
"tags": [],
|
| 25 |
+
"templating": {
|
| 26 |
+
"list": [
|
| 27 |
+
{
|
| 28 |
+
"current": {
|
| 29 |
+
"selected": false
|
| 30 |
+
},
|
| 31 |
+
"description": "Filter queries of a specific Prometheus type.",
|
| 32 |
+
"hide": 2,
|
| 33 |
+
"includeAll": false,
|
| 34 |
+
"multi": false,
|
| 35 |
+
"name": "datasource",
|
| 36 |
+
"options": [],
|
| 37 |
+
"query": "prometheus",
|
| 38 |
+
"refresh": 1,
|
| 39 |
+
"regex": "",
|
| 40 |
+
"skipUrlSync": false,
|
| 41 |
+
"type": "datasource"
|
| 42 |
+
},
|
| 43 |
+
{
|
| 44 |
+
"allValue": ".+",
|
| 45 |
+
"current": {
|
| 46 |
+
"selected": false
|
| 47 |
+
},
|
| 48 |
+
"datasource": "${datasource}",
|
| 49 |
+
"definition": "label_values(ray_node_network_receive_speed{{{global_filters}}}, SessionName)",
|
| 50 |
+
"description": "Filter queries to specific ray sessions.",
|
| 51 |
+
"error": null,
|
| 52 |
+
"hide": 0,
|
| 53 |
+
"includeAll": true,
|
| 54 |
+
"label": null,
|
| 55 |
+
"multi": false,
|
| 56 |
+
"name": "SessionName",
|
| 57 |
+
"options": [],
|
| 58 |
+
"query": {
|
| 59 |
+
"query": "label_values(ray_node_network_receive_speed{{{global_filters}}}, SessionName)",
|
| 60 |
+
"refId": "StandardVariableQuery"
|
| 61 |
+
},
|
| 62 |
+
"refresh": 2,
|
| 63 |
+
"regex": "",
|
| 64 |
+
"skipUrlSync": false,
|
| 65 |
+
"sort": 2,
|
| 66 |
+
"tagValuesQuery": "",
|
| 67 |
+
"tags": [],
|
| 68 |
+
"tagsQuery": "",
|
| 69 |
+
"type": "query",
|
| 70 |
+
"useTags": false
|
| 71 |
+
},
|
| 72 |
+
{
|
| 73 |
+
"allValue": ".+",
|
| 74 |
+
"current": {
|
| 75 |
+
"selected": true,
|
| 76 |
+
"text": ["All"],
|
| 77 |
+
"value": ["$__all"]
|
| 78 |
+
},
|
| 79 |
+
"datasource": "${datasource}",
|
| 80 |
+
"definition": "label_values(ray_node_network_receive_speed{{SessionName=~\"$SessionName\",{global_filters}}}, instance)",
|
| 81 |
+
"description": null,
|
| 82 |
+
"error": null,
|
| 83 |
+
"hide": 0,
|
| 84 |
+
"includeAll": true,
|
| 85 |
+
"label": null,
|
| 86 |
+
"multi": true,
|
| 87 |
+
"name": "Instance",
|
| 88 |
+
"options": [],
|
| 89 |
+
"query": {
|
| 90 |
+
"query": "label_values(ray_node_network_receive_speed{{SessionName=~\"$SessionName\",{global_filters}}}, instance)",
|
| 91 |
+
"refId": "Prometheus-Instance-Variable-Query"
|
| 92 |
+
},
|
| 93 |
+
"refresh": 2,
|
| 94 |
+
"regex": "",
|
| 95 |
+
"skipUrlSync": false,
|
| 96 |
+
"sort": 0,
|
| 97 |
+
"tagValuesQuery": "",
|
| 98 |
+
"tags": [],
|
| 99 |
+
"tagsQuery": "",
|
| 100 |
+
"type": "query",
|
| 101 |
+
"useTags": false
|
| 102 |
+
},
|
| 103 |
+
{
|
| 104 |
+
"current": {
|
| 105 |
+
"selected": false
|
| 106 |
+
},
|
| 107 |
+
"datasource": "${datasource}",
|
| 108 |
+
"definition": "label_values(ray_node_network_receive_speed{{{global_filters}}}, ray_io_cluster)",
|
| 109 |
+
"description": "Filter queries to specific Ray clusters for KubeRay. When ingesting metrics across multiple ray clusters, the ray_io_cluster label should be set per cluster. For KubeRay users, this is done automaticaly with Prometheus PodMonitor.",
|
| 110 |
+
"error": null,
|
| 111 |
+
"hide": 0,
|
| 112 |
+
"includeAll": false,
|
| 113 |
+
"label": null,
|
| 114 |
+
"multi": false,
|
| 115 |
+
"name": "Cluster",
|
| 116 |
+
"options": [],
|
| 117 |
+
"query": {
|
| 118 |
+
"query": "label_values(ray_node_network_receive_speed{{{global_filters}}}, ray_io_cluster)",
|
| 119 |
+
"refId": "StandardVariableQuery"
|
| 120 |
+
},
|
| 121 |
+
"refresh": 2,
|
| 122 |
+
"regex": "",
|
| 123 |
+
"skipUrlSync": false,
|
| 124 |
+
"sort": 2,
|
| 125 |
+
"tagValuesQuery": "",
|
| 126 |
+
"tags": [],
|
| 127 |
+
"tagsQuery": "",
|
| 128 |
+
"type": "query",
|
| 129 |
+
"useTags": false
|
| 130 |
+
}
|
| 131 |
+
]
|
| 132 |
+
},
|
| 133 |
+
"time": {
|
| 134 |
+
"from": "now-30m",
|
| 135 |
+
"to": "now"
|
| 136 |
+
},
|
| 137 |
+
"timepicker": {},
|
| 138 |
+
"timezone": "",
|
| 139 |
+
"title": "Default Dashboard",
|
| 140 |
+
"uid": "rayDefaultDashboard",
|
| 141 |
+
"version": 4
|
| 142 |
+
}
|
.venv/lib/python3.11/site-packages/ray/util/client/__init__.py
ADDED
|
@@ -0,0 +1,302 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import os
|
| 3 |
+
import threading
|
| 4 |
+
from typing import Any, Dict, List, Optional, Tuple
|
| 5 |
+
|
| 6 |
+
import ray._private.ray_constants as ray_constants
|
| 7 |
+
from ray._private.client_mode_hook import (
|
| 8 |
+
_explicitly_disable_client_mode,
|
| 9 |
+
_explicitly_enable_client_mode,
|
| 10 |
+
)
|
| 11 |
+
from ray._private.ray_logging import setup_logger
|
| 12 |
+
from ray.job_config import JobConfig
|
| 13 |
+
from ray.util.annotations import DeveloperAPI
|
| 14 |
+
from ray._private.utils import check_version_info
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class _ClientContext:
|
| 21 |
+
def __init__(self):
|
| 22 |
+
from ray.util.client.api import _ClientAPI
|
| 23 |
+
|
| 24 |
+
self.api = _ClientAPI()
|
| 25 |
+
self.client_worker = None
|
| 26 |
+
self._server = None
|
| 27 |
+
self._connected_with_init = False
|
| 28 |
+
self._inside_client_test = False
|
| 29 |
+
|
| 30 |
+
def connect(
|
| 31 |
+
self,
|
| 32 |
+
conn_str: str,
|
| 33 |
+
job_config: JobConfig = None,
|
| 34 |
+
secure: bool = False,
|
| 35 |
+
metadata: List[Tuple[str, str]] = None,
|
| 36 |
+
connection_retries: int = 3,
|
| 37 |
+
namespace: str = None,
|
| 38 |
+
*,
|
| 39 |
+
ignore_version: bool = False,
|
| 40 |
+
_credentials: Optional["grpc.ChannelCredentials"] = None, # noqa: F821
|
| 41 |
+
ray_init_kwargs: Optional[Dict[str, Any]] = None,
|
| 42 |
+
) -> Dict[str, Any]:
|
| 43 |
+
"""Connect the Ray Client to a server.
|
| 44 |
+
|
| 45 |
+
Args:
|
| 46 |
+
conn_str: Connection string, in the form "[host]:port"
|
| 47 |
+
job_config: The job config of the server.
|
| 48 |
+
secure: Whether to use a TLS secured gRPC channel
|
| 49 |
+
metadata: gRPC metadata to send on connect
|
| 50 |
+
connection_retries: number of connection attempts to make
|
| 51 |
+
ignore_version: whether to ignore Python or Ray version mismatches.
|
| 52 |
+
This should only be used for debugging purposes.
|
| 53 |
+
|
| 54 |
+
Returns:
|
| 55 |
+
Dictionary of connection info, e.g., {"num_clients": 1}.
|
| 56 |
+
"""
|
| 57 |
+
# Delay imports until connect to avoid circular imports.
|
| 58 |
+
from ray.util.client.worker import Worker
|
| 59 |
+
|
| 60 |
+
if self.client_worker is not None:
|
| 61 |
+
if self._connected_with_init:
|
| 62 |
+
return
|
| 63 |
+
raise Exception("ray.init() called, but ray client is already connected")
|
| 64 |
+
if not self._inside_client_test:
|
| 65 |
+
# If we're calling a client connect specifically and we're not
|
| 66 |
+
# currently in client mode, ensure we are.
|
| 67 |
+
_explicitly_enable_client_mode()
|
| 68 |
+
if namespace is not None:
|
| 69 |
+
job_config = job_config or JobConfig()
|
| 70 |
+
job_config.set_ray_namespace(namespace)
|
| 71 |
+
|
| 72 |
+
logging_level = ray_constants.LOGGER_LEVEL
|
| 73 |
+
logging_format = ray_constants.LOGGER_FORMAT
|
| 74 |
+
|
| 75 |
+
if ray_init_kwargs is None:
|
| 76 |
+
ray_init_kwargs = {}
|
| 77 |
+
|
| 78 |
+
# NOTE(architkulkarni): env_hook is not supported with Ray Client.
|
| 79 |
+
ray_init_kwargs["_skip_env_hook"] = True
|
| 80 |
+
|
| 81 |
+
if ray_init_kwargs.get("logging_level") is not None:
|
| 82 |
+
logging_level = ray_init_kwargs["logging_level"]
|
| 83 |
+
if ray_init_kwargs.get("logging_format") is not None:
|
| 84 |
+
logging_format = ray_init_kwargs["logging_format"]
|
| 85 |
+
|
| 86 |
+
setup_logger(logging_level, logging_format)
|
| 87 |
+
|
| 88 |
+
try:
|
| 89 |
+
self.client_worker = Worker(
|
| 90 |
+
conn_str,
|
| 91 |
+
secure=secure,
|
| 92 |
+
_credentials=_credentials,
|
| 93 |
+
metadata=metadata,
|
| 94 |
+
connection_retries=connection_retries,
|
| 95 |
+
)
|
| 96 |
+
self.api.worker = self.client_worker
|
| 97 |
+
self.client_worker._server_init(job_config, ray_init_kwargs)
|
| 98 |
+
conn_info = self.client_worker.connection_info()
|
| 99 |
+
self._check_versions(conn_info, ignore_version)
|
| 100 |
+
self._register_serializers()
|
| 101 |
+
return conn_info
|
| 102 |
+
except Exception:
|
| 103 |
+
self.disconnect()
|
| 104 |
+
raise
|
| 105 |
+
|
| 106 |
+
def _register_serializers(self):
|
| 107 |
+
"""Register the custom serializer addons at the client side.
|
| 108 |
+
|
| 109 |
+
The server side should have already registered the serializers via
|
| 110 |
+
regular worker's serialization_context mechanism.
|
| 111 |
+
"""
|
| 112 |
+
import ray.util.serialization_addons
|
| 113 |
+
from ray.util.serialization import StandaloneSerializationContext
|
| 114 |
+
|
| 115 |
+
ctx = StandaloneSerializationContext()
|
| 116 |
+
ray.util.serialization_addons.apply(ctx)
|
| 117 |
+
|
| 118 |
+
def _check_versions(self, conn_info: Dict[str, Any], ignore_version: bool) -> None:
|
| 119 |
+
# conn_info has "python_version" and "ray_version" so it can be used to compare.
|
| 120 |
+
ignore_version = ignore_version or ("RAY_IGNORE_VERSION_MISMATCH" in os.environ)
|
| 121 |
+
check_version_info(
|
| 122 |
+
conn_info,
|
| 123 |
+
"Ray Client",
|
| 124 |
+
raise_on_mismatch=not ignore_version,
|
| 125 |
+
python_version_match_level="minor",
|
| 126 |
+
)
|
| 127 |
+
|
| 128 |
+
def disconnect(self):
|
| 129 |
+
"""Disconnect the Ray Client."""
|
| 130 |
+
from ray.util.client.api import _ClientAPI
|
| 131 |
+
|
| 132 |
+
if self.client_worker is not None:
|
| 133 |
+
self.client_worker.close()
|
| 134 |
+
self.api = _ClientAPI()
|
| 135 |
+
self.client_worker = None
|
| 136 |
+
|
| 137 |
+
# remote can be called outside of a connection, which is why it
|
| 138 |
+
# exists on the same API layer as connect() itself.
|
| 139 |
+
def remote(self, *args, **kwargs):
|
| 140 |
+
"""remote is the hook stub passed on to replace `ray.remote`.
|
| 141 |
+
|
| 142 |
+
This sets up remote functions or actors, as the decorator,
|
| 143 |
+
but does not execute them.
|
| 144 |
+
|
| 145 |
+
Args:
|
| 146 |
+
args: opaque arguments
|
| 147 |
+
kwargs: opaque keyword arguments
|
| 148 |
+
"""
|
| 149 |
+
return self.api.remote(*args, **kwargs)
|
| 150 |
+
|
| 151 |
+
def __getattr__(self, key: str):
|
| 152 |
+
if self.is_connected():
|
| 153 |
+
return getattr(self.api, key)
|
| 154 |
+
elif key in ["is_initialized", "_internal_kv_initialized"]:
|
| 155 |
+
# Client is not connected, thus Ray is not considered initialized.
|
| 156 |
+
return lambda: False
|
| 157 |
+
else:
|
| 158 |
+
raise Exception(
|
| 159 |
+
"Ray Client is not connected. Please connect by calling `ray.init`."
|
| 160 |
+
)
|
| 161 |
+
|
| 162 |
+
def is_connected(self) -> bool:
|
| 163 |
+
if self.client_worker is None:
|
| 164 |
+
return False
|
| 165 |
+
return self.client_worker.is_connected()
|
| 166 |
+
|
| 167 |
+
def init(self, *args, **kwargs):
|
| 168 |
+
if self._server is not None:
|
| 169 |
+
raise Exception("Trying to start two instances of ray via client")
|
| 170 |
+
import ray.util.client.server.server as ray_client_server
|
| 171 |
+
|
| 172 |
+
server_handle, address_info = ray_client_server.init_and_serve(
|
| 173 |
+
"127.0.0.1:50051", *args, **kwargs
|
| 174 |
+
)
|
| 175 |
+
self._server = server_handle.grpc_server
|
| 176 |
+
self.connect("127.0.0.1:50051")
|
| 177 |
+
self._connected_with_init = True
|
| 178 |
+
return address_info
|
| 179 |
+
|
| 180 |
+
def shutdown(self, _exiting_interpreter=False):
|
| 181 |
+
self.disconnect()
|
| 182 |
+
import ray.util.client.server.server as ray_client_server
|
| 183 |
+
|
| 184 |
+
if self._server is None:
|
| 185 |
+
return
|
| 186 |
+
ray_client_server.shutdown_with_server(self._server, _exiting_interpreter)
|
| 187 |
+
self._server = None
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
# All connected context will be put here
|
| 191 |
+
# This struct will be guarded by a lock for thread safety
|
| 192 |
+
_all_contexts = set()
|
| 193 |
+
_lock = threading.Lock()
|
| 194 |
+
|
| 195 |
+
# This is the default context which is used when allow_multiple is not True
|
| 196 |
+
_default_context = _ClientContext()
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
@DeveloperAPI
|
| 200 |
+
class RayAPIStub:
|
| 201 |
+
"""This class stands in as the replacement API for the `import ray` module.
|
| 202 |
+
|
| 203 |
+
Much like the ray module, this mostly delegates the work to the
|
| 204 |
+
_client_worker. As parts of the ray API are covered, they are piped through
|
| 205 |
+
here or on the client worker API.
|
| 206 |
+
"""
|
| 207 |
+
|
| 208 |
+
def __init__(self):
|
| 209 |
+
self._cxt = threading.local()
|
| 210 |
+
self._cxt.handler = _default_context
|
| 211 |
+
self._inside_client_test = False
|
| 212 |
+
|
| 213 |
+
def get_context(self):
|
| 214 |
+
try:
|
| 215 |
+
return self._cxt.__getattribute__("handler")
|
| 216 |
+
except AttributeError:
|
| 217 |
+
self._cxt.handler = _default_context
|
| 218 |
+
return self._cxt.handler
|
| 219 |
+
|
| 220 |
+
def set_context(self, cxt):
|
| 221 |
+
old_cxt = self.get_context()
|
| 222 |
+
if cxt is None:
|
| 223 |
+
self._cxt.handler = _ClientContext()
|
| 224 |
+
else:
|
| 225 |
+
self._cxt.handler = cxt
|
| 226 |
+
return old_cxt
|
| 227 |
+
|
| 228 |
+
def is_default(self):
|
| 229 |
+
return self.get_context() == _default_context
|
| 230 |
+
|
| 231 |
+
def connect(self, *args, **kw_args):
|
| 232 |
+
self.get_context()._inside_client_test = self._inside_client_test
|
| 233 |
+
conn = self.get_context().connect(*args, **kw_args)
|
| 234 |
+
global _lock, _all_contexts
|
| 235 |
+
with _lock:
|
| 236 |
+
_all_contexts.add(self._cxt.handler)
|
| 237 |
+
return conn
|
| 238 |
+
|
| 239 |
+
def disconnect(self, *args, **kw_args):
|
| 240 |
+
global _lock, _all_contexts, _default_context
|
| 241 |
+
with _lock:
|
| 242 |
+
if _default_context == self.get_context():
|
| 243 |
+
for cxt in _all_contexts:
|
| 244 |
+
cxt.disconnect(*args, **kw_args)
|
| 245 |
+
_all_contexts = set()
|
| 246 |
+
else:
|
| 247 |
+
self.get_context().disconnect(*args, **kw_args)
|
| 248 |
+
if self.get_context() in _all_contexts:
|
| 249 |
+
_all_contexts.remove(self.get_context())
|
| 250 |
+
if len(_all_contexts) == 0:
|
| 251 |
+
_explicitly_disable_client_mode()
|
| 252 |
+
|
| 253 |
+
def remote(self, *args, **kwargs):
|
| 254 |
+
return self.get_context().remote(*args, **kwargs)
|
| 255 |
+
|
| 256 |
+
def __getattr__(self, name):
|
| 257 |
+
return self.get_context().__getattr__(name)
|
| 258 |
+
|
| 259 |
+
def is_connected(self, *args, **kwargs):
|
| 260 |
+
return self.get_context().is_connected(*args, **kwargs)
|
| 261 |
+
|
| 262 |
+
def init(self, *args, **kwargs):
|
| 263 |
+
ret = self.get_context().init(*args, **kwargs)
|
| 264 |
+
global _lock, _all_contexts
|
| 265 |
+
with _lock:
|
| 266 |
+
_all_contexts.add(self._cxt.handler)
|
| 267 |
+
return ret
|
| 268 |
+
|
| 269 |
+
def shutdown(self, *args, **kwargs):
|
| 270 |
+
global _lock, _all_contexts
|
| 271 |
+
with _lock:
|
| 272 |
+
if _default_context == self.get_context():
|
| 273 |
+
for cxt in _all_contexts:
|
| 274 |
+
cxt.shutdown(*args, **kwargs)
|
| 275 |
+
_all_contexts = set()
|
| 276 |
+
else:
|
| 277 |
+
self.get_context().shutdown(*args, **kwargs)
|
| 278 |
+
if self.get_context() in _all_contexts:
|
| 279 |
+
_all_contexts.remove(self.get_context())
|
| 280 |
+
if len(_all_contexts) == 0:
|
| 281 |
+
_explicitly_disable_client_mode()
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
ray = RayAPIStub()
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
@DeveloperAPI
|
| 288 |
+
def num_connected_contexts():
|
| 289 |
+
"""Return the number of client connections active."""
|
| 290 |
+
global _lock, _all_contexts
|
| 291 |
+
with _lock:
|
| 292 |
+
return len(_all_contexts)
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
# Someday we might add methods in this module so that someone who
|
| 296 |
+
# tries to `import ray_client as ray` -- as a module, instead of
|
| 297 |
+
# `from ray_client import ray` -- as the API stub
|
| 298 |
+
# still gets expected functionality. This is the way the ray package
|
| 299 |
+
# worked in the past.
|
| 300 |
+
#
|
| 301 |
+
# This really calls for PEP 562: https://www.python.org/dev/peps/pep-0562/
|
| 302 |
+
# But until Python 3.6 is EOL, here we are.
|
.venv/lib/python3.11/site-packages/ray/util/client/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (15.1 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/util/client/__pycache__/api.cpython-311.pyc
ADDED
|
Binary file (21 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/util/client/__pycache__/client_app.cpython-311.pyc
ADDED
|
Binary file (4.07 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/util/client/__pycache__/client_pickler.cpython-311.pyc
ADDED
|
Binary file (8.17 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/util/client/__pycache__/common.cpython-311.pyc
ADDED
|
Binary file (50.8 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/util/client/__pycache__/dataclient.cpython-311.pyc
ADDED
|
Binary file (28.3 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/util/client/__pycache__/logsclient.cpython-311.pyc
ADDED
|
Binary file (7.57 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/util/client/__pycache__/options.cpython-311.pyc
ADDED
|
Binary file (2.29 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/util/client/__pycache__/ray_client_helpers.cpython-311.pyc
ADDED
|
Binary file (7.58 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/util/client/__pycache__/runtime_context.cpython-311.pyc
ADDED
|
Binary file (4.36 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/util/client/__pycache__/worker.cpython-311.pyc
ADDED
|
Binary file (46.3 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/util/client/client_pickler.py
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Implements the client side of the client/server pickling protocol.
|
| 2 |
+
|
| 3 |
+
All ray client client/server data transfer happens through this pickling
|
| 4 |
+
protocol. The model is as follows:
|
| 5 |
+
|
| 6 |
+
* All Client objects (eg ClientObjectRef) always live on the client and
|
| 7 |
+
are never represented in the server
|
| 8 |
+
* All Ray objects (eg, ray.ObjectRef) always live on the server and are
|
| 9 |
+
never returned to the client
|
| 10 |
+
* In order to translate between these two references, PickleStub tuples
|
| 11 |
+
are generated as persistent ids in the data blobs during the pickling
|
| 12 |
+
and unpickling of these objects.
|
| 13 |
+
|
| 14 |
+
The PickleStubs have just enough information to find or generate their
|
| 15 |
+
associated partner object on either side.
|
| 16 |
+
|
| 17 |
+
This also has the advantage of avoiding predefined pickle behavior for ray
|
| 18 |
+
objects, which may include ray internal reference counting.
|
| 19 |
+
|
| 20 |
+
ClientPickler dumps things from the client into the appropriate stubs
|
| 21 |
+
ServerUnpickler loads stubs from the server into their client counterparts.
|
| 22 |
+
"""
|
| 23 |
+
|
| 24 |
+
import io
|
| 25 |
+
|
| 26 |
+
from typing import NamedTuple
|
| 27 |
+
from typing import Any
|
| 28 |
+
from typing import Dict
|
| 29 |
+
from typing import Optional
|
| 30 |
+
|
| 31 |
+
import ray.cloudpickle as cloudpickle
|
| 32 |
+
from ray.util.client import RayAPIStub
|
| 33 |
+
from ray.util.client.common import ClientObjectRef
|
| 34 |
+
from ray.util.client.common import ClientActorHandle
|
| 35 |
+
from ray.util.client.common import ClientActorRef
|
| 36 |
+
from ray.util.client.common import ClientActorClass
|
| 37 |
+
from ray.util.client.common import ClientRemoteFunc
|
| 38 |
+
from ray.util.client.common import ClientRemoteMethod
|
| 39 |
+
from ray.util.client.common import OptionWrapper
|
| 40 |
+
from ray.util.client.common import InProgressSentinel
|
| 41 |
+
import ray.core.generated.ray_client_pb2 as ray_client_pb2
|
| 42 |
+
|
| 43 |
+
import pickle # noqa: F401
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
# NOTE(barakmich): These PickleStubs are really close to
|
| 47 |
+
# the data for an execution, with no arguments. Combine the two?
|
| 48 |
+
class PickleStub(
|
| 49 |
+
NamedTuple(
|
| 50 |
+
"PickleStub",
|
| 51 |
+
[
|
| 52 |
+
("type", str),
|
| 53 |
+
("client_id", str),
|
| 54 |
+
("ref_id", bytes),
|
| 55 |
+
("name", Optional[str]),
|
| 56 |
+
("baseline_options", Optional[Dict]),
|
| 57 |
+
],
|
| 58 |
+
)
|
| 59 |
+
):
|
| 60 |
+
def __reduce__(self):
|
| 61 |
+
# PySpark's namedtuple monkey patch breaks compatibility with
|
| 62 |
+
# cloudpickle. Thus we revert this patch here if it exists.
|
| 63 |
+
return object.__reduce__(self)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
class ClientPickler(cloudpickle.CloudPickler):
|
| 67 |
+
def __init__(self, client_id, *args, **kwargs):
|
| 68 |
+
super().__init__(*args, **kwargs)
|
| 69 |
+
self.client_id = client_id
|
| 70 |
+
|
| 71 |
+
def persistent_id(self, obj):
|
| 72 |
+
if isinstance(obj, RayAPIStub):
|
| 73 |
+
return PickleStub(
|
| 74 |
+
type="Ray",
|
| 75 |
+
client_id=self.client_id,
|
| 76 |
+
ref_id=b"",
|
| 77 |
+
name=None,
|
| 78 |
+
baseline_options=None,
|
| 79 |
+
)
|
| 80 |
+
elif isinstance(obj, ClientObjectRef):
|
| 81 |
+
return PickleStub(
|
| 82 |
+
type="Object",
|
| 83 |
+
client_id=self.client_id,
|
| 84 |
+
ref_id=obj.id,
|
| 85 |
+
name=None,
|
| 86 |
+
baseline_options=None,
|
| 87 |
+
)
|
| 88 |
+
elif isinstance(obj, ClientActorHandle):
|
| 89 |
+
return PickleStub(
|
| 90 |
+
type="Actor",
|
| 91 |
+
client_id=self.client_id,
|
| 92 |
+
ref_id=obj._actor_id.id,
|
| 93 |
+
name=None,
|
| 94 |
+
baseline_options=None,
|
| 95 |
+
)
|
| 96 |
+
elif isinstance(obj, ClientRemoteFunc):
|
| 97 |
+
if obj._ref is None:
|
| 98 |
+
obj._ensure_ref()
|
| 99 |
+
if type(obj._ref) is InProgressSentinel:
|
| 100 |
+
return PickleStub(
|
| 101 |
+
type="RemoteFuncSelfReference",
|
| 102 |
+
client_id=self.client_id,
|
| 103 |
+
ref_id=obj._client_side_ref.id,
|
| 104 |
+
name=None,
|
| 105 |
+
baseline_options=None,
|
| 106 |
+
)
|
| 107 |
+
return PickleStub(
|
| 108 |
+
type="RemoteFunc",
|
| 109 |
+
client_id=self.client_id,
|
| 110 |
+
ref_id=obj._ref.id,
|
| 111 |
+
name=None,
|
| 112 |
+
baseline_options=obj._options,
|
| 113 |
+
)
|
| 114 |
+
elif isinstance(obj, ClientActorClass):
|
| 115 |
+
if obj._ref is None:
|
| 116 |
+
obj._ensure_ref()
|
| 117 |
+
if type(obj._ref) is InProgressSentinel:
|
| 118 |
+
return PickleStub(
|
| 119 |
+
type="RemoteActorSelfReference",
|
| 120 |
+
client_id=self.client_id,
|
| 121 |
+
ref_id=obj._client_side_ref.id,
|
| 122 |
+
name=None,
|
| 123 |
+
baseline_options=None,
|
| 124 |
+
)
|
| 125 |
+
return PickleStub(
|
| 126 |
+
type="RemoteActor",
|
| 127 |
+
client_id=self.client_id,
|
| 128 |
+
ref_id=obj._ref.id,
|
| 129 |
+
name=None,
|
| 130 |
+
baseline_options=obj._options,
|
| 131 |
+
)
|
| 132 |
+
elif isinstance(obj, ClientRemoteMethod):
|
| 133 |
+
return PickleStub(
|
| 134 |
+
type="RemoteMethod",
|
| 135 |
+
client_id=self.client_id,
|
| 136 |
+
ref_id=obj._actor_handle.actor_ref.id,
|
| 137 |
+
name=obj._method_name,
|
| 138 |
+
baseline_options=None,
|
| 139 |
+
)
|
| 140 |
+
elif isinstance(obj, OptionWrapper):
|
| 141 |
+
raise NotImplementedError("Sending a partial option is unimplemented")
|
| 142 |
+
return None
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
class ServerUnpickler(pickle.Unpickler):
|
| 146 |
+
def persistent_load(self, pid):
|
| 147 |
+
assert isinstance(pid, PickleStub)
|
| 148 |
+
if pid.type == "Object":
|
| 149 |
+
return ClientObjectRef(pid.ref_id)
|
| 150 |
+
elif pid.type == "Actor":
|
| 151 |
+
return ClientActorHandle(ClientActorRef(pid.ref_id))
|
| 152 |
+
else:
|
| 153 |
+
raise NotImplementedError("Being passed back an unknown stub")
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def dumps_from_client(obj: Any, client_id: str, protocol=None) -> bytes:
|
| 157 |
+
with io.BytesIO() as file:
|
| 158 |
+
cp = ClientPickler(client_id, file, protocol=protocol)
|
| 159 |
+
cp.dump(obj)
|
| 160 |
+
return file.getvalue()
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def loads_from_server(
|
| 164 |
+
data: bytes, *, fix_imports=True, encoding="ASCII", errors="strict"
|
| 165 |
+
) -> Any:
|
| 166 |
+
if isinstance(data, str):
|
| 167 |
+
raise TypeError("Can't load pickle from unicode string")
|
| 168 |
+
file = io.BytesIO(data)
|
| 169 |
+
return ServerUnpickler(
|
| 170 |
+
file, fix_imports=fix_imports, encoding=encoding, errors=errors
|
| 171 |
+
).load()
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def convert_to_arg(val: Any, client_id: str) -> ray_client_pb2.Arg:
|
| 175 |
+
out = ray_client_pb2.Arg()
|
| 176 |
+
out.local = ray_client_pb2.Arg.Locality.INTERNED
|
| 177 |
+
out.data = dumps_from_client(val, client_id)
|
| 178 |
+
return out
|
.venv/lib/python3.11/site-packages/ray/util/client/logsclient.py
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""This file implements a threaded stream controller to return logs back from
|
| 2 |
+
the ray clientserver.
|
| 3 |
+
"""
|
| 4 |
+
import sys
|
| 5 |
+
import logging
|
| 6 |
+
import queue
|
| 7 |
+
import threading
|
| 8 |
+
import time
|
| 9 |
+
import grpc
|
| 10 |
+
|
| 11 |
+
from typing import TYPE_CHECKING
|
| 12 |
+
|
| 13 |
+
import ray.core.generated.ray_client_pb2 as ray_client_pb2
|
| 14 |
+
import ray.core.generated.ray_client_pb2_grpc as ray_client_pb2_grpc
|
| 15 |
+
|
| 16 |
+
from ray.util.debug import log_once
|
| 17 |
+
|
| 18 |
+
if TYPE_CHECKING:
|
| 19 |
+
from ray.util.client.worker import Worker
|
| 20 |
+
|
| 21 |
+
logger = logging.getLogger(__name__)
|
| 22 |
+
# TODO(barakmich): Running a logger in a logger causes loopback.
|
| 23 |
+
# The client logger need its own root -- possibly this one.
|
| 24 |
+
# For the moment, let's just not propogate beyond this point.
|
| 25 |
+
logger.propagate = False
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class LogstreamClient:
|
| 29 |
+
def __init__(self, client_worker: "Worker", metadata: list):
|
| 30 |
+
"""Initializes a thread-safe log stream over a Ray Client gRPC channel.
|
| 31 |
+
|
| 32 |
+
Args:
|
| 33 |
+
client_worker: The Ray Client worker that manages this client
|
| 34 |
+
metadata: metadata to pass to gRPC requests
|
| 35 |
+
"""
|
| 36 |
+
self.client_worker = client_worker
|
| 37 |
+
self._metadata = metadata
|
| 38 |
+
self.request_queue = queue.Queue()
|
| 39 |
+
self.log_thread = self._start_logthread()
|
| 40 |
+
self.log_thread.start()
|
| 41 |
+
self.last_req = None
|
| 42 |
+
|
| 43 |
+
def _start_logthread(self) -> threading.Thread:
|
| 44 |
+
return threading.Thread(target=self._log_main, args=(), daemon=True)
|
| 45 |
+
|
| 46 |
+
def _log_main(self) -> None:
|
| 47 |
+
reconnecting = False
|
| 48 |
+
while not self.client_worker._in_shutdown:
|
| 49 |
+
if reconnecting:
|
| 50 |
+
# Refresh queue and retry last request
|
| 51 |
+
self.request_queue = queue.Queue()
|
| 52 |
+
if self.last_req:
|
| 53 |
+
self.request_queue.put(self.last_req)
|
| 54 |
+
stub = ray_client_pb2_grpc.RayletLogStreamerStub(self.client_worker.channel)
|
| 55 |
+
try:
|
| 56 |
+
log_stream = stub.Logstream(
|
| 57 |
+
iter(self.request_queue.get, None), metadata=self._metadata
|
| 58 |
+
)
|
| 59 |
+
except ValueError:
|
| 60 |
+
# Trying to use the stub on a cancelled channel will raise
|
| 61 |
+
# ValueError. This should only happen when the data client
|
| 62 |
+
# is attempting to reset the connection -- sleep and try
|
| 63 |
+
# again.
|
| 64 |
+
time.sleep(0.5)
|
| 65 |
+
continue
|
| 66 |
+
try:
|
| 67 |
+
for record in log_stream:
|
| 68 |
+
if record.level < 0:
|
| 69 |
+
self.stdstream(level=record.level, msg=record.msg)
|
| 70 |
+
self.log(level=record.level, msg=record.msg)
|
| 71 |
+
return
|
| 72 |
+
except grpc.RpcError as e:
|
| 73 |
+
reconnecting = self._process_rpc_error(e)
|
| 74 |
+
if not reconnecting:
|
| 75 |
+
return
|
| 76 |
+
|
| 77 |
+
def _process_rpc_error(self, e: grpc.RpcError) -> bool:
|
| 78 |
+
"""
|
| 79 |
+
Processes RPC errors that occur while reading from data stream.
|
| 80 |
+
Returns True if the error can be recovered from, False otherwise.
|
| 81 |
+
"""
|
| 82 |
+
if self.client_worker._can_reconnect(e):
|
| 83 |
+
if log_once("lost_reconnect_logs"):
|
| 84 |
+
logger.warning(
|
| 85 |
+
"Log channel is reconnecting. Logs produced while "
|
| 86 |
+
"the connection was down can be found on the head "
|
| 87 |
+
"node of the cluster in "
|
| 88 |
+
"`ray_client_server_[port].out`"
|
| 89 |
+
)
|
| 90 |
+
logger.debug("Log channel dropped, retrying.")
|
| 91 |
+
time.sleep(0.5)
|
| 92 |
+
return True
|
| 93 |
+
logger.debug("Shutting down log channel.")
|
| 94 |
+
if not self.client_worker._in_shutdown:
|
| 95 |
+
logger.exception("Unexpected exception:")
|
| 96 |
+
return False
|
| 97 |
+
|
| 98 |
+
def log(self, level: int, msg: str):
|
| 99 |
+
"""Log the message from the log stream.
|
| 100 |
+
By default, calls logger.log but this can be overridden.
|
| 101 |
+
|
| 102 |
+
Args:
|
| 103 |
+
level: The loglevel of the received log message
|
| 104 |
+
msg: The content of the message
|
| 105 |
+
"""
|
| 106 |
+
logger.log(level=level, msg=msg)
|
| 107 |
+
|
| 108 |
+
def stdstream(self, level: int, msg: str):
|
| 109 |
+
"""Log the stdout/stderr entry from the log stream.
|
| 110 |
+
By default, calls print but this can be overridden.
|
| 111 |
+
|
| 112 |
+
Args:
|
| 113 |
+
level: The loglevel of the received log message
|
| 114 |
+
msg: The content of the message
|
| 115 |
+
"""
|
| 116 |
+
print_file = sys.stderr if level == -2 else sys.stdout
|
| 117 |
+
print(msg, file=print_file, end="")
|
| 118 |
+
|
| 119 |
+
def set_logstream_level(self, level: int):
|
| 120 |
+
logger.setLevel(level)
|
| 121 |
+
req = ray_client_pb2.LogSettingsRequest()
|
| 122 |
+
req.enabled = True
|
| 123 |
+
req.loglevel = level
|
| 124 |
+
self.request_queue.put(req)
|
| 125 |
+
self.last_req = req
|
| 126 |
+
|
| 127 |
+
def close(self) -> None:
|
| 128 |
+
self.request_queue.put(None)
|
| 129 |
+
if self.log_thread is not None:
|
| 130 |
+
self.log_thread.join()
|
| 131 |
+
|
| 132 |
+
def disable_logs(self) -> None:
|
| 133 |
+
req = ray_client_pb2.LogSettingsRequest()
|
| 134 |
+
req.enabled = False
|
| 135 |
+
self.request_queue.put(req)
|
| 136 |
+
self.last_req = req
|
.venv/lib/python3.11/site-packages/ray/util/client/server/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
from ray.util.client.server.server import serve # noqa
|
.venv/lib/python3.11/site-packages/ray/util/client/server/__main__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
if __name__ == "__main__":
|
| 2 |
+
from ray.util.client.server.server import main
|
| 3 |
+
|
| 4 |
+
main()
|
.venv/lib/python3.11/site-packages/ray/util/client/server/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (268 Bytes). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/util/client/server/__pycache__/__main__.cpython-311.pyc
ADDED
|
Binary file (351 Bytes). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/util/client/server/__pycache__/dataservicer.cpython-311.pyc
ADDED
|
Binary file (23.5 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/util/client/server/__pycache__/logservicer.cpython-311.pyc
ADDED
|
Binary file (9.1 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/util/client/server/__pycache__/proxier.cpython-311.pyc
ADDED
|
Binary file (49.8 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/util/client/server/__pycache__/server.cpython-311.pyc
ADDED
|
Binary file (56.7 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/util/client/server/__pycache__/server_pickler.cpython-311.pyc
ADDED
|
Binary file (7.03 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/util/client/server/__pycache__/server_stubs.cpython-311.pyc
ADDED
|
Binary file (3.36 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/ray/util/client/server/dataservicer.py
ADDED
|
@@ -0,0 +1,416 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import defaultdict
|
| 2 |
+
from ray.util.client.server.server_pickler import loads_from_client
|
| 3 |
+
import ray
|
| 4 |
+
import logging
|
| 5 |
+
import grpc
|
| 6 |
+
from queue import Queue
|
| 7 |
+
import sys
|
| 8 |
+
|
| 9 |
+
from typing import Any, Dict, Iterator, TYPE_CHECKING, Union
|
| 10 |
+
from threading import Event, Lock, Thread
|
| 11 |
+
import time
|
| 12 |
+
|
| 13 |
+
import ray.core.generated.ray_client_pb2 as ray_client_pb2
|
| 14 |
+
import ray.core.generated.ray_client_pb2_grpc as ray_client_pb2_grpc
|
| 15 |
+
from ray.util.client.common import (
|
| 16 |
+
CLIENT_SERVER_MAX_THREADS,
|
| 17 |
+
_propagate_error_in_context,
|
| 18 |
+
OrderedResponseCache,
|
| 19 |
+
)
|
| 20 |
+
from ray.util.debug import log_once
|
| 21 |
+
from ray._private.client_mode_hook import disable_client_hook
|
| 22 |
+
|
| 23 |
+
if TYPE_CHECKING:
|
| 24 |
+
from ray.util.client.server.server import RayletServicer
|
| 25 |
+
|
| 26 |
+
logger = logging.getLogger(__name__)
|
| 27 |
+
|
| 28 |
+
QUEUE_JOIN_SECONDS = 10
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def _get_reconnecting_from_context(context: Any) -> bool:
|
| 32 |
+
"""
|
| 33 |
+
Get `reconnecting` from gRPC metadata, or False if missing.
|
| 34 |
+
"""
|
| 35 |
+
metadata = {k: v for k, v in context.invocation_metadata()}
|
| 36 |
+
val = metadata.get("reconnecting")
|
| 37 |
+
if val is None or val not in ("True", "False"):
|
| 38 |
+
logger.error(
|
| 39 |
+
f'Client connecting with invalid value for "reconnecting": {val}, '
|
| 40 |
+
"This may be because you have a mismatched client and server "
|
| 41 |
+
"version."
|
| 42 |
+
)
|
| 43 |
+
return False
|
| 44 |
+
return val == "True"
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def _should_cache(req: ray_client_pb2.DataRequest) -> bool:
|
| 48 |
+
"""
|
| 49 |
+
Returns True if the response should to the given request should be cached,
|
| 50 |
+
false otherwise. At the moment the only requests we do not cache are:
|
| 51 |
+
- asynchronous gets: These arrive out of order. Skipping caching here
|
| 52 |
+
is fine, since repeating an async get is idempotent
|
| 53 |
+
- acks: Repeating acks is idempotent
|
| 54 |
+
- clean up requests: Also idempotent, and client has likely already
|
| 55 |
+
wrapped up the data connection by this point.
|
| 56 |
+
- puts: We should only cache when we receive the final chunk, since
|
| 57 |
+
any earlier chunks won't generate a response
|
| 58 |
+
- tasks: We should only cache when we receive the final chunk,
|
| 59 |
+
since any earlier chunks won't generate a response
|
| 60 |
+
"""
|
| 61 |
+
req_type = req.WhichOneof("type")
|
| 62 |
+
if req_type == "get" and req.get.asynchronous:
|
| 63 |
+
return False
|
| 64 |
+
if req_type == "put":
|
| 65 |
+
return req.put.chunk_id == req.put.total_chunks - 1
|
| 66 |
+
if req_type == "task":
|
| 67 |
+
return req.task.chunk_id == req.task.total_chunks - 1
|
| 68 |
+
return req_type not in ("acknowledge", "connection_cleanup")
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def fill_queue(
|
| 72 |
+
grpc_input_generator: Iterator[ray_client_pb2.DataRequest],
|
| 73 |
+
output_queue: "Queue[Union[ray_client_pb2.DataRequest, ray_client_pb2.DataResponse]]", # noqa: E501
|
| 74 |
+
) -> None:
|
| 75 |
+
"""
|
| 76 |
+
Pushes incoming requests to a shared output_queue.
|
| 77 |
+
"""
|
| 78 |
+
try:
|
| 79 |
+
for req in grpc_input_generator:
|
| 80 |
+
output_queue.put(req)
|
| 81 |
+
except grpc.RpcError as e:
|
| 82 |
+
logger.debug(
|
| 83 |
+
"closing dataservicer reader thread "
|
| 84 |
+
f"grpc error reading request_iterator: {e}"
|
| 85 |
+
)
|
| 86 |
+
finally:
|
| 87 |
+
# Set the sentinel value for the output_queue
|
| 88 |
+
output_queue.put(None)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
class ChunkCollector:
|
| 92 |
+
"""
|
| 93 |
+
Helper class for collecting chunks from PutObject or ClientTask messages
|
| 94 |
+
"""
|
| 95 |
+
|
| 96 |
+
def __init__(self):
|
| 97 |
+
self.curr_req_id = None
|
| 98 |
+
self.last_seen_chunk_id = -1
|
| 99 |
+
self.data = bytearray()
|
| 100 |
+
|
| 101 |
+
def add_chunk(
|
| 102 |
+
self,
|
| 103 |
+
req: ray_client_pb2.DataRequest,
|
| 104 |
+
chunk: Union[ray_client_pb2.PutRequest, ray_client_pb2.ClientTask],
|
| 105 |
+
):
|
| 106 |
+
if self.curr_req_id is not None and self.curr_req_id != req.req_id:
|
| 107 |
+
raise RuntimeError(
|
| 108 |
+
"Expected to receive a chunk from request with id "
|
| 109 |
+
f"{self.curr_req_id}, but found {req.req_id} instead."
|
| 110 |
+
)
|
| 111 |
+
self.curr_req_id = req.req_id
|
| 112 |
+
next_chunk = self.last_seen_chunk_id + 1
|
| 113 |
+
if chunk.chunk_id < next_chunk:
|
| 114 |
+
# Repeated chunk, ignore
|
| 115 |
+
return
|
| 116 |
+
if chunk.chunk_id > next_chunk:
|
| 117 |
+
raise RuntimeError(
|
| 118 |
+
f"A chunk {chunk.chunk_id} of request {req.req_id} was "
|
| 119 |
+
"received out of order."
|
| 120 |
+
)
|
| 121 |
+
elif chunk.chunk_id == self.last_seen_chunk_id + 1:
|
| 122 |
+
self.data.extend(chunk.data)
|
| 123 |
+
self.last_seen_chunk_id = chunk.chunk_id
|
| 124 |
+
return chunk.chunk_id + 1 == chunk.total_chunks
|
| 125 |
+
|
| 126 |
+
def reset(self):
|
| 127 |
+
self.curr_req_id = None
|
| 128 |
+
self.last_seen_chunk_id = -1
|
| 129 |
+
self.data = bytearray()
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
class DataServicer(ray_client_pb2_grpc.RayletDataStreamerServicer):
|
| 133 |
+
def __init__(self, basic_service: "RayletServicer"):
|
| 134 |
+
self.basic_service = basic_service
|
| 135 |
+
self.clients_lock = Lock()
|
| 136 |
+
self.num_clients = 0 # guarded by self.clients_lock
|
| 137 |
+
# dictionary mapping client_id's to the last time they connected
|
| 138 |
+
self.client_last_seen: Dict[str, float] = {}
|
| 139 |
+
# dictionary mapping client_id's to their reconnect grace periods
|
| 140 |
+
self.reconnect_grace_periods: Dict[str, float] = {}
|
| 141 |
+
# dictionary mapping client_id's to their response cache
|
| 142 |
+
self.response_caches: Dict[str, OrderedResponseCache] = defaultdict(
|
| 143 |
+
OrderedResponseCache
|
| 144 |
+
)
|
| 145 |
+
# stopped event, useful for signals that the server is shut down
|
| 146 |
+
self.stopped = Event()
|
| 147 |
+
# Helper for collecting chunks from PutObject calls. Assumes that
|
| 148 |
+
# that put requests from different objects aren't interleaved.
|
| 149 |
+
self.put_request_chunk_collector = ChunkCollector()
|
| 150 |
+
# Helper for collecting chunks from ClientTask calls. Assumes that
|
| 151 |
+
# schedule requests from different remote calls aren't interleaved.
|
| 152 |
+
self.client_task_chunk_collector = ChunkCollector()
|
| 153 |
+
|
| 154 |
+
def Datapath(self, request_iterator, context):
    """Main bidirectional-stream handler: consumes DataRequests from the
    client and yields DataResponses.

    Requests are pulled through an internal queue filled by a daemon
    thread, so that asynchronous get results (pushed onto the same queue
    as ready-made DataResponses) can be interleaved with regular request
    handling. On exit, the client session is cleaned up — either
    immediately (graceful shutdown / unrecoverable error) or after the
    client's reconnect grace period elapses.
    """
    start_time = time.time()
    # set to True if client shuts down gracefully
    cleanup_requested = False
    metadata = {k: v for k, v in context.invocation_metadata()}
    client_id = metadata.get("client_id")
    if client_id is None:
        logger.error("Client connecting with no client_id")
        return
    logger.debug(f"New data connection from client {client_id}: ")
    accepted_connection = self._init(client_id, context, start_time)
    # defaultdict: accessing here creates the cache if it doesn't exist yet.
    response_cache = self.response_caches[client_id]
    # Set to False if client requests a reconnect grace period of 0
    reconnect_enabled = True
    if not accepted_connection:
        return
    try:
        request_queue = Queue()
        # Daemon thread moves items off the gRPC iterator onto the queue,
        # pushing a terminating None when the iterator is exhausted.
        queue_filler_thread = Thread(
            target=fill_queue, daemon=True, args=(request_iterator, request_queue)
        )
        queue_filler_thread.start()
        """For non `async get` requests, this loop yields immediately
        For `async get` requests, this loop:
            1) does not yield, it just continues
            2) When the result is ready, it yields
        """
        # iter(..., None): loop until the filler thread's None sentinel.
        for req in iter(request_queue.get, None):
            if isinstance(req, ray_client_pb2.DataResponse):
                # Early shortcut if this is the result of an async get.
                yield req
                continue

            assert isinstance(req, ray_client_pb2.DataRequest)
            if _should_cache(req) and reconnect_enabled:
                cached_resp = response_cache.check_cache(req.req_id)
                if isinstance(cached_resp, Exception):
                    # Cache state is invalid, raise exception
                    raise cached_resp
                if cached_resp is not None:
                    # Replay the previously computed response (reconnect).
                    yield cached_resp
                    continue

            resp = None
            req_type = req.WhichOneof("type")
            if req_type == "init":
                resp_init = self.basic_service.Init(req.init)
                resp = ray_client_pb2.DataResponse(
                    init=resp_init,
                )
                with self.clients_lock:
                    self.reconnect_grace_periods[
                        client_id
                    ] = req.init.reconnect_grace_period
                # A grace period of 0 disables response caching entirely.
                if req.init.reconnect_grace_period == 0:
                    reconnect_enabled = False

            elif req_type == "get":
                if req.get.asynchronous:
                    get_resp = self.basic_service._async_get_object(
                        req.get, client_id, req.req_id, request_queue
                    )
                    if get_resp is None:
                        # Skip sending a response for this request and
                        # continue to the next request. The response for
                        # this request will be sent when the object is
                        # ready.
                        continue
                else:
                    get_resp = self.basic_service._get_object(req.get, client_id)
                resp = ray_client_pb2.DataResponse(get=get_resp)
            elif req_type == "put":
                if not self.put_request_chunk_collector.add_chunk(req, req.put):
                    # Put request still in progress
                    continue
                put_resp = self.basic_service._put_object(
                    self.put_request_chunk_collector.data,
                    req.put.client_ref_id,
                    client_id,
                    req.put.owner_id,
                )
                self.put_request_chunk_collector.reset()
                resp = ray_client_pb2.DataResponse(put=put_resp)
            elif req_type == "release":
                released = []
                for rel_id in req.release.ids:
                    rel = self.basic_service.release(client_id, rel_id)
                    released.append(rel)
                resp = ray_client_pb2.DataResponse(
                    release=ray_client_pb2.ReleaseResponse(ok=released)
                )
            elif req_type == "connection_info":
                resp = ray_client_pb2.DataResponse(
                    connection_info=self._build_connection_response()
                )
            elif req_type == "prep_runtime_env":
                with self.clients_lock:
                    resp_prep = self.basic_service.PrepRuntimeEnv(
                        req.prep_runtime_env
                    )
                resp = ray_client_pb2.DataResponse(prep_runtime_env=resp_prep)
            elif req_type == "connection_cleanup":
                # Graceful shutdown: skip the reconnect grace period below.
                cleanup_requested = True
                cleanup_resp = ray_client_pb2.ConnectionCleanupResponse()
                resp = ray_client_pb2.DataResponse(connection_cleanup=cleanup_resp)
            elif req_type == "acknowledge":
                # Clean up acknowledged cache entries
                response_cache.cleanup(req.acknowledge.req_id)
                continue
            elif req_type == "task":
                with self.clients_lock:
                    task = req.task
                    if not self.client_task_chunk_collector.add_chunk(req, task):
                        # Not all serialized arguments have arrived
                        continue
                    arglist, kwargs = loads_from_client(
                        self.client_task_chunk_collector.data, self.basic_service
                    )
                    self.client_task_chunk_collector.reset()
                    resp_ticket = self.basic_service.Schedule(
                        req.task, arglist, kwargs, context
                    )
                    resp = ray_client_pb2.DataResponse(task_ticket=resp_ticket)
                    # Drop deserialized args promptly to release references.
                    del arglist
                    del kwargs
            elif req_type == "terminate":
                with self.clients_lock:
                    response = self.basic_service.Terminate(req.terminate, context)
                    resp = ray_client_pb2.DataResponse(terminate=response)
            elif req_type == "list_named_actors":
                with self.clients_lock:
                    response = self.basic_service.ListNamedActors(
                        req.list_named_actors
                    )
                    resp = ray_client_pb2.DataResponse(list_named_actors=response)
            else:
                raise Exception(
                    f"Unreachable code: Request type "
                    f"{req_type} not handled in Datapath"
                )
            resp.req_id = req.req_id
            if _should_cache(req) and reconnect_enabled:
                response_cache.update_cache(req.req_id, resp)
            yield resp
    except Exception as e:
        logger.exception("Error in data channel:")
        recoverable = _propagate_error_in_context(e, context)
        invalid_cache = response_cache.invalidate(e)
        if not recoverable or invalid_cache:
            context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
            # Connection isn't recoverable, skip cleanup
            cleanup_requested = True
    finally:
        logger.debug(f"Stream is broken with client {client_id}")
        queue_filler_thread.join(QUEUE_JOIN_SECONDS)
        if queue_filler_thread.is_alive():
            logger.error(
                "Queue filler thread failed to join before timeout: {}".format(
                    QUEUE_JOIN_SECONDS
                )
            )
        cleanup_delay = self.reconnect_grace_periods.get(client_id)
        if not cleanup_requested and cleanup_delay is not None:
            logger.debug(
                "Cleanup wasn't requested, delaying cleanup by"
                f"{cleanup_delay} seconds."
            )
            # Delay cleanup, since client may attempt a reconnect
            # Wait on the "stopped" event in case the grpc server is
            # stopped and we can clean up earlier.
            self.stopped.wait(timeout=cleanup_delay)
        else:
            logger.debug("Cleanup was requested, cleaning up immediately.")
        with self.clients_lock:
            if client_id not in self.client_last_seen:
                logger.debug("Connection already cleaned up.")
                # Some other connection has already cleaned up this
                # client's session. This can happen if the client
                # reconnects and then gracefully shuts down immediately.
                return
            last_seen = self.client_last_seen[client_id]
            if last_seen > start_time:
                # The client successfully reconnected and updated
                # last seen some time during the grace period
                logger.debug("Client reconnected, skipping cleanup")
                return
            # Either the client shut down gracefully, or the client
            # failed to reconnect within the grace period. Clean up
            # the connection.
            self.basic_service.release_all(client_id)
            del self.client_last_seen[client_id]
            if client_id in self.reconnect_grace_periods:
                del self.reconnect_grace_periods[client_id]
            if client_id in self.response_caches:
                del self.response_caches[client_id]
            self.num_clients -= 1
            logger.debug(
                f"Removed client {client_id}, " f"remaining={self.num_clients}"
            )

            # It's important to keep the Ray shutdown
            # within this locked context or else Ray could hang.
            # NOTE: it is strange to start ray in server.py but shut it
            # down here. Consider consolidating ray lifetime management.
            with disable_client_hook():
                if self.num_clients == 0:
                    logger.debug("Shutting down ray.")
                    ray.shutdown()
|
| 363 |
+
def _init(self, client_id: str, context: Any, start_time: float):
    """
    Checks if resources allow for another client.
    Returns a boolean indicating if initialization was successful.
    """
    with self.clients_lock:
        is_reconnect = _get_reconnecting_from_context(context)
        capacity = int(CLIENT_SERVER_MAX_THREADS / 2)

        # Reject new connections once the configured capacity is reached.
        if self.num_clients >= capacity:
            logger.warning(
                f"[Data Servicer]: Num clients {self.num_clients} "
                f"has reached the threshold {capacity}. "
                f"Rejecting client: {client_id}. "
            )
            if log_once("client_threshold"):
                logger.warning(
                    "You can configure the client connection "
                    "threshold by setting the "
                    "RAY_CLIENT_SERVER_MAX_THREADS env var "
                    f"(currently set to {CLIENT_SERVER_MAX_THREADS})."
                )
            context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
            return False

        already_known = client_id in self.client_last_seen
        if is_reconnect and not already_known:
            # The session was already cleaned up before the client
            # managed to reconnect.
            context.set_code(grpc.StatusCode.NOT_FOUND)
            context.set_details(
                "Attempted to reconnect to a session that has already "
                "been cleaned up."
            )
            return False

        if already_known:
            logger.debug(f"Client {client_id} has reconnected.")
        else:
            # Brand-new client: count it.
            self.num_clients += 1
            logger.debug(
                f"Accepted data connection from {client_id}. "
                f"Total clients: {self.num_clients}"
            )
        self.client_last_seen[client_id] = start_time
    return True
|
| 406 |
+
def _build_connection_response(self):
    """Build a ConnectionInfoResponse describing this server's state."""
    # Snapshot the client count under the lock; build the message outside.
    with self.clients_lock:
        client_count = self.num_clients
    major, minor, micro = sys.version_info[:3]
    return ray_client_pb2.ConnectionInfoResponse(
        num_clients=client_count,
        python_version=f"{major}.{minor}.{micro}",
        ray_version=ray.__version__,
        ray_commit=ray.__commit__,
    )